//===- ScalarEvolution.cpp - Scalar Evolution Analysis --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library.  First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle.  We only create one SCEV of a particular shape, so
// pointer-comparisons for equality are legal.
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (i.e., a PHI node).  If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression.  These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//
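
// A quick note on the chains-of-recurrences notation used throughout this
// file: the induction variable of
//
//   for (i = 0; i != n; i += 4)
//
// is the affine add recurrence {0,+,4}, read "starts at 0, advances by 4 per
// iteration".  The folders below simplify by rewriting on this algebra; for
// example, two recurrences over the same loop add componentwise:
//
//   {a,+,b} + {c,+,d}  ==  {a+c,+,b+d}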
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <map>
#include <memory>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "scalar-evolution"

STATISTIC(NumArrayLenItCounts,
          "Number of trip counts computed with array length");
STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");
static cl::opt<unsigned>
MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                        cl::desc("Maximum number of iterations SCEV will "
                                 "symbolically execute a constant "
                                 "derived loop"),
                        cl::init(100));

// FIXME: Enable this with EXPENSIVE_CHECKS when the test suite is clean.
static cl::opt<bool> VerifySCEV(
    "verify-scev", cl::Hidden,
    cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"));
static cl::opt<bool>
    VerifySCEVMap("verify-scev-maps", cl::Hidden,
                  cl::desc("Verify no dangling value in ScalarEvolution's "
                           "ExprValueMap (slow)"));

static cl::opt<bool> VerifyIR(
    "scev-verify-ir", cl::Hidden,
    cl::desc("Verify IR correctness when making sensitive SCEV queries (slow)"),
    cl::init(false));

static cl::opt<unsigned> MulOpsInlineThreshold(
    "scev-mulops-inline-threshold", cl::Hidden,
    cl::desc("Threshold for inlining multiplication operands into a SCEV"),
    cl::init(32));

static cl::opt<unsigned> AddOpsInlineThreshold(
    "scev-addops-inline-threshold", cl::Hidden,
    cl::desc("Threshold for inlining addition operands into a SCEV"),
    cl::init(500));

static cl::opt<unsigned> MaxSCEVCompareDepth(
    "scalar-evolution-max-scev-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV complexity comparisons"),
    cl::init(32));

static cl::opt<unsigned> MaxSCEVOperationsImplicationDepth(
    "scalar-evolution-max-scev-operations-implication-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV operations implication analysis"),
    cl::init(2));

static cl::opt<unsigned> MaxValueCompareDepth(
    "scalar-evolution-max-value-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive value complexity comparisons"),
    cl::init(2));

static cl::opt<unsigned>
    MaxArithDepth("scalar-evolution-max-arith-depth", cl::Hidden,
                  cl::desc("Maximum depth of recursive arithmetics"),
                  cl::init(32));

static cl::opt<unsigned> MaxConstantEvolvingDepth(
    "scalar-evolution-max-constant-evolving-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive constant evolving"), cl::init(32));

static cl::opt<unsigned>
    MaxCastDepth("scalar-evolution-max-cast-depth", cl::Hidden,
                 cl::desc("Maximum depth of recursive SExt/ZExt/Trunc"),
                 cl::init(8));

static cl::opt<unsigned>
    MaxAddRecSize("scalar-evolution-max-add-rec-size", cl::Hidden,
                  cl::desc("Max coefficients in AddRec during evolving"),
                  cl::init(16));

static cl::opt<unsigned>
    HugeExprThreshold("scalar-evolution-huge-expr-threshold", cl::Hidden,
                      cl::desc("Size of the expression which is considered huge"),
                      cl::init(4096));
//===----------------------------------------------------------------------===//
//                           SCEV class definitions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Implementation of the SCEV class.
//

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void SCEV::dump() const {
  print(dbgs());
  dbgs() << '\n';
}
#endif
void SCEV::print(raw_ostream &OS) const {
  switch (static_cast<SCEVTypes>(getSCEVType())) {
  case scConstant:
    cast<SCEVConstant>(this)->getValue()->printAsOperand(OS, false);
    return;
  case scTruncate: {
    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this);
    const SCEV *Op = Trunc->getOperand();
    OS << "(trunc " << *Op->getType() << " " << *Op << " to "
       << *Trunc->getType() << ")";
    return;
  }
  case scZeroExtend: {
    const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(this);
    const SCEV *Op = ZExt->getOperand();
    OS << "(zext " << *Op->getType() << " " << *Op << " to "
       << *ZExt->getType() << ")";
    return;
  }
  case scSignExtend: {
    const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(this);
    const SCEV *Op = SExt->getOperand();
    OS << "(sext " << *Op->getType() << " " << *Op << " to "
       << *SExt->getType() << ")";
    return;
  }
  case scAddRecExpr: {
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(this);
    OS << "{" << *AR->getOperand(0);
    for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i)
      OS << ",+," << *AR->getOperand(i);
    OS << "}<";
    if (AR->hasNoUnsignedWrap())
      OS << "nuw><";
    if (AR->hasNoSignedWrap())
      OS << "nsw><";
    if (AR->hasNoSelfWrap() &&
        !AR->getNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW)))
      OS << "nw><";
    AR->getLoop()->getHeader()->printAsOperand(OS, /*PrintType=*/false);
    OS << ">";
    return;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr: {
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this);
    const char *OpStr = nullptr;
    switch (NAry->getSCEVType()) {
    case scAddExpr: OpStr = " + "; break;
    case scMulExpr: OpStr = " * "; break;
    case scUMaxExpr: OpStr = " umax "; break;
    case scSMaxExpr: OpStr = " smax "; break;
    case scUMinExpr:
      OpStr = " umin ";
      break;
    case scSMinExpr:
      OpStr = " smin ";
      break;
    }
    OS << "(";
    for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
         I != E; ++I) {
      OS << **I;
      if (std::next(I) != E)
        OS << OpStr;
    }
    OS << ")";
    switch (NAry->getSCEVType()) {
    case scAddExpr:
    case scMulExpr:
      if (NAry->hasNoUnsignedWrap())
        OS << "<nuw>";
      if (NAry->hasNoSignedWrap())
        OS << "<nsw>";
    }
    return;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(this);
    OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")";
    return;
  }
  case scUnknown: {
    const SCEVUnknown *U = cast<SCEVUnknown>(this);
    Type *AllocTy;
    if (U->isSizeOf(AllocTy)) {
      OS << "sizeof(" << *AllocTy << ")";
      return;
    }
    if (U->isAlignOf(AllocTy)) {
      OS << "alignof(" << *AllocTy << ")";
      return;
    }

    Type *CTy;
    Constant *FieldNo;
    if (U->isOffsetOf(CTy, FieldNo)) {
      OS << "offsetof(" << *CTy << ", ";
      FieldNo->printAsOperand(OS, false);
      OS << ")";
      return;
    }

    // Otherwise just print it normally.
    U->getValue()->printAsOperand(OS, false);
    return;
  }
  case scCouldNotCompute:
    OS << "***COULDNOTCOMPUTE***";
    return;
  }
  llvm_unreachable("Unknown SCEV kind!");
}
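
// For reference, the printed syntax produced above looks like:
//
//   {0,+,4}<nuw><%for.body>   - an affine addrec in loop %for.body that is
//                               known not to wrap unsigned
//   (zext i32 %x to i64)      - a zero-extension of the unknown value %x
//   (%a + %b)<nsw>            - an add known not to wrap signed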
Type *SCEV::getType() const {
  switch (static_cast<SCEVTypes>(getSCEVType())) {
  case scConstant:
    return cast<SCEVConstant>(this)->getType();
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return cast<SCEVCastExpr>(this)->getType();
  case scAddRecExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr:
    return cast<SCEVNAryExpr>(this)->getType();
  case scAddExpr:
    return cast<SCEVAddExpr>(this)->getType();
  case scUDivExpr:
    return cast<SCEVUDivExpr>(this)->getType();
  case scUnknown:
    return cast<SCEVUnknown>(this)->getType();
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}
bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isMinusOne();
  return false;
}
bool SCEV::isNonConstantNegative() const {
  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(this);
  if (!Mul) return false;

  // If there is a constant factor, it will be first.
  const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
  if (!SC) return false;

  // Return true if the value is negative; this matches things like (-42 * V).
  return SC->getAPInt().isNegative();
}
SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(FoldingSetNodeIDRef(), scCouldNotCompute, 0) {}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}
const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
  FoldingSetNodeID ID;
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getConstant(const APInt &Val) {
  return getConstant(ConstantInt::get(getContext(), Val));
}

const SCEV *
ScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) {
  IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
  return getConstant(ConstantInt::get(ITy, V, isSigned));
}
SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID,
                           unsigned SCEVTy, const SCEV *op, Type *ty)
  : SCEV(ID, SCEVTy, computeExpressionSize(op)), Op(op), Ty(ty) {}

SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
                                   const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scTruncate, op, ty) {
  assert(Op->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate non-integer value!");
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scZeroExtend, op, ty) {
  assert(Op->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot zero extend non-integer value!");
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scSignExtend, op, ty) {
  assert(Op->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot sign extend non-integer value!");
}
void SCEVUnknown::deleted() {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Release the value.
  setValPtr(nullptr);
}

void SCEVUnknown::allUsesReplacedWith(Value *New) {
  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Update this SCEVUnknown to point to the new value. This is needed
  // because there may still be outstanding SCEVs which still point to
  // this SCEVUnknown.
  setValPtr(New);
}
bool SCEVUnknown::isSizeOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue() &&
            CE->getNumOperands() == 2)
          if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
            if (CI->isOne()) {
              AllocTy = cast<PointerType>(CE->getOperand(0)->getType())
                            ->getElementType();
              return true;
            }

  return false;
}
bool SCEVUnknown::isAlignOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue()) {
          Type *Ty =
            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          if (StructType *STy = dyn_cast<StructType>(Ty))
            if (!STy->isPacked() &&
                CE->getNumOperands() == 3 &&
                CE->getOperand(1)->isNullValue()) {
              if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
                if (CI->isOne() &&
                    STy->getNumElements() == 2 &&
                    STy->getElementType(0)->isIntegerTy(1)) {
                  AllocTy = STy->getElementType(1);
                  return true;
                }
            }
        }

  return false;
}
bool SCEVUnknown::isOffsetOf(Type *&CTy, Constant *&FieldNo) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getNumOperands() == 3 &&
            CE->getOperand(0)->isNullValue() &&
            CE->getOperand(1)->isNullValue()) {
          Type *Ty =
            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          // Ignore vector types here so that ScalarEvolutionExpander doesn't
          // emit getelementptrs that index into vectors.
          if (Ty->isStructTy() || Ty->isArrayTy()) {
            CTy = Ty;
            FieldNo = CE->getOperand(2);
            return true;
          }
        }

  return false;
}
//===----------------------------------------------------------------------===//
//                               SCEV Utilities
//===----------------------------------------------------------------------===//
/// Compare the two values \p LV and \p RV in terms of their "complexity" where
/// "complexity" is a partial (and somewhat ad-hoc) relation used to order
/// operands in SCEV expressions.  \p EqCache is a set of pairs of values that
/// have been previously deemed to be "equally complex" by this routine.  It is
/// intended to avoid exponential time complexity in cases like:
///
///   %a = f(%x, %y)
///   %b = f(%a, %a)
///   %c = f(%b, %b)
///
///   %d = f(%x, %y)
///   %e = f(%d, %d)
///   %f = f(%e, %e)
///
///   CompareValueComplexity(%f, %c)
///
/// Since we do not continue running this routine on expression trees once we
/// have seen unequal values, there is no need to track them in the cache.
static int
CompareValueComplexity(EquivalenceClasses<const Value *> &EqCacheValue,
                       const LoopInfo *const LI, Value *LV, Value *RV,
                       unsigned Depth) {
  if (Depth > MaxValueCompareDepth || EqCacheValue.isEquivalent(LV, RV))
    return 0;

  // Order pointer values after integer values. This helps SCEVExpander form
  // GEPs.
  bool LIsPointer = LV->getType()->isPointerTy(),
       RIsPointer = RV->getType()->isPointerTy();
  if (LIsPointer != RIsPointer)
    return (int)LIsPointer - (int)RIsPointer;

  // Compare getValueID values.
  unsigned LID = LV->getValueID(), RID = RV->getValueID();
  if (LID != RID)
    return (int)LID - (int)RID;

  // Sort arguments by their position.
  if (const auto *LA = dyn_cast<Argument>(LV)) {
    const auto *RA = cast<Argument>(RV);
    unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
    return (int)LArgNo - (int)RArgNo;
  }

  if (const auto *LGV = dyn_cast<GlobalValue>(LV)) {
    const auto *RGV = cast<GlobalValue>(RV);

    const auto IsGVNameSemantic = [&](const GlobalValue *GV) {
      auto LT = GV->getLinkage();
      return !(GlobalValue::isPrivateLinkage(LT) ||
               GlobalValue::isInternalLinkage(LT));
    };

    // Use the names to distinguish the two values, but only if the
    // names are semantically important.
    if (IsGVNameSemantic(LGV) && IsGVNameSemantic(RGV))
      return LGV->getName().compare(RGV->getName());
  }

  // For instructions, compare their loop depth, and their operand count.  This
  // is pretty loose.
  if (const auto *LInst = dyn_cast<Instruction>(LV)) {
    const auto *RInst = cast<Instruction>(RV);

    // Compare loop depths.
    const BasicBlock *LParent = LInst->getParent(),
                     *RParent = RInst->getParent();
    if (LParent != RParent) {
      unsigned LDepth = LI->getLoopDepth(LParent),
               RDepth = LI->getLoopDepth(RParent);
      if (LDepth != RDepth)
        return (int)LDepth - (int)RDepth;
    }

    // Compare the number of operands.
    unsigned LNumOps = LInst->getNumOperands(),
             RNumOps = RInst->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned Idx : seq(0u, LNumOps)) {
      int Result =
          CompareValueComplexity(EqCacheValue, LI, LInst->getOperand(Idx),
                                 RInst->getOperand(Idx), Depth + 1);
      if (Result != 0)
        return Result;
    }
  }

  EqCacheValue.unionSets(LV, RV);
  return 0;
}
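
// For example, under this ordering an integer argument always sorts before a
// pointer argument (pointers are ordered after integers above), and two
// arguments of the same kind are ordered by position.  None of these rules
// consult object addresses, which is what keeps the resulting order
// deterministic across runs.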
// Return negative, zero, or positive, if LHS is less than, equal to, or greater
// than RHS, respectively. A three-way result allows recursive comparisons to be
// more efficient.
static int CompareSCEVComplexity(
    EquivalenceClasses<const SCEV *> &EqCacheSCEV,
    EquivalenceClasses<const Value *> &EqCacheValue,
    const LoopInfo *const LI, const SCEV *LHS, const SCEV *RHS,
    DominatorTree &DT, unsigned Depth = 0) {
  // Fast-path: SCEVs are uniqued so we can do a quick equality check.
  if (LHS == RHS)
    return 0;

  // Primarily, sort the SCEVs by their getSCEVType().
  unsigned LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
  if (LType != RType)
    return (int)LType - (int)RType;

  if (Depth > MaxSCEVCompareDepth || EqCacheSCEV.isEquivalent(LHS, RHS))
    return 0;
  // Aside from the getSCEVType() ordering, the particular ordering
  // isn't very important except that it's beneficial to be consistent,
  // so that (a + b) and (b + a) don't end up as different expressions.
  switch (static_cast<SCEVTypes>(LType)) {
  case scUnknown: {
    const SCEVUnknown *LU = cast<SCEVUnknown>(LHS);
    const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

    int X = CompareValueComplexity(EqCacheValue, LI, LU->getValue(),
                                   RU->getValue(), Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scConstant: {
    const SCEVConstant *LC = cast<SCEVConstant>(LHS);
    const SCEVConstant *RC = cast<SCEVConstant>(RHS);

    // Compare constant values.
    const APInt &LA = LC->getAPInt();
    const APInt &RA = RC->getAPInt();
    unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
    if (LBitWidth != RBitWidth)
      return (int)LBitWidth - (int)RBitWidth;
    return LA.ult(RA) ? -1 : 1;
  }

  case scAddRecExpr: {
    const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS);
    const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);

    // There is always a dominance between two recs that are used by one SCEV,
    // so we can safely sort recs by loop header dominance. We require such
    // order in getAddExpr.
    const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
    if (LLoop != RLoop) {
      const BasicBlock *LHead = LLoop->getHeader(), *RHead = RLoop->getHeader();
      assert(LHead != RHead && "Two loops share the same header?");
      if (DT.dominates(LHead, RHead))
        return 1;
      else
        assert(DT.dominates(RHead, LHead) &&
               "No dominance between recurrences used by one SCEV?");
      return -1;
    }

    // Addrec complexity grows with operand count.
    unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    // Lexicographically compare.
    for (unsigned i = 0; i != LNumOps; ++i) {
      int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                    LA->getOperand(i), RA->getOperand(i), DT,
                                    Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.unionSets(LHS, RHS);
    return 0;
  }

  case scAddExpr:
  case scMulExpr:
  case scSMaxExpr:
  case scUMaxExpr:
  case scSMinExpr:
  case scUMinExpr: {
    const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS);
    const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);

    // Lexicographically compare n-ary expressions.
    unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned i = 0; i != LNumOps; ++i) {
      int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                    LC->getOperand(i), RC->getOperand(i), DT,
                                    Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.unionSets(LHS, RHS);
    return 0;
  }

  case scUDivExpr: {
    const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS);
    const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);

    // Lexicographically compare udiv expressions.
    int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getLHS(),
                                  RC->getLHS(), DT, Depth + 1);
    if (X != 0)
      return X;
    X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getRHS(),
                              RC->getRHS(), DT, Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scTruncate:
  case scZeroExtend:
  case scSignExtend: {
    const SCEVCastExpr *LC = cast<SCEVCastExpr>(LHS);
    const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);

    // Compare cast expressions by operand.
    int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                  LC->getOperand(), RC->getOperand(), DT,
                                  Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}
/// Given a list of SCEV objects, order them by their complexity, and group
/// objects of the same complexity together by value.  When this routine is
/// finished, we know that any duplicates in the vector are consecutive and that
/// complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine.  In other words, we don't want the results of
/// this to depend on where the addresses of various SCEV objects happened to
/// end up in memory.
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
                              LoopInfo *LI, DominatorTree &DT) {
  if (Ops.size() < 2) return;  // Noop

  EquivalenceClasses<const SCEV *> EqCacheSCEV;
  EquivalenceClasses<const Value *> EqCacheValue;
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
    if (CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, RHS, LHS, DT) < 0)
      std::swap(LHS, RHS);
    return;
  }

  // Do the rough sort by complexity.
  llvm::stable_sort(Ops, [&](const SCEV *LHS, const SCEV *RHS) {
    return CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LHS, RHS, DT) <
           0;
  });

  // Now that we are sorted by complexity, group elements of the same
  // complexity.  Note that this is, at worst, N^2, but the vector is likely to
  // be extremely short in practice.  Note that we take this approach because we
  // do not want to depend on the addresses of the objects we are grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i;   // no need to rescan it.
        if (i == e-2) return;  // Done!
      }
    }
  }
}
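
// For example, given the operand list (%x + 4 + %x + %y), this routine sorts
// the constant first and makes the two occurrences of %x adjacent, e.g.
// (4 + %x + %x + %y), so a single linear scan in the folders (getAddExpr and
// friends) can collapse the duplicates into (4 + (2 * %x) + %y).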
// Returns the size of the SCEV S.
static inline int sizeOfSCEV(const SCEV *S) {
  struct FindSCEVSize {
    int Size = 0;

    FindSCEVSize() = default;

    bool follow(const SCEV *S) {
      ++Size;
      // Keep looking at all operands of S.
      return true;
    }

    bool isDone() const {
      return false;
    }
  };

  FindSCEVSize F;
  SCEVTraversal<FindSCEVSize> ST(F);
  ST.visitAll(S);
  return F.Size;
}
/// Returns true if the subtree of \p S contains at least HugeExprThreshold
/// nodes.
static bool isHugeExpression(const SCEV *S) {
  return S->getExpressionSize() >= HugeExprThreshold;
}

/// Returns true if \p Ops contains a huge SCEV (see definition above).
static bool hasHugeExpression(ArrayRef<const SCEV *> Ops) {
  return any_of(Ops, isHugeExpression);
}
namespace {

struct SCEVDivision : public SCEVVisitor<SCEVDivision, void> {
public:
  // Computes the Quotient and Remainder of the division of Numerator by
  // Denominator.
  static void divide(ScalarEvolution &SE, const SCEV *Numerator,
                     const SCEV *Denominator, const SCEV **Quotient,
                     const SCEV **Remainder) {
    assert(Numerator && Denominator && "Uninitialized SCEV");

    SCEVDivision D(SE, Numerator, Denominator);

    // Check for the trivial case here to avoid having to check for it in the
    // rest of the code.
    if (Numerator == Denominator) {
      *Quotient = D.One;
      *Remainder = D.Zero;
      return;
    }

    if (Numerator->isZero()) {
      *Quotient = D.Zero;
      *Remainder = D.Zero;
      return;
    }

    // A simple case, N/1: the quotient is N.
    if (Denominator->isOne()) {
      *Quotient = Numerator;
      *Remainder = D.Zero;
      return;
    }

    // Split the Denominator when it is a product.
    if (const SCEVMulExpr *T = dyn_cast<SCEVMulExpr>(Denominator)) {
      const SCEV *Q, *R;
      *Quotient = Numerator;
      for (const SCEV *Op : T->operands()) {
        divide(SE, *Quotient, Op, &Q, &R);
        *Quotient = Q;

        // Bail out when the Numerator is not divisible by one of the terms of
        // the Denominator.
        if (!R->isZero()) {
          *Quotient = D.Zero;
          *Remainder = Numerator;
          return;
        }
      }
      *Remainder = D.Zero;
      return;
    }

    D.visit(Numerator);
    *Quotient = D.Quotient;
    *Remainder = D.Remainder;
  }

  // Except in the trivial case described above, we do not know how to divide
  // Expr by Denominator for the following functions with empty implementation.
  void visitTruncateExpr(const SCEVTruncateExpr *Numerator) {}
  void visitZeroExtendExpr(const SCEVZeroExtendExpr *Numerator) {}
  void visitSignExtendExpr(const SCEVSignExtendExpr *Numerator) {}
  void visitUDivExpr(const SCEVUDivExpr *Numerator) {}
  void visitSMaxExpr(const SCEVSMaxExpr *Numerator) {}
  void visitUMaxExpr(const SCEVUMaxExpr *Numerator) {}
  void visitSMinExpr(const SCEVSMinExpr *Numerator) {}
  void visitUMinExpr(const SCEVUMinExpr *Numerator) {}
  void visitUnknown(const SCEVUnknown *Numerator) {}
  void visitCouldNotCompute(const SCEVCouldNotCompute *Numerator) {}

  void visitConstant(const SCEVConstant *Numerator) {
    if (const SCEVConstant *D = dyn_cast<SCEVConstant>(Denominator)) {
      APInt NumeratorVal = Numerator->getAPInt();
      APInt DenominatorVal = D->getAPInt();
      uint32_t NumeratorBW = NumeratorVal.getBitWidth();
      uint32_t DenominatorBW = DenominatorVal.getBitWidth();

      if (NumeratorBW > DenominatorBW)
        DenominatorVal = DenominatorVal.sext(NumeratorBW);
      else if (NumeratorBW < DenominatorBW)
        NumeratorVal = NumeratorVal.sext(DenominatorBW);

      APInt QuotientVal(NumeratorVal.getBitWidth(), 0);
      APInt RemainderVal(NumeratorVal.getBitWidth(), 0);
      APInt::sdivrem(NumeratorVal, DenominatorVal, QuotientVal, RemainderVal);
      Quotient = SE.getConstant(QuotientVal);
      Remainder = SE.getConstant(RemainderVal);
      return;
    }
  }

  void visitAddRecExpr(const SCEVAddRecExpr *Numerator) {
    const SCEV *StartQ, *StartR, *StepQ, *StepR;
    if (!Numerator->isAffine())
      return cannotDivide(Numerator);
    divide(SE, Numerator->getStart(), Denominator, &StartQ, &StartR);
    divide(SE, Numerator->getStepRecurrence(SE), Denominator, &StepQ, &StepR);
    // Bail out if the types do not match.
    Type *Ty = Denominator->getType();
    if (Ty != StartQ->getType() || Ty != StartR->getType() ||
        Ty != StepQ->getType() || Ty != StepR->getType())
      return cannotDivide(Numerator);
    Quotient = SE.getAddRecExpr(StartQ, StepQ, Numerator->getLoop(),
                                Numerator->getNoWrapFlags());
    Remainder = SE.getAddRecExpr(StartR, StepR, Numerator->getLoop(),
                                 Numerator->getNoWrapFlags());
  }

  void visitAddExpr(const SCEVAddExpr *Numerator) {
    SmallVector<const SCEV *, 2> Qs, Rs;
    Type *Ty = Denominator->getType();

    for (const SCEV *Op : Numerator->operands()) {
      const SCEV *Q, *R;
      divide(SE, Op, Denominator, &Q, &R);

      // Bail out if types do not match.
      if (Ty != Q->getType() || Ty != R->getType())
        return cannotDivide(Numerator);

      Qs.push_back(Q);
      Rs.push_back(R);
    }

    if (Qs.size() == 1) {
      Quotient = Qs[0];
      Remainder = Rs[0];
      return;
    }

    Quotient = SE.getAddExpr(Qs);
    Remainder = SE.getAddExpr(Rs);
  }

  void visitMulExpr(const SCEVMulExpr *Numerator) {
    SmallVector<const SCEV *, 2> Qs;
    Type *Ty = Denominator->getType();

    bool FoundDenominatorTerm = false;
    for (const SCEV *Op : Numerator->operands()) {
      // Bail out if types do not match.
      if (Ty != Op->getType())
        return cannotDivide(Numerator);

      if (FoundDenominatorTerm) {
        Qs.push_back(Op);
        continue;
      }

      // Check whether Denominator divides one of the product operands.
      const SCEV *Q, *R;
      divide(SE, Op, Denominator, &Q, &R);
      if (!R->isZero()) {
        Qs.push_back(Op);
        continue;
      }

      // Bail out if types do not match.
      if (Ty != Q->getType())
        return cannotDivide(Numerator);

      FoundDenominatorTerm = true;
      Qs.push_back(Q);
    }

    if (FoundDenominatorTerm) {
      Remainder = Zero;
      if (Qs.size() == 1)
        Quotient = Qs[0];
      else
        Quotient = SE.getMulExpr(Qs);
      return;
    }

    if (!isa<SCEVUnknown>(Denominator))
      return cannotDivide(Numerator);

    // The Remainder is obtained by replacing Denominator by 0 in Numerator.
    ValueToValueMap RewriteMap;
    RewriteMap[cast<SCEVUnknown>(Denominator)->getValue()] =
        cast<SCEVConstant>(Zero)->getValue();
    Remainder = SCEVParameterRewriter::rewrite(Numerator, SE, RewriteMap, true);

    if (Remainder->isZero()) {
      // The Quotient is obtained by replacing Denominator by 1 in Numerator.
      RewriteMap[cast<SCEVUnknown>(Denominator)->getValue()] =
          cast<SCEVConstant>(One)->getValue();
      Quotient =
          SCEVParameterRewriter::rewrite(Numerator, SE, RewriteMap, true);
      return;
    }

    // Quotient is (Numerator - Remainder) divided by Denominator.
    const SCEV *Q, *R;
    const SCEV *Diff = SE.getMinusSCEV(Numerator, Remainder);
    // This SCEV does not seem to simplify: fail the division here.
    if (sizeOfSCEV(Diff) > sizeOfSCEV(Numerator))
      return cannotDivide(Numerator);
    divide(SE, Diff, Denominator, &Q, &R);
    if (R != Zero)
      return cannotDivide(Numerator);
    Quotient = Q;
  }

private:
  SCEVDivision(ScalarEvolution &S, const SCEV *Numerator,
               const SCEV *Denominator)
      : SE(S), Denominator(Denominator) {
    Zero = SE.getZero(Denominator->getType());
    One = SE.getOne(Denominator->getType());

    // We generally do not know how to divide Expr by Denominator. We
    // initialize the division to a "cannot divide" state to simplify the rest
    // of the code.
    cannotDivide(Numerator);
  }

  // Convenience function for giving up on the division. We set the quotient to
  // be equal to zero and the remainder to be equal to the numerator.
  void cannotDivide(const SCEV *Numerator) {
    Quotient = Zero;
    Remainder = Numerator;
  }

  ScalarEvolution &SE;
  const SCEV *Denominator, *Quotient, *Remainder, *Zero, *One;
};

} // end anonymous namespace
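
// Usage sketch for the divider above: SCEVDivision::divide(SE, (4 * %x) + 8,
// 4, &Q, &R) visits the add and divides each operand separately, yielding
// *Q == (%x + 2) and *R == 0, while dividing ((4 * %x) + 9) by 4 yields
// *Q == (%x + 2) and *R == 1.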
//===----------------------------------------------------------------------===//
//                      Simple SCEV method implementations
//===----------------------------------------------------------------------===//
/// Compute BC(It, K).  The result has width W.  Assume K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                       ScalarEvolution &SE,
                                       Type *ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose W is the bitwidth of the return value.  We must be prepared for
  // overflow.  Hence, we must assure that the result of our computation is
  // equal to the accurate one modulo 2^W.  Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula.  However,
  // this formula can be implemented much more efficiently.  The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic.  To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse.  Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T.  The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits.  This way, the bottom W+T bits of the product are accurate.  Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate.  Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula needs less than W + K bits.  Also, the first formula
  // requires a division step, whereas this formula only requires multiplies
  // and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway.  We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // register width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)

  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying for calculating K! / 2^T to avoid overflow.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult.lshrInPlace(TwoFactors);
    OddFactorial *= Mult;
  }

  // We need at least W + T bits for the multiplication step
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt::getOneBitSet(CalculationBits, T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W
  IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
                                                CalculationBits);
  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T
  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.
  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}
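
// A worked instance of the scheme above, for K = 3 and W = 32: K! = 6 has
// T = 1 factors of two, so the odd part K!/2^T is 3.  The product
// It*(It-1)*(It-2) is formed at width W + T = 33 bits, the udiv by 2^T = 2
// shifts out the guaranteed-zero low bit, and the final multiply by the
// inverse 3^-1 (mod 2^32) == 2863311531 performs the exact division by 3.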
/// Return the value of this chain of recurrences at the specified iteration
/// number.  We can evaluate this recurrence by multiplying each element in the
/// chain by the binomial coefficient corresponding to it.  In other words, we
/// can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
                                                ScalarEvolution &SE) const {
  const SCEV *Result = getStart();
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff));
  }
  return Result;
}
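
// For example, {0,+,1,+,1} evaluated at iteration It expands to
// 0*BC(It,0) + 1*BC(It,1) + 1*BC(It,2) = It + It*(It-1)/2 = It*(It+1)/2,
// i.e. the running sum 0 + 1 + ... + It, as expected for a value whose
// increment itself grows by 1 every iteration.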
//===----------------------------------------------------------------------===//
//                    SCEV Expression folder implementations
//===----------------------------------------------------------------------===//
const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op, Type *Ty,
                                             unsigned Depth) {
  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
         "This is not a truncating conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  FoldingSetNodeID ID;
  ID.AddInteger(scTruncate);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));

  // trunc(trunc(x)) --> trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    return getTruncateExpr(ST->getOperand(), Ty, Depth + 1);

  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getTruncateOrSignExtend(SS->getOperand(), Ty, Depth + 1);

  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getTruncateOrZeroExtend(SZ->getOperand(), Ty, Depth + 1);

  if (Depth > MaxCastDepth) {
    SCEV *S =
        new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator), Op, Ty);
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
    return S;
  }

  // trunc(x1 + ... + xN) --> trunc(x1) + ... + trunc(xN) and
  // trunc(x1 * ... * xN) --> trunc(x1) * ... * trunc(xN),
  // if after transforming we have at most one truncate, not counting truncates
  // that replace other casts.
  if (isa<SCEVAddExpr>(Op) || isa<SCEVMulExpr>(Op)) {
    auto *CommOp = cast<SCEVCommutativeExpr>(Op);
    SmallVector<const SCEV *, 4> Operands;
    unsigned numTruncs = 0;
    for (unsigned i = 0, e = CommOp->getNumOperands(); i != e && numTruncs < 2;
         ++i) {
      const SCEV *S = getTruncateExpr(CommOp->getOperand(i), Ty, Depth + 1);
      if (!isa<SCEVCastExpr>(CommOp->getOperand(i)) && isa<SCEVTruncateExpr>(S))
        numTruncs++;
      Operands.push_back(S);
    }
    if (numTruncs < 2) {
      if (isa<SCEVAddExpr>(Op))
        return getAddExpr(Operands);
      else if (isa<SCEVMulExpr>(Op))
        return getMulExpr(Operands);
      else
        llvm_unreachable("Unexpected SCEV type for Op.");
    }
    // Although we checked in the beginning that ID is not in the cache, it is
    // possible that ID was inserted into the cache during the recursive calls
    // above.  So if we find it, just return it.
    if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
      return S;
  }

  // If the input value is a chrec scev, truncate the chrec's operands.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    for (const SCEV *Op : AddRec->operands())
      Operands.push_back(getTruncateExpr(Op, Ty, Depth + 1));
    return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap);
  }

  // The cast wasn't folded; create an explicit cast node. We can reuse
  // the existing insert position since if we get here, we won't have
  // made any changes which would invalidate it.
  SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
                                                 Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}
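
// For instance, truncating the i64 expression ((zext i32 %x to i64) + 5) to
// i32 distributes over the add: the truncate cancels the zero-extension and
// the constant folds, so the result is (%x + 5) with no truncates left,
// satisfying the "at most one truncate" condition above.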
// Get the limit of a recurrence such that incrementing by Step cannot cause
// signed overflow as long as the value of the recurrence within the
// loop does not exceed this limit before incrementing.
static const SCEV *getSignedOverflowLimitForStep(const SCEV *Step,
                                                 ICmpInst::Predicate *Pred,
                                                 ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  if (SE->isKnownPositive(Step)) {
    *Pred = ICmpInst::ICMP_SLT;
    return SE->getConstant(APInt::getSignedMinValue(BitWidth) -
                           SE->getSignedRangeMax(Step));
  }
  if (SE->isKnownNegative(Step)) {
    *Pred = ICmpInst::ICMP_SGT;
    return SE->getConstant(APInt::getSignedMaxValue(BitWidth) -
                           SE->getSignedRangeMin(Step));
  }
  return nullptr;
}
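
// For example, for an i8 recurrence with Step == 1, the signed limit above is
// INT8_MIN - 1 == 127 (mod 2^8) with predicate ICMP_SLT: as long as the
// recurrence's value is signed-less-than 127, adding the step cannot
// sign-overflow.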
// Get the limit of a recurrence such that incrementing by Step cannot cause
// unsigned overflow as long as the value of the recurrence within the loop does
// not exceed this limit before incrementing.
static const SCEV *getUnsignedOverflowLimitForStep(const SCEV *Step,
                                                   ICmpInst::Predicate *Pred,
                                                   ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  *Pred = ICmpInst::ICMP_ULT;

  return SE->getConstant(APInt::getMinValue(BitWidth) -
                         SE->getUnsignedRangeMax(Step));
}
namespace {

struct ExtendOpTraitsBase {
  typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(const SCEV *, Type *,
                                                          unsigned);
};

// Used to make code generic over signed and unsigned overflow.
template <typename ExtendOp> struct ExtendOpTraits {
  // Members present:
  //
  // static const SCEV::NoWrapFlags WrapType;
  //
  // static const ExtendOpTraitsBase::GetExtendExprTy GetExtendExpr;
  //
  // static const SCEV *getOverflowLimitForStep(const SCEV *Step,
  //                                            ICmpInst::Predicate *Pred,
  //                                            ScalarEvolution *SE);
};

template <>
struct ExtendOpTraits<SCEVSignExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNSW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getSignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVSignExtendExpr>::GetExtendExpr = &ScalarEvolution::getSignExtendExpr;

template <>
struct ExtendOpTraits<SCEVZeroExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNUW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getUnsignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVZeroExtendExpr>::GetExtendExpr = &ScalarEvolution::getZeroExtendExpr;

} // end anonymous namespace
// The recurrence AR has been shown to have no signed/unsigned wrap or something
// close to it.  Typically, if we can prove NSW/NUW for AR, then we can just as
// easily prove NSW/NUW for its preincrement or postincrement sibling.  This
// allows normalizing a sign/zero extended AddRec as such:
// {sext/zext(Step + Start),+,Step} => {Step + sext/zext(Start),+,Step}
// As a result, the expression "Step + sext/zext(PreIncAR)" is congruent with
// "sext/zext(PostIncAR)".
template <typename ExtendOpTy>
static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE, unsigned Depth) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const Loop *L = AR->getLoop();
  const SCEV *Start = AR->getStart();
  const SCEV *Step = AR->getStepRecurrence(*SE);

  // Check for a simple looking step prior to loop entry.
  const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start);
  if (!SA)
    return nullptr;

  // Create an AddExpr for "PreStart" after subtracting Step. Full SCEV
  // subtraction is expensive. For this purpose, perform a quick and dirty
  // difference, by checking for Step in the operand list.
  SmallVector<const SCEV *, 4> DiffOps;
  for (const SCEV *Op : SA->operands())
    if (Op != Step)
      DiffOps.push_back(Op);

  if (DiffOps.size() == SA->getNumOperands())
    return nullptr;

  // Try to prove `WrapType` (SCEV::FlagNSW or SCEV::FlagNUW) on `PreStart` +
  // `Step`:

  // 1. NSW/NUW flags on the step increment.
  auto PreStartFlags =
    ScalarEvolution::maskFlags(SA->getNoWrapFlags(), SCEV::FlagNUW);
  const SCEV *PreStart = SE->getAddExpr(DiffOps, PreStartFlags);
  const SCEVAddRecExpr *PreAR = dyn_cast<SCEVAddRecExpr>(
      SE->getAddRecExpr(PreStart, Step, L, SCEV::FlagAnyWrap));

  // "{S,+,X} is <nsw>/<nuw>" and "the backedge is taken at least once" implies
  // "S+X does not sign/unsign-overflow".
  //
  const SCEV *BECount = SE->getBackedgeTakenCount(L);
  if (PreAR && PreAR->getNoWrapFlags(WrapType) &&
      !isa<SCEVCouldNotCompute>(BECount) && SE->isKnownPositive(BECount))
    return PreStart;

  // 2. Direct overflow check on the step operation's expression.
  unsigned BitWidth = SE->getTypeSizeInBits(AR->getType());
  Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2);
  const SCEV *OperandExtendedStart =
      SE->getAddExpr((SE->*GetExtendExpr)(PreStart, WideTy, Depth),
                     (SE->*GetExtendExpr)(Step, WideTy, Depth));
  if ((SE->*GetExtendExpr)(Start, WideTy, Depth) == OperandExtendedStart) {
    if (PreAR && AR->getNoWrapFlags(WrapType)) {
      // If we know `AR` == {`PreStart`+`Step`,+,`Step`} is `WrapType` (FlagNSW
      // or FlagNUW) and that `PreStart` + `Step` is `WrapType` too, then
      // `PreAR` == {`PreStart`,+,`Step`} is also `WrapType`.  Cache this fact.
      const_cast<SCEVAddRecExpr *>(PreAR)->setNoWrapFlags(WrapType);
    }
    return PreStart;
  }

  // 3. Loop precondition.
  ICmpInst::Predicate Pred;
  const SCEV *OverflowLimit =
      ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE);

  if (OverflowLimit &&
      SE->isLoopEntryGuardedByCond(L, Pred, PreStart, OverflowLimit))
    return PreStart;

  return nullptr;
}
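
// Concretely: for AR == {(%a + 2),+,2}, the loop above strips the step from
// the start and forms PreStart == %a; once one of the three checks proves
// that %a + 2 cannot wrap, the caller may compute the extended start as
// (2 + sext/zext(%a)) instead of sext/zext(%a + 2).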
// Get the normalized zero or sign extended expression for this AddRec's Start.
template <typename ExtendOpTy>
static const SCEV *getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE,
                                        unsigned Depth) {
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE, Depth);
  if (!PreStart)
    return (SE->*GetExtendExpr)(AR->getStart(), Ty, Depth);

  return SE->getAddExpr((SE->*GetExtendExpr)(AR->getStepRecurrence(*SE), Ty,
                                             Depth),
                        (SE->*GetExtendExpr)(PreStart, Ty, Depth));
}
// Try to prove away overflow by looking at "nearby" add recurrences.  A
// motivating example for this rule: if we know `{0,+,4}` is `ult` `-1` and it
// does not itself wrap then we can conclude that `{1,+,4}` is `nuw`.
//
// Formally:
//
//     {S,+,X} == {S-T,+,X} + T
//  => Ext({S,+,X}) == Ext({S-T,+,X} + T)
//
//  If ({S-T,+,X} + T) does not overflow  ... (1)
//
//  RHS == Ext({S-T,+,X} + T) == Ext({S-T,+,X}) + Ext(T)
//
//  If {S-T,+,X} does not overflow  ... (2)
//
//  RHS == Ext({S-T,+,X}) + Ext(T) == {Ext(S-T),+,Ext(X)} + Ext(T)
//      == {Ext(S-T)+Ext(T),+,Ext(X)}
//
//  If (S-T)+T does not overflow  ... (3)
//
//  RHS == {Ext(S-T)+Ext(T),+,Ext(X)} == {Ext(S-T+T),+,Ext(X)}
//      == {Ext(S),+,Ext(X)} == LHS
//
// Thus, if (1), (2) and (3) are true for some T, then
//   Ext({S,+,X}) == {Ext(S),+,Ext(X)}
//
// (3) is implied by (1) -- "(S-T)+T does not overflow" is simply "({S-T,+,X}+T)
// does not overflow" restricted to the 0th iteration.  Therefore we only need
// to check for (1) and (2).
//
// In the current context, S is `Start`, X is `Step`, Ext is `ExtendOpTy` and T
// is `Delta` (defined below).
template <typename ExtendOpTy>
bool ScalarEvolution::proveNoWrapByVaryingStart(const SCEV *Start,
                                                const SCEV *Step,
                                                const Loop *L) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;

  // We restrict `Start` to a constant to prevent SCEV from spending too much
  // time here.  It is correct (but more expensive) to continue with a
  // non-constant `Start` and do a general SCEV subtraction to compute
  // `PreStart` below.
  const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start);
  if (!StartC)
    return false;

  APInt StartAI = StartC->getAPInt();

  for (unsigned Delta : {-2, -1, 1, 2}) {
    const SCEV *PreStart = getConstant(StartAI - Delta);

    FoldingSetNodeID ID;
    ID.AddInteger(scAddRecExpr);
    ID.AddPointer(PreStart);
    ID.AddPointer(Step);
    ID.AddPointer(L);
    void *IP = nullptr;
    const auto *PreAR =
      static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));

    // Give up if we don't already have the add recurrence we need because
    // actually constructing an add recurrence is relatively expensive.
    if (PreAR && PreAR->getNoWrapFlags(WrapType)) {  // proves (2)
      const SCEV *DeltaS = getConstant(StartC->getType(), Delta);
      ICmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
      const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(
          DeltaS, &Pred, this);
      if (Limit && isKnownPredicate(Pred, PreAR, Limit))  // proves (1)
        return true;
    }
  }

  return false;
}
// Finds an integer D for an expression (C + x + y + ...) such that the top
// level addition in (D + (C - D + x + y + ...)) would not wrap (signed or
// unsigned) and the number of trailing zeros of (C - D + x + y + ...) is
// maximized, where C is the \p ConstantTerm, x, y, ... are arbitrary SCEVs, and
// the (C + x + y + ...) expression is \p WholeAddExpr.
static APInt extractConstantWithoutWrapping(ScalarEvolution &SE,
                                            const SCEVConstant *ConstantTerm,
                                            const SCEVAddExpr *WholeAddExpr) {
  const APInt C = ConstantTerm->getAPInt();
  const unsigned BitWidth = C.getBitWidth();
  // Find number of trailing zeros of (x + y + ...) w/o the C first:
  uint32_t TZ = BitWidth;
  for (unsigned I = 1, E = WholeAddExpr->getNumOperands(); I < E && TZ; ++I)
    TZ = std::min(TZ, SE.GetMinTrailingZeros(WholeAddExpr->getOperand(I)));
  if (TZ) {
    // Set D to be as many least significant bits of C as possible while still
    // guaranteeing that adding D to (C - D + x + y + ...) won't cause a wrap:
    return TZ < BitWidth ? C.trunc(TZ).zext(BitWidth) : C;
  }
  return APInt(BitWidth, 0);
}
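
// For example, with C == 21 (0b10101) and every other operand having at least
// two trailing zero bits (TZ == 2), D is the low two bits of C, i.e. 1:
// splitting (21 + x) as (1 + (20 + x)) keeps (20 + x) divisible by 4, and the
// top-level addition cannot wrap because it only fills known-zero low bits.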
// Finds an integer D for an affine AddRec expression {C,+,x} such that the top
// level addition in (D + {C-D,+,x}) would not wrap (signed or unsigned) and the
// number of trailing zeros of (C - D + x * n) is maximized, where C is the \p
// ConstantStart, x is an arbitrary \p Step, and n is the loop trip count.
static APInt extractConstantWithoutWrapping(ScalarEvolution &SE,
                                            const APInt &ConstantStart,
                                            const SCEV *Step) {
  const unsigned BitWidth = ConstantStart.getBitWidth();
  const uint32_t TZ = SE.GetMinTrailingZeros(Step);
  if (TZ)
    return TZ < BitWidth ? ConstantStart.trunc(TZ).zext(BitWidth)
                         : ConstantStart;
  return APInt(BitWidth, 0);
}
const SCEV *
ScalarEvolution::getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(), Ty)));

  // zext(zext(x)) --> zext(x)
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scZeroExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  if (Depth > MaxCastDepth) {
    SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
                                                     Op, Ty);
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
    return S;
  }

  // zext(trunc(x)) --> zext(x) or x or trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
    // It's possible the bits taken off by the truncate were all zero bits. If
    // so, we should be able to simplify this further.
    const SCEV *X = ST->getOperand();
    ConstantRange CR = getUnsignedRange(X);
    unsigned TruncBits = getTypeSizeInBits(ST->getType());
    unsigned NewBits = getTypeSizeInBits(Ty);
    if (CR.truncate(TruncBits).zeroExtend(NewBits).contains(
            CR.zextOrTrunc(NewBits)))
      return getTruncateOrZeroExtend(X, Ty, Depth);
  }

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can zero extend all of the
  // operands (often constants).  This allows analysis of something like
  // this:  for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      if (!AR->hasNoUnsignedWrap()) {
        auto NewFlags = proveNoWrapViaConstantRanges(AR);
        const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(NewFlags);
      }

      // If we have special knowledge that this addrec won't overflow,
      // we don't need to do any further analysis.
      if (AR->hasNoUnsignedWrap())
        return getAddRecExpr(
            getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1),
            getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.

        // Check whether the backedge-taken count can be losslessly casted to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
            getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth);
        const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend(
            CastedMaxBECount, MaxBECount->getType(), Depth);
        if (MaxBECount == RecastedMaxBECount) {
          Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no unsigned overflow.
          const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step,
                                        SCEV::FlagAnyWrap, Depth + 1);
          const SCEV *ZAdd = getZeroExtendExpr(getAddExpr(Start, ZMul,
                                                          SCEV::FlagAnyWrap,
                                                          Depth + 1),
                                               WideTy, Depth + 1);
          const SCEV *WideStart = getZeroExtendExpr(Start, WideTy, Depth + 1);
          const SCEV *WideMaxBECount =
            getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
          const SCEV *OperandExtendedAdd =
            getAddExpr(WideStart,
                       getMulExpr(WideMaxBECount,
                                  getZeroExtendExpr(Step, WideTy, Depth + 1),
                                  SCEV::FlagAnyWrap, Depth + 1),
                       SCEV::FlagAnyWrap, Depth + 1);
          if (ZAdd == OperandExtendedAdd) {
            // Cache knowledge of AR NUW, which is propagated to this AddRec.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getZeroExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
          // Similar to above, only this time treat the step value as signed.
          // This covers loops that count down.
          OperandExtendedAdd =
            getAddExpr(WideStart,
                       getMulExpr(WideMaxBECount,
                                  getSignExtendExpr(Step, WideTy, Depth + 1),
                                  SCEV::FlagAnyWrap, Depth + 1),
                       SCEV::FlagAnyWrap, Depth + 1);
          if (ZAdd == OperandExtendedAdd) {
            // Cache knowledge of AR NW, which is propagated to this AddRec.
            // Negative step causes unsigned wrap, but it still can't self-wrap.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getSignExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
        }
      }

      // Normally, in the cases we can prove no-overflow via a
      // backedge guarding condition, we can also compute a backedge
      // taken count for the loop.  The exceptions are assumptions and
      // guards present in the loop -- SCEV is not great at exploiting
      // these to compute max backedge taken counts, but can still use
      // these to prove lack of overflow.  Use this fact to avoid
      // doing extra work that may not pay off.
      if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards ||
          !AC.assumptions().empty()) {
        // If the backedge is guarded by a comparison with the pre-inc
        // value the addrec is safe. Also, if the entry is guarded by
        // a comparison with the start value and the backedge is
        // guarded by a comparison with the post-inc value, the addrec
1785 if (isKnownPositive(Step
)) {
1786 const SCEV
*N
= getConstant(APInt::getMinValue(BitWidth
) -
1787 getUnsignedRangeMax(Step
));
1788 if (isLoopBackedgeGuardedByCond(L
, ICmpInst::ICMP_ULT
, AR
, N
) ||
1789 isKnownOnEveryIteration(ICmpInst::ICMP_ULT
, AR
, N
)) {
1790 // Cache knowledge of AR NUW, which is propagated to this
1792 const_cast<SCEVAddRecExpr
*>(AR
)->setNoWrapFlags(SCEV::FlagNUW
);
1793 // Return the expression with the addrec on the outside.
1794 return getAddRecExpr(
1795 getExtendAddRecStart
<SCEVZeroExtendExpr
>(AR
, Ty
, this,
1797 getZeroExtendExpr(Step
, Ty
, Depth
+ 1), L
,
1798 AR
->getNoWrapFlags());
1800 } else if (isKnownNegative(Step
)) {
1801 const SCEV
*N
= getConstant(APInt::getMaxValue(BitWidth
) -
1802 getSignedRangeMin(Step
));
1803 if (isLoopBackedgeGuardedByCond(L
, ICmpInst::ICMP_UGT
, AR
, N
) ||
1804 isKnownOnEveryIteration(ICmpInst::ICMP_UGT
, AR
, N
)) {
1805 // Cache knowledge of AR NW, which is propagated to this
1806 // AddRec. Negative step causes unsigned wrap, but it
1807 // still can't self-wrap.
1808 const_cast<SCEVAddRecExpr
*>(AR
)->setNoWrapFlags(SCEV::FlagNW
);
1809 // Return the expression with the addrec on the outside.
1810 return getAddRecExpr(
1811 getExtendAddRecStart
<SCEVZeroExtendExpr
>(AR
, Ty
, this,
1813 getSignExtendExpr(Step
, Ty
, Depth
+ 1), L
,
1814 AR
->getNoWrapFlags());
1819 // zext({C,+,Step}) --> (zext(D) + zext({C-D,+,Step}))<nuw><nsw>
1820 // if D + (C - D + Step * n) could be proven to not unsigned wrap
1821 // where D maximizes the number of trailing zeros of (C - D + Step * n)
1822 if (const auto *SC
= dyn_cast
<SCEVConstant
>(Start
)) {
1823 const APInt
&C
= SC
->getAPInt();
1824 const APInt
&D
= extractConstantWithoutWrapping(*this, C
, Step
);
1826 const SCEV
*SZExtD
= getZeroExtendExpr(getConstant(D
), Ty
, Depth
);
1827 const SCEV
*SResidual
=
1828 getAddRecExpr(getConstant(C
- D
), Step
, L
, AR
->getNoWrapFlags());
1829 const SCEV
*SZExtR
= getZeroExtendExpr(SResidual
, Ty
, Depth
+ 1);
1830 return getAddExpr(SZExtD
, SZExtR
,
1831 (SCEV::NoWrapFlags
)(SCEV::FlagNSW
| SCEV::FlagNUW
),
1836 if (proveNoWrapByVaryingStart
<SCEVZeroExtendExpr
>(Start
, Step
, L
)) {
1837 const_cast<SCEVAddRecExpr
*>(AR
)->setNoWrapFlags(SCEV::FlagNUW
);
1838 return getAddRecExpr(
1839 getExtendAddRecStart
<SCEVZeroExtendExpr
>(AR
, Ty
, this, Depth
+ 1),
1840 getZeroExtendExpr(Step
, Ty
, Depth
+ 1), L
, AR
->getNoWrapFlags());
1844 // zext(A % B) --> zext(A) % zext(B)
1848 if (matchURem(Op
, LHS
, RHS
))
1849 return getURemExpr(getZeroExtendExpr(LHS
, Ty
, Depth
+ 1),
1850 getZeroExtendExpr(RHS
, Ty
, Depth
+ 1));
1853 // zext(A / B) --> zext(A) / zext(B).
1854 if (auto *Div
= dyn_cast
<SCEVUDivExpr
>(Op
))
1855 return getUDivExpr(getZeroExtendExpr(Div
->getLHS(), Ty
, Depth
+ 1),
1856 getZeroExtendExpr(Div
->getRHS(), Ty
, Depth
+ 1));
1858 if (auto *SA
= dyn_cast
<SCEVAddExpr
>(Op
)) {
1859 // zext((A + B + ...)<nuw>) --> (zext(A) + zext(B) + ...)<nuw>
1860 if (SA
->hasNoUnsignedWrap()) {
1861 // If the addition does not unsign overflow then we can, by definition,
1862 // commute the zero extension with the addition operation.
1863 SmallVector
<const SCEV
*, 4> Ops
;
1864 for (const auto *Op
: SA
->operands())
1865 Ops
.push_back(getZeroExtendExpr(Op
, Ty
, Depth
+ 1));
1866 return getAddExpr(Ops
, SCEV::FlagNUW
, Depth
+ 1);
1869 // zext(C + x + y + ...) --> (zext(D) + zext((C - D) + x + y + ...))
1870 // if D + (C - D + x + y + ...) could be proven to not unsigned wrap
1871 // where D maximizes the number of trailing zeros of (C - D + x + y + ...)
1873 // Often address arithmetics contain expressions like
1874 // (zext (add (shl X, C1), C2)), for instance, (zext (5 + (4 * X))).
1875 // This transformation is useful while proving that such expressions are
1876 // equal or differ by a small constant amount, see LoadStoreVectorizer pass.
1877 if (const auto *SC
= dyn_cast
<SCEVConstant
>(SA
->getOperand(0))) {
1878 const APInt
&D
= extractConstantWithoutWrapping(*this, SC
, SA
);
1880 const SCEV
*SZExtD
= getZeroExtendExpr(getConstant(D
), Ty
, Depth
);
1881 const SCEV
*SResidual
=
1882 getAddExpr(getConstant(-D
), SA
, SCEV::FlagAnyWrap
, Depth
);
1883 const SCEV
*SZExtR
= getZeroExtendExpr(SResidual
, Ty
, Depth
+ 1);
1884 return getAddExpr(SZExtD
, SZExtR
,
1885 (SCEV::NoWrapFlags
)(SCEV::FlagNSW
| SCEV::FlagNUW
),
1891 if (auto *SM
= dyn_cast
<SCEVMulExpr
>(Op
)) {
1892 // zext((A * B * ...)<nuw>) --> (zext(A) * zext(B) * ...)<nuw>
1893 if (SM
->hasNoUnsignedWrap()) {
1894 // If the multiply does not unsign overflow then we can, by definition,
1895 // commute the zero extension with the multiply operation.
1896 SmallVector
<const SCEV
*, 4> Ops
;
1897 for (const auto *Op
: SM
->operands())
1898 Ops
.push_back(getZeroExtendExpr(Op
, Ty
, Depth
+ 1));
1899 return getMulExpr(Ops
, SCEV::FlagNUW
, Depth
+ 1);
1902 // zext(2^K * (trunc X to iN)) to iM ->
1903 // 2^K * (zext(trunc X to i{N-K}) to iM)<nuw>
1907 // zext(2^K * (trunc X to iN)) to iM
1908 // = zext((trunc X to iN) << K) to iM
1909 // = zext((trunc X to i{N-K}) << K)<nuw> to iM
1910 // (because shl removes the top K bits)
1911 // = zext((2^K * (trunc X to i{N-K}))<nuw>) to iM
1912 // = (2^K * (zext(trunc X to i{N-K}) to iM))<nuw>.
1914 if (SM
->getNumOperands() == 2)
1915 if (auto *MulLHS
= dyn_cast
<SCEVConstant
>(SM
->getOperand(0)))
1916 if (MulLHS
->getAPInt().isPowerOf2())
1917 if (auto *TruncRHS
= dyn_cast
<SCEVTruncateExpr
>(SM
->getOperand(1))) {
1918 int NewTruncBits
= getTypeSizeInBits(TruncRHS
->getType()) -
1919 MulLHS
->getAPInt().logBase2();
1920 Type
*NewTruncTy
= IntegerType::get(getContext(), NewTruncBits
);
1922 getZeroExtendExpr(MulLHS
, Ty
),
1924 getTruncateExpr(TruncRHS
->getOperand(), NewTruncTy
), Ty
),
1925 SCEV::FlagNUW
, Depth
+ 1);
1929 // The cast wasn't folded; create an explicit cast node.
1930 // Recompute the insert position, as it may have been invalidated.
1931 if (const SCEV
*S
= UniqueSCEVs
.FindNodeOrInsertPos(ID
, IP
)) return S
;
1932 SCEV
*S
= new (SCEVAllocator
) SCEVZeroExtendExpr(ID
.Intern(SCEVAllocator
),
1934 UniqueSCEVs
.InsertNode(S
, IP
);
1935 addToLoopUseLists(S
);
1940 ScalarEvolution::getSignExtendExpr(const SCEV
*Op
, Type
*Ty
, unsigned Depth
) {
1941 assert(getTypeSizeInBits(Op
->getType()) < getTypeSizeInBits(Ty
) &&
1942 "This is not an extending conversion!");
1943 assert(isSCEVable(Ty
) &&
1944 "This is not a conversion to a SCEVable type!");
1945 Ty
= getEffectiveSCEVType(Ty
);
1947 // Fold if the operand is constant.
1948 if (const SCEVConstant
*SC
= dyn_cast
<SCEVConstant
>(Op
))
1950 cast
<ConstantInt
>(ConstantExpr::getSExt(SC
->getValue(), Ty
)));
1952 // sext(sext(x)) --> sext(x)
1953 if (const SCEVSignExtendExpr
*SS
= dyn_cast
<SCEVSignExtendExpr
>(Op
))
1954 return getSignExtendExpr(SS
->getOperand(), Ty
, Depth
+ 1);
1956 // sext(zext(x)) --> zext(x)
1957 if (const SCEVZeroExtendExpr
*SZ
= dyn_cast
<SCEVZeroExtendExpr
>(Op
))
1958 return getZeroExtendExpr(SZ
->getOperand(), Ty
, Depth
+ 1);
1960 // Before doing any expensive analysis, check to see if we've already
1961 // computed a SCEV for this Op and Ty.
1962 FoldingSetNodeID ID
;
1963 ID
.AddInteger(scSignExtend
);
1967 if (const SCEV
*S
= UniqueSCEVs
.FindNodeOrInsertPos(ID
, IP
)) return S
;
1968 // Limit recursion depth.
1969 if (Depth
> MaxCastDepth
) {
1970 SCEV
*S
= new (SCEVAllocator
) SCEVSignExtendExpr(ID
.Intern(SCEVAllocator
),
1972 UniqueSCEVs
.InsertNode(S
, IP
);
1973 addToLoopUseLists(S
);
1977 // sext(trunc(x)) --> sext(x) or x or trunc(x)
1978 if (const SCEVTruncateExpr
*ST
= dyn_cast
<SCEVTruncateExpr
>(Op
)) {
1979 // It's possible the bits taken off by the truncate were all sign bits. If
1980 // so, we should be able to simplify this further.
1981 const SCEV
*X
= ST
->getOperand();
1982 ConstantRange CR
= getSignedRange(X
);
1983 unsigned TruncBits
= getTypeSizeInBits(ST
->getType());
1984 unsigned NewBits
= getTypeSizeInBits(Ty
);
1985 if (CR
.truncate(TruncBits
).signExtend(NewBits
).contains(
1986 CR
.sextOrTrunc(NewBits
)))
1987 return getTruncateOrSignExtend(X
, Ty
, Depth
);
1990 if (auto *SA
= dyn_cast
<SCEVAddExpr
>(Op
)) {
1991 // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw>
1992 if (SA
->hasNoSignedWrap()) {
1993 // If the addition does not sign overflow then we can, by definition,
1994 // commute the sign extension with the addition operation.
1995 SmallVector
<const SCEV
*, 4> Ops
;
1996 for (const auto *Op
: SA
->operands())
1997 Ops
.push_back(getSignExtendExpr(Op
, Ty
, Depth
+ 1));
1998 return getAddExpr(Ops
, SCEV::FlagNSW
, Depth
+ 1);
2001 // sext(C + x + y + ...) --> (sext(D) + sext((C - D) + x + y + ...))
2002 // if D + (C - D + x + y + ...) could be proven to not signed wrap
2003 // where D maximizes the number of trailing zeros of (C - D + x + y + ...)
2005 // For instance, this will bring two seemingly different expressions:
2006 // 1 + sext(5 + 20 * %x + 24 * %y) and
2007 // sext(6 + 20 * %x + 24 * %y)
2008 // to the same form:
2009 // 2 + sext(4 + 20 * %x + 24 * %y)
2010 if (const auto *SC
= dyn_cast
<SCEVConstant
>(SA
->getOperand(0))) {
2011 const APInt
&D
= extractConstantWithoutWrapping(*this, SC
, SA
);
2013 const SCEV
*SSExtD
= getSignExtendExpr(getConstant(D
), Ty
, Depth
);
2014 const SCEV
*SResidual
=
2015 getAddExpr(getConstant(-D
), SA
, SCEV::FlagAnyWrap
, Depth
);
2016 const SCEV
*SSExtR
= getSignExtendExpr(SResidual
, Ty
, Depth
+ 1);
2017 return getAddExpr(SSExtD
, SSExtR
,
2018 (SCEV::NoWrapFlags
)(SCEV::FlagNSW
| SCEV::FlagNUW
),
2023 // If the input value is a chrec scev, and we can prove that the value
2024 // did not overflow the old, smaller, value, we can sign extend all of the
2025 // operands (often constants). This allows analysis of something like
2026 // this: for (signed char X = 0; X < 100; ++X) { int Y = X; }
2027 if (const SCEVAddRecExpr
*AR
= dyn_cast
<SCEVAddRecExpr
>(Op
))
2028 if (AR
->isAffine()) {
2029 const SCEV
*Start
= AR
->getStart();
2030 const SCEV
*Step
= AR
->getStepRecurrence(*this);
2031 unsigned BitWidth
= getTypeSizeInBits(AR
->getType());
2032 const Loop
*L
= AR
->getLoop();
2034 if (!AR
->hasNoSignedWrap()) {
2035 auto NewFlags
= proveNoWrapViaConstantRanges(AR
);
2036 const_cast<SCEVAddRecExpr
*>(AR
)->setNoWrapFlags(NewFlags
);
2039 // If we have special knowledge that this addrec won't overflow,
2040 // we don't need to do any further analysis.
2041 if (AR
->hasNoSignedWrap())
2042 return getAddRecExpr(
2043 getExtendAddRecStart
<SCEVSignExtendExpr
>(AR
, Ty
, this, Depth
+ 1),
2044 getSignExtendExpr(Step
, Ty
, Depth
+ 1), L
, SCEV::FlagNSW
);
2046 // Check whether the backedge-taken count is SCEVCouldNotCompute.
2047 // Note that this serves two purposes: It filters out loops that are
2048 // simply not analyzable, and it covers the case where this code is
2049 // being called from within backedge-taken count analysis, such that
2050 // attempting to ask for the backedge-taken count would likely result
2051 // in infinite recursion. In the later case, the analysis code will
2052 // cope with a conservative value, and it will take care to purge
2053 // that value once it has finished.
2054 const SCEV
*MaxBECount
= getMaxBackedgeTakenCount(L
);
2055 if (!isa
<SCEVCouldNotCompute
>(MaxBECount
)) {
2056 // Manually compute the final value for AR, checking for
2059 // Check whether the backedge-taken count can be losslessly casted to
2060 // the addrec's type. The count is always unsigned.
2061 const SCEV
*CastedMaxBECount
=
2062 getTruncateOrZeroExtend(MaxBECount
, Start
->getType(), Depth
);
2063 const SCEV
*RecastedMaxBECount
= getTruncateOrZeroExtend(
2064 CastedMaxBECount
, MaxBECount
->getType(), Depth
);
2065 if (MaxBECount
== RecastedMaxBECount
) {
2066 Type
*WideTy
= IntegerType::get(getContext(), BitWidth
* 2);
2067 // Check whether Start+Step*MaxBECount has no signed overflow.
2068 const SCEV
*SMul
= getMulExpr(CastedMaxBECount
, Step
,
2069 SCEV::FlagAnyWrap
, Depth
+ 1);
2070 const SCEV
*SAdd
= getSignExtendExpr(getAddExpr(Start
, SMul
,
2074 const SCEV
*WideStart
= getSignExtendExpr(Start
, WideTy
, Depth
+ 1);
2075 const SCEV
*WideMaxBECount
=
2076 getZeroExtendExpr(CastedMaxBECount
, WideTy
, Depth
+ 1);
2077 const SCEV
*OperandExtendedAdd
=
2078 getAddExpr(WideStart
,
2079 getMulExpr(WideMaxBECount
,
2080 getSignExtendExpr(Step
, WideTy
, Depth
+ 1),
2081 SCEV::FlagAnyWrap
, Depth
+ 1),
2082 SCEV::FlagAnyWrap
, Depth
+ 1);
2083 if (SAdd
== OperandExtendedAdd
) {
2084 // Cache knowledge of AR NSW, which is propagated to this AddRec.
2085 const_cast<SCEVAddRecExpr
*>(AR
)->setNoWrapFlags(SCEV::FlagNSW
);
2086 // Return the expression with the addrec on the outside.
2087 return getAddRecExpr(
2088 getExtendAddRecStart
<SCEVSignExtendExpr
>(AR
, Ty
, this,
2090 getSignExtendExpr(Step
, Ty
, Depth
+ 1), L
,
2091 AR
->getNoWrapFlags());
2093 // Similar to above, only this time treat the step value as unsigned.
2094 // This covers loops that count up with an unsigned step.
2095 OperandExtendedAdd
=
2096 getAddExpr(WideStart
,
2097 getMulExpr(WideMaxBECount
,
2098 getZeroExtendExpr(Step
, WideTy
, Depth
+ 1),
2099 SCEV::FlagAnyWrap
, Depth
+ 1),
2100 SCEV::FlagAnyWrap
, Depth
+ 1);
2101 if (SAdd
== OperandExtendedAdd
) {
2102 // If AR wraps around then
2104 // abs(Step) * MaxBECount > unsigned-max(AR->getType())
2105 // => SAdd != OperandExtendedAdd
2107 // Thus (AR is not NW => SAdd != OperandExtendedAdd) <=>
2108 // (SAdd == OperandExtendedAdd => AR is NW)
2110 const_cast<SCEVAddRecExpr
*>(AR
)->setNoWrapFlags(SCEV::FlagNW
);
2112 // Return the expression with the addrec on the outside.
2113 return getAddRecExpr(
2114 getExtendAddRecStart
<SCEVSignExtendExpr
>(AR
, Ty
, this,
2116 getZeroExtendExpr(Step
, Ty
, Depth
+ 1), L
,
2117 AR
->getNoWrapFlags());
2122 // Normally, in the cases we can prove no-overflow via a
2123 // backedge guarding condition, we can also compute a backedge
2124 // taken count for the loop. The exceptions are assumptions and
2125 // guards present in the loop -- SCEV is not great at exploiting
2126 // these to compute max backedge taken counts, but can still use
2127 // these to prove lack of overflow. Use this fact to avoid
2128 // doing extra work that may not pay off.
2130 if (!isa
<SCEVCouldNotCompute
>(MaxBECount
) || HasGuards
||
2131 !AC
.assumptions().empty()) {
2132 // If the backedge is guarded by a comparison with the pre-inc
2133 // value the addrec is safe. Also, if the entry is guarded by
2134 // a comparison with the start value and the backedge is
2135 // guarded by a comparison with the post-inc value, the addrec
2137 ICmpInst::Predicate Pred
;
2138 const SCEV
*OverflowLimit
=
2139 getSignedOverflowLimitForStep(Step
, &Pred
, this);
2140 if (OverflowLimit
&&
2141 (isLoopBackedgeGuardedByCond(L
, Pred
, AR
, OverflowLimit
) ||
2142 isKnownOnEveryIteration(Pred
, AR
, OverflowLimit
))) {
2143 // Cache knowledge of AR NSW, then propagate NSW to the wide AddRec.
2144 const_cast<SCEVAddRecExpr
*>(AR
)->setNoWrapFlags(SCEV::FlagNSW
);
2145 return getAddRecExpr(
2146 getExtendAddRecStart
<SCEVSignExtendExpr
>(AR
, Ty
, this, Depth
+ 1),
2147 getSignExtendExpr(Step
, Ty
, Depth
+ 1), L
, AR
->getNoWrapFlags());
2151 // sext({C,+,Step}) --> (sext(D) + sext({C-D,+,Step}))<nuw><nsw>
2152 // if D + (C - D + Step * n) could be proven to not signed wrap
2153 // where D maximizes the number of trailing zeros of (C - D + Step * n)
2154 if (const auto *SC
= dyn_cast
<SCEVConstant
>(Start
)) {
2155 const APInt
&C
= SC
->getAPInt();
2156 const APInt
&D
= extractConstantWithoutWrapping(*this, C
, Step
);
2158 const SCEV
*SSExtD
= getSignExtendExpr(getConstant(D
), Ty
, Depth
);
2159 const SCEV
*SResidual
=
2160 getAddRecExpr(getConstant(C
- D
), Step
, L
, AR
->getNoWrapFlags());
2161 const SCEV
*SSExtR
= getSignExtendExpr(SResidual
, Ty
, Depth
+ 1);
2162 return getAddExpr(SSExtD
, SSExtR
,
2163 (SCEV::NoWrapFlags
)(SCEV::FlagNSW
| SCEV::FlagNUW
),
2168 if (proveNoWrapByVaryingStart
<SCEVSignExtendExpr
>(Start
, Step
, L
)) {
2169 const_cast<SCEVAddRecExpr
*>(AR
)->setNoWrapFlags(SCEV::FlagNSW
);
2170 return getAddRecExpr(
2171 getExtendAddRecStart
<SCEVSignExtendExpr
>(AR
, Ty
, this, Depth
+ 1),
2172 getSignExtendExpr(Step
, Ty
, Depth
+ 1), L
, AR
->getNoWrapFlags());
2176 // If the input value is provably positive and we could not simplify
2177 // away the sext build a zext instead.
2178 if (isKnownNonNegative(Op
))
2179 return getZeroExtendExpr(Op
, Ty
, Depth
+ 1);
2181 // The cast wasn't folded; create an explicit cast node.
2182 // Recompute the insert position, as it may have been invalidated.
2183 if (const SCEV
*S
= UniqueSCEVs
.FindNodeOrInsertPos(ID
, IP
)) return S
;
2184 SCEV
*S
= new (SCEVAllocator
) SCEVSignExtendExpr(ID
.Intern(SCEVAllocator
),
2186 UniqueSCEVs
.InsertNode(S
, IP
);
2187 addToLoopUseLists(S
);
2191 /// getAnyExtendExpr - Return a SCEV for the given operand extended with
2192 /// unspecified bits out to the given type.
2193 const SCEV
*ScalarEvolution::getAnyExtendExpr(const SCEV
*Op
,
2195 assert(getTypeSizeInBits(Op
->getType()) < getTypeSizeInBits(Ty
) &&
2196 "This is not an extending conversion!");
2197 assert(isSCEVable(Ty
) &&
2198 "This is not a conversion to a SCEVable type!");
2199 Ty
= getEffectiveSCEVType(Ty
);
2201 // Sign-extend negative constants.
2202 if (const SCEVConstant
*SC
= dyn_cast
<SCEVConstant
>(Op
))
2203 if (SC
->getAPInt().isNegative())
2204 return getSignExtendExpr(Op
, Ty
);
2206 // Peel off a truncate cast.
2207 if (const SCEVTruncateExpr
*T
= dyn_cast
<SCEVTruncateExpr
>(Op
)) {
2208 const SCEV
*NewOp
= T
->getOperand();
2209 if (getTypeSizeInBits(NewOp
->getType()) < getTypeSizeInBits(Ty
))
2210 return getAnyExtendExpr(NewOp
, Ty
);
2211 return getTruncateOrNoop(NewOp
, Ty
);
2214 // Next try a zext cast. If the cast is folded, use it.
2215 const SCEV
*ZExt
= getZeroExtendExpr(Op
, Ty
);
2216 if (!isa
<SCEVZeroExtendExpr
>(ZExt
))
2219 // Next try a sext cast. If the cast is folded, use it.
2220 const SCEV
*SExt
= getSignExtendExpr(Op
, Ty
);
2221 if (!isa
<SCEVSignExtendExpr
>(SExt
))
2224 // Force the cast to be folded into the operands of an addrec.
2225 if (const SCEVAddRecExpr
*AR
= dyn_cast
<SCEVAddRecExpr
>(Op
)) {
2226 SmallVector
<const SCEV
*, 4> Ops
;
2227 for (const SCEV
*Op
: AR
->operands())
2228 Ops
.push_back(getAnyExtendExpr(Op
, Ty
));
2229 return getAddRecExpr(Ops
, AR
->getLoop(), SCEV::FlagNW
);
2232 // If the expression is obviously signed, use the sext cast value.
2233 if (isa
<SCEVSMaxExpr
>(Op
))
2236 // Absent any other information, use the zext cast value.
2240 /// Process the given Ops list, which is a list of operands to be added under
2241 /// the given scale, update the given map. This is a helper function for
2242 /// getAddRecExpr. As an example of what it does, given a sequence of operands
2243 /// that would form an add expression like this:
2245 /// m + n + 13 + (A * (o + p + (B * (q + m + 29)))) + r + (-1 * r)
2247 /// where A and B are constants, update the map with these values:
2249 /// (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
2251 /// and add 13 + A*B*29 to AccumulatedConstant.
2252 /// This will allow getAddRecExpr to produce this:
2254 /// 13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
2256 /// This form often exposes folding opportunities that are hidden in
2257 /// the original operand list.
2259 /// Return true iff it appears that any interesting folding opportunities
2260 /// may be exposed. This helps getAddRecExpr short-circuit extra work in
2261 /// the common case where no interesting opportunities are present, and
2262 /// is also used as a check to avoid infinite recursion.
2264 CollectAddOperandsWithScales(DenseMap
<const SCEV
*, APInt
> &M
,
2265 SmallVectorImpl
<const SCEV
*> &NewOps
,
2266 APInt
&AccumulatedConstant
,
2267 const SCEV
*const *Ops
, size_t NumOperands
,
2269 ScalarEvolution
&SE
) {
2270 bool Interesting
= false;
2272 // Iterate over the add operands. They are sorted, with constants first.
2274 while (const SCEVConstant
*C
= dyn_cast
<SCEVConstant
>(Ops
[i
])) {
2276 // Pull a buried constant out to the outside.
2277 if (Scale
!= 1 || AccumulatedConstant
!= 0 || C
->getValue()->isZero())
2279 AccumulatedConstant
+= Scale
* C
->getAPInt();
2282 // Next comes everything else. We're especially interested in multiplies
2283 // here, but they're in the middle, so just visit the rest with one loop.
2284 for (; i
!= NumOperands
; ++i
) {
2285 const SCEVMulExpr
*Mul
= dyn_cast
<SCEVMulExpr
>(Ops
[i
]);
2286 if (Mul
&& isa
<SCEVConstant
>(Mul
->getOperand(0))) {
2288 Scale
* cast
<SCEVConstant
>(Mul
->getOperand(0))->getAPInt();
2289 if (Mul
->getNumOperands() == 2 && isa
<SCEVAddExpr
>(Mul
->getOperand(1))) {
2290 // A multiplication of a constant with another add; recurse.
2291 const SCEVAddExpr
*Add
= cast
<SCEVAddExpr
>(Mul
->getOperand(1));
2293 CollectAddOperandsWithScales(M
, NewOps
, AccumulatedConstant
,
2294 Add
->op_begin(), Add
->getNumOperands(),
2297 // A multiplication of a constant with some other value. Update
2299 SmallVector
<const SCEV
*, 4> MulOps(Mul
->op_begin()+1, Mul
->op_end());
2300 const SCEV
*Key
= SE
.getMulExpr(MulOps
);
2301 auto Pair
= M
.insert({Key
, NewScale
});
2303 NewOps
.push_back(Pair
.first
->first
);
2305 Pair
.first
->second
+= NewScale
;
2306 // The map already had an entry for this value, which may indicate
2307 // a folding opportunity.
2312 // An ordinary operand. Update the map.
2313 std::pair
<DenseMap
<const SCEV
*, APInt
>::iterator
, bool> Pair
=
2314 M
.insert({Ops
[i
], Scale
});
2316 NewOps
.push_back(Pair
.first
->first
);
2318 Pair
.first
->second
+= Scale
;
2319 // The map already had an entry for this value, which may indicate
2320 // a folding opportunity.
2329 // We're trying to construct a SCEV of type `Type' with `Ops' as operands and
2330 // `OldFlags' as can't-wrap behavior. Infer a more aggressive set of
2331 // can't-overflow flags for the operation if possible.
2332 static SCEV::NoWrapFlags
2333 StrengthenNoWrapFlags(ScalarEvolution
*SE
, SCEVTypes Type
,
2334 const ArrayRef
<const SCEV
*> Ops
,
2335 SCEV::NoWrapFlags Flags
) {
2336 using namespace std::placeholders
;
2338 using OBO
= OverflowingBinaryOperator
;
2341 Type
== scAddExpr
|| Type
== scAddRecExpr
|| Type
== scMulExpr
;
2343 assert(CanAnalyze
&& "don't call from other places!");
2345 int SignOrUnsignMask
= SCEV::FlagNUW
| SCEV::FlagNSW
;
2346 SCEV::NoWrapFlags SignOrUnsignWrap
=
2347 ScalarEvolution::maskFlags(Flags
, SignOrUnsignMask
);
2349 // If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
2350 auto IsKnownNonNegative
= [&](const SCEV
*S
) {
2351 return SE
->isKnownNonNegative(S
);
2354 if (SignOrUnsignWrap
== SCEV::FlagNSW
&& all_of(Ops
, IsKnownNonNegative
))
2356 ScalarEvolution::setFlags(Flags
, (SCEV::NoWrapFlags
)SignOrUnsignMask
);
2358 SignOrUnsignWrap
= ScalarEvolution::maskFlags(Flags
, SignOrUnsignMask
);
2360 if (SignOrUnsignWrap
!= SignOrUnsignMask
&&
2361 (Type
== scAddExpr
|| Type
== scMulExpr
) && Ops
.size() == 2 &&
2362 isa
<SCEVConstant
>(Ops
[0])) {
2367 return Instruction::Add
;
2369 return Instruction::Mul
;
2371 llvm_unreachable("Unexpected SCEV op.");
2375 const APInt
&C
= cast
<SCEVConstant
>(Ops
[0])->getAPInt();
2377 // (A <opcode> C) --> (A <opcode> C)<nsw> if the op doesn't sign overflow.
2378 if (!(SignOrUnsignWrap
& SCEV::FlagNSW
)) {
2379 auto NSWRegion
= ConstantRange::makeGuaranteedNoWrapRegion(
2380 Opcode
, C
, OBO::NoSignedWrap
);
2381 if (NSWRegion
.contains(SE
->getSignedRange(Ops
[1])))
2382 Flags
= ScalarEvolution::setFlags(Flags
, SCEV::FlagNSW
);
2385 // (A <opcode> C) --> (A <opcode> C)<nuw> if the op doesn't unsign overflow.
2386 if (!(SignOrUnsignWrap
& SCEV::FlagNUW
)) {
2387 auto NUWRegion
= ConstantRange::makeGuaranteedNoWrapRegion(
2388 Opcode
, C
, OBO::NoUnsignedWrap
);
2389 if (NUWRegion
.contains(SE
->getUnsignedRange(Ops
[1])))
2390 Flags
= ScalarEvolution::setFlags(Flags
, SCEV::FlagNUW
);
2397 bool ScalarEvolution::isAvailableAtLoopEntry(const SCEV
*S
, const Loop
*L
) {
2398 return isLoopInvariant(S
, L
) && properlyDominates(S
, L
->getHeader());
2401 /// Get a canonical add expression, or something simpler if possible.
2402 const SCEV
*ScalarEvolution::getAddExpr(SmallVectorImpl
<const SCEV
*> &Ops
,
2403 SCEV::NoWrapFlags Flags
,
2405 assert(!(Flags
& ~(SCEV::FlagNUW
| SCEV::FlagNSW
)) &&
2406 "only nuw or nsw allowed");
2407 assert(!Ops
.empty() && "Cannot get empty add!");
2408 if (Ops
.size() == 1) return Ops
[0];
2410 Type
*ETy
= getEffectiveSCEVType(Ops
[0]->getType());
2411 for (unsigned i
= 1, e
= Ops
.size(); i
!= e
; ++i
)
2412 assert(getEffectiveSCEVType(Ops
[i
]->getType()) == ETy
&&
2413 "SCEVAddExpr operand types don't match!");
2416 // Sort by complexity, this groups all similar expression types together.
2417 GroupByComplexity(Ops
, &LI
, DT
);
2419 Flags
= StrengthenNoWrapFlags(this, scAddExpr
, Ops
, Flags
);
2421 // If there are any constants, fold them together.
2423 if (const SCEVConstant
*LHSC
= dyn_cast
<SCEVConstant
>(Ops
[0])) {
2425 assert(Idx
< Ops
.size());
2426 while (const SCEVConstant
*RHSC
= dyn_cast
<SCEVConstant
>(Ops
[Idx
])) {
2427 // We found two constants, fold them together!
2428 Ops
[0] = getConstant(LHSC
->getAPInt() + RHSC
->getAPInt());
2429 if (Ops
.size() == 2) return Ops
[0];
2430 Ops
.erase(Ops
.begin()+1); // Erase the folded element
2431 LHSC
= cast
<SCEVConstant
>(Ops
[0]);
2434 // If we are left with a constant zero being added, strip it off.
2435 if (LHSC
->getValue()->isZero()) {
2436 Ops
.erase(Ops
.begin());
2440 if (Ops
.size() == 1) return Ops
[0];
2443 // Limit recursion calls depth.
2444 if (Depth
> MaxArithDepth
|| hasHugeExpression(Ops
))
2445 return getOrCreateAddExpr(Ops
, Flags
);
2447 // Okay, check to see if the same value occurs in the operand list more than
2448 // once. If so, merge them together into an multiply expression. Since we
2449 // sorted the list, these values are required to be adjacent.
2450 Type
*Ty
= Ops
[0]->getType();
2451 bool FoundMatch
= false;
2452 for (unsigned i
= 0, e
= Ops
.size(); i
!= e
-1; ++i
)
2453 if (Ops
[i
] == Ops
[i
+1]) { // X + Y + Y --> X + Y*2
2454 // Scan ahead to count how many equal operands there are.
2456 while (i
+Count
!= e
&& Ops
[i
+Count
] == Ops
[i
])
2458 // Merge the values into a multiply.
2459 const SCEV
*Scale
= getConstant(Ty
, Count
);
2460 const SCEV
*Mul
= getMulExpr(Scale
, Ops
[i
], SCEV::FlagAnyWrap
, Depth
+ 1);
2461 if (Ops
.size() == Count
)
2464 Ops
.erase(Ops
.begin()+i
+1, Ops
.begin()+i
+Count
);
2465 --i
; e
-= Count
- 1;
2469 return getAddExpr(Ops
, Flags
, Depth
+ 1);
2471 // Check for truncates. If all the operands are truncated from the same
2472 // type, see if factoring out the truncate would permit the result to be
2473 // folded. eg., n*trunc(x) + m*trunc(y) --> trunc(trunc(m)*x + trunc(n)*y)
2474 // if the contents of the resulting outer trunc fold to something simple.
2475 auto FindTruncSrcType
= [&]() -> Type
* {
2476 // We're ultimately looking to fold an addrec of truncs and muls of only
2477 // constants and truncs, so if we find any other types of SCEV
2478 // as operands of the addrec then we bail and return nullptr here.
2479 // Otherwise, we return the type of the operand of a trunc that we find.
2480 if (auto *T
= dyn_cast
<SCEVTruncateExpr
>(Ops
[Idx
]))
2481 return T
->getOperand()->getType();
2482 if (const auto *Mul
= dyn_cast
<SCEVMulExpr
>(Ops
[Idx
])) {
2483 const auto *LastOp
= Mul
->getOperand(Mul
->getNumOperands() - 1);
2484 if (const auto *T
= dyn_cast
<SCEVTruncateExpr
>(LastOp
))
2485 return T
->getOperand()->getType();
2489 if (auto *SrcType
= FindTruncSrcType()) {
2490 SmallVector
<const SCEV
*, 8> LargeOps
;
2492 // Check all the operands to see if they can be represented in the
2493 // source type of the truncate.
2494 for (unsigned i
= 0, e
= Ops
.size(); i
!= e
; ++i
) {
2495 if (const SCEVTruncateExpr
*T
= dyn_cast
<SCEVTruncateExpr
>(Ops
[i
])) {
2496 if (T
->getOperand()->getType() != SrcType
) {
2500 LargeOps
.push_back(T
->getOperand());
2501 } else if (const SCEVConstant
*C
= dyn_cast
<SCEVConstant
>(Ops
[i
])) {
2502 LargeOps
.push_back(getAnyExtendExpr(C
, SrcType
));
2503 } else if (const SCEVMulExpr
*M
= dyn_cast
<SCEVMulExpr
>(Ops
[i
])) {
2504 SmallVector
<const SCEV
*, 8> LargeMulOps
;
2505 for (unsigned j
= 0, f
= M
->getNumOperands(); j
!= f
&& Ok
; ++j
) {
2506 if (const SCEVTruncateExpr
*T
=
2507 dyn_cast
<SCEVTruncateExpr
>(M
->getOperand(j
))) {
2508 if (T
->getOperand()->getType() != SrcType
) {
2512 LargeMulOps
.push_back(T
->getOperand());
2513 } else if (const auto *C
= dyn_cast
<SCEVConstant
>(M
->getOperand(j
))) {
2514 LargeMulOps
.push_back(getAnyExtendExpr(C
, SrcType
));
2521 LargeOps
.push_back(getMulExpr(LargeMulOps
, SCEV::FlagAnyWrap
, Depth
+ 1));
2528 // Evaluate the expression in the larger type.
2529 const SCEV
*Fold
= getAddExpr(LargeOps
, SCEV::FlagAnyWrap
, Depth
+ 1);
2530 // If it folds to something simple, use it. Otherwise, don't.
2531 if (isa
<SCEVConstant
>(Fold
) || isa
<SCEVUnknown
>(Fold
))
2532 return getTruncateExpr(Fold
, Ty
);
2536 // Skip past any other cast SCEVs.
2537 while (Idx
< Ops
.size() && Ops
[Idx
]->getSCEVType() < scAddExpr
)
2540 // If there are add operands they would be next.
2541 if (Idx
< Ops
.size()) {
2542 bool DeletedAdd
= false;
2543 while (const SCEVAddExpr
*Add
= dyn_cast
<SCEVAddExpr
>(Ops
[Idx
])) {
2544 if (Ops
.size() > AddOpsInlineThreshold
||
2545 Add
->getNumOperands() > AddOpsInlineThreshold
)
2547 // If we have an add, expand the add operands onto the end of the operands
2549 Ops
.erase(Ops
.begin()+Idx
);
2550 Ops
.append(Add
->op_begin(), Add
->op_end());
2554 // If we deleted at least one add, we added operands to the end of the list,
2555 // and they are not necessarily sorted. Recurse to resort and resimplify
2556 // any operands we just acquired.
2558 return getAddExpr(Ops
, SCEV::FlagAnyWrap
, Depth
+ 1);
2561 // Skip over the add expression until we get to a multiply.
2562 while (Idx
< Ops
.size() && Ops
[Idx
]->getSCEVType() < scMulExpr
)
2565 // Check to see if there are any folding opportunities present with
2566 // operands multiplied by constant values.
2567 if (Idx
< Ops
.size() && isa
<SCEVMulExpr
>(Ops
[Idx
])) {
2568 uint64_t BitWidth
= getTypeSizeInBits(Ty
);
2569 DenseMap
<const SCEV
*, APInt
> M
;
2570 SmallVector
<const SCEV
*, 8> NewOps
;
2571 APInt
AccumulatedConstant(BitWidth
, 0);
2572 if (CollectAddOperandsWithScales(M
, NewOps
, AccumulatedConstant
,
2573 Ops
.data(), Ops
.size(),
2574 APInt(BitWidth
, 1), *this)) {
2575 struct APIntCompare
{
2576 bool operator()(const APInt
&LHS
, const APInt
&RHS
) const {
2577 return LHS
.ult(RHS
);
2581 // Some interesting folding opportunity is present, so its worthwhile to
2582 // re-generate the operands list. Group the operands by constant scale,
2583 // to avoid multiplying by the same constant scale multiple times.
2584 std::map
<APInt
, SmallVector
<const SCEV
*, 4>, APIntCompare
> MulOpLists
;
2585 for (const SCEV
*NewOp
: NewOps
)
2586 MulOpLists
[M
.find(NewOp
)->second
].push_back(NewOp
);
2587 // Re-generate the operands list.
2589 if (AccumulatedConstant
!= 0)
2590 Ops
.push_back(getConstant(AccumulatedConstant
));
2591 for (auto &MulOp
: MulOpLists
)
2592 if (MulOp
.first
!= 0)
2593 Ops
.push_back(getMulExpr(
2594 getConstant(MulOp
.first
),
2595 getAddExpr(MulOp
.second
, SCEV::FlagAnyWrap
, Depth
+ 1),
2596 SCEV::FlagAnyWrap
, Depth
+ 1));
2599 if (Ops
.size() == 1)
2601 return getAddExpr(Ops
, SCEV::FlagAnyWrap
, Depth
+ 1);
2605 // If we are adding something to a multiply expression, make sure the
2606 // something is not already an operand of the multiply. If so, merge it into
2608 for (; Idx
< Ops
.size() && isa
<SCEVMulExpr
>(Ops
[Idx
]); ++Idx
) {
2609 const SCEVMulExpr
*Mul
= cast
<SCEVMulExpr
>(Ops
[Idx
]);
2610 for (unsigned MulOp
= 0, e
= Mul
->getNumOperands(); MulOp
!= e
; ++MulOp
) {
2611 const SCEV
*MulOpSCEV
= Mul
->getOperand(MulOp
);
2612 if (isa
<SCEVConstant
>(MulOpSCEV
))
2614 for (unsigned AddOp
= 0, e
= Ops
.size(); AddOp
!= e
; ++AddOp
)
2615 if (MulOpSCEV
== Ops
[AddOp
]) {
2616 // Fold W + X + (X * Y * Z) --> W + (X * ((Y*Z)+1))
2617 const SCEV
*InnerMul
= Mul
->getOperand(MulOp
== 0);
2618 if (Mul
->getNumOperands() != 2) {
2619 // If the multiply has more than two operands, we must get the
2621 SmallVector
<const SCEV
*, 4> MulOps(Mul
->op_begin(),
2622 Mul
->op_begin()+MulOp
);
2623 MulOps
.append(Mul
->op_begin()+MulOp
+1, Mul
->op_end());
2624 InnerMul
= getMulExpr(MulOps
, SCEV::FlagAnyWrap
, Depth
+ 1);
2626 SmallVector
<const SCEV
*, 2> TwoOps
= {getOne(Ty
), InnerMul
};
2627 const SCEV
*AddOne
= getAddExpr(TwoOps
, SCEV::FlagAnyWrap
, Depth
+ 1);
2628 const SCEV
*OuterMul
= getMulExpr(AddOne
, MulOpSCEV
,
2629 SCEV::FlagAnyWrap
, Depth
+ 1);
2630 if (Ops
.size() == 2) return OuterMul
;
2632 Ops
.erase(Ops
.begin()+AddOp
);
2633 Ops
.erase(Ops
.begin()+Idx
-1);
2635 Ops
.erase(Ops
.begin()+Idx
);
2636 Ops
.erase(Ops
.begin()+AddOp
-1);
2638 Ops
.push_back(OuterMul
);
2639 return getAddExpr(Ops
, SCEV::FlagAnyWrap
, Depth
+ 1);
2642 // Check this multiply against other multiplies being added together.
2643 for (unsigned OtherMulIdx
= Idx
+1;
2644 OtherMulIdx
< Ops
.size() && isa
<SCEVMulExpr
>(Ops
[OtherMulIdx
]);
2646 const SCEVMulExpr
*OtherMul
= cast
<SCEVMulExpr
>(Ops
[OtherMulIdx
]);
2647 // If MulOp occurs in OtherMul, we can fold the two multiplies
2649 for (unsigned OMulOp
= 0, e
= OtherMul
->getNumOperands();
2650 OMulOp
!= e
; ++OMulOp
)
2651 if (OtherMul
->getOperand(OMulOp
) == MulOpSCEV
) {
2652 // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E))
2653 const SCEV
*InnerMul1
= Mul
->getOperand(MulOp
== 0);
2654 if (Mul
->getNumOperands() != 2) {
2655 SmallVector
<const SCEV
*, 4> MulOps(Mul
->op_begin(),
2656 Mul
->op_begin()+MulOp
);
2657 MulOps
.append(Mul
->op_begin()+MulOp
+1, Mul
->op_end());
2658 InnerMul1
= getMulExpr(MulOps
, SCEV::FlagAnyWrap
, Depth
+ 1);
2660 const SCEV
*InnerMul2
= OtherMul
->getOperand(OMulOp
== 0);
2661 if (OtherMul
->getNumOperands() != 2) {
2662 SmallVector
<const SCEV
*, 4> MulOps(OtherMul
->op_begin(),
2663 OtherMul
->op_begin()+OMulOp
);
2664 MulOps
.append(OtherMul
->op_begin()+OMulOp
+1, OtherMul
->op_end());
2665 InnerMul2
= getMulExpr(MulOps
, SCEV::FlagAnyWrap
, Depth
+ 1);
2667 SmallVector
<const SCEV
*, 2> TwoOps
= {InnerMul1
, InnerMul2
};
2668 const SCEV
*InnerMulSum
=
2669 getAddExpr(TwoOps
, SCEV::FlagAnyWrap
, Depth
+ 1);
2670 const SCEV
*OuterMul
= getMulExpr(MulOpSCEV
, InnerMulSum
,
2671 SCEV::FlagAnyWrap
, Depth
+ 1);
2672 if (Ops
.size() == 2) return OuterMul
;
2673 Ops
.erase(Ops
.begin()+Idx
);
2674 Ops
.erase(Ops
.begin()+OtherMulIdx
-1);
2675 Ops
.push_back(OuterMul
);
2676 return getAddExpr(Ops
, SCEV::FlagAnyWrap
, Depth
+ 1);
2682 // If there are any add recurrences in the operands list, see if any other
2683 // added values are loop invariant. If so, we can fold them into the
2685 while (Idx
< Ops
.size() && Ops
[Idx
]->getSCEVType() < scAddRecExpr
)
2688 // Scan over all recurrences, trying to fold loop invariants into them.
2689 for (; Idx
< Ops
.size() && isa
<SCEVAddRecExpr
>(Ops
[Idx
]); ++Idx
) {
2690 // Scan all of the other operands to this add and add them to the vector if
2691 // they are loop invariant w.r.t. the recurrence.
2692 SmallVector
<const SCEV
*, 8> LIOps
;
2693 const SCEVAddRecExpr
*AddRec
= cast
<SCEVAddRecExpr
>(Ops
[Idx
]);
2694 const Loop
*AddRecLoop
= AddRec
->getLoop();
2695 for (unsigned i
= 0, e
= Ops
.size(); i
!= e
; ++i
)
2696 if (isAvailableAtLoopEntry(Ops
[i
], AddRecLoop
)) {
2697 LIOps
.push_back(Ops
[i
]);
2698 Ops
.erase(Ops
.begin()+i
);
2702 // If we found some loop invariants, fold them into the recurrence.
2703 if (!LIOps
.empty()) {
2704 // NLI + LI + {Start,+,Step} --> NLI + {LI+Start,+,Step}
2705 LIOps
.push_back(AddRec
->getStart());
2707 SmallVector
<const SCEV
*, 4> AddRecOps(AddRec
->op_begin(),
2709 // This follows from the fact that the no-wrap flags on the outer add
2710 // expression are applicable on the 0th iteration, when the add recurrence
2711 // will be equal to its start value.
2712 AddRecOps
[0] = getAddExpr(LIOps
, Flags
, Depth
+ 1);
2714 // Build the new addrec. Propagate the NUW and NSW flags if both the
2715 // outer add and the inner addrec are guaranteed to have no overflow.
2716 // Always propagate NW.
2717 Flags
= AddRec
->getNoWrapFlags(setFlags(Flags
, SCEV::FlagNW
));
2718 const SCEV
*NewRec
= getAddRecExpr(AddRecOps
, AddRecLoop
, Flags
);
2720 // If all of the other operands were loop invariant, we are done.
2721 if (Ops
.size() == 1) return NewRec
;
2723 // Otherwise, add the folded AddRec by the non-invariant parts.
2724 for (unsigned i
= 0;; ++i
)
2725 if (Ops
[i
] == AddRec
) {
2729 return getAddExpr(Ops
, SCEV::FlagAnyWrap
, Depth
+ 1);
2732 // Okay, if there weren't any loop invariants to be folded, check to see if
2733 // there are multiple AddRec's with the same loop induction variable being
2734 // added together. If so, we can fold them.
2735 for (unsigned OtherIdx
= Idx
+1;
2736 OtherIdx
< Ops
.size() && isa
<SCEVAddRecExpr
>(Ops
[OtherIdx
]);
2738 // We expect the AddRecExpr's to be sorted in reverse dominance order,
2739 // so that the 1st found AddRecExpr is dominated by all others.
2740 assert(DT
.dominates(
2741 cast
<SCEVAddRecExpr
>(Ops
[OtherIdx
])->getLoop()->getHeader(),
2742 AddRec
->getLoop()->getHeader()) &&
2743 "AddRecExprs are not sorted in reverse dominance order?");
2744 if (AddRecLoop
== cast
<SCEVAddRecExpr
>(Ops
[OtherIdx
])->getLoop()) {
2745 // Other + {A,+,B}<L> + {C,+,D}<L> --> Other + {A+C,+,B+D}<L>
2746 SmallVector
<const SCEV
*, 4> AddRecOps(AddRec
->op_begin(),
2748 for (; OtherIdx
!= Ops
.size() && isa
<SCEVAddRecExpr
>(Ops
[OtherIdx
]);
2750 const auto *OtherAddRec
= cast
<SCEVAddRecExpr
>(Ops
[OtherIdx
]);
2751 if (OtherAddRec
->getLoop() == AddRecLoop
) {
2752 for (unsigned i
= 0, e
= OtherAddRec
->getNumOperands();
2754 if (i
>= AddRecOps
.size()) {
2755 AddRecOps
.append(OtherAddRec
->op_begin()+i
,
2756 OtherAddRec
->op_end());
2759 SmallVector
<const SCEV
*, 2> TwoOps
= {
2760 AddRecOps
[i
], OtherAddRec
->getOperand(i
)};
2761 AddRecOps
[i
] = getAddExpr(TwoOps
, SCEV::FlagAnyWrap
, Depth
+ 1);
2763 Ops
.erase(Ops
.begin() + OtherIdx
); --OtherIdx
;
2766 // Step size has changed, so we cannot guarantee no self-wraparound.
2767 Ops
[Idx
] = getAddRecExpr(AddRecOps
, AddRecLoop
, SCEV::FlagAnyWrap
);
2768 return getAddExpr(Ops
, SCEV::FlagAnyWrap
, Depth
+ 1);
2772 // Otherwise couldn't fold anything into this recurrence. Move onto the
2776 // Okay, it looks like we really DO need an add expr. Check to see if we
2777 // already have one, otherwise create a new one.
2778 return getOrCreateAddExpr(Ops
, Flags
);
2782 ScalarEvolution::getOrCreateAddExpr(ArrayRef
<const SCEV
*> Ops
,
2783 SCEV::NoWrapFlags Flags
) {
2784 FoldingSetNodeID ID
;
2785 ID
.AddInteger(scAddExpr
);
2786 for (const SCEV
*Op
: Ops
)
2790 static_cast<SCEVAddExpr
*>(UniqueSCEVs
.FindNodeOrInsertPos(ID
, IP
));
2792 const SCEV
**O
= SCEVAllocator
.Allocate
<const SCEV
*>(Ops
.size());
2793 std::uninitialized_copy(Ops
.begin(), Ops
.end(), O
);
2794 S
= new (SCEVAllocator
)
2795 SCEVAddExpr(ID
.Intern(SCEVAllocator
), O
, Ops
.size());
2796 UniqueSCEVs
.InsertNode(S
, IP
);
2797 addToLoopUseLists(S
);
2799 S
->setNoWrapFlags(Flags
);
2804 ScalarEvolution::getOrCreateAddRecExpr(ArrayRef
<const SCEV
*> Ops
,
2805 const Loop
*L
, SCEV::NoWrapFlags Flags
) {
2806 FoldingSetNodeID ID
;
2807 ID
.AddInteger(scAddRecExpr
);
2808 for (unsigned i
= 0, e
= Ops
.size(); i
!= e
; ++i
)
2809 ID
.AddPointer(Ops
[i
]);
2813 static_cast<SCEVAddRecExpr
*>(UniqueSCEVs
.FindNodeOrInsertPos(ID
, IP
));
2815 const SCEV
**O
= SCEVAllocator
.Allocate
<const SCEV
*>(Ops
.size());
2816 std::uninitialized_copy(Ops
.begin(), Ops
.end(), O
);
2817 S
= new (SCEVAllocator
)
2818 SCEVAddRecExpr(ID
.Intern(SCEVAllocator
), O
, Ops
.size(), L
);
2819 UniqueSCEVs
.InsertNode(S
, IP
);
2820 addToLoopUseLists(S
);
2822 S
->setNoWrapFlags(Flags
);
2827 ScalarEvolution::getOrCreateMulExpr(ArrayRef
<const SCEV
*> Ops
,
2828 SCEV::NoWrapFlags Flags
) {
2829 FoldingSetNodeID ID
;
2830 ID
.AddInteger(scMulExpr
);
2831 for (unsigned i
= 0, e
= Ops
.size(); i
!= e
; ++i
)
2832 ID
.AddPointer(Ops
[i
]);
2835 static_cast<SCEVMulExpr
*>(UniqueSCEVs
.FindNodeOrInsertPos(ID
, IP
));
2837 const SCEV
**O
= SCEVAllocator
.Allocate
<const SCEV
*>(Ops
.size());
2838 std::uninitialized_copy(Ops
.begin(), Ops
.end(), O
);
2839 S
= new (SCEVAllocator
) SCEVMulExpr(ID
.Intern(SCEVAllocator
),
2841 UniqueSCEVs
.InsertNode(S
, IP
);
2842 addToLoopUseLists(S
);
2844 S
->setNoWrapFlags(Flags
);
2848 static uint64_t umul_ov(uint64_t i
, uint64_t j
, bool &Overflow
) {
2850 if (j
> 1 && k
/ j
!= i
) Overflow
= true;
2854 /// Compute the result of "n choose k", the binomial coefficient. If an
2855 /// intermediate computation overflows, Overflow will be set and the return will
2856 /// be garbage. Overflow is not cleared on absence of overflow.
2857 static uint64_t Choose(uint64_t n
, uint64_t k
, bool &Overflow
) {
2858 // We use the multiplicative formula:
2859 // n(n-1)(n-2)...(n-(k-1)) / k(k-1)(k-2)...1 .
2860 // At each iteration, we take the n-th term of the numeral and divide by the
2861 // (k-n)th term of the denominator. This division will always produce an
2862 // integral result, and helps reduce the chance of overflow in the
2863 // intermediate computations. However, we can still overflow even when the
2864 // final result would fit.
2866 if (n
== 0 || n
== k
) return 1;
2867 if (k
> n
) return 0;
2873 for (uint64_t i
= 1; i
<= k
; ++i
) {
2874 r
= umul_ov(r
, n
-(i
-1), Overflow
);
2880 /// Determine if any of the operands in this SCEV are a constant or if
2881 /// any of the add or multiply expressions in this SCEV contain a constant.
2882 static bool containsConstantInAddMulChain(const SCEV
*StartExpr
) {
2883 struct FindConstantInAddMulChain
{
2884 bool FoundConstant
= false;
2886 bool follow(const SCEV
*S
) {
2887 FoundConstant
|= isa
<SCEVConstant
>(S
);
2888 return isa
<SCEVAddExpr
>(S
) || isa
<SCEVMulExpr
>(S
);
2891 bool isDone() const {
2892 return FoundConstant
;
2896 FindConstantInAddMulChain F
;
2897 SCEVTraversal
<FindConstantInAddMulChain
> ST(F
);
2898 ST
.visitAll(StartExpr
);
2899 return F
.FoundConstant
;
2902 /// Get a canonical multiply expression, or something simpler if possible.
2903 const SCEV
*ScalarEvolution::getMulExpr(SmallVectorImpl
<const SCEV
*> &Ops
,
2904 SCEV::NoWrapFlags Flags
,
2906 assert(Flags
== maskFlags(Flags
, SCEV::FlagNUW
| SCEV::FlagNSW
) &&
2907 "only nuw or nsw allowed");
2908 assert(!Ops
.empty() && "Cannot get empty mul!");
2909 if (Ops
.size() == 1) return Ops
[0];
2911 Type
*ETy
= getEffectiveSCEVType(Ops
[0]->getType());
2912 for (unsigned i
= 1, e
= Ops
.size(); i
!= e
; ++i
)
2913 assert(getEffectiveSCEVType(Ops
[i
]->getType()) == ETy
&&
2914 "SCEVMulExpr operand types don't match!");
2917 // Sort by complexity, this groups all similar expression types together.
2918 GroupByComplexity(Ops
, &LI
, DT
);
2920 Flags
= StrengthenNoWrapFlags(this, scMulExpr
, Ops
, Flags
);
2922 // Limit recursion calls depth.
2923 if (Depth
> MaxArithDepth
|| hasHugeExpression(Ops
))
2924 return getOrCreateMulExpr(Ops
, Flags
);
2926 // If there are any constants, fold them together.
2928 if (const SCEVConstant
*LHSC
= dyn_cast
<SCEVConstant
>(Ops
[0])) {
2930 if (Ops
.size() == 2)
2931 // C1*(C2+V) -> C1*C2 + C1*V
2932 if (const SCEVAddExpr
*Add
= dyn_cast
<SCEVAddExpr
>(Ops
[1]))
2933 // If any of Add's ops are Adds or Muls with a constant, apply this
2934 // transformation as well.
2936 // TODO: There are some cases where this transformation is not
2937 // profitable; for example, Add = (C0 + X) * Y + Z. Maybe the scope of
2938 // this transformation should be narrowed down.
2939 if (Add
->getNumOperands() == 2 && containsConstantInAddMulChain(Add
))
2940 return getAddExpr(getMulExpr(LHSC
, Add
->getOperand(0),
2941 SCEV::FlagAnyWrap
, Depth
+ 1),
2942 getMulExpr(LHSC
, Add
->getOperand(1),
2943 SCEV::FlagAnyWrap
, Depth
+ 1),
2944 SCEV::FlagAnyWrap
, Depth
+ 1);
2947 while (const SCEVConstant
*RHSC
= dyn_cast
<SCEVConstant
>(Ops
[Idx
])) {
2948 // We found two constants, fold them together!
2950 ConstantInt::get(getContext(), LHSC
->getAPInt() * RHSC
->getAPInt());
2951 Ops
[0] = getConstant(Fold
);
2952 Ops
.erase(Ops
.begin()+1); // Erase the folded element
2953 if (Ops
.size() == 1) return Ops
[0];
2954 LHSC
= cast
<SCEVConstant
>(Ops
[0]);
2957 // If we are left with a constant one being multiplied, strip it off.
2958 if (cast
<SCEVConstant
>(Ops
[0])->getValue()->isOne()) {
2959 Ops
.erase(Ops
.begin());
2961 } else if (cast
<SCEVConstant
>(Ops
[0])->getValue()->isZero()) {
2962 // If we have a multiply of zero, it will always be zero.
2964 } else if (Ops
[0]->isAllOnesValue()) {
2965 // If we have a mul by -1 of an add, try distributing the -1 among the
2967 if (Ops
.size() == 2) {
2968 if (const SCEVAddExpr
*Add
= dyn_cast
<SCEVAddExpr
>(Ops
[1])) {
2969 SmallVector
<const SCEV
*, 4> NewOps
;
2970 bool AnyFolded
= false;
2971 for (const SCEV
*AddOp
: Add
->operands()) {
2972 const SCEV
*Mul
= getMulExpr(Ops
[0], AddOp
, SCEV::FlagAnyWrap
,
2974 if (!isa
<SCEVMulExpr
>(Mul
)) AnyFolded
= true;
2975 NewOps
.push_back(Mul
);
2978 return getAddExpr(NewOps
, SCEV::FlagAnyWrap
, Depth
+ 1);
2979 } else if (const auto *AddRec
= dyn_cast
<SCEVAddRecExpr
>(Ops
[1])) {
2980 // Negation preserves a recurrence's no self-wrap property.
2981 SmallVector
<const SCEV
*, 4> Operands
;
2982 for (const SCEV
*AddRecOp
: AddRec
->operands())
2983 Operands
.push_back(getMulExpr(Ops
[0], AddRecOp
, SCEV::FlagAnyWrap
,
2986 return getAddRecExpr(Operands
, AddRec
->getLoop(),
2987 AddRec
->getNoWrapFlags(SCEV::FlagNW
));
2992 if (Ops
.size() == 1)
2996 // Skip over the add expression until we get to a multiply.
2997 while (Idx
< Ops
.size() && Ops
[Idx
]->getSCEVType() < scMulExpr
)
3000 // If there are mul operands inline them all into this expression.
3001 if (Idx
< Ops
.size()) {
3002 bool DeletedMul
= false;
3003 while (const SCEVMulExpr
*Mul
= dyn_cast
<SCEVMulExpr
>(Ops
[Idx
])) {
3004 if (Ops
.size() > MulOpsInlineThreshold
)
3006 // If we have an mul, expand the mul operands onto the end of the
3008 Ops
.erase(Ops
.begin()+Idx
);
3009 Ops
.append(Mul
->op_begin(), Mul
->op_end());
3013 // If we deleted at least one mul, we added operands to the end of the
3014 // list, and they are not necessarily sorted. Recurse to resort and
3015 // resimplify any operands we just acquired.
3017 return getMulExpr(Ops
, SCEV::FlagAnyWrap
, Depth
+ 1);
3020 // If there are any add recurrences in the operands list, see if any other
3021 // added values are loop invariant. If so, we can fold them into the
3023 while (Idx
< Ops
.size() && Ops
[Idx
]->getSCEVType() < scAddRecExpr
)
3026 // Scan over all recurrences, trying to fold loop invariants into them.
3027 for (; Idx
< Ops
.size() && isa
<SCEVAddRecExpr
>(Ops
[Idx
]); ++Idx
) {
3028 // Scan all of the other operands to this mul and add them to the vector
3029 // if they are loop invariant w.r.t. the recurrence.
3030 SmallVector
<const SCEV
*, 8> LIOps
;
3031 const SCEVAddRecExpr
*AddRec
= cast
<SCEVAddRecExpr
>(Ops
[Idx
]);
3032 const Loop
*AddRecLoop
= AddRec
->getLoop();
3033 for (unsigned i
= 0, e
= Ops
.size(); i
!= e
; ++i
)
3034 if (isAvailableAtLoopEntry(Ops
[i
], AddRecLoop
)) {
3035 LIOps
.push_back(Ops
[i
]);
3036 Ops
.erase(Ops
.begin()+i
);
3040 // If we found some loop invariants, fold them into the recurrence.
3041 if (!LIOps
.empty()) {
3042 // NLI * LI * {Start,+,Step} --> NLI * {LI*Start,+,LI*Step}
3043 SmallVector
<const SCEV
*, 4> NewOps
;
3044 NewOps
.reserve(AddRec
->getNumOperands());
3045 const SCEV
*Scale
= getMulExpr(LIOps
, SCEV::FlagAnyWrap
, Depth
+ 1);
3046 for (unsigned i
= 0, e
= AddRec
->getNumOperands(); i
!= e
; ++i
)
3047 NewOps
.push_back(getMulExpr(Scale
, AddRec
->getOperand(i
),
3048 SCEV::FlagAnyWrap
, Depth
+ 1));
3050 // Build the new addrec. Propagate the NUW and NSW flags if both the
3051 // outer mul and the inner addrec are guaranteed to have no overflow.
3053 // No self-wrap cannot be guaranteed after changing the step size, but
3054 // will be inferred if either NUW or NSW is true.
3055 Flags
= AddRec
->getNoWrapFlags(clearFlags(Flags
, SCEV::FlagNW
));
3056 const SCEV
*NewRec
= getAddRecExpr(NewOps
, AddRecLoop
, Flags
);
3058 // If all of the other operands were loop invariant, we are done.
3059 if (Ops
.size() == 1) return NewRec
;
3061 // Otherwise, multiply the folded AddRec by the non-invariant parts.
3062 for (unsigned i
= 0;; ++i
)
3063 if (Ops
[i
] == AddRec
) {
3067 return getMulExpr(Ops
, SCEV::FlagAnyWrap
, Depth
+ 1);
3070 // Okay, if there weren't any loop invariants to be folded, check to see
3071 // if there are multiple AddRec's with the same loop induction variable
3072 // being multiplied together. If so, we can fold them.
3074 // {A1,+,A2,+,...,+,An}<L> * {B1,+,B2,+,...,+,Bn}<L>
3075 // = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [
3076 // choose(x, 2x)*choose(2x-y, x-z)*A_{y-z}*B_z
3077 // ]]],+,...up to x=2n}.
3078 // Note that the arguments to choose() are always integers with values
3079 // known at compile time, never SCEV objects.
3081 // The implementation avoids pointless extra computations when the two
3082 // addrec's are of different length (mathematically, it's equivalent to
3083 // an infinite stream of zeros on the right).
3084 bool OpsModified
= false;
3085 for (unsigned OtherIdx
= Idx
+1;
3086 OtherIdx
!= Ops
.size() && isa
<SCEVAddRecExpr
>(Ops
[OtherIdx
]);
3088 const SCEVAddRecExpr
*OtherAddRec
=
3089 dyn_cast
<SCEVAddRecExpr
>(Ops
[OtherIdx
]);
3090 if (!OtherAddRec
|| OtherAddRec
->getLoop() != AddRecLoop
)
3093 // Limit max number of arguments to avoid creation of unreasonably big
3094 // SCEVAddRecs with very complex operands.
3095 if (AddRec
->getNumOperands() + OtherAddRec
->getNumOperands() - 1 >
3096 MaxAddRecSize
|| isHugeExpression(AddRec
) ||
3097 isHugeExpression(OtherAddRec
))
3100 bool Overflow
= false;
3101 Type
*Ty
= AddRec
->getType();
3102 bool LargerThan64Bits
= getTypeSizeInBits(Ty
) > 64;
3103 SmallVector
<const SCEV
*, 7> AddRecOps
;
3104 for (int x
= 0, xe
= AddRec
->getNumOperands() +
3105 OtherAddRec
->getNumOperands() - 1; x
!= xe
&& !Overflow
; ++x
) {
3106 SmallVector
<const SCEV
*, 7> SumOps
;
3107 for (int y
= x
, ye
= 2*x
+1; y
!= ye
&& !Overflow
; ++y
) {
3108 uint64_t Coeff1
= Choose(x
, 2*x
- y
, Overflow
);
3109 for (int z
= std::max(y
-x
, y
-(int)AddRec
->getNumOperands()+1),
3110 ze
= std::min(x
+1, (int)OtherAddRec
->getNumOperands());
3111 z
< ze
&& !Overflow
; ++z
) {
3112 uint64_t Coeff2
= Choose(2*x
- y
, x
-z
, Overflow
);
3114 if (LargerThan64Bits
)
3115 Coeff
= umul_ov(Coeff1
, Coeff2
, Overflow
);
3117 Coeff
= Coeff1
*Coeff2
;
3118 const SCEV
*CoeffTerm
= getConstant(Ty
, Coeff
);
3119 const SCEV
*Term1
= AddRec
->getOperand(y
-z
);
3120 const SCEV
*Term2
= OtherAddRec
->getOperand(z
);
3121 SumOps
.push_back(getMulExpr(CoeffTerm
, Term1
, Term2
,
3122 SCEV::FlagAnyWrap
, Depth
+ 1));
3126 SumOps
.push_back(getZero(Ty
));
3127 AddRecOps
.push_back(getAddExpr(SumOps
, SCEV::FlagAnyWrap
, Depth
+ 1));
3130 const SCEV
*NewAddRec
= getAddRecExpr(AddRecOps
, AddRecLoop
,
3132 if (Ops
.size() == 2) return NewAddRec
;
3133 Ops
[Idx
] = NewAddRec
;
3134 Ops
.erase(Ops
.begin() + OtherIdx
); --OtherIdx
;
3136 AddRec
= dyn_cast
<SCEVAddRecExpr
>(NewAddRec
);
3142 return getMulExpr(Ops
, SCEV::FlagAnyWrap
, Depth
+ 1);
3144 // Otherwise couldn't fold anything into this recurrence. Move onto the
3148 // Okay, it looks like we really DO need an mul expr. Check to see if we
3149 // already have one, otherwise create a new one.
3150 return getOrCreateMulExpr(Ops
, Flags
);
3153 /// Represents an unsigned remainder expression based on unsigned division.
3154 const SCEV
*ScalarEvolution::getURemExpr(const SCEV
*LHS
,
3156 assert(getEffectiveSCEVType(LHS
->getType()) ==
3157 getEffectiveSCEVType(RHS
->getType()) &&
3158 "SCEVURemExpr operand types don't match!");
3160 // Short-circuit easy cases
3161 if (const SCEVConstant
*RHSC
= dyn_cast
<SCEVConstant
>(RHS
)) {
3162 // If constant is one, the result is trivial
3163 if (RHSC
->getValue()->isOne())
3164 return getZero(LHS
->getType()); // X urem 1 --> 0
3166 // If constant is a power of two, fold into a zext(trunc(LHS)).
3167 if (RHSC
->getAPInt().isPowerOf2()) {
3168 Type
*FullTy
= LHS
->getType();
3170 IntegerType::get(getContext(), RHSC
->getAPInt().logBase2());
3171 return getZeroExtendExpr(getTruncateExpr(LHS
, TruncTy
), FullTy
);
3175 // Fallback to %a == %x urem %y == %x -<nuw> ((%x udiv %y) *<nuw> %y)
3176 const SCEV
*UDiv
= getUDivExpr(LHS
, RHS
);
3177 const SCEV
*Mult
= getMulExpr(UDiv
, RHS
, SCEV::FlagNUW
);
3178 return getMinusSCEV(LHS
, Mult
, SCEV::FlagNUW
);
/// Get a canonical unsigned division expression, or something simpler if
/// possible.
const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  assert(getEffectiveSCEVType(LHS->getType()) ==
             getEffectiveSCEVType(RHS->getType()) &&
         "SCEVUDivExpr operand types don't match!");

  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
    if (RHSC->getValue()->isOne())
      return LHS; // X udiv 1 --> x
    // If the denominator is zero, the result of the udiv is undefined. Don't
    // try to analyze it, because the resolution chosen here may differ from
    // the resolution chosen in other parts of the compiler.
    if (!RHSC->getValue()->isZero()) {
      // Determine if the division can be folded into the operands of LHS.
      // TODO: Generalize this to non-constants by using known-bits information.
      Type *Ty = LHS->getType();
      unsigned LZ = RHSC->getAPInt().countLeadingZeros();
      unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1;
      // For non-power-of-two values, effectively round the value up to the
      // nearest power of two.
      if (!RHSC->getAPInt().isPowerOf2())
        ++MaxShiftAmt;
      IntegerType *ExtTy =
          IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
      if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
        if (const SCEVConstant *Step =
                dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) {
          // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
          const APInt &StepInt = Step->getAPInt();
          const APInt &DivInt = RHSC->getAPInt();
          if (!StepInt.urem(DivInt) &&
              getZeroExtendExpr(AR, ExtTy) ==
                  getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
                                getZeroExtendExpr(Step, ExtTy),
                                AR->getLoop(), SCEV::FlagAnyWrap)) {
            SmallVector<const SCEV *, 4> Operands;
            for (const SCEV *Op : AR->operands())
              Operands.push_back(getUDivExpr(Op, RHS));
            return getAddRecExpr(Operands, AR->getLoop(), SCEV::FlagNW);
          }
          /// Get a canonical UDivExpr for a recurrence.
          /// {X,+,N}/C => {Y,+,N}/C where Y=X-(X%N). Safe when C%N=0.
          // We can currently only fold X%N if X is constant.
          const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart());
          if (StartC && !DivInt.urem(StepInt) &&
              getZeroExtendExpr(AR, ExtTy) ==
                  getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
                                getZeroExtendExpr(Step, ExtTy),
                                AR->getLoop(), SCEV::FlagAnyWrap)) {
            const APInt &StartInt = StartC->getAPInt();
            const APInt &StartRem = StartInt.urem(StepInt);
            if (StartRem != 0)
              LHS = getAddRecExpr(getConstant(StartInt - StartRem), Step,
                                  AR->getLoop(), SCEV::FlagNW);
          }
        }
      // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
      if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
        SmallVector<const SCEV *, 4> Operands;
        for (const SCEV *Op : M->operands())
          Operands.push_back(getZeroExtendExpr(Op, ExtTy));
        if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
          // Find an operand that's safely divisible.
          for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
            const SCEV *Op = M->getOperand(i);
            const SCEV *Div = getUDivExpr(Op, RHSC);
            if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
              Operands = SmallVector<const SCEV *, 4>(M->op_begin(),
                                                      M->op_end());
              Operands[i] = Div;
              return getMulExpr(Operands);
            }
          }
      }

      // (A/B)/C --> A/(B*C) if safe and B*C can be folded.
      if (const SCEVUDivExpr *OtherDiv = dyn_cast<SCEVUDivExpr>(LHS)) {
        if (auto *DivisorConstant =
                dyn_cast<SCEVConstant>(OtherDiv->getRHS())) {
          bool Overflow = false;
          APInt NewRHS =
              DivisorConstant->getAPInt().umul_ov(RHSC->getAPInt(), Overflow);
          if (Overflow) {
            return getConstant(RHSC->getType(), 0, false);
          }
          return getUDivExpr(OtherDiv->getLHS(), getConstant(NewRHS));
        }
      }

      // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
      if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) {
        SmallVector<const SCEV *, 4> Operands;
        for (const SCEV *Op : A->operands())
          Operands.push_back(getZeroExtendExpr(Op, ExtTy));
        if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
          Operands.clear();
          for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
            const SCEV *Op = getUDivExpr(A->getOperand(i), RHS);
            if (isa<SCEVUDivExpr>(Op) ||
                getMulExpr(Op, RHS) != A->getOperand(i))
              break; // Cannot divide.
            Operands.push_back(Op);
          }
          if (Operands.size() == A->getNumOperands())
            return getAddExpr(Operands);
        }
      }

      // Fold if both operands are constant.
      if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
        Constant *LHSCV = LHSC->getValue();
        Constant *RHSCV = RHSC->getValue();
        return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV,
                                                                   RHSCV)));
      }
    }
  }

  FoldingSetNodeID ID;
  ID.AddInteger(scUDivExpr);
  ID.AddPointer(LHS);
  ID.AddPointer(RHS);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator),
                                             LHS, RHS);
  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}
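// Illustrative example for the {X,+,N}/C fold above (not part of the
// original source): for an i32 recurrence {0,+,4}<L> whose zero-extension
// check succeeds, dividing by C == 2 distributes over the operands because
// the step 4 is divisible by 2:
//   {0,+,4}<L> /u 2  -->  {0,+,2}<L>.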
static const APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) {
  APInt A = C1->getAPInt().abs();
  APInt B = C2->getAPInt().abs();
  uint32_t ABW = A.getBitWidth();
  uint32_t BBW = B.getBitWidth();

  if (ABW > BBW)
    B = B.zext(ABW);
  else if (ABW < BBW)
    A = A.zext(BBW);

  return APIntOps::GreatestCommonDivisor(std::move(A), std::move(B));
}
/// Get a canonical unsigned division expression, or something simpler if
/// possible. There is no representation for an exact udiv in SCEV IR, but we
/// can attempt to remove factors from the LHS and RHS. We can't do this when
/// it's not exact because the udiv may be clearing bits.
const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS,
                                              const SCEV *RHS) {
  // TODO: we could try to find factors in all sorts of things, but for now we
  // just deal with u/exact (multiply, constant). See SCEVDivision towards the
  // end of this file for inspiration.

  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS);
  if (!Mul || !Mul->hasNoUnsignedWrap())
    return getUDivExpr(LHS, RHS);

  if (const SCEVConstant *RHSCst = dyn_cast<SCEVConstant>(RHS)) {
    // If the mulexpr multiplies by a constant, then that constant must be the
    // first element of the mulexpr.
    if (const auto *LHSCst = dyn_cast<SCEVConstant>(Mul->getOperand(0))) {
      if (LHSCst == RHSCst) {
        SmallVector<const SCEV *, 2> Operands;
        Operands.append(Mul->op_begin() + 1, Mul->op_end());
        return getMulExpr(Operands);
      }

      // We can't just assume that LHSCst divides RHSCst cleanly, it could be
      // that there's a factor provided by one of the other terms. We need to
      // check.
      APInt Factor = gcd(LHSCst, RHSCst);
      if (!Factor.isIntN(1)) {
        LHSCst =
            cast<SCEVConstant>(getConstant(LHSCst->getAPInt().udiv(Factor)));
        RHSCst =
            cast<SCEVConstant>(getConstant(RHSCst->getAPInt().udiv(Factor)));
        SmallVector<const SCEV *, 2> Operands;
        Operands.push_back(LHSCst);
        Operands.append(Mul->op_begin() + 1, Mul->op_end());
        LHS = getMulExpr(Operands);
        RHS = RHSCst;
        Mul = dyn_cast<SCEVMulExpr>(LHS);
        if (!Mul)
          return getUDivExactExpr(LHS, RHS);
      }
    }
  }

  for (int i = 0, e = Mul->getNumOperands(); i != e; ++i) {
    if (Mul->getOperand(i) == RHS) {
      SmallVector<const SCEV *, 2> Operands;
      Operands.append(Mul->op_begin(), Mul->op_begin() + i);
      Operands.append(Mul->op_begin() + i + 1, Mul->op_end());
      return getMulExpr(Operands);
    }
  }

  return getUDivExpr(LHS, RHS);
}
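// Illustrative examples (not part of the original source): for an exact
// division (6 * %x)<nuw> /u 4, gcd(6, 4) == 2, so the shared factor is
// removed and the result is the plain division (3 * %x) /u 2; while for
// (4 * %x)<nuw> /u %x the matching operand is dropped, yielding 4 directly.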
/// Get an add recurrence expression for the specified loop. Simplify the
/// expression as much as possible.
const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step,
                                           const Loop *L,
                                           SCEV::NoWrapFlags Flags) {
  SmallVector<const SCEV *, 4> Operands;
  Operands.push_back(Start);
  if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
    if (StepChrec->getLoop() == L) {
      Operands.append(StepChrec->op_begin(), StepChrec->op_end());
      return getAddRecExpr(Operands, L, maskFlags(Flags, SCEV::FlagNW));
    }

  Operands.push_back(Step);
  return getAddRecExpr(Operands, L, Flags);
}
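// Illustrative example (not part of the original source): a step that is
// itself an addrec on the same loop is flattened into one recurrence, e.g.
//   getAddRecExpr(%a, {1,+,2}<L>, L, Flags)  -->  {%a,+,1,+,2}<L>,
// since f(n+1) = f(n) + step(n) composes the two evolutions.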
/// Get an add recurrence expression for the specified loop. Simplify the
/// expression as much as possible.
const SCEV *
ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
                               const Loop *L, SCEV::NoWrapFlags Flags) {
  if (Operands.size() == 1) return Operands[0];
#ifndef NDEBUG
  Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy &&
           "SCEVAddRecExpr operand types don't match!");
  for (unsigned i = 0, e = Operands.size(); i != e; ++i)
    assert(isLoopInvariant(Operands[i], L) &&
           "SCEVAddRecExpr operand is not loop-invariant!");
#endif

  if (Operands.back()->isZero()) {
    Operands.pop_back();
    return getAddRecExpr(Operands, L, SCEV::FlagAnyWrap); // {X,+,0} --> X
  }

  // It's tempting to want to call getMaxBackedgeTakenCount count here and
  // use that information to infer NUW and NSW flags. However, computing a
  // BE count requires calling getAddRecExpr, so we may not yet have a
  // meaningful BE count at this point (and if we don't, we'd be stuck
  // with a SCEVCouldNotCompute as the cached BE count).

  Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags);

  // Canonicalize nested AddRecs by nesting them in order of loop depth.
  if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
    const Loop *NestedLoop = NestedAR->getLoop();
    if (L->contains(NestedLoop)
            ? (L->getLoopDepth() < NestedLoop->getLoopDepth())
            : (!NestedLoop->contains(L) &&
               DT.dominates(L->getHeader(), NestedLoop->getHeader()))) {
      SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(),
                                                  NestedAR->op_end());
      Operands[0] = NestedAR->getStart();
      // AddRecs require their operands be loop-invariant with respect to their
      // loops. Don't perform this transformation if it would break this
      // requirement.
      bool AllInvariant = all_of(
          Operands, [&](const SCEV *Op) { return isLoopInvariant(Op, L); });

      if (AllInvariant) {
        // Create a recurrence for the outer loop with the same step size.
        //
        // The outer recurrence keeps its NW flag but only keeps NUW/NSW if the
        // inner recurrence has the same property.
        SCEV::NoWrapFlags OuterFlags =
            maskFlags(Flags, SCEV::FlagNW | NestedAR->getNoWrapFlags());

        NestedOperands[0] = getAddRecExpr(Operands, L, OuterFlags);
        AllInvariant = all_of(NestedOperands, [&](const SCEV *Op) {
          return isLoopInvariant(Op, NestedLoop);
        });

        if (AllInvariant) {
          // Ok, both add recurrences are valid after the transformation.
          //
          // The inner recurrence keeps its NW flag but only keeps NUW/NSW if
          // the outer recurrence has the same property.
          SCEV::NoWrapFlags InnerFlags =
              maskFlags(NestedAR->getNoWrapFlags(), SCEV::FlagNW | Flags);
          return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags);
        }
      }
      // Reset Operands to its original state.
      Operands[0] = NestedAR;
    }
  }

  // Okay, it looks like we really DO need an addrec expr. Check to see if we
  // already have one, otherwise create a new one.
  return getOrCreateAddRecExpr(Operands, L, Flags);
}
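// Illustrative examples (not part of the original source):
//  - {%a,+,0}<L> simplifies to plain %a via the isZero() check above.
//  - For a recurrence on an outer loop whose start is an inner-loop
//    recurrence, such as {{%a,+,%b}<Linner>,+,%s}<Louter>, the nesting
//    canonicalization above produces {{%a,+,%s}<Louter>,+,%b}<Linner>,
//    assuming the loop-invariance checks succeed.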
const SCEV *
ScalarEvolution::getGEPExpr(GEPOperator *GEP,
                            const SmallVectorImpl<const SCEV *> &IndexExprs) {
  const SCEV *BaseExpr = getSCEV(GEP->getPointerOperand());
  // getSCEV(Base)->getType() has the same address space as Base->getType()
  // because SCEV::getType() preserves the address space.
  Type *IntPtrTy = getEffectiveSCEVType(BaseExpr->getType());
  // FIXME(PR23527): Don't blindly transfer the inbounds flag from the GEP
  // instruction to its SCEV, because the Instruction may be guarded by control
  // flow and the no-overflow bits may not be valid for the expression in any
  // context. This can be fixed similarly to how these flags are handled for
  // adds.
  SCEV::NoWrapFlags Wrap = GEP->isInBounds() ? SCEV::FlagNSW
                                             : SCEV::FlagAnyWrap;

  const SCEV *TotalOffset = getZero(IntPtrTy);
  // The array size is unimportant. The first thing we do on CurTy is getting
  // its element type.
  Type *CurTy = ArrayType::get(GEP->getSourceElementType(), 0);
  for (const SCEV *IndexExpr : IndexExprs) {
    // Compute the (potentially symbolic) offset in bytes for this index.
    if (StructType *STy = dyn_cast<StructType>(CurTy)) {
      // For a struct, add the member offset.
      ConstantInt *Index = cast<SCEVConstant>(IndexExpr)->getValue();
      unsigned FieldNo = Index->getZExtValue();
      const SCEV *FieldOffset = getOffsetOfExpr(IntPtrTy, STy, FieldNo);

      // Add the field offset to the running total offset.
      TotalOffset = getAddExpr(TotalOffset, FieldOffset);

      // Update CurTy to the type of the field at Index.
      CurTy = STy->getTypeAtIndex(Index);
    } else {
      // Update CurTy to its element type.
      CurTy = cast<SequentialType>(CurTy)->getElementType();
      // For an array, add the element offset, explicitly scaled.
      const SCEV *ElementSize = getSizeOfExpr(IntPtrTy, CurTy);
      // Getelementptr indices are signed.
      IndexExpr = getTruncateOrSignExtend(IndexExpr, IntPtrTy);

      // Multiply the index by the element size to compute the element offset.
      const SCEV *LocalOffset = getMulExpr(IndexExpr, ElementSize, Wrap);

      // Add the element offset to the running total offset.
      TotalOffset = getAddExpr(TotalOffset, LocalOffset);
    }
  }

  // Add the total offset from all the GEP indices to the base.
  return getAddExpr(BaseExpr, TotalOffset, Wrap);
}
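// Illustrative example (not part of the original source): for
//   getelementptr inbounds [8 x i32], [8 x i32]* %p, i64 %i, i64 %j
// with 64-bit pointers, BaseExpr is %p and TotalOffset accumulates to
// (32 * %i) + (4 * %j) bytes (32 == sizeof([8 x i32]), 4 == sizeof(i32)),
// so the result is %p + 32*%i + 4*%j with <nsw> from the inbounds flag.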
std::tuple<const SCEV *, FoldingSetNodeID, void *>
ScalarEvolution::findExistingSCEVInCache(int SCEVType,
                                         ArrayRef<const SCEV *> Ops) {
  FoldingSetNodeID ID;
  void *IP = nullptr;
  ID.AddInteger(SCEVType);
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  return std::tuple<const SCEV *, FoldingSetNodeID, void *>(
      UniqueSCEVs.FindNodeOrInsertPos(ID, IP), std::move(ID), IP);
}
const SCEV *ScalarEvolution::getMinMaxExpr(unsigned Kind,
                                           SmallVectorImpl<const SCEV *> &Ops) {
  assert(!Ops.empty() && "Cannot get empty (u|s)(min|max)!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
           "Operand types don't match!");
#endif

  bool IsSigned = Kind == scSMaxExpr || Kind == scSMinExpr;
  bool IsMax = Kind == scSMaxExpr || Kind == scUMaxExpr;

  // Sort by complexity, this groups all similar expression types together.
  GroupByComplexity(Ops, &LI, DT);

  // Check if we have created the same expression before.
  if (const SCEV *S = std::get<0>(findExistingSCEVInCache(Kind, Ops))) {
    return S;
  }

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    auto FoldOp = [&](const APInt &LHS, const APInt &RHS) {
      if (Kind == scSMaxExpr)
        return APIntOps::smax(LHS, RHS);
      else if (Kind == scSMinExpr)
        return APIntOps::smin(LHS, RHS);
      else if (Kind == scUMaxExpr)
        return APIntOps::umax(LHS, RHS);
      else if (Kind == scUMinExpr)
        return APIntOps::umin(LHS, RHS);
      llvm_unreachable("Unknown SCEV min/max opcode");
    };

    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      ConstantInt *Fold = ConstantInt::get(
          getContext(), FoldOp(LHSC->getAPInt(), RHSC->getAPInt()));
      Ops[0] = getConstant(Fold);
      Ops.erase(Ops.begin()+1); // Erase the folded element
      if (Ops.size() == 1) return Ops[0];
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    bool IsMinV = LHSC->getValue()->isMinValue(IsSigned);
    bool IsMaxV = LHSC->getValue()->isMaxValue(IsSigned);

    if (IsMax ? IsMinV : IsMaxV) {
      // If we are left with a constant minimum(/maximum)-int, strip it off.
      Ops.erase(Ops.begin());
      --Idx;
    } else if (IsMax ? IsMaxV : IsMinV) {
      // If we have a max(/min) with a constant maximum(/minimum)-int,
      // it will always be the extremum.
      return LHSC;
    }

    if (Ops.size() == 1) return Ops[0];
  }

  // Find the first operation of the same kind
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < Kind)
    ++Idx;

  // Check to see if one of the operands is of the same kind. If so, expand its
  // operands onto our operand list, and recurse to simplify.
  if (Idx < Ops.size()) {
    bool DeletedAny = false;
    while (Ops[Idx]->getSCEVType() == Kind) {
      const SCEVMinMaxExpr *SMME = cast<SCEVMinMaxExpr>(Ops[Idx]);
      Ops.erase(Ops.begin()+Idx);
      Ops.append(SMME->op_begin(), SMME->op_end());
      DeletedAny = true;
    }

    if (DeletedAny)
      return getMinMaxExpr(Kind, Ops);
  }

  // Okay, check to see if the same value occurs in the operand list twice. If
  // so, delete one. Since we sorted the list, these values are required to
  // be adjacent.
  llvm::CmpInst::Predicate GEPred =
      IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
  llvm::CmpInst::Predicate LEPred =
      IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
  llvm::CmpInst::Predicate FirstPred = IsMax ? GEPred : LEPred;
  llvm::CmpInst::Predicate SecondPred = IsMax ? LEPred : GEPred;
  for (unsigned i = 0, e = Ops.size() - 1; i != e; ++i) {
    if (Ops[i] == Ops[i + 1] ||
        isKnownViaNonRecursiveReasoning(FirstPred, Ops[i], Ops[i + 1])) {
      //  X op Y op Y  -->  X op Y
      //  X op Y       -->  X, if we know X, Y are ordered appropriately
      Ops.erase(Ops.begin() + i + 1, Ops.begin() + i + 2);
      --i;
      --e;
    } else if (isKnownViaNonRecursiveReasoning(SecondPred, Ops[i],
                                               Ops[i + 1])) {
      //  X op Y       -->  Y, if we know X, Y are ordered appropriately
      Ops.erase(Ops.begin() + i, Ops.begin() + i + 1);
      --i;
      --e;
    }
  }

  if (Ops.size() == 1) return Ops[0];

  assert(!Ops.empty() && "Reduced smax down to nothing!");

  // Okay, it looks like we really DO need an expr. Check to see if we
  // already have one, otherwise create a new one.
  const SCEV *ExistingSCEV;
  FoldingSetNodeID ID;
  void *IP;
  std::tie(ExistingSCEV, ID, IP) = findExistingSCEVInCache(Kind, Ops);
  if (ExistingSCEV)
    return ExistingSCEV;
  const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
  std::uninitialized_copy(Ops.begin(), Ops.end(), O);
  SCEV *S = new (SCEVAllocator) SCEVMinMaxExpr(
      ID.Intern(SCEVAllocator), static_cast<SCEVTypes>(Kind), O, Ops.size());

  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}
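// Illustrative examples of the simplifications above (not part of the
// original source), using umax:
//   umax(2, umax(%x, 3))  -->  umax(3, %x)   (inline, then fold constants)
//   umax(%x, %x)          -->  %x            (duplicates are adjacent)
//   umax(%x, 0)           -->  %x            (0 is the unsigned minimum)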
const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS, const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
  return getSMaxExpr(Ops);
}

const SCEV *ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
  return getMinMaxExpr(scSMaxExpr, Ops);
}

const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS, const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
  return getUMaxExpr(Ops);
}

const SCEV *ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
  return getMinMaxExpr(scUMaxExpr, Ops);
}

const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
  return getSMinExpr(Ops);
}

const SCEV *ScalarEvolution::getSMinExpr(SmallVectorImpl<const SCEV *> &Ops) {
  return getMinMaxExpr(scSMinExpr, Ops);
}

const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
  return getUMinExpr(Ops);
}

const SCEV *ScalarEvolution::getUMinExpr(SmallVectorImpl<const SCEV *> &Ops) {
  return getMinMaxExpr(scUMinExpr, Ops);
}
const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) {
  // We can bypass creating a target-independent
  // constant expression and then folding it back into a ConstantInt.
  // This is just a compile-time optimization.
  return getConstant(IntTy, getDataLayout().getTypeAllocSize(AllocTy));
}

const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy,
                                             StructType *STy,
                                             unsigned FieldNo) {
  // We can bypass creating a target-independent
  // constant expression and then folding it back into a ConstantInt.
  // This is just a compile-time optimization.
  return getConstant(
      IntTy, getDataLayout().getStructLayout(STy)->getElementOffset(FieldNo));
}
const SCEV *ScalarEvolution::getUnknown(Value *V) {
  // Don't attempt to do anything other than create a SCEVUnknown object
  // here. createSCEV only calls getUnknown after checking for all other
  // interesting possibilities, and any other code that calls getUnknown
  // is doing so in order to hide a value from SCEV canonicalization.

  FoldingSetNodeID ID;
  ID.AddInteger(scUnknown);
  ID.AddPointer(V);
  void *IP = nullptr;
  if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) {
    assert(cast<SCEVUnknown>(S)->getValue() == V &&
           "Stale SCEVUnknown in uniquing map!");
    return S;
  }
  SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this,
                                            FirstUnknown);
  FirstUnknown = cast<SCEVUnknown>(S);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}
//===----------------------------------------------------------------------===//
//            Basic SCEV Analysis and PHI Idiom Recognition Code
//

/// Test if values of the given type are analyzable within the SCEV
/// framework. This primarily includes integer types, and it can optionally
/// include pointer types if the ScalarEvolution class has access to
/// target-specific information.
bool ScalarEvolution::isSCEVable(Type *Ty) const {
  // Integers and pointers are always SCEVable.
  return Ty->isIntOrPtrTy();
}
/// Return the size in bits of the specified type, for which isSCEVable must
/// return true.
uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
  assert(isSCEVable(Ty) && "Type is not SCEVable!");
  if (Ty->isPointerTy())
    return getDataLayout().getIndexTypeSizeInBits(Ty);
  return getDataLayout().getTypeSizeInBits(Ty);
}
/// Return a type with the same bitwidth as the given type and which represents
/// how SCEV will treat the given type, for which isSCEVable must return
/// true. For pointer types, this is the pointer-sized integer type.
Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
  assert(isSCEVable(Ty) && "Type is not SCEVable!");

  if (Ty->isIntegerTy())
    return Ty;

  // The only other supported type is pointer.
  assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
  return getDataLayout().getIntPtrType(Ty);
}

Type *ScalarEvolution::getWiderType(Type *T1, Type *T2) const {
  return getTypeSizeInBits(T1) >= getTypeSizeInBits(T2) ? T1 : T2;
}
const SCEV *ScalarEvolution::getCouldNotCompute() {
  return CouldNotCompute.get();
}

bool ScalarEvolution::checkValidity(const SCEV *S) const {
  bool ContainsNulls = SCEVExprContains(S, [](const SCEV *S) {
    auto *SU = dyn_cast<SCEVUnknown>(S);
    return SU && SU->getValue() == nullptr;
  });

  return !ContainsNulls;
}
bool ScalarEvolution::containsAddRecurrence(const SCEV *S) {
  HasRecMapType::iterator I = HasRecMap.find(S);
  if (I != HasRecMap.end())
    return I->second;

  bool FoundAddRec = SCEVExprContains(S, isa<SCEVAddRecExpr, const SCEV *>);
  HasRecMap.insert({S, FoundAddRec});
  return FoundAddRec;
}
/// Try to split a SCEVAddExpr into a pair of {SCEV, ConstantInt}.
/// If \p S is a SCEVAddExpr and is composed of a sub SCEV S' and an
/// offset I, then return {S', I}, else return {\p S, nullptr}.
static std::pair<const SCEV *, ConstantInt *> splitAddExpr(const SCEV *S) {
  const auto *Add = dyn_cast<SCEVAddExpr>(S);
  if (!Add)
    return {S, nullptr};

  if (Add->getNumOperands() != 2)
    return {S, nullptr};

  auto *ConstOp = dyn_cast<SCEVConstant>(Add->getOperand(0));
  if (!ConstOp)
    return {S, nullptr};

  return {Add->getOperand(1), ConstOp->getValue()};
}
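// Illustrative example (not part of the original source): for S = (5 + %x),
// splitAddExpr returns {%x, 5}; for S = (%x + %y) or a lone %x it returns
// {S, nullptr}, since there is no leading constant offset.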
/// Return the ValueOffsetPair set for \p S. \p S can be represented
/// by the value and offset from any ValueOffsetPair in the set.
SetVector<ScalarEvolution::ValueOffsetPair> *
ScalarEvolution::getSCEVValues(const SCEV *S) {
  ExprValueMapType::iterator SI = ExprValueMap.find_as(S);
  if (SI == ExprValueMap.end())
    return nullptr;
#ifndef NDEBUG
  if (VerifySCEVMap) {
    // Check there is no dangling Value in the set returned.
    for (const auto &VE : SI->second)
      assert(ValueExprMap.count(VE.first));
  }
#endif
  return &SI->second;
}
/// Erase Value from ValueExprMap and ExprValueMap. ValueExprMap.erase(V)
/// cannot be used separately. eraseValueFromMap should be used to remove
/// V from ValueExprMap and ExprValueMap at the same time.
void ScalarEvolution::eraseValueFromMap(Value *V) {
  ValueExprMapType::iterator I = ValueExprMap.find_as(V);
  if (I != ValueExprMap.end()) {
    const SCEV *S = I->second;
    // Remove {V, 0} from the set of ExprValueMap[S]
    if (SetVector<ValueOffsetPair> *SV = getSCEVValues(S))
      SV->remove({V, nullptr});

    // Remove {V, Offset} from the set of ExprValueMap[Stripped]
    const SCEV *Stripped;
    ConstantInt *Offset;
    std::tie(Stripped, Offset) = splitAddExpr(S);
    if (Offset != nullptr) {
      if (SetVector<ValueOffsetPair> *SV = getSCEVValues(Stripped))
        SV->remove({V, Offset});
    }
    ValueExprMap.erase(V);
  }
}
/// Check whether value has nuw/nsw/exact set but SCEV does not.
/// TODO: In reality it is better to check the poison recursively
/// but this is better than nothing.
static bool SCEVLostPoisonFlags(const SCEV *S, const Value *V) {
  if (auto *I = dyn_cast<Instruction>(V)) {
    if (isa<OverflowingBinaryOperator>(I)) {
      if (auto *NS = dyn_cast<SCEVNAryExpr>(S)) {
        if (I->hasNoSignedWrap() && !NS->hasNoSignedWrap())
          return true;
        if (I->hasNoUnsignedWrap() && !NS->hasNoUnsignedWrap())
          return true;
      }
    } else if (isa<PossiblyExactOperator>(I) && I->isExact())
      return true;
  }
  return false;
}
/// Return an existing SCEV if it exists, otherwise analyze the expression and
/// create a new one.
const SCEV *ScalarEvolution::getSCEV(Value *V) {
  assert(isSCEVable(V->getType()) && "Value is not SCEVable!");

  const SCEV *S = getExistingSCEV(V);
  if (S == nullptr) {
    S = createSCEV(V);
    // During PHI resolution, it is possible to create two SCEVs for the same
    // V, so it is needed to double check whether V->S is inserted into
    // ValueExprMap before insert S->{V, 0} into ExprValueMap.
    std::pair<ValueExprMapType::iterator, bool> Pair =
        ValueExprMap.insert({SCEVCallbackVH(V, this), S});
    if (Pair.second && !SCEVLostPoisonFlags(S, V)) {
      ExprValueMap[S].insert({V, nullptr});

      // If S == Stripped + Offset, add Stripped -> {V, Offset} into
      // ExprValueMap.
      const SCEV *Stripped = S;
      ConstantInt *Offset = nullptr;
      std::tie(Stripped, Offset) = splitAddExpr(S);
      // If stripped is SCEVUnknown, don't bother to save
      // Stripped -> {V, offset}. It doesn't simplify and sometimes even
      // increase the complexity of the expansion code.
      // If V is GetElementPtrInst, don't save Stripped -> {V, offset}
      // because it may generate add/sub instead of GEP in SCEV expansion.
      if (Offset != nullptr && !isa<SCEVUnknown>(Stripped) &&
          !isa<GetElementPtrInst>(V))
        ExprValueMap[Stripped].insert({V, Offset});
    }
  }
  return S;
}
const SCEV *ScalarEvolution::getExistingSCEV(Value *V) {
  assert(isSCEVable(V->getType()) && "Value is not SCEVable!");

  ValueExprMapType::iterator I = ValueExprMap.find_as(V);
  if (I != ValueExprMap.end()) {
    const SCEV *S = I->second;
    if (checkValidity(S))
      return S;
    eraseValueFromMap(V);
    forgetMemoizedResults(S);
  }
  return nullptr;
}
/// Return a SCEV corresponding to -V = -1*V
const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V,
                                             SCEV::NoWrapFlags Flags) {
  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue())));

  Type *Ty = V->getType();
  Ty = getEffectiveSCEVType(Ty);
  return getMulExpr(
      V, getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty))), Flags);
}
/// If Expr computes ~A, return A else return nullptr
static const SCEV *MatchNotExpr(const SCEV *Expr) {
  const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Expr);
  if (!Add || Add->getNumOperands() != 2 ||
      !Add->getOperand(0)->isAllOnesValue())
    return nullptr;

  const SCEVMulExpr *AddRHS = dyn_cast<SCEVMulExpr>(Add->getOperand(1));
  if (!AddRHS || AddRHS->getNumOperands() != 2 ||
      !AddRHS->getOperand(0)->isAllOnesValue())
    return nullptr;

  return AddRHS->getOperand(1);
}
/// Return a SCEV corresponding to ~V = -1-V
const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getNot(VC->getValue())));

  // Fold ~(u|s)(min|max)(~x, ~y) to (u|s)(max|min)(x, y)
  if (const SCEVMinMaxExpr *MME = dyn_cast<SCEVMinMaxExpr>(V)) {
    auto MatchMinMaxNegation = [&](const SCEVMinMaxExpr *MME) {
      SmallVector<const SCEV *, 2> MatchedOperands;
      for (const SCEV *Operand : MME->operands()) {
        const SCEV *Matched = MatchNotExpr(Operand);
        if (!Matched)
          return (const SCEV *)nullptr;
        MatchedOperands.push_back(Matched);
      }
      return getMinMaxExpr(
          SCEVMinMaxExpr::negate(static_cast<SCEVTypes>(MME->getSCEVType())),
          MatchedOperands);
    };
    if (const SCEV *Replaced = MatchMinMaxNegation(MME))
      return Replaced;
  }

  Type *Ty = V->getType();
  Ty = getEffectiveSCEVType(Ty);
  const SCEV *AllOnes =
      getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty)));
  return getMinusSCEV(AllOnes, V);
}
const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
                                          SCEV::NoWrapFlags Flags,
                                          unsigned Depth) {
  // Fast path: X - X --> 0.
  if (LHS == RHS)
    return getZero(LHS->getType());

  // We represent LHS - RHS as LHS + (-1)*RHS. This transformation
  // makes it so that we cannot make much use of NUW.
  auto AddFlags = SCEV::FlagAnyWrap;
  const bool RHSIsNotMinSigned =
      !getSignedRangeMin(RHS).isMinSignedValue();
  if (maskFlags(Flags, SCEV::FlagNSW) == SCEV::FlagNSW) {
    // Let M be the minimum representable signed value. Then (-1)*RHS
    // signed-wraps if and only if RHS is M. That can happen even for
    // a NSW subtraction because e.g. (-1)*M signed-wraps even though
    // -1 - M does not. So to transfer NSW from LHS - RHS to LHS +
    // (-1)*RHS, we need to prove that RHS != M.
    //
    // If LHS is non-negative and we know that LHS - RHS does not
    // signed-wrap, then RHS cannot be M. So we can rule out signed-wrap
    // either by proving that RHS > M or that LHS >= 0.
    if (RHSIsNotMinSigned || isKnownNonNegative(LHS)) {
      AddFlags = SCEV::FlagNSW;
    }
  }

  // FIXME: Find a correct way to transfer NSW to (-1)*M when LHS -
  // RHS is NSW and LHS >= 0.
  //
  // The difficulty here is that the NSW flag may have been proven
  // relative to a loop that is to be found in a recurrence in LHS and
  // not in RHS. Applying NSW to (-1)*M may then let the NSW have a
  // larger scope than intended.
  auto NegFlags = RHSIsNotMinSigned ? SCEV::FlagNSW : SCEV::FlagAnyWrap;

  return getAddExpr(LHS, getNegativeSCEV(RHS, NegFlags), AddFlags, Depth);
}
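// Concrete instance of the caveat above (illustrative, not part of the
// original source): in i8, with M == -128, the subtraction -1 - (-128) == 127
// does not signed-wrap, but the rewritten form -1 + (-1)*(-128) must compute
// (-1)*(-128), which wraps back to -128. Hence NSW is only transferred once
// RHS != M has been established.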
const SCEV *ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty,
                                                     unsigned Depth) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate or zero extend with non-integer arguments!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V; // No conversion
  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
    return getTruncateExpr(V, Ty, Depth);
  return getZeroExtendExpr(V, Ty, Depth);
}

const SCEV *ScalarEvolution::getTruncateOrSignExtend(const SCEV *V, Type *Ty,
                                                     unsigned Depth) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate or sign extend with non-integer arguments!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V; // No conversion
  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
    return getTruncateExpr(V, Ty, Depth);
  return getSignExtendExpr(V, Ty, Depth);
}
const SCEV *
ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot noop or zero extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrZeroExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V; // No conversion
  return getZeroExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot noop or sign extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrSignExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V; // No conversion
  return getSignExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot noop or any extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrAnyExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V; // No conversion
  return getAnyExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getTruncateOrNoop(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate or noop with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
         "getTruncateOrNoop cannot extend!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V; // No conversion
  return getTruncateExpr(V, Ty);
}
const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS,
                                                        const SCEV *RHS) {
  const SCEV *PromotedLHS = LHS;
  const SCEV *PromotedRHS = RHS;

  if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
    PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
  else
    PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());

  return getUMaxExpr(PromotedLHS, PromotedRHS);
}

const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS,
                                                        const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
  return getUMinFromMismatchedTypes(Ops);
}

const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(
    SmallVectorImpl<const SCEV *> &Ops) {
  assert(!Ops.empty() && "At least one operand must be!");
  // Trivial case.
  if (Ops.size() == 1)
    return Ops[0];

  // Find the max type first.
  Type *MaxType = nullptr;
  for (auto *S : Ops)
    if (MaxType)
      MaxType = getWiderType(MaxType, S->getType());
    else
      MaxType = S->getType();

  // Extend all ops to max type.
  SmallVector<const SCEV *, 2> PromotedOps;
  for (auto *S : Ops)
    PromotedOps.push_back(getNoopOrZeroExtend(S, MaxType));

  // Generate umin.
  return getUMinExpr(PromotedOps);
}
const SCEV *ScalarEvolution::getPointerBase(const SCEV *V) {
  // A pointer operand may evaluate to a nonpointer expression, such as null.
  if (!V->getType()->isPointerTy())
    return V;

  if (const SCEVCastExpr *Cast = dyn_cast<SCEVCastExpr>(V)) {
    return getPointerBase(Cast->getOperand());
  } else if (const SCEVNAryExpr *NAry = dyn_cast<SCEVNAryExpr>(V)) {
    const SCEV *PtrOp = nullptr;
    for (const SCEV *NAryOp : NAry->operands()) {
      if (NAryOp->getType()->isPointerTy()) {
        // Cannot find the base of an expression with multiple pointer operands.
        if (PtrOp)
          return V;
        PtrOp = NAryOp;
      }
    }
    if (!PtrOp) // All operands were non-pointer.
      return V;
    return getPointerBase(PtrOp);
  }
  return V;
}
/// Push users of the given Instruction onto the given Worklist.
static void
PushDefUseChildren(Instruction *I,
                   SmallVectorImpl<Instruction *> &Worklist) {
  // Push the def-use children onto the Worklist stack.
  for (User *U : I->users())
    Worklist.push_back(cast<Instruction>(U));
}
void ScalarEvolution::forgetSymbolicName(Instruction *PN, const SCEV *SymName) {
  SmallVector<Instruction *, 16> Worklist;
  PushDefUseChildren(PN, Worklist);

  SmallPtrSet<Instruction *, 8> Visited;
  Visited.insert(PN);
  while (!Worklist.empty()) {
    Instruction *I = Worklist.pop_back_val();
    if (!Visited.insert(I).second)
      continue;

    auto It = ValueExprMap.find_as(static_cast<Value *>(I));
    if (It != ValueExprMap.end()) {
      const SCEV *Old = It->second;

      // Short-circuit the def-use traversal if the symbolic name
      // ceases to appear in expressions.
      if (Old != SymName && !hasOperand(Old, SymName))
        continue;

      // SCEVUnknown for a PHI either means that it has an unrecognized
      // structure, it's a PHI that's in the process of being computed
      // by createNodeForPHI, or it's a single-value PHI. In the first case,
      // additional loop trip count information isn't going to change anything.
      // In the second case, createNodeForPHI will perform the necessary
      // updates on its own when it gets to that point. In the third, we do
      // want to forget the SCEVUnknown.
      if (!isa<PHINode>(I) ||
          !isa<SCEVUnknown>(Old) ||
          (I != PN && Old == SymName)) {
        eraseValueFromMap(It->first);
        forgetMemoizedResults(Old);
      }
    }

    PushDefUseChildren(I, Worklist);
  }
}
namespace {

/// Takes SCEV S and Loop L. For each AddRec sub-expression, use its start
/// expression in case its Loop is L. If it is not L then
/// if IgnoreOtherLoops is true then use AddRec itself
/// otherwise rewrite cannot be done.
/// If SCEV contains non-invariant unknown SCEV rewrite cannot be done.
class SCEVInitRewriter : public SCEVRewriteVisitor<SCEVInitRewriter> {
public:
  static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE,
                             bool IgnoreOtherLoops = true) {
    SCEVInitRewriter Rewriter(L, SE);
    const SCEV *Result = Rewriter.visit(S);
    if (Rewriter.hasSeenLoopVariantSCEVUnknown())
      return SE.getCouldNotCompute();
    return Rewriter.hasSeenOtherLoops() && !IgnoreOtherLoops
               ? SE.getCouldNotCompute()
               : Result;
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    if (!SE.isLoopInvariant(Expr, L))
      SeenLoopVariantSCEVUnknown = true;
    return Expr;
  }

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
    // Only re-write AddRecExprs for this loop.
    if (Expr->getLoop() == L)
      return Expr->getStart();
    SeenOtherLoops = true;
    return Expr;
  }

  bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; }

  bool hasSeenOtherLoops() { return SeenOtherLoops; }

private:
  explicit SCEVInitRewriter(const Loop *L, ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), L(L) {}

  const Loop *L;
  bool SeenLoopVariantSCEVUnknown = false;
  bool SeenOtherLoops = false;
};
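// Illustrative example (not part of the original source): rewriting
// (%n + {0,+,1}<L>) with SCEVInitRewriter for L yields (%n + 0) == %n, the
// expression's value on entry to L, provided %n is invariant in L.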
/// Takes SCEV S and Loop L. For each AddRec sub-expression, use its post
/// increment expression in case its Loop is L. If it is not L then
/// use AddRec itself.
/// If SCEV contains non-invariant unknown SCEV rewrite cannot be done.
class SCEVPostIncRewriter : public SCEVRewriteVisitor<SCEVPostIncRewriter> {
public:
  static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE) {
    SCEVPostIncRewriter Rewriter(L, SE);
    const SCEV *Result = Rewriter.visit(S);
    return Rewriter.hasSeenLoopVariantSCEVUnknown()
               ? SE.getCouldNotCompute()
               : Result;
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    if (!SE.isLoopInvariant(Expr, L))
      SeenLoopVariantSCEVUnknown = true;
    return Expr;
  }

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
    // Only re-write AddRecExprs for this loop.
    if (Expr->getLoop() == L)
      return Expr->getPostIncExpr(SE);
    SeenOtherLoops = true;
    return Expr;
  }

  bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; }

  bool hasSeenOtherLoops() { return SeenOtherLoops; }

private:
  explicit SCEVPostIncRewriter(const Loop *L, ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), L(L) {}

  const Loop *L;
  bool SeenLoopVariantSCEVUnknown = false;
  bool SeenOtherLoops = false;
};
/// This class evaluates the compare condition by matching it against the
/// condition of loop latch. If there is a match we assume a true value
/// for the condition while building SCEV nodes.
class SCEVBackedgeConditionFolder
    : public SCEVRewriteVisitor<SCEVBackedgeConditionFolder> {
public:
  static const SCEV *rewrite(const SCEV *S, const Loop *L,
                             ScalarEvolution &SE) {
    bool IsPosBECond = false;
    Value *BECond = nullptr;
    if (BasicBlock *Latch = L->getLoopLatch()) {
      BranchInst *BI = dyn_cast<BranchInst>(Latch->getTerminator());
      if (BI && BI->isConditional()) {
        assert(BI->getSuccessor(0) != BI->getSuccessor(1) &&
               "Both outgoing branches should not target same header!");
        BECond = BI->getCondition();
        IsPosBECond = BI->getSuccessor(0) == L->getHeader();
      } else {
        return S;
      }
    }
    SCEVBackedgeConditionFolder Rewriter(L, BECond, IsPosBECond, SE);
    return Rewriter.visit(S);
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    const SCEV *Result = Expr;
    bool InvariantF = SE.isLoopInvariant(Expr, L);

    if (!InvariantF) {
      Instruction *I = cast<Instruction>(Expr->getValue());
      switch (I->getOpcode()) {
      case Instruction::Select: {
        SelectInst *SI = cast<SelectInst>(I);
        Optional<const SCEV *> Res =
            compareWithBackedgeCondition(SI->getCondition());
        if (Res.hasValue()) {
          bool IsOne = cast<SCEVConstant>(Res.getValue())->getValue()->isOne();
          Result = SE.getSCEV(IsOne ? SI->getTrueValue() : SI->getFalseValue());
        }
        break;
      }
      default: {
        Optional<const SCEV *> Res = compareWithBackedgeCondition(I);
        if (Res.hasValue())
          Result = Res.getValue();
        break;
      }
      }
    }
    return Result;
  }

private:
  explicit SCEVBackedgeConditionFolder(const Loop *L, Value *BECond,
                                       bool IsPosBECond, ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), L(L), BackedgeCond(BECond),
        IsPositiveBECond(IsPosBECond) {}

  Optional<const SCEV *> compareWithBackedgeCondition(Value *IC);

  const Loop *L;
  /// Loop back condition.
  Value *BackedgeCond = nullptr;
  /// Set to true if loop back is on positive branch condition.
  bool IsPositiveBECond;
};

Optional<const SCEV *>
SCEVBackedgeConditionFolder::compareWithBackedgeCondition(Value *IC) {

  // If value matches the backedge condition for loop latch,
  // then return a constant evolution node based on loopback
  // branch taken.
  if (BackedgeCond == IC)
    return IsPositiveBECond ? SE.getOne(Type::getInt1Ty(SE.getContext()))
                            : SE.getZero(Type::getInt1Ty(SE.getContext()));
  return None;
}
class SCEVShiftRewriter : public SCEVRewriteVisitor<SCEVShiftRewriter> {
public:
  static const SCEV *rewrite(const SCEV *S, const Loop *L,
                             ScalarEvolution &SE) {
    SCEVShiftRewriter Rewriter(L, SE);
    const SCEV *Result = Rewriter.visit(S);
    return Rewriter.isValid() ? Result : SE.getCouldNotCompute();
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    // Only allow AddRecExprs for this loop.
    if (!SE.isLoopInvariant(Expr, L))
      Valid = false;
    return Expr;
  }

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
    if (Expr->getLoop() == L && Expr->isAffine())
      return SE.getMinusSCEV(Expr, Expr->getStepRecurrence(SE));
    Valid = false;
    return Expr;
  }

  bool isValid() { return Valid; }

private:
  explicit SCEVShiftRewriter(const Loop *L, ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), L(L) {}

  const Loop *L;
  bool Valid = true;
};

} // end anonymous namespace
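// Illustrative example (not part of the original source): SCEVShiftRewriter
// maps the affine recurrence {%a,+,%b}<L> to {%a - %b,+,%b}<L>, i.e. the
// same evolution shifted back by one iteration.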
SCEV::NoWrapFlags
ScalarEvolution::proveNoWrapViaConstantRanges(const SCEVAddRecExpr *AR) {
  if (!AR->isAffine())
    return SCEV::FlagAnyWrap;

  using OBO = OverflowingBinaryOperator;

  SCEV::NoWrapFlags Result = SCEV::FlagAnyWrap;

  if (!AR->hasNoSignedWrap()) {
    ConstantRange AddRecRange = getSignedRange(AR);
    ConstantRange IncRange = getSignedRange(AR->getStepRecurrence(*this));

    auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
        Instruction::Add, IncRange, OBO::NoSignedWrap);
    if (NSWRegion.contains(AddRecRange))
      Result = ScalarEvolution::setFlags(Result, SCEV::FlagNSW);
  }

  if (!AR->hasNoUnsignedWrap()) {
    ConstantRange AddRecRange = getUnsignedRange(AR);
    ConstantRange IncRange = getUnsignedRange(AR->getStepRecurrence(*this));

    auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
        Instruction::Add, IncRange, OBO::NoUnsignedWrap);
    if (NUWRegion.contains(AddRecRange))
      Result = ScalarEvolution::setFlags(Result, SCEV::FlagNUW);
  }

  return Result;
}
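// Illustrative example (not part of the original source): for an i8 addrec
// with a constant step of 1, the increment range is [1,2) and
// makeGuaranteedNoWrapRegion returns the signed values that can be
// incremented by 1 without overflow, i.e. [-128,127). If getSignedRange(AR)
// is, say, [0,100), it lies inside that region and FlagNSW can be set.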
namespace {

/// Represents an abstract binary operation. This may exist as a
/// normal instruction or constant expression, or may have been
/// derived from an expression tree.
struct BinaryOp {
  unsigned Opcode;
  Value *LHS;
  Value *RHS;
  bool IsNSW = false;
  bool IsNUW = false;

  /// Op is set if this BinaryOp corresponds to a concrete LLVM instruction or
  /// constant expression.
  Operator *Op = nullptr;

  explicit BinaryOp(Operator *Op)
      : Opcode(Op->getOpcode()), LHS(Op->getOperand(0)), RHS(Op->getOperand(1)),
        Op(Op) {
    if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(Op)) {
      IsNSW = OBO->hasNoSignedWrap();
      IsNUW = OBO->hasNoUnsignedWrap();
    }
  }

  explicit BinaryOp(unsigned Opcode, Value *LHS, Value *RHS, bool IsNSW = false,
                    bool IsNUW = false)
      : Opcode(Opcode), LHS(LHS), RHS(RHS), IsNSW(IsNSW), IsNUW(IsNUW) {}
};

} // end anonymous namespace
/// Try to map \p V into a BinaryOp, and return \c None on failure.
static Optional<BinaryOp> MatchBinaryOp(Value *V, DominatorTree &DT) {
  auto *Op = dyn_cast<Operator>(V);
  if (!Op)
    return None;

  // Implementation detail: all the cleverness here should happen without
  // creating new SCEV expressions -- our caller knows tricks to avoid creating
  // SCEV expressions when possible, and we should not break that.

  switch (Op->getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::URem:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::AShr:
  case Instruction::Shl:
    return BinaryOp(Op);

  case Instruction::Xor:
    if (auto *RHSC = dyn_cast<ConstantInt>(Op->getOperand(1)))
      // If the RHS of the xor is a signmask, then this is just an add.
      // Instcombine turns add of signmask into xor as a strength reduction step.
      if (RHSC->getValue().isSignMask())
        return BinaryOp(Instruction::Add, Op->getOperand(0), Op->getOperand(1));
    return BinaryOp(Op);

  case Instruction::LShr:
    // Turn logical shift right of a constant into an unsigned divide.
    if (ConstantInt *SA = dyn_cast<ConstantInt>(Op->getOperand(1))) {
      uint32_t BitWidth = cast<IntegerType>(Op->getType())->getBitWidth();

      // If the shift count is not less than the bitwidth, the result of
      // the shift is undefined. Don't try to analyze it, because the
      // resolution chosen here may differ from the resolution chosen in
      // other parts of the compiler.
      if (SA->getValue().ult(BitWidth)) {
        Constant *X =
            ConstantInt::get(SA->getContext(),
                             APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
        return BinaryOp(Instruction::UDiv, Op->getOperand(0), X);
      }
    }
    return BinaryOp(Op);

  case Instruction::ExtractValue: {
    auto *EVI = cast<ExtractValueInst>(Op);
    if (EVI->getNumIndices() != 1 || EVI->getIndices()[0] != 0)
      break;

    auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand());
    if (!WO)
      break;

    Instruction::BinaryOps BinOp = WO->getBinaryOp();
    bool Signed = WO->isSigned();
    // TODO: Should add nuw/nsw flags for mul as well.
    if (BinOp == Instruction::Mul || !isOverflowIntrinsicNoWrap(WO, DT))
      return BinaryOp(BinOp, WO->getLHS(), WO->getRHS());

    // Now that we know that all uses of the arithmetic-result component of
    // CI are guarded by the overflow check, we can go ahead and pretend
    // that the arithmetic is non-overflowing.
    return BinaryOp(BinOp, WO->getLHS(), WO->getRHS(),
                    /* IsNSW = */ Signed, /* IsNUW = */ !Signed);
  }

  default:
    break;
  }

  return None;
}
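// Illustrative examples (not part of the original source):
//   xor i32 %x, -2147483648  -->  BinaryOp(Add, %x, -2147483648), since the
//     RHS is the i32 sign mask and instcombine canonicalizes such adds to xor.
//   lshr i32 %x, 3           -->  BinaryOp(UDiv, %x, 8), since a logical
//     right shift by 3 divides by 2^3 == 8.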
/// Helper function to createAddRecFromPHIWithCasts. We have a phi
/// node whose symbolic (unknown) SCEV is \p SymbolicPHI, which is updated via
/// the loop backedge by a SCEVAddExpr, possibly also with a few casts on the
/// way. This function checks if \p Op, an operand of this SCEVAddExpr,
/// follows one of the following patterns:
/// Op == (SExt ix (Trunc iy (%SymbolicPHI) to ix) to iy)
/// Op == (ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy)
/// If the SCEV expression of \p Op conforms with one of the expected patterns
/// we return the type of the truncation operation, and indicate whether the
/// truncated type should be treated as signed/unsigned by setting
/// \p Signed to true/false, respectively.
static Type *isSimpleCastedPHI(const SCEV *Op, const SCEVUnknown *SymbolicPHI,
                               bool &Signed, ScalarEvolution &SE) {
  // The case where Op == SymbolicPHI (that is, with no type conversions on
  // the way) is handled by the regular add recurrence creating logic and
  // would have already been triggered in createAddRecForPHI. Reaching it here
  // means that createAddRecFromPHI had failed for this PHI before (e.g.,
  // because one of the other operands of the SCEVAddExpr updating this PHI is
  // not invariant).
  //
  // Here we look for the case where Op = (ext(trunc(SymbolicPHI))), and in
  // this case predicates that allow us to prove that Op == SymbolicPHI will
  // be added.
  if (Op == SymbolicPHI)
    return nullptr;

  unsigned SourceBits = SE.getTypeSizeInBits(SymbolicPHI->getType());
  unsigned NewBits = SE.getTypeSizeInBits(Op->getType());
  if (SourceBits != NewBits)
    return nullptr;

  const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(Op);
  const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(Op);
  if (!SExt && !ZExt)
    return nullptr;
  const SCEVTruncateExpr *Trunc =
      SExt ? dyn_cast<SCEVTruncateExpr>(SExt->getOperand())
           : dyn_cast<SCEVTruncateExpr>(ZExt->getOperand());
  if (!Trunc)
    return nullptr;
  const SCEV *X = Trunc->getOperand();
  if (X != SymbolicPHI)
    return nullptr;
  Signed = SExt != nullptr;
  return Trunc->getType();
}
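// Illustrative example (not part of the original source): for an i64 %phi,
// Op == (sext i32 (trunc i64 %phi to i32) to i64) matches the pattern: the
// function returns i32 as the truncation type and sets Signed to true; the
// zext form of the same expression returns i32 with Signed == false.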
static const Loop *isIntegerLoopHeaderPHI(const PHINode *PN, LoopInfo &LI) {
  if (!PN->getType()->isIntegerTy())
    return nullptr;
  const Loop *L = LI.getLoopFor(PN->getParent());
  if (!L || L->getHeader() != PN->getParent())
    return nullptr;
  return L;
}
4626 // Analyze \p SymbolicPHI, a SCEV expression of a phi node, and check if the
4627 // computation that updates the phi follows the following pattern:
4628 // (SExt/ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) + InvariantAccum
4629 // which correspond to a phi->trunc->sext/zext->add->phi update chain.
4630 // If so, try to see if it can be rewritten as an AddRecExpr under some
4631 // Predicates. If successful, return them as a pair. Also cache the results
4634 // Example usage scenario:
4635 // Say the Rewriter is called for the following SCEV:
4636 // 8 * ((sext i32 (trunc i64 %X to i32) to i64) + %Step)
4638 // %X = phi i64 (%Start, %BEValue)
4639 // It will visitMul->visitAdd->visitSExt->visitTrunc->visitUnknown(%X),
4640 // and call this function with %SymbolicPHI = %X.
4642 // The analysis will find that the value coming around the backedge has
4643 // the following SCEV:
4644 // BEValue = ((sext i32 (trunc i64 %X to i32) to i64) + %Step)
4645 // Upon concluding that this matches the desired pattern, the function
4646 // will return the pair {NewAddRec, SmallPredsVec} where:
4647 // NewAddRec = {%Start,+,%Step}
4648 // SmallPredsVec = {P1, P2, P3} as follows:
4649 // P1(WrapPred): AR: {trunc(%Start),+,(trunc %Step)}<nsw> Flags: <nssw>
4650 // P2(EqualPred): %Start == (sext i32 (trunc i64 %Start to i32) to i64)
4651 // P3(EqualPred): %Step == (sext i32 (trunc i64 %Step to i32) to i64)
4652 // The returned pair means that SymbolicPHI can be rewritten into NewAddRec
4653 // under the predicates {P1,P2,P3}.
4654 // This predicated rewrite will be cached in PredicatedSCEVRewrites:
4655 // PredicatedSCEVRewrites[{%X,L}] = {NewAddRec, {P1,P2,P3)}
4659 // 1) Extend the Induction descriptor to also support inductions that involve
4660 // casts: When needed (namely, when we are called in the context of the
4661 // vectorizer induction analysis), a Set of cast instructions will be
4662 // populated by this method, and provided back to isInductionPHI. This is
4663 // needed to allow the vectorizer to properly record them to be ignored by
4664 // the cost model and to avoid vectorizing them (otherwise these casts,
4665 // which are redundant under the runtime overflow checks, will be
4666 // vectorized, which can be costly).
4668 // 2) Support additional induction/PHISCEV patterns: We also want to support
4669 // inductions where the sext-trunc / zext-trunc operations (partly) occur
4670 // after the induction update operation (the induction increment):
4672 // (Trunc iy (SExt/ZExt ix (%SymbolicPHI + InvariantAccum) to iy) to ix)
4673 // which correspond to a phi->add->trunc->sext/zext->phi update chain.
4675 // (Trunc iy ((SExt/ZExt ix (%SymbolicPhi) to iy) + InvariantAccum) to ix)
4676 // which correspond to a phi->trunc->add->sext/zext->phi update chain.
4678 // 3) Outline common code with createAddRecFromPHI to avoid duplication.
4679 Optional
<std::pair
<const SCEV
*, SmallVector
<const SCEVPredicate
*, 3>>>
4680 ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown
*SymbolicPHI
) {
4681 SmallVector
<const SCEVPredicate
*, 3> Predicates
;
4683 // *** Part1: Analyze if we have a phi-with-cast pattern for which we can
4684 // return an AddRec expression under some predicate.
4686 auto *PN
= cast
<PHINode
>(SymbolicPHI
->getValue());
4687 const Loop
*L
= isIntegerLoopHeaderPHI(PN
, LI
);
4688 assert(L
&& "Expecting an integer loop header phi");
4690 // The loop may have multiple entrances or multiple exits; we can analyze
4691 // this phi as an addrec if it has a unique entry value and a unique
4693 Value
*BEValueV
= nullptr, *StartValueV
= nullptr;
4694 for (unsigned i
= 0, e
= PN
->getNumIncomingValues(); i
!= e
; ++i
) {
4695 Value
*V
= PN
->getIncomingValue(i
);
4696 if (L
->contains(PN
->getIncomingBlock(i
))) {
4699 } else if (BEValueV
!= V
) {
4703 } else if (!StartValueV
) {
4705 } else if (StartValueV
!= V
) {
4706 StartValueV
= nullptr;
4710 if (!BEValueV
|| !StartValueV
)
4713 const SCEV
*BEValue
= getSCEV(BEValueV
);
4715 // If the value coming around the backedge is an add with the symbolic
4716 // value we just inserted, possibly with casts that we can ignore under
4717 // an appropriate runtime guard, then we found a simple induction variable!
4718 const auto *Add
= dyn_cast
<SCEVAddExpr
>(BEValue
);
4722 // If there is a single occurrence of the symbolic value, possibly
4723 // casted, replace it with a recurrence.
4724 unsigned FoundIndex
= Add
->getNumOperands();
4725 Type
*TruncTy
= nullptr;
4727 for (unsigned i
= 0, e
= Add
->getNumOperands(); i
!= e
; ++i
)
4729 isSimpleCastedPHI(Add
->getOperand(i
), SymbolicPHI
, Signed
, *this)))
4730 if (FoundIndex
== e
) {
4735 if (FoundIndex
== Add
->getNumOperands())
4738 // Create an add with everything but the specified operand.
4739 SmallVector
<const SCEV
*, 8> Ops
;
4740 for (unsigned i
= 0, e
= Add
->getNumOperands(); i
!= e
; ++i
)
4741 if (i
!= FoundIndex
)
4742 Ops
.push_back(Add
->getOperand(i
));
4743 const SCEV
*Accum
= getAddExpr(Ops
);
4745 // The runtime checks will not be valid if the step amount is
4746 // varying inside the loop.
4747 if (!isLoopInvariant(Accum
, L
))

  // *** Part2: Create the predicates

  // Analysis was successful: we have a phi-with-cast pattern for which we
  // can return an AddRec expression under the following predicates:
  //
  // P1: A Wrap predicate that guarantees that Trunc(Start) + i*Trunc(Accum)
  //     fits within the truncated type (does not overflow) for i = 0 to n-1.
  // P2: An Equal predicate that guarantees that
  //     Start = (Ext ix (Trunc iy (Start) to ix) to iy)
  // P3: An Equal predicate that guarantees that
  //     Accum = (Ext ix (Trunc iy (Accum) to ix) to iy)
  //
  // As we next prove, the above predicates guarantee that:
  //     Start + i*Accum = (Ext ix (Trunc iy ( Start + i*Accum ) to ix) to iy)
  //
  // More formally, we want to prove that:
  //     Expr(i+1) = Start + (i+1) * Accum
  //               = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum
  //
  // Given that:
  // 1) Expr(0) = Start
  // 2) Expr(1) = Start + Accum
  //            = (Ext ix (Trunc iy (Start) to ix) to iy) + Accum :: from P2
  // 3) Induction hypothesis (step i):
  //    Expr(i) = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum
  //
  // Proof:
  //   Expr(i+1) =
  //   = Start + (i+1)*Accum
  //   = (Start + i*Accum) + Accum
  //   = Expr(i) + Accum
  //   = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum + Accum
  //                                                   :: from step 3
  //
  //   = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy) + Accum + Accum
  //
  //   = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy)
  //     + (Ext ix (Trunc iy (Accum) to ix) to iy)
  //     + Accum                                       :: from P3
  //
  //   = (Ext ix (Trunc iy ((Start + (i-1)*Accum) + Accum) to ix) to iy)
  //     + Accum                   :: from P1: Ext(x)+Ext(y)=>Ext(x+y)
  //
  //   = (Ext ix (Trunc iy (Start + i*Accum) to ix) to iy) + Accum
  //   = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum
  //
  // By induction, the same applies to all iterations 1<=i<n:
  //
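
  // As a concrete illustration (ours, not from the original source): with
  // ix = i8, iy = i32, Start = 0 and Accum = 1, the predicates reduce to
  // requiring that the truncated i8 IV never wraps (P1) and that 0 and 1
  // survive the i32 -> i8 -> i32 round trip (P2, P3); under those runtime
  // checks the phi can be rewritten as the AddRec {0,+,1}.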

  // Create a truncated addrec for which we will add a no overflow check (P1).
  const SCEV *StartVal = getSCEV(StartValueV);
  const SCEV *PHISCEV =
      getAddRecExpr(getTruncateExpr(StartVal, TruncTy),
                    getTruncateExpr(Accum, TruncTy), L, SCEV::FlagAnyWrap);

  // PHISCEV can be either a SCEVConstant or a SCEVAddRecExpr.
  // ex: If truncated Accum is 0 and StartVal is a constant, then PHISCEV
  // will be constant.
  //
  // If PHISCEV is a constant, then P1 degenerates into P2 or P3, so we don't
  // add P1.
  if (const auto *AR = dyn_cast<SCEVAddRecExpr>(PHISCEV)) {
    SCEVWrapPredicate::IncrementWrapFlags AddedFlags =
        Signed ? SCEVWrapPredicate::IncrementNSSW
               : SCEVWrapPredicate::IncrementNUSW;
    const SCEVPredicate *AddRecPred = getWrapPredicate(AR, AddedFlags);
    Predicates.push_back(AddRecPred);
  }

  // Create the Equal Predicates P2,P3:

  // It is possible that the predicates P2 and/or P3 are computable at
  // compile time due to StartVal and/or Accum being constants.
  // If either one is, then we can check that now and escape if either P2
  // or P3 is false.

  // Construct the extended SCEV: (Ext ix (Trunc iy (Expr) to ix) to iy)
  // for each of StartVal and Accum
  auto getExtendedExpr = [&](const SCEV *Expr,
                             bool CreateSignExtend) -> const SCEV * {
    assert(isLoopInvariant(Expr, L) && "Expr is expected to be invariant");
    const SCEV *TruncatedExpr = getTruncateExpr(Expr, TruncTy);
    const SCEV *ExtendedExpr =
        CreateSignExtend ? getSignExtendExpr(TruncatedExpr, Expr->getType())
                         : getZeroExtendExpr(TruncatedExpr, Expr->getType());
    return ExtendedExpr;
  };

  // ExtendedExpr = (Ext ix (Trunc iy (Expr) to ix) to iy)
  //              = getExtendedExpr(Expr)
  // Determine whether the predicate P: Expr == ExtendedExpr
  // is known to be false at compile time
  auto PredIsKnownFalse = [&](const SCEV *Expr,
                              const SCEV *ExtendedExpr) -> bool {
    return Expr != ExtendedExpr &&
           isKnownPredicate(ICmpInst::ICMP_NE, Expr, ExtendedExpr);
  };

  const SCEV *StartExtended = getExtendedExpr(StartVal, Signed);
  if (PredIsKnownFalse(StartVal, StartExtended)) {
    LLVM_DEBUG(dbgs() << "P2 is compile-time false\n";);
    return None;
  }

  // The Step is always Signed (because the overflow checks are either
  // NSSW or NUSW).
  const SCEV *AccumExtended = getExtendedExpr(Accum, /*CreateSignExtend=*/true);
  if (PredIsKnownFalse(Accum, AccumExtended)) {
    LLVM_DEBUG(dbgs() << "P3 is compile-time false\n";);
    return None;
  }

  auto AppendPredicate = [&](const SCEV *Expr,
                             const SCEV *ExtendedExpr) -> void {
    if (Expr != ExtendedExpr &&
        !isKnownPredicate(ICmpInst::ICMP_EQ, Expr, ExtendedExpr)) {
      const SCEVPredicate *Pred = getEqualPredicate(Expr, ExtendedExpr);
      LLVM_DEBUG(dbgs() << "Added Predicate: " << *Pred);
      Predicates.push_back(Pred);
    }
  };

  AppendPredicate(StartVal, StartExtended);
  AppendPredicate(Accum, AccumExtended);

  // *** Part3: Predicates are ready. Now go ahead and create the new addrec in
  // which the casts had been folded away. The caller can rewrite SymbolicPHI
  // into NewAR if it will also add the runtime overflow checks specified in
  // Predicates.
  auto *NewAR = getAddRecExpr(StartVal, Accum, L, SCEV::FlagAnyWrap);

  std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> PredRewrite =
      std::make_pair(NewAR, Predicates);
  // Remember the result of the analysis for this SCEV at this location.
  PredicatedSCEVRewrites[{SymbolicPHI, L}] = PredRewrite;
  return PredRewrite;
}

Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
ScalarEvolution::createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI) {
  auto *PN = cast<PHINode>(SymbolicPHI->getValue());
  const Loop *L = isIntegerLoopHeaderPHI(PN, LI);
  if (!L)
    return None;

  // Check to see if we already analyzed this PHI.
  auto I = PredicatedSCEVRewrites.find({SymbolicPHI, L});
  if (I != PredicatedSCEVRewrites.end()) {
    std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> Rewrite =
        I->second;
    // Analysis was done before and failed to create an AddRec:
    if (Rewrite.first == SymbolicPHI)
      return None;
    // Analysis was done before and succeeded to create an AddRec under
    // a predicate:
    assert(isa<SCEVAddRecExpr>(Rewrite.first) && "Expected an AddRec");
    assert(!(Rewrite.second).empty() && "Expected to find Predicates");
    return Rewrite;
  }

  Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
      Rewrite = createAddRecFromPHIWithCastsImpl(SymbolicPHI);

  // Record in the cache that the analysis failed
  if (!Rewrite) {
    SmallVector<const SCEVPredicate *, 3> Predicates;
    PredicatedSCEVRewrites[{SymbolicPHI, L}] = {SymbolicPHI, Predicates};
    return None;
  }

  return Rewrite;
}

// FIXME: This utility is currently required because the Rewriter currently
// does not rewrite this expression:
// {0, +, (sext ix (trunc iy to ix) to iy)}
// into {0, +, %step},
// even when the following Equal predicate exists:
// "%step == (sext ix (trunc iy to ix) to iy)".
bool PredicatedScalarEvolution::areAddRecsEqualWithPreds(
    const SCEVAddRecExpr *AR1, const SCEVAddRecExpr *AR2) const {
  if (AR1 == AR2)
    return true;

  auto areExprsEqual = [&](const SCEV *Expr1, const SCEV *Expr2) -> bool {
    if (Expr1 != Expr2 && !Preds.implies(SE.getEqualPredicate(Expr1, Expr2)) &&
        !Preds.implies(SE.getEqualPredicate(Expr2, Expr1)))
      return false;
    return true;
  };

  if (!areExprsEqual(AR1->getStart(), AR2->getStart()) ||
      !areExprsEqual(AR1->getStepRecurrence(SE), AR2->getStepRecurrence(SE)))
    return false;
  return true;
}

/// A helper function for createAddRecFromPHI to handle simple cases.
///
/// This function tries to find an AddRec expression for the simplest (yet most
/// common) cases: PN = PHI(Start, OP(Self, LoopInvariant)).
/// If it fails, createAddRecFromPHI will use a more general, but slow,
/// technique for finding the AddRec expression.
const SCEV *ScalarEvolution::createSimpleAffineAddRec(PHINode *PN,
                                                      Value *BEValueV,
                                                      Value *StartValueV) {
  const Loop *L = LI.getLoopFor(PN->getParent());
  assert(L && L->getHeader() == PN->getParent());
  assert(BEValueV && StartValueV);

  auto BO = MatchBinaryOp(BEValueV, DT);
  if (!BO)
    return nullptr;

  if (BO->Opcode != Instruction::Add)
    return nullptr;

  const SCEV *Accum = nullptr;
  if (BO->LHS == PN && L->isLoopInvariant(BO->RHS))
    Accum = getSCEV(BO->RHS);
  else if (BO->RHS == PN && L->isLoopInvariant(BO->LHS))
    Accum = getSCEV(BO->LHS);

  if (!Accum)
    return nullptr;

  SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
  if (BO->IsNUW)
    Flags = setFlags(Flags, SCEV::FlagNUW);
  if (BO->IsNSW)
    Flags = setFlags(Flags, SCEV::FlagNSW);

  const SCEV *StartVal = getSCEV(StartValueV);
  const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);

  ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;

  // We can add Flags to the post-inc expression only if we
  // know that it is *undefined behavior* for BEValueV to
  // overflow.
  if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
    if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
      (void)getAddRecExpr(getAddExpr(StartVal, Accum, Flags), Accum, L, Flags);

  return PHISCEV;
}
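
// For reference, a minimal IR instance of the simple affine case handled
// above (illustrative only, not taken from this file):
//
//   loop:
//     %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
//     %iv.next = add nsw i32 %iv, 4
//
// Here BEValueV is %iv.next, StartValueV is 0 and Accum is 4, so the phi is
// mapped to the AddRec {0,+,4}<nsw> for %loop.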

const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) {
  const Loop *L = LI.getLoopFor(PN->getParent());
  if (!L || L->getHeader() != PN->getParent())
    return nullptr;

  // The loop may have multiple entrances or multiple exits; we can analyze
  // this phi as an addrec if it has a unique entry value and a unique
  // backedge value.
  Value *BEValueV = nullptr, *StartValueV = nullptr;
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    Value *V = PN->getIncomingValue(i);
    if (L->contains(PN->getIncomingBlock(i))) {
      if (!BEValueV) {
        BEValueV = V;
      } else if (BEValueV != V) {
        BEValueV = nullptr;
        break;
      }
    } else if (!StartValueV) {
      StartValueV = V;
    } else if (StartValueV != V) {
      StartValueV = nullptr;
      break;
    }
  }
  if (!BEValueV || !StartValueV)
    return nullptr;

  assert(ValueExprMap.find_as(PN) == ValueExprMap.end() &&
         "PHI node already processed?");

  // First, try to find an AddRec expression without creating a fictitious
  // symbolic IV.
  if (auto *S = createSimpleAffineAddRec(PN, BEValueV, StartValueV))
    return S;

  // Handle PHI node value symbolically.
  const SCEV *SymbolicName = getUnknown(PN);
  ValueExprMap.insert({SCEVCallbackVH(PN, this), SymbolicName});

  // Using this symbolic name for the PHI, analyze the value coming around
  // the back-edge.
  const SCEV *BEValue = getSCEV(BEValueV);

  // NOTE: If BEValue is loop invariant, we know that the PHI node just
  // has a special value for the first iteration of the loop.

  // If the value coming around the backedge is an add with the symbolic
  // value we just inserted, then we found a simple induction variable!
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
    // If there is a single occurrence of the symbolic value, replace it
    // with a recurrence.
    unsigned FoundIndex = Add->getNumOperands();
    for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
      if (Add->getOperand(i) == SymbolicName)
        if (FoundIndex == e) {
          FoundIndex = i;
          break;
        }

    if (FoundIndex != Add->getNumOperands()) {
      // Create an add with everything but the specified operand.
      SmallVector<const SCEV *, 8> Ops;
      for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
        if (i != FoundIndex)
          Ops.push_back(SCEVBackedgeConditionFolder::rewrite(Add->getOperand(i),
                                                             L, *this));
      const SCEV *Accum = getAddExpr(Ops);

      // This is not a valid addrec if the step amount is varying each
      // loop iteration, but is not itself an addrec in this loop.
      if (isLoopInvariant(Accum, L) ||
          (isa<SCEVAddRecExpr>(Accum) &&
           cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
        SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;

        if (auto BO = MatchBinaryOp(BEValueV, DT)) {
          if (BO->Opcode == Instruction::Add && BO->LHS == PN) {
            if (BO->IsNUW)
              Flags = setFlags(Flags, SCEV::FlagNUW);
            if (BO->IsNSW)
              Flags = setFlags(Flags, SCEV::FlagNSW);
          }
        } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(BEValueV)) {
          // If the increment is an inbounds GEP, then we know the address
          // space cannot be wrapped around. We cannot make any guarantee
          // about signed or unsigned overflow because pointers are
          // unsigned but we may have a negative index from the base
          // pointer. We can guarantee that no unsigned wrap occurs if the
          // indices form a positive value.
          if (GEP->isInBounds() && GEP->getOperand(0) == PN) {
            Flags = setFlags(Flags, SCEV::FlagNW);

            const SCEV *Ptr = getSCEV(GEP->getPointerOperand());
            if (isKnownPositive(getMinusSCEV(getSCEV(GEP), Ptr)))
              Flags = setFlags(Flags, SCEV::FlagNUW);
          }

          // We cannot transfer nuw and nsw flags from subtraction
          // operations -- sub nuw X, Y is not the same as add nuw X, -Y
          // for instance.
        }

        const SCEV *StartVal = getSCEV(StartValueV);
        const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);

        // Okay, for the entire analysis of this edge we assumed the PHI
        // to be symbolic. We now need to go back and purge all of the
        // entries for the scalars that use the symbolic expression.
        forgetSymbolicName(PN, SymbolicName);
        ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;

        // We can add Flags to the post-inc expression only if we
        // know that it is *undefined behavior* for BEValueV to
        // overflow.
        if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
          if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
            (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);

        return PHISCEV;
      }
    }
  } else {
    // Otherwise, this could be a loop like this:
    //     i = 0;  for (j = 1; ..; ++j) { ....  i = j; }
    // In this case, j = {1,+,1} and BEValue is j.
    // Because the other in-value of i (0) fits the evolution of BEValue,
    // i really is an addrec evolution.
    //
    // We can generalize this saying that i is the shifted value of BEValue
    // by one iteration:
    //   PHI(f(0), f({1,+,1})) --> f({0,+,1})
    const SCEV *Shifted = SCEVShiftRewriter::rewrite(BEValue, L, *this);
    const SCEV *Start = SCEVInitRewriter::rewrite(Shifted, L, *this, false);
    if (Shifted != getCouldNotCompute() &&
        Start != getCouldNotCompute()) {
      const SCEV *StartVal = getSCEV(StartValueV);
      if (Start == StartVal) {
        // Okay, for the entire analysis of this edge we assumed the PHI
        // to be symbolic. We now need to go back and purge all of the
        // entries for the scalars that use the symbolic expression.
        forgetSymbolicName(PN, SymbolicName);
        ValueExprMap[SCEVCallbackVH(PN, this)] = Shifted;
        return Shifted;
      }
    }
  }

  // Remove the temporary PHI node SCEV that has been inserted while intending
  // to create an AddRecExpr for this PHI node. We cannot keep this temporary,
  // as it would prevent later (possibly simpler) SCEV expressions from being
  // added to the ValueExprMap.
  eraseValueFromMap(PN);

  return nullptr;
}
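
// An illustrative instance of the "shifted BEValue" case above (not from
// the original source):
//
//   loop:
//     %j = phi i32 [ 1, %entry ], [ %j.next, %loop ]
//     %i = phi i32 [ 0, %entry ], [ %j, %loop ]
//     %j.next = add i32 %j, 1
//
// For %i, BEValue is %j = {1,+,1}; shifting it back one iteration gives
// Start = 0, which matches %i's entry value, so %i becomes {0,+,1}.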

// Checks if the SCEV S is available at BB.  S is considered available at BB
// if S can be materialized at BB without introducing a fault.
static bool IsAvailableOnEntry(const Loop *L, DominatorTree &DT, const SCEV *S,
                               BasicBlock *BB) {
  struct CheckAvailable {
    bool TraversalDone = false;
    bool Available = true;

    const Loop *L = nullptr;  // The loop BB is in (can be nullptr)
    BasicBlock *BB = nullptr;
    DominatorTree &DT;

    CheckAvailable(const Loop *L, BasicBlock *BB, DominatorTree &DT)
      : L(L), BB(BB), DT(DT) {}

    bool setUnavailable() {
      TraversalDone = true;
      Available = false;
      return false;
    }

    bool follow(const SCEV *S) {
      switch (S->getSCEVType()) {
      case scConstant: case scTruncate: case scZeroExtend: case scSignExtend:
      case scAddExpr: case scMulExpr: case scUMaxExpr: case scSMaxExpr:
        // These expressions are available if their operand(s) is/are.
        return true;

      case scAddRecExpr: {
        // We allow add recurrences that are on the loop BB is in, or some
        // outer loop.  This guarantees availability because the value of the
        // add recurrence at BB is simply the "current" value of the induction
        // variable.  We can relax this in the future; for instance an add
        // recurrence on a sibling dominating loop is also available at BB.
        const auto *ARLoop = cast<SCEVAddRecExpr>(S)->getLoop();
        if (L && (ARLoop == L || ARLoop->contains(L)))
          return true;

        return setUnavailable();
      }

      case scUnknown: {
        // For SCEVUnknown, we check for simple dominance.
        const auto *SU = cast<SCEVUnknown>(S);
        Value *V = SU->getValue();

        if (isa<Argument>(V))
          return false;

        if (isa<Instruction>(V) && DT.dominates(cast<Instruction>(V), BB))
          return false;

        return setUnavailable();
      }

      case scUDivExpr:
      case scCouldNotCompute:
        // We do not try to be smart about these at all.
        return setUnavailable();
      }
      llvm_unreachable("switch should be fully covered!");
    }

    bool isDone() { return TraversalDone; }
  };

  CheckAvailable CA(L, BB, DT);
  SCEVTraversal<CheckAvailable> ST(CA);

  ST.visitAll(S);
  return CA.Available;
}

// Try to match a control flow sequence that branches out at BI and merges back
// at Merge into a "C ? LHS : RHS" select pattern.  Return true on a successful
// match.
static bool BrPHIToSelect(DominatorTree &DT, BranchInst *BI, PHINode *Merge,
                          Value *&C, Value *&LHS, Value *&RHS) {
  C = BI->getCondition();

  BasicBlockEdge LeftEdge(BI->getParent(), BI->getSuccessor(0));
  BasicBlockEdge RightEdge(BI->getParent(), BI->getSuccessor(1));

  if (!LeftEdge.isSingleEdge())
    return false;

  assert(RightEdge.isSingleEdge() && "Follows from LeftEdge.isSingleEdge()");

  Use &LeftUse = Merge->getOperandUse(0);
  Use &RightUse = Merge->getOperandUse(1);

  if (DT.dominates(LeftEdge, LeftUse) && DT.dominates(RightEdge, RightUse)) {
    LHS = LeftUse;
    RHS = RightUse;
    return true;
  }

  if (DT.dominates(LeftEdge, RightUse) && DT.dominates(RightEdge, LeftUse)) {
    LHS = RightUse;
    RHS = LeftUse;
    return true;
  }

  return false;
}

const SCEV *ScalarEvolution::createNodeFromSelectLikePHI(PHINode *PN) {
  auto IsReachable =
      [&](BasicBlock *BB) { return DT.isReachableFromEntry(BB); };
  if (PN->getNumIncomingValues() == 2 && all_of(PN->blocks(), IsReachable)) {
    const Loop *L = LI.getLoopFor(PN->getParent());

    // We don't want to break LCSSA, even in a SCEV expression tree.
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
      if (LI.getLoopFor(PN->getIncomingBlock(i)) != L)
        return nullptr;

    // Try to match
    //
    //  br %cond, label %left, label %right
    // left:
    //  br label %merge
    // right:
    //  br label %merge
    // merge:
    //  V = phi [ %x, %left ], [ %y, %right ]
    //
    // as "select %cond, %x, %y"

    BasicBlock *IDom = DT[PN->getParent()]->getIDom()->getBlock();
    assert(IDom && "At least the entry block should dominate PN");

    auto *BI = dyn_cast<BranchInst>(IDom->getTerminator());
    Value *Cond = nullptr, *LHS = nullptr, *RHS = nullptr;

    if (BI && BI->isConditional() &&
        BrPHIToSelect(DT, BI, PN, Cond, LHS, RHS) &&
        IsAvailableOnEntry(L, DT, getSCEV(LHS), PN->getParent()) &&
        IsAvailableOnEntry(L, DT, getSCEV(RHS), PN->getParent()))
      return createNodeForSelectOrPHI(PN, Cond, LHS, RHS);
  }

  return nullptr;
}

const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
  if (const SCEV *S = createAddRecFromPHI(PN))
    return S;

  if (const SCEV *S = createNodeFromSelectLikePHI(PN))
    return S;

  // If the PHI has a single incoming value, follow that value, unless the
  // PHI's incoming blocks are in a different loop, in which case doing so
  // risks breaking LCSSA form. Instcombine would normally zap these, but
  // it doesn't have DominatorTree information, so it may miss cases.
  if (Value *V = SimplifyInstruction(PN, {getDataLayout(), &TLI, &DT, &AC}))
    if (LI.replacementPreservesLCSSAForm(PN, V))
      return getSCEV(V);

  // If it's not a loop phi, we can't handle it yet.
  return getUnknown(PN);
}

const SCEV *ScalarEvolution::createNodeForSelectOrPHI(Instruction *I,
                                                      Value *Cond,
                                                      Value *TrueVal,
                                                      Value *FalseVal) {
  // Handle "constant" branch or select. This can occur for instance when a
  // loop pass transforms an inner loop and moves on to process the outer loop.
  if (auto *CI = dyn_cast<ConstantInt>(Cond))
    return getSCEV(CI->isOne() ? TrueVal : FalseVal);

  // Try to match some simple smax or umax patterns.
  auto *ICI = dyn_cast<ICmpInst>(Cond);
  if (!ICI)
    return getUnknown(I);

  Value *LHS = ICI->getOperand(0);
  Value *RHS = ICI->getOperand(1);

  switch (ICI->getPredicate()) {
  case ICmpInst::ICMP_SLT:
  case ICmpInst::ICMP_SLE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_SGT:
  case ICmpInst::ICMP_SGE:
    // a >s b ? a+x : b+x  ->  smax(a, b)+x
    // a >s b ? b+x : a+x  ->  smin(a, b)+x
    if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) {
      const SCEV *LS = getNoopOrSignExtend(getSCEV(LHS), I->getType());
      const SCEV *RS = getNoopOrSignExtend(getSCEV(RHS), I->getType());
      const SCEV *LA = getSCEV(TrueVal);
      const SCEV *RA = getSCEV(FalseVal);
      const SCEV *LDiff = getMinusSCEV(LA, LS);
      const SCEV *RDiff = getMinusSCEV(RA, RS);
      if (LDiff == RDiff)
        return getAddExpr(getSMaxExpr(LS, RS), LDiff);
      LDiff = getMinusSCEV(LA, RS);
      RDiff = getMinusSCEV(RA, LS);
      if (LDiff == RDiff)
        return getAddExpr(getSMinExpr(LS, RS), LDiff);
    }
    break;
  case ICmpInst::ICMP_ULT:
  case ICmpInst::ICMP_ULE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_UGT:
  case ICmpInst::ICMP_UGE:
    // a >u b ? a+x : b+x  ->  umax(a, b)+x
    // a >u b ? b+x : a+x  ->  umin(a, b)+x
    if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) {
      const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType());
      const SCEV *RS = getNoopOrZeroExtend(getSCEV(RHS), I->getType());
      const SCEV *LA = getSCEV(TrueVal);
      const SCEV *RA = getSCEV(FalseVal);
      const SCEV *LDiff = getMinusSCEV(LA, LS);
      const SCEV *RDiff = getMinusSCEV(RA, RS);
      if (LDiff == RDiff)
        return getAddExpr(getUMaxExpr(LS, RS), LDiff);
      LDiff = getMinusSCEV(LA, RS);
      RDiff = getMinusSCEV(RA, LS);
      if (LDiff == RDiff)
        return getAddExpr(getUMinExpr(LS, RS), LDiff);
    }
    break;
  case ICmpInst::ICMP_NE:
    // n != 0 ? n+x : 1+x  ->  umax(n, 1)+x
    if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) &&
        isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) {
      const SCEV *One = getOne(I->getType());
      const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType());
      const SCEV *LA = getSCEV(TrueVal);
      const SCEV *RA = getSCEV(FalseVal);
      const SCEV *LDiff = getMinusSCEV(LA, LS);
      const SCEV *RDiff = getMinusSCEV(RA, One);
      if (LDiff == RDiff)
        return getAddExpr(getUMaxExpr(One, LS), LDiff);
    }
    break;
  case ICmpInst::ICMP_EQ:
    // n == 0 ? 1+x : n+x  ->  umax(n, 1)+x
    if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) &&
        isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) {
      const SCEV *One = getOne(I->getType());
      const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType());
      const SCEV *LA = getSCEV(TrueVal);
      const SCEV *RA = getSCEV(FalseVal);
      const SCEV *LDiff = getMinusSCEV(LA, One);
      const SCEV *RDiff = getMinusSCEV(RA, LS);
      if (LDiff == RDiff)
        return getAddExpr(getUMaxExpr(One, LS), LDiff);
    }
    break;
  default:
    break;
  }

  return getUnknown(I);
}
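
// A worked instance of the ICMP_SGT rule above (illustrative only):
//   %c = icmp sgt i32 %a, %b
//   %s = select i1 %c, i32 %a, i32 %b
// gives LA == LS and RA == RS, hence LDiff == RDiff == 0, and the select is
// modeled as smax(%a, %b).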

/// Expand GEP instructions into add and multiply operations. This allows them
/// to be analyzed by regular SCEV code.
const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
  // Don't attempt to analyze GEPs over unsized objects.
  if (!GEP->getSourceElementType()->isSized())
    return getUnknown(GEP);

  SmallVector<const SCEV *, 4> IndexExprs;
  for (auto Index = GEP->idx_begin(); Index != GEP->idx_end(); ++Index)
    IndexExprs.push_back(getSCEV(*Index));
  return getGEPExpr(GEP, IndexExprs);
}
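
// For instance (illustrative, not from this file): a GEP such as
//   %p = getelementptr inbounds i32, i32* %base, i64 %i
// is modeled via getGEPExpr as the pointer expression %base + 4 * %i
// (assuming a 4-byte i32), which exposes the index arithmetic to SCEV.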

uint32_t ScalarEvolution::GetMinTrailingZerosImpl(const SCEV *S) {
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
    return C->getAPInt().countTrailingZeros();

  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S))
    return std::min(GetMinTrailingZeros(T->getOperand()),
                    (uint32_t)getTypeSizeInBits(T->getType()));

  if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) {
    uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
    return OpRes == getTypeSizeInBits(E->getOperand()->getType())
               ? getTypeSizeInBits(E->getType())
               : OpRes;
  }

  if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) {
    uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
    return OpRes == getTypeSizeInBits(E->getOperand()->getType())
               ? getTypeSizeInBits(E->getType())
               : OpRes;
  }

  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) {
    // The result is the min of all operands results.
    uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
    for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
    return MinOpRes;
  }

  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
    // The result is the sum of all operands results.
    uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0));
    uint32_t BitWidth = getTypeSizeInBits(M->getType());
    for (unsigned i = 1, e = M->getNumOperands();
         SumOpRes != BitWidth && i != e; ++i)
      SumOpRes =
          std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)), BitWidth);
    return SumOpRes;
  }
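
  // e.g. (illustrative): for a product %x * %y with GetMinTrailingZeros(%x)
  // = 2 and GetMinTrailingZeros(%y) = 3, the result has at least 2 + 3 = 5
  // trailing zero bits, capped at the bit width of the type.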

  if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
    // The result is the min of all operands results.
    uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
    for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
    return MinOpRes;
  }

  if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) {
    // The result is the min of all operands results.
    uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
    for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
    return MinOpRes;
  }

  if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) {
    // The result is the min of all operands results.
    uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
    for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
    return MinOpRes;
  }

  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    // For a SCEVUnknown, ask ValueTracking.
    KnownBits Known =
        computeKnownBits(U->getValue(), getDataLayout(), 0, &AC, nullptr, &DT);
    return Known.countMinTrailingZeros();
  }

  // SCEVUDivExpr
  return 0;
}

uint32_t ScalarEvolution::GetMinTrailingZeros(const SCEV *S) {
  auto I = MinTrailingZerosCache.find(S);
  if (I != MinTrailingZerosCache.end())
    return I->second;

  uint32_t Result = GetMinTrailingZerosImpl(S);
  auto InsertPair = MinTrailingZerosCache.insert({S, Result});
  assert(InsertPair.second && "Should insert a new key");
  return InsertPair.first->second;
}

/// Helper method to assign a range to V from metadata present in the IR.
static Optional<ConstantRange> GetRangeFromMetadata(Value *V) {
  if (Instruction *I = dyn_cast<Instruction>(V))
    if (MDNode *MD = I->getMetadata(LLVMContext::MD_range))
      return getConstantRangeFromMetadata(*MD);

  return None;
}

/// Determine the range for a particular SCEV.  If SignHint is
/// HINT_RANGE_UNSIGNED (resp. HINT_RANGE_SIGNED) then getRange prefers ranges
/// with a "cleaner" unsigned (resp. signed) representation.
const ConstantRange &
ScalarEvolution::getRangeRef(const SCEV *S,
                             ScalarEvolution::RangeSignHint SignHint) {
  DenseMap<const SCEV *, ConstantRange> &Cache =
      SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges
                                                       : SignedRanges;
  ConstantRange::PreferredRangeType RangeType =
      SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED
          ? ConstantRange::Unsigned : ConstantRange::Signed;

  // See if we've computed this range already.
  DenseMap<const SCEV *, ConstantRange>::iterator I = Cache.find(S);
  if (I != Cache.end())
    return I->second;

  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
    return setRange(C, SignHint, ConstantRange(C->getAPInt()));

  unsigned BitWidth = getTypeSizeInBits(S->getType());
  ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true);

  // If the value has known zeros, the maximum value will have those known
  // zeros as well.
  uint32_t TZ = GetMinTrailingZeros(S);
  if (TZ != 0) {
    if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED)
      ConservativeResult =
          ConstantRange(APInt::getMinValue(BitWidth),
                        APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1);
    else
      ConservativeResult = ConstantRange(
          APInt::getSignedMinValue(BitWidth),
          APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1);
  }
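
  // e.g. (illustrative): for an i8 value with TZ == 2 and an unsigned hint,
  // the conservative range becomes [0, 253), since 252 is the largest i8
  // value with two trailing zero bits.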

  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    ConstantRange X = getRangeRef(Add->getOperand(0), SignHint);
    for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
      X = X.add(getRangeRef(Add->getOperand(i), SignHint));
    return setRange(Add, SignHint,
                    ConservativeResult.intersectWith(X, RangeType));
  }

  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
    ConstantRange X = getRangeRef(Mul->getOperand(0), SignHint);
    for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
      X = X.multiply(getRangeRef(Mul->getOperand(i), SignHint));
    return setRange(Mul, SignHint,
                    ConservativeResult.intersectWith(X, RangeType));
  }

  if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
    ConstantRange X = getRangeRef(SMax->getOperand(0), SignHint);
    for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
      X = X.smax(getRangeRef(SMax->getOperand(i), SignHint));
    return setRange(SMax, SignHint,
                    ConservativeResult.intersectWith(X, RangeType));
  }

  if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
    ConstantRange X = getRangeRef(UMax->getOperand(0), SignHint);
    for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
      X = X.umax(getRangeRef(UMax->getOperand(i), SignHint));
    return setRange(UMax, SignHint,
                    ConservativeResult.intersectWith(X, RangeType));
  }

  if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
    ConstantRange X = getRangeRef(UDiv->getLHS(), SignHint);
    ConstantRange Y = getRangeRef(UDiv->getRHS(), SignHint);
    return setRange(UDiv, SignHint,
                    ConservativeResult.intersectWith(X.udiv(Y), RangeType));
  }

  if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
    ConstantRange X = getRangeRef(ZExt->getOperand(), SignHint);
    return setRange(ZExt, SignHint,
                    ConservativeResult.intersectWith(X.zeroExtend(BitWidth),
                                                     RangeType));
  }

  if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
    ConstantRange X = getRangeRef(SExt->getOperand(), SignHint);
    return setRange(SExt, SignHint,
                    ConservativeResult.intersectWith(X.signExtend(BitWidth),
                                                     RangeType));
  }

  if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
    ConstantRange X = getRangeRef(Trunc->getOperand(), SignHint);
    return setRange(Trunc, SignHint,
                    ConservativeResult.intersectWith(X.truncate(BitWidth),
                                                     RangeType));
  }

  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
    // If there's no unsigned wrap, the value will never be less than its
    // initial value.
    if (AddRec->hasNoUnsignedWrap())
      if (const SCEVConstant *C = dyn_cast<SCEVConstant>(AddRec->getStart()))
        if (!C->getValue()->isZero())
          ConservativeResult = ConservativeResult.intersectWith(
              ConstantRange(C->getAPInt(), APInt(BitWidth, 0)), RangeType);

    // If there's no signed wrap, and all the operands have the same sign or
    // zero, the value won't ever change sign.
    if (AddRec->hasNoSignedWrap()) {
      bool AllNonNeg = true;
      bool AllNonPos = true;
      for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
        if (!isKnownNonNegative(AddRec->getOperand(i))) AllNonNeg = false;
        if (!isKnownNonPositive(AddRec->getOperand(i))) AllNonPos = false;
      }
      if (AllNonNeg)
        ConservativeResult = ConservativeResult.intersectWith(
            ConstantRange(APInt(BitWidth, 0),
                          APInt::getSignedMinValue(BitWidth)), RangeType);
      else if (AllNonPos)
        ConservativeResult = ConservativeResult.intersectWith(
            ConstantRange(APInt::getSignedMinValue(BitWidth),
                          APInt(BitWidth, 1)), RangeType);
    }

    // TODO: non-affine addrec
    if (AddRec->isAffine()) {
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop());
      if (!isa<SCEVCouldNotCompute>(MaxBECount) &&
          getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) {
        auto RangeFromAffine = getRangeForAffineAR(
            AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount,
            BitWidth);
        if (!RangeFromAffine.isFullSet())
          ConservativeResult =
              ConservativeResult.intersectWith(RangeFromAffine, RangeType);

        auto RangeFromFactoring = getRangeViaFactoring(
            AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount,
            BitWidth);
        if (!RangeFromFactoring.isFullSet())
          ConservativeResult =
              ConservativeResult.intersectWith(RangeFromFactoring, RangeType);
      }
    }

    return setRange(AddRec, SignHint, std::move(ConservativeResult));
  }

  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    // Check if the IR explicitly contains !range metadata.
    Optional<ConstantRange> MDRange = GetRangeFromMetadata(U->getValue());
    if (MDRange.hasValue())
      ConservativeResult = ConservativeResult.intersectWith(MDRange.getValue(),
                                                            RangeType);

    // Split here to avoid paying the compile-time cost of calling both
    // computeKnownBits and ComputeNumSignBits.  This restriction can be
    // lifted later if needed.
    const DataLayout &DL = getDataLayout();
    if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) {
      // For a SCEVUnknown, ask ValueTracking.
      KnownBits Known = computeKnownBits(U->getValue(), DL, 0, &AC, nullptr, &DT);
      if (Known.One != ~Known.Zero + 1)
        ConservativeResult =
            ConservativeResult.intersectWith(
                ConstantRange(Known.One, ~Known.Zero + 1), RangeType);
    } else {
      assert(SignHint == ScalarEvolution::HINT_RANGE_SIGNED &&
             "generalize as needed!");
      unsigned NS = ComputeNumSignBits(U->getValue(), DL, 0, &AC, nullptr, &DT);
      if (NS > 1)
        ConservativeResult = ConservativeResult.intersectWith(
            ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
                          APInt::getSignedMaxValue(BitWidth).ashr(NS - 1) + 1),
            RangeType);
    }

    // A range of Phi is a subset of union of all ranges of its input.
    if (const PHINode *Phi = dyn_cast<PHINode>(U->getValue())) {
      // Make sure that we do not run over cycled Phis.
      if (PendingPhiRanges.insert(Phi).second) {
        ConstantRange RangeFromOps(BitWidth, /*isFullSet=*/false);
        for (auto &Op : Phi->operands()) {
          auto OpRange = getRangeRef(getSCEV(Op), SignHint);
          RangeFromOps = RangeFromOps.unionWith(OpRange);
          // No point to continue if we already have a full set.
          if (RangeFromOps.isFullSet())
            break;
        }
        ConservativeResult =
            ConservativeResult.intersectWith(RangeFromOps, RangeType);
        bool Erased = PendingPhiRanges.erase(Phi);
        assert(Erased && "Failed to erase Phi properly?");
        (void) Erased;
      }
    }

    return setRange(U, SignHint, std::move(ConservativeResult));
  }

  return setRange(S, SignHint, std::move(ConservativeResult));
}
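
// For instance (illustrative): a load annotated with
//   !range !{i32 0, i32 256}
// contributes the range [0, 256) to its SCEVUnknown via the metadata check
// in the code above.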

// Given a StartRange, Step and MaxBECount for an expression compute a range of
// values that the expression can take. Initially, the expression has a value
// from StartRange and then is changed by Step up to MaxBECount times. Signed
// argument defines if we treat Step as signed or unsigned.
static ConstantRange getRangeForAffineARHelper(APInt Step,
                                               const ConstantRange &StartRange,
                                               const APInt &MaxBECount,
                                               unsigned BitWidth, bool Signed) {
  // If either Step or MaxBECount is 0, then the expression won't change, and
  // we just need to return the initial range.
  if (Step == 0 || MaxBECount == 0)
    return StartRange;

  // If we don't know anything about the initial value (i.e. StartRange is
  // FullRange), then we don't know anything about the final range either.
  // Return FullRange.
  if (StartRange.isFullSet())
    return ConstantRange::getFull(BitWidth);

  // If Step is signed and negative, then we use its absolute value, but we
  // also note that we're moving in the opposite direction.
  bool Descending = Signed && Step.isNegative();

  if (Signed)
    // This is correct even for INT_SMIN. Let's look at i8 to illustrate this:
    // abs(INT_SMIN) = abs(-128) = abs(0x80) = -0x80 = 0x80 = 128.
    // These equations hold true due to the well-defined wrap-around behavior
    // of APInt.
    Step = Step.abs();

  // Check if Offset is more than full span of BitWidth. If it is, the
  // expression is guaranteed to overflow.
  if (APInt::getMaxValue(StartRange.getBitWidth()).udiv(Step).ult(MaxBECount))
    return ConstantRange::getFull(BitWidth);

  // Offset is by how much the expression can change. Checks above guarantee no
  // overflow here.
  APInt Offset = Step * MaxBECount;

  // Minimum value of the final range will match the minimal value of
  // StartRange if the expression is increasing and will be decreased by
  // Offset otherwise.
  // Maximum value of the final range will match the maximal value of
  // StartRange if the expression is decreasing and will be increased by
  // Offset otherwise.
  APInt StartLower = StartRange.getLower();
  APInt StartUpper = StartRange.getUpper() - 1;
  APInt MovedBoundary = Descending ? (StartLower - std::move(Offset))
                                   : (StartUpper + std::move(Offset));

  // It's possible that the new minimum/maximum value will fall into the
  // initial range (due to wrap around). This means that the expression can
  // take any value in this bitwidth, and we have to return full range.
  if (StartRange.contains(MovedBoundary))
    return ConstantRange::getFull(BitWidth);

  APInt NewLower =
      Descending ? std::move(MovedBoundary) : std::move(StartLower);
  APInt NewUpper =
      Descending ? std::move(StartUpper) : std::move(MovedBoundary);
  NewUpper += 1;

  // No overflow detected, return [StartLower, StartUpper + Offset + 1) range.
  return ConstantRange::getNonEmpty(std::move(NewLower), std::move(NewUpper));
}
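
// Worked example for the helper above (illustrative): Step = 2, unsigned,
// StartRange = [10, 21) and MaxBECount = 5. Then Offset = 2 * 5 = 10 and
// MovedBoundary = 20 + 10 = 30, which is not contained in [10, 21), so the
// resulting range is [10, 31).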

ConstantRange ScalarEvolution::getRangeForAffineAR(const SCEV *Start,
                                                   const SCEV *Step,
                                                   const SCEV *MaxBECount,
                                                   unsigned BitWidth) {
  assert(!isa<SCEVCouldNotCompute>(MaxBECount) &&
         getTypeSizeInBits(MaxBECount->getType()) <= BitWidth &&
         "Precondition!");

  MaxBECount = getNoopOrZeroExtend(MaxBECount, Start->getType());
  APInt MaxBECountValue = getUnsignedRangeMax(MaxBECount);

  // First, consider step signed.
  ConstantRange StartSRange = getSignedRange(Start);
  ConstantRange StepSRange = getSignedRange(Step);

  // If Step can be both positive and negative, we need to find ranges for the
  // maximum absolute step values in both directions and union them.
  ConstantRange SR =
      getRangeForAffineARHelper(StepSRange.getSignedMin(), StartSRange,
                                MaxBECountValue, BitWidth, /* Signed = */ true);
  SR = SR.unionWith(getRangeForAffineARHelper(StepSRange.getSignedMax(),
                                              StartSRange, MaxBECountValue,
                                              BitWidth, /* Signed = */ true));

  // Next, consider step unsigned.
  ConstantRange UR = getRangeForAffineARHelper(
      getUnsignedRangeMax(Step), getUnsignedRange(Start),
      MaxBECountValue, BitWidth, /* Signed = */ false);

  // Finally, intersect signed and unsigned ranges.
  return SR.intersectWith(UR, ConstantRange::Smallest);
}

ConstantRange ScalarEvolution::getRangeViaFactoring(const SCEV *Start,
                                                    const SCEV *Step,
                                                    const SCEV *MaxBECount,
                                                    unsigned BitWidth) {
  // RangeOf({C?A:B,+,C?P:Q}) == RangeOf(C?{A,+,P}:{B,+,Q})
  //                          == RangeOf({A,+,P}) union RangeOf({B,+,Q})

  struct SelectPattern {
    Value *Condition = nullptr;
    APInt TrueValue;
    APInt FalseValue;

    explicit SelectPattern(ScalarEvolution &SE, unsigned BitWidth,
                           const SCEV *S) {
      Optional<unsigned> CastOp;
      APInt Offset(BitWidth, 0);

      assert(SE.getTypeSizeInBits(S->getType()) == BitWidth &&
             "Should be!");

      // Peel off a constant offset:
      if (auto *SA = dyn_cast<SCEVAddExpr>(S)) {
        // In the future we could consider being smarter here and handle
        // {Start+Step,+,Step} too.
        if (SA->getNumOperands() != 2 || !isa<SCEVConstant>(SA->getOperand(0)))
          return;

        Offset = cast<SCEVConstant>(SA->getOperand(0))->getAPInt();
        S = SA->getOperand(1);
      }

      // Peel off a cast operation
      if (auto *SCast = dyn_cast<SCEVCastExpr>(S)) {
        CastOp = SCast->getSCEVType();
        S = SCast->getOperand();
      }

      using namespace llvm::PatternMatch;

      auto *SU = dyn_cast<SCEVUnknown>(S);
      const APInt *TrueVal, *FalseVal;
      if (!SU ||
          !match(SU->getValue(), m_Select(m_Value(Condition), m_APInt(TrueVal),
                                          m_APInt(FalseVal)))) {
        Condition = nullptr;
        return;
      }

      TrueValue = *TrueVal;
      FalseValue = *FalseVal;

      // Re-apply the cast we peeled off earlier
      if (CastOp.hasValue())
        switch (*CastOp) {
        default:
          llvm_unreachable("Unknown SCEV cast type!");

        case scTruncate:
          TrueValue = TrueValue.trunc(BitWidth);
          FalseValue = FalseValue.trunc(BitWidth);
          break;
        case scZeroExtend:
          TrueValue = TrueValue.zext(BitWidth);
          FalseValue = FalseValue.zext(BitWidth);
          break;
        case scSignExtend:
          TrueValue = TrueValue.sext(BitWidth);
          FalseValue = FalseValue.sext(BitWidth);
          break;
        }

      // Re-apply the constant offset we peeled off earlier
      TrueValue += Offset;
      FalseValue += Offset;
    }

    bool isRecognized() { return Condition != nullptr; }
  };

  SelectPattern StartPattern(*this, BitWidth, Start);
  if (!StartPattern.isRecognized())
    return ConstantRange::getFull(BitWidth);

  SelectPattern StepPattern(*this, BitWidth, Step);
  if (!StepPattern.isRecognized())
    return ConstantRange::getFull(BitWidth);

  if (StartPattern.Condition != StepPattern.Condition) {
    // We don't handle this case today; but we could, by considering four
    // possibilities below instead of two. I'm not sure if there are cases where
    // that will help over what getRange already does, though.
    return ConstantRange::getFull(BitWidth);
  }

  // NB! Calling ScalarEvolution::getConstant is fine, but we should not try to
  // construct arbitrary general SCEV expressions here.  This function is
  // called from deep in the call stack, and calling getSCEV (on a sext
  // instruction, say) can end up caching a suboptimal value.

  // FIXME: without the explicit `this` receiver below, MSVC errors out with
  // C2352 and C2512 (otherwise it isn't needed).

  const SCEV *TrueStart = this->getConstant(StartPattern.TrueValue);
  const SCEV *TrueStep = this->getConstant(StepPattern.TrueValue);
  const SCEV *FalseStart = this->getConstant(StartPattern.FalseValue);
  const SCEV *FalseStep = this->getConstant(StepPattern.FalseValue);

  ConstantRange TrueRange =
      this->getRangeForAffineAR(TrueStart, TrueStep, MaxBECount, BitWidth);
  ConstantRange FalseRange =
      this->getRangeForAffineAR(FalseStart, FalseStep, MaxBECount, BitWidth);

  return TrueRange.unionWith(FalseRange);
}
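
// For instance (illustrative): with Start = (select %c, i32 2, i32 10) and
// Step = (select %c, i32 1, i32 -1), the range is computed as
// RangeOf({2,+,1}) unioned with RangeOf({10,+,-1}).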

SCEV::NoWrapFlags ScalarEvolution::getNoWrapFlagsFromUB(const Value *V) {
  if (isa<ConstantExpr>(V)) return SCEV::FlagAnyWrap;
  const BinaryOperator *BinOp = cast<BinaryOperator>(V);

  // Return early if there are no flags to propagate to the SCEV.
  SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
  if (BinOp->hasNoUnsignedWrap())
    Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
  if (BinOp->hasNoSignedWrap())
    Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW);
  if (Flags == SCEV::FlagAnyWrap)
    return SCEV::FlagAnyWrap;

  return isSCEVExprNeverPoison(BinOp) ? Flags : SCEV::FlagAnyWrap;
}

bool ScalarEvolution::isSCEVExprNeverPoison(const Instruction *I) {
  // Here we check that I is in the header of the innermost loop containing I,
  // since we only deal with instructions in the loop header. The actual loop
  // we need to check later will come from an add recurrence, but getting that
  // requires computing the SCEV of the operands, which can be expensive. This
  // check we can do cheaply to rule out some cases early.
  Loop *InnermostContainingLoop = LI.getLoopFor(I->getParent());
  if (InnermostContainingLoop == nullptr ||
      InnermostContainingLoop->getHeader() != I->getParent())
    return false;

  // Only proceed if we can prove that I does not yield poison.
  if (!programUndefinedIfFullPoison(I))
    return false;

  // At this point we know that if I is executed, then it does not wrap
  // according to at least one of NSW or NUW. If I is not executed, then we do
  // not know if the calculation that I represents would wrap. Multiple
  // instructions can map to the same SCEV. If we apply NSW or NUW from I to
  // the SCEV, we must guarantee no wrapping for that SCEV also when it is
  // derived from other instructions that map to the same SCEV. We cannot make
  // that guarantee for cases where I is not executed. So we need to find the
  // loop that I is considered in relation to and prove that I is executed for
  // every iteration of that loop. That implies that the value that I
  // calculates does not wrap anywhere in the loop, so then we can apply the
  // flags to the SCEV.
  //
  // We check isLoopInvariant to disambiguate in case we are adding recurrences
  // from different loops, so that we know which loop to prove that I is
  // executed in.
  for (unsigned OpIndex = 0; OpIndex < I->getNumOperands(); ++OpIndex) {
    // I could be an extractvalue from a call to an overflow intrinsic.
    // TODO: We can do better here in some cases.
    if (!isSCEVable(I->getOperand(OpIndex)->getType()))
      return false;
    const SCEV *Op = getSCEV(I->getOperand(OpIndex));
    if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
      bool AllOtherOpsLoopInvariant = true;
      for (unsigned OtherOpIndex = 0; OtherOpIndex < I->getNumOperands();
           ++OtherOpIndex) {
        if (OtherOpIndex != OpIndex) {
          const SCEV *OtherOp = getSCEV(I->getOperand(OtherOpIndex));
          if (!isLoopInvariant(OtherOp, AddRec->getLoop())) {
            AllOtherOpsLoopInvariant = false;
            break;
          }
        }
      }
      if (AllOtherOpsLoopInvariant &&
          isGuaranteedToExecuteForEveryIteration(I, AddRec->getLoop()))
        return true;
    }
  }
  return false;
}

bool ScalarEvolution::isAddRecNeverPoison(const Instruction *I, const Loop *L) {
  // If we know that \c I can never be poison period, then that's enough.
  if (isSCEVExprNeverPoison(I))
    return true;

  // For an add recurrence specifically, we assume that infinite loops without
  // side effects are undefined behavior, and then reason as follows:
  //
  // If the add recurrence is poison in any iteration, it is poison on all
  // future iterations (since incrementing poison yields poison). If the result
  // of the add recurrence is fed into the loop latch condition and the loop
  // does not contain any throws or exiting blocks other than the latch, we now
  // have the ability to "choose" whether the backedge is taken or not (by
  // choosing a sufficiently evil value for the poison feeding into the branch)
  // for every iteration including and after the one in which \p I first became
  // poison.  There are two possibilities (let's call the iteration in which \p
  // I first became poison as K):
  //
  //  1. In the set of iterations including and after K, the loop body executes
  //     no side effects.  In this case executing the backedge an infinite
  //     number of times will yield undefined behavior.
  //
  //  2. In the set of iterations including and after K, the loop body executes
  //     at least one side effect.  In this case, that specific instance of
  //     side effect is control dependent on poison, which also yields
  //     undefined behavior.

  auto *ExitingBB = L->getExitingBlock();
  auto *LatchBB = L->getLoopLatch();
  if (!ExitingBB || !LatchBB || ExitingBB != LatchBB)
    return false;

  SmallPtrSet<const Instruction *, 16> Pushed;
  SmallVector<const Instruction *, 8> PoisonStack;

  // We start by assuming \c I, the post-inc add recurrence, is poison.  Only
  // things that are known to be fully poison under that assumption go on the
  // PoisonStack.
  Pushed.insert(I);
  PoisonStack.push_back(I);

  bool LatchControlDependentOnPoison = false;
  while (!PoisonStack.empty() && !LatchControlDependentOnPoison) {
    const Instruction *Poison = PoisonStack.pop_back_val();

    for (auto *PoisonUser : Poison->users()) {
      if (propagatesFullPoison(cast<Instruction>(PoisonUser))) {
        if (Pushed.insert(cast<Instruction>(PoisonUser)).second)
          PoisonStack.push_back(cast<Instruction>(PoisonUser));
      } else if (auto *BI = dyn_cast<BranchInst>(PoisonUser)) {
        assert(BI->isConditional() && "Only possibility!");
        if (BI->getParent() == LatchBB) {
          LatchControlDependentOnPoison = true;
          break;
        }
      }
    }
  }

  return LatchControlDependentOnPoison && loopHasNoAbnormalExits(L);
}

ScalarEvolution::LoopProperties
ScalarEvolution::getLoopProperties(const Loop *L) {
  using LoopProperties = ScalarEvolution::LoopProperties;

  auto Itr = LoopPropertiesCache.find(L);
  if (Itr == LoopPropertiesCache.end()) {
    auto HasSideEffects = [](Instruction *I) {
      if (auto *SI = dyn_cast<StoreInst>(I))
        return !SI->isSimple();

      return I->mayHaveSideEffects();
    };

    LoopProperties LP = {/* HasNoAbnormalExits */ true,
                         /*HasNoSideEffects*/ true};

    for (auto *BB : L->getBlocks())
      for (auto &I : *BB) {
        if (!isGuaranteedToTransferExecutionToSuccessor(&I))
          LP.HasNoAbnormalExits = false;
        if (HasSideEffects(&I))
          LP.HasNoSideEffects = false;
        if (!LP.HasNoAbnormalExits && !LP.HasNoSideEffects)
          break; // We're already as pessimistic as we can get.
      }

    auto InsertPair = LoopPropertiesCache.insert({L, LP});
    assert(InsertPair.second && "We just checked!");
    Itr = InsertPair.first;
  }

  return Itr->second;
}
*ScalarEvolution::createSCEV(Value
*V
) {
6115 if (!isSCEVable(V
->getType()))
6116 return getUnknown(V
);
6118 if (Instruction
*I
= dyn_cast
<Instruction
>(V
)) {
6119 // Don't attempt to analyze instructions in blocks that aren't
6120 // reachable. Such instructions don't matter, and they aren't required
6121 // to obey basic rules for definitions dominating uses which this
6122 // analysis depends on.
6123 if (!DT
.isReachableFromEntry(I
->getParent()))
6124 return getUnknown(UndefValue::get(V
->getType()));
6125 } else if (ConstantInt
*CI
= dyn_cast
<ConstantInt
>(V
))
6126 return getConstant(CI
);
6127 else if (isa
<ConstantPointerNull
>(V
))
6128 return getZero(V
->getType());
6129 else if (GlobalAlias
*GA
= dyn_cast
<GlobalAlias
>(V
))
6130 return GA
->isInterposable() ? getUnknown(V
) : getSCEV(GA
->getAliasee());
6131 else if (!isa
<ConstantExpr
>(V
))
6132 return getUnknown(V
);
6134 Operator
*U
= cast
<Operator
>(V
);
6135 if (auto BO
= MatchBinaryOp(U
, DT
)) {
6136 switch (BO
->Opcode
) {
6137 case Instruction::Add
: {
6138 // The simple thing to do would be to just call getSCEV on both operands
6139 // and call getAddExpr with the result. However if we're looking at a
6140 // bunch of things all added together, this can be quite inefficient,
6141 // because it leads to N-1 getAddExpr calls for N ultimate operands.
6142 // Instead, gather up all the operands and make a single getAddExpr call.
6143 // LLVM IR canonical form means we need only traverse the left operands.
6144 SmallVector
<const SCEV
*, 4> AddOps
;
6147 if (auto *OpSCEV
= getExistingSCEV(BO
->Op
)) {
6148 AddOps
.push_back(OpSCEV
);
6152 // If a NUW or NSW flag can be applied to the SCEV for this
6153 // addition, then compute the SCEV for this addition by itself
6154 // with a separate call to getAddExpr. We need to do that
6155 // instead of pushing the operands of the addition onto AddOps,
6156 // since the flags are only known to apply to this particular
6157 // addition - they may not apply to other additions that can be
6158 // formed with operands from AddOps.
6159 const SCEV
*RHS
= getSCEV(BO
->RHS
);
6160 SCEV::NoWrapFlags Flags
= getNoWrapFlagsFromUB(BO
->Op
);
6161 if (Flags
!= SCEV::FlagAnyWrap
) {
6162 const SCEV
*LHS
= getSCEV(BO
->LHS
);
6163 if (BO
->Opcode
== Instruction::Sub
)
6164 AddOps
.push_back(getMinusSCEV(LHS
, RHS
, Flags
));
6166 AddOps
.push_back(getAddExpr(LHS
, RHS
, Flags
));
6171 if (BO
->Opcode
== Instruction::Sub
)
6172 AddOps
.push_back(getNegativeSCEV(getSCEV(BO
->RHS
)));
6174 AddOps
.push_back(getSCEV(BO
->RHS
));
6176 auto NewBO
= MatchBinaryOp(BO
->LHS
, DT
);
6177 if (!NewBO
|| (NewBO
->Opcode
!= Instruction::Add
&&
6178 NewBO
->Opcode
!= Instruction::Sub
)) {
6179 AddOps
.push_back(getSCEV(BO
->LHS
));
6185 return getAddExpr(AddOps
);
6188 case Instruction::Mul
: {
6189 SmallVector
<const SCEV
*, 4> MulOps
;
6192 if (auto *OpSCEV
= getExistingSCEV(BO
->Op
)) {
6193 MulOps
.push_back(OpSCEV
);
6197 SCEV::NoWrapFlags Flags
= getNoWrapFlagsFromUB(BO
->Op
);
6198 if (Flags
!= SCEV::FlagAnyWrap
) {
6200 getMulExpr(getSCEV(BO
->LHS
), getSCEV(BO
->RHS
), Flags
));
6205 MulOps
.push_back(getSCEV(BO
->RHS
));
6206 auto NewBO
= MatchBinaryOp(BO
->LHS
, DT
);
6207 if (!NewBO
|| NewBO
->Opcode
!= Instruction::Mul
) {
6208 MulOps
.push_back(getSCEV(BO
->LHS
));
6214 return getMulExpr(MulOps
);
6216 case Instruction::UDiv
:
6217 return getUDivExpr(getSCEV(BO
->LHS
), getSCEV(BO
->RHS
));
6218 case Instruction::URem
:
6219 return getURemExpr(getSCEV(BO
->LHS
), getSCEV(BO
->RHS
));
6220 case Instruction::Sub
: {
6221 SCEV::NoWrapFlags Flags
= SCEV::FlagAnyWrap
;
6223 Flags
= getNoWrapFlagsFromUB(BO
->Op
);
6224 return getMinusSCEV(getSCEV(BO
->LHS
), getSCEV(BO
->RHS
), Flags
);
6226 case Instruction::And
:
6227 // For an expression like x&255 that merely masks off the high bits,
6228 // use zext(trunc(x)) as the SCEV expression.
6229 if (ConstantInt
*CI
= dyn_cast
<ConstantInt
>(BO
->RHS
)) {
6231 return getSCEV(BO
->RHS
);
6232 if (CI
->isMinusOne())
6233 return getSCEV(BO
->LHS
);
6234 const APInt
&A
= CI
->getValue();
6236 // Instcombine's ShrinkDemandedConstant may strip bits out of
6237 // constants, obscuring what would otherwise be a low-bits mask.
6238 // Use computeKnownBits to compute what ShrinkDemandedConstant
6239 // knew about to reconstruct a low-bits mask value.
6240 unsigned LZ
= A
.countLeadingZeros();
6241 unsigned TZ
= A
.countTrailingZeros();
6242 unsigned BitWidth
= A
.getBitWidth();
6243 KnownBits
Known(BitWidth
);
6244 computeKnownBits(BO
->LHS
, Known
, getDataLayout(),
6245 0, &AC
, nullptr, &DT
);
6247 APInt EffectiveMask
=
6248 APInt::getLowBitsSet(BitWidth
, BitWidth
- LZ
- TZ
).shl(TZ
);
6249 if ((LZ
!= 0 || TZ
!= 0) && !((~A
& ~Known
.Zero
) & EffectiveMask
)) {
6250 const SCEV
*MulCount
= getConstant(APInt::getOneBitSet(BitWidth
, TZ
));
6251 const SCEV
*LHS
= getSCEV(BO
->LHS
);
6252 const SCEV
*ShiftedLHS
= nullptr;
6253 if (auto *LHSMul
= dyn_cast
<SCEVMulExpr
>(LHS
)) {
6254 if (auto *OpC
= dyn_cast
<SCEVConstant
>(LHSMul
->getOperand(0))) {
6255 // For an expression like (x * 8) & 8, simplify the multiply.
6256 unsigned MulZeros
= OpC
->getAPInt().countTrailingZeros();
6257 unsigned GCD
= std::min(MulZeros
, TZ
);
6258 APInt DivAmt
= APInt::getOneBitSet(BitWidth
, TZ
- GCD
);
6259 SmallVector
<const SCEV
*, 4> MulOps
;
6260 MulOps
.push_back(getConstant(OpC
->getAPInt().lshr(GCD
)));
6261 MulOps
.append(LHSMul
->op_begin() + 1, LHSMul
->op_end());
6262 auto *NewMul
= getMulExpr(MulOps
, LHSMul
->getNoWrapFlags());
6263 ShiftedLHS
= getUDivExpr(NewMul
, getConstant(DivAmt
));
6267 ShiftedLHS
= getUDivExpr(LHS
, MulCount
);
6270 getTruncateExpr(ShiftedLHS
,
6271 IntegerType::get(getContext(), BitWidth
- LZ
- TZ
)),
6272 BO
->LHS
->getType()),
6278 case Instruction::Or
:
6279 // If the RHS of the Or is a constant, we may have something like:
6280 // X*4+1 which got turned into X*4|1. Handle this as an Add so loop
6281 // optimizations will transparently handle this case.
6283 // In order for this transformation to be safe, the LHS must be of the
6284 // form X*(2^n) and the Or constant must be less than 2^n.
6285 if (ConstantInt
*CI
= dyn_cast
<ConstantInt
>(BO
->RHS
)) {
6286 const SCEV
*LHS
= getSCEV(BO
->LHS
);
6287 const APInt
&CIVal
= CI
->getValue();
6288 if (GetMinTrailingZeros(LHS
) >=
6289 (CIVal
.getBitWidth() - CIVal
.countLeadingZeros())) {
6290 // Build a plain add SCEV.
6291 const SCEV
*S
= getAddExpr(LHS
, getSCEV(CI
));
6292 // If the LHS of the add was an addrec and it has no-wrap flags,
6293 // transfer the no-wrap flags, since an or won't introduce a wrap.
6294 if (const SCEVAddRecExpr
*NewAR
= dyn_cast
<SCEVAddRecExpr
>(S
)) {
6295 const SCEVAddRecExpr
*OldAR
= cast
<SCEVAddRecExpr
>(LHS
);
6296 const_cast<SCEVAddRecExpr
*>(NewAR
)->setNoWrapFlags(
6297 OldAR
->getNoWrapFlags());
6304 case Instruction::Xor
:
6305 if (ConstantInt
*CI
= dyn_cast
<ConstantInt
>(BO
->RHS
)) {
6306 // If the RHS of xor is -1, then this is a not operation.
6307 if (CI
->isMinusOne())
6308 return getNotSCEV(getSCEV(BO
->LHS
));
6310 // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask.
6311 // This is a variant of the check for xor with -1, and it handles
6312 // the case where instcombine has trimmed non-demanded bits out
6313 // of an xor with -1.
6314 if (auto *LBO
= dyn_cast
<BinaryOperator
>(BO
->LHS
))
6315 if (ConstantInt
*LCI
= dyn_cast
<ConstantInt
>(LBO
->getOperand(1)))
6316 if (LBO
->getOpcode() == Instruction::And
&&
6317 LCI
->getValue() == CI
->getValue())
6318 if (const SCEVZeroExtendExpr
*Z
=
6319 dyn_cast
<SCEVZeroExtendExpr
>(getSCEV(BO
->LHS
))) {
6320 Type
*UTy
= BO
->LHS
->getType();
6321 const SCEV
*Z0
= Z
->getOperand();
6322 Type
*Z0Ty
= Z0
->getType();
6323 unsigned Z0TySize
= getTypeSizeInBits(Z0Ty
);
6325 // If C is a low-bits mask, the zero extend is serving to
6326 // mask off the high bits. Complement the operand and
6327 // re-apply the zext.
6328 if (CI
->getValue().isMask(Z0TySize
))
6329 return getZeroExtendExpr(getNotSCEV(Z0
), UTy
);
6331 // If C is a single bit, it may be in the sign-bit position
6332 // before the zero-extend. In this case, represent the xor
6333 // using an add, which is equivalent, and re-apply the zext.
6334 APInt Trunc
= CI
->getValue().trunc(Z0TySize
);
6335 if (Trunc
.zext(getTypeSizeInBits(UTy
)) == CI
->getValue() &&
6337 return getZeroExtendExpr(getAddExpr(Z0
, getConstant(Trunc
)),
6343 case Instruction::Shl
:
6344 // Turn shift left of a constant amount into a multiply.
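      // E.g. (illustrative): shl i32 %x, 3 becomes (8 * %x), with no-wrap
      // flags carried over from the IR when the shift amount allows it.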
      if (ConstantInt *SA = dyn_cast<ConstantInt>(BO->RHS)) {
        uint32_t BitWidth = cast<IntegerType>(SA->getType())->getBitWidth();

        // If the shift count is not less than the bitwidth, the result of
        // the shift is undefined. Don't try to analyze it, because the
        // resolution chosen here may differ from the resolution chosen in
        // other parts of the compiler.
        if (SA->getValue().uge(BitWidth))
          break;

        // It is currently not resolved how to interpret NSW for left
        // shift by BitWidth - 1, so we avoid applying flags in that
        // case. Remove this check (or this comment) once the situation
        // is resolved. See
        // http://lists.llvm.org/pipermail/llvm-dev/2015-April/084195.html
        // and http://reviews.llvm.org/D8890 .
        auto Flags = SCEV::FlagAnyWrap;
        if (BO->Op && SA->getValue().ult(BitWidth - 1))
          Flags = getNoWrapFlagsFromUB(BO->Op);

        Constant *X = ConstantInt::get(
            getContext(), APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
        return getMulExpr(getSCEV(BO->LHS), getSCEV(X), Flags);
      }
      break;

    case Instruction::AShr: {
      // AShr X, C, where C is a constant.
      ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS);
      if (!CI)
        break;

      Type *OuterTy = BO->LHS->getType();
      uint64_t BitWidth = getTypeSizeInBits(OuterTy);
      // If the shift count is not less than the bitwidth, the result of
      // the shift is undefined. Don't try to analyze it, because the
      // resolution chosen here may differ from the resolution chosen in
      // other parts of the compiler.
      if (CI->getValue().uge(BitWidth))
        break;

      if (CI->isZero())
        return getSCEV(BO->LHS); // shift by zero --> noop

      uint64_t AShrAmt = CI->getZExtValue();
      Type *TruncTy = IntegerType::get(getContext(), BitWidth - AShrAmt);

      Operator *L = dyn_cast<Operator>(BO->LHS);
      if (L && L->getOpcode() == Instruction::Shl) {
        // X = Shl A, n
        // Y = AShr X, m
        // Both n and m are constant.

        const SCEV *ShlOp0SCEV = getSCEV(L->getOperand(0));
        if (L->getOperand(1) == BO->RHS)
          // For a two-shift sext-inreg, i.e. n = m,
          // use sext(trunc(x)) as the SCEV expression.
          return getSignExtendExpr(
              getTruncateExpr(ShlOp0SCEV, TruncTy), OuterTy);
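        // E.g. (illustrative): %s = shl i32 %a, 24 followed by
        // ashr i32 %s, 24 is a sign extension of the low 8 bits of %a,
        // modeled as (sext i8 (trunc %a) to i32).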

        ConstantInt *ShlAmtCI = dyn_cast<ConstantInt>(L->getOperand(1));
        if (ShlAmtCI && ShlAmtCI->getValue().ult(BitWidth)) {
          uint64_t ShlAmt = ShlAmtCI->getZExtValue();
          if (ShlAmt > AShrAmt) {
            // When n > m, use sext(mul(trunc(x), 2^(n-m)))) as the SCEV
            // expression. We already checked that ShlAmt < BitWidth, so
            // the multiplier, 1 << (ShlAmt - AShrAmt), fits into TruncTy as
            // ShlAmt - AShrAmt < Amt.
            APInt Mul = APInt::getOneBitSet(BitWidth - AShrAmt,
                                            ShlAmt - AShrAmt);
            return getSignExtendExpr(
                getMulExpr(getTruncateExpr(ShlOp0SCEV, TruncTy),
                getConstant(Mul)), OuterTy);
          }
        }
      }
      break;
    }
    }
  }

  switch (U->getOpcode()) {
  case Instruction::Trunc:
    return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType());

  case Instruction::ZExt:
    return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType());

  case Instruction::SExt:
    if (auto BO = MatchBinaryOp(U->getOperand(0), DT)) {
      // The NSW flag of a subtract does not always survive the conversion to
      // A + (-1)*B. By pushing sign extension onto its operands we are much
      // more likely to preserve NSW and allow later AddRec optimisations.
      //
      // NOTE: This is effectively duplicating this logic from getSignExtend:
      //   sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw>
      // but by that point the NSW information has potentially been lost.
      if (BO->Opcode == Instruction::Sub && BO->IsNSW) {
        Type *Ty = U->getType();
        auto *V1 = getSignExtendExpr(getSCEV(BO->LHS), Ty);
        auto *V2 = getSignExtendExpr(getSCEV(BO->RHS), Ty);
        return getMinusSCEV(V1, V2, SCEV::FlagNSW);
      }
    }
    return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType());

  case Instruction::BitCast:
    // BitCasts are no-op casts so we just eliminate the cast.
    if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType()))
      return getSCEV(U->getOperand(0));
    break;

  // It's tempting to handle inttoptr and ptrtoint as no-ops, however this can
  // lead to pointer expressions which cannot safely be expanded to GEPs,
  // because ScalarEvolution doesn't respect the GEP aliasing rules when
  // simplifying integer expressions.

  case Instruction::GetElementPtr:
    return createNodeForGEP(cast<GEPOperator>(U));

  case Instruction::PHI:
    return createNodeForPHI(cast<PHINode>(U));

  case Instruction::Select:
    // U can also be a select constant expr, which we let fall through. Since
    // createNodeForSelect only works for a condition that is an `ICmpInst`,
    // and constant expressions cannot have instructions as operands, we'd
    // have returned getUnknown for a select constant expression anyway.
    if (isa<Instruction>(U))
      return createNodeForSelectOrPHI(cast<Instruction>(U), U->getOperand(0),
                                      U->getOperand(1), U->getOperand(2));
    break;

  case Instruction::Call:
  case Instruction::Invoke:
    if (Value *RV = CallSite(U).getReturnedArgOperand())
      return getSCEV(RV);
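    // E.g. (illustrative): for %p = call i8* @foo(i8* returned %q), the
    // "returned" attribute guarantees the call returns %q, so %p shares
    // %q's SCEV.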
    break;
  }

  return getUnknown(V);
}

//===----------------------------------------------------------------------===//
//                   Iteration Count Computation Code
//===----------------------------------------------------------------------===//

static unsigned getConstantTripCount(const SCEVConstant *ExitCount) {
  if (!ExitCount)
    return 0;

  ConstantInt *ExitConst = ExitCount->getValue();

  // Guard against huge trip counts.
  if (ExitConst->getValue().getActiveBits() > 32)
    return 0;

  // In case of integer overflow, this returns 0, which is correct.
  return ((unsigned)ExitConst->getZExtValue()) + 1;
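  // E.g. (illustrative): a backedge-taken count of 41 yields a trip count
  // of 42, while a backedge-taken count of 2^32 - 1 wraps the 32-bit
  // addition above and correctly yields 0.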
}

unsigned ScalarEvolution::getSmallConstantTripCount(const Loop *L) {
  if (BasicBlock *ExitingBB = L->getExitingBlock())
    return getSmallConstantTripCount(L, ExitingBB);

  // No trip count information for multiple exits.
  return 0;
}

unsigned ScalarEvolution::getSmallConstantTripCount(const Loop *L,
                                                    BasicBlock *ExitingBlock) {
  assert(ExitingBlock && "Must pass a non-null exiting block!");
  assert(L->isLoopExiting(ExitingBlock) &&
         "Exiting block must actually branch out of the loop!");
  const SCEVConstant *ExitCount =
      dyn_cast<SCEVConstant>(getExitCount(L, ExitingBlock));
  return getConstantTripCount(ExitCount);
}

unsigned ScalarEvolution::getSmallConstantMaxTripCount(const Loop *L) {
  const auto *MaxExitCount =
      dyn_cast<SCEVConstant>(getMaxBackedgeTakenCount(L));
  return getConstantTripCount(MaxExitCount);
}

unsigned ScalarEvolution::getSmallConstantTripMultiple(const Loop *L) {
  if (BasicBlock *ExitingBB = L->getExitingBlock())
    return getSmallConstantTripMultiple(L, ExitingBB);

  // No trip multiple information for multiple exits.
  return 0;
}

/// Returns the largest constant divisor of the trip count of this loop as a
/// normal unsigned value, if possible. This means that the actual trip count
/// is always a multiple of the returned value (don't forget the trip count
/// could very well be zero as well!).
///
/// Returns 1 if the trip count is unknown or not guaranteed to be a multiple
/// of a constant (which is also the case if the trip count is simply
/// constant; use getSmallConstantTripCount for that case). It will also
/// return 1 if the trip count is very large (>= 2^32).
///
/// As explained in the comments for getSmallConstantTripCount, this assumes
/// that control exits the loop via ExitingBlock.
unsigned
ScalarEvolution::getSmallConstantTripMultiple(const Loop *L,
                                              BasicBlock *ExitingBlock) {
  assert(ExitingBlock && "Must pass a non-null exiting block!");
  assert(L->isLoopExiting(ExitingBlock) &&
         "Exiting block must actually branch out of the loop!");
  const SCEV *ExitCount = getExitCount(L, ExitingBlock);
  if (ExitCount == getCouldNotCompute())
    return 1;

  // Get the trip count from the BE count by adding 1.
  const SCEV *TCExpr = getAddExpr(ExitCount, getOne(ExitCount->getType()));

  const SCEVConstant *TC = dyn_cast<SCEVConstant>(TCExpr);
  if (!TC)
    // Attempt to factor more general cases. Returns the greatest power of
    // two divisor. If overflow happens, the trip count expression is still
    // divisible by the greatest power of 2 divisor returned.
    return 1U << std::min((uint32_t)31, GetMinTrailingZeros(TCExpr));
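  // E.g. (illustrative): a backedge-taken count of (3 + 4 * %n) gives the
  // trip count expression (4 + 4 * %n); its two known trailing zero bits
  // yield a trip multiple of 4.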

  ConstantInt *Result = TC->getValue();

  // Guard against huge trip counts (this requires checking
  // for zero to handle the case where the trip count == -1 and the
  // addition wraps).
  if (!Result || Result->getValue().getActiveBits() > 32 ||
      Result->getValue().getActiveBits() == 0)
    return 1;

  return (unsigned)Result->getZExtValue();
}

/// Get the expression for the number of loop iterations for which this loop
/// is guaranteed not to exit via ExitingBlock. Otherwise return
/// SCEVCouldNotCompute.
const SCEV *ScalarEvolution::getExitCount(const Loop *L,
                                          BasicBlock *ExitingBlock) {
  return getBackedgeTakenInfo(L).getExact(ExitingBlock, this);
}

const SCEV *
ScalarEvolution::getPredicatedBackedgeTakenCount(const Loop *L,
                                                 SCEVUnionPredicate &Preds) {
  return getPredicatedBackedgeTakenInfo(L).getExact(L, this, &Preds);
}

const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L) {
  return getBackedgeTakenInfo(L).getExact(L, this);
}

/// Similar to getBackedgeTakenCount, except return the least SCEV value that
/// is known never to be less than the actual backedge taken count.
const SCEV *ScalarEvolution::getMaxBackedgeTakenCount(const Loop *L) {
  return getBackedgeTakenInfo(L).getMax(this);
}

bool ScalarEvolution::isBackedgeTakenCountMaxOrZero(const Loop *L) {
  return getBackedgeTakenInfo(L).isMaxOrZero(this);
}
/// Push PHI nodes in the header of the given loop onto the given Worklist.
static void
PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) {
  BasicBlock *Header = L->getHeader();

  // Push all Loop-header PHIs onto the Worklist stack.
  for (PHINode &PN : Header->phis())
    Worklist.push_back(&PN);
}

const ScalarEvolution::BackedgeTakenInfo &
ScalarEvolution::getPredicatedBackedgeTakenInfo(const Loop *L) {
  auto &BTI = getBackedgeTakenInfo(L);
  if (BTI.hasFullInfo())
    return BTI;

  auto Pair = PredicatedBackedgeTakenCounts.insert({L, BackedgeTakenInfo()});

  if (!Pair.second)
    return Pair.first->second;

  BackedgeTakenInfo Result =
      computeBackedgeTakenCount(L, /*AllowPredicates=*/true);

  return PredicatedBackedgeTakenCounts.find(L)->second = std::move(Result);
}
const ScalarEvolution::BackedgeTakenInfo &
ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
  // Initially insert an invalid entry for this loop. If the insertion
  // succeeds, proceed to actually compute a backedge-taken count and
  // update the value. The temporary CouldNotCompute value tells SCEV
  // code elsewhere that it shouldn't attempt to request a new
  // backedge-taken count, which could result in infinite recursion.
  std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair =
      BackedgeTakenCounts.insert({L, BackedgeTakenInfo()});
  if (!Pair.second)
    return Pair.first->second;

  // computeBackedgeTakenCount may allocate memory for its result. Inserting
  // it into the BackedgeTakenCounts map transfers ownership. Otherwise, the
  // result must be cleared in this scope.
  BackedgeTakenInfo Result = computeBackedgeTakenCount(L);

  // In a product build the statistics are unused.
  (void)NumTripCountsComputed;
  (void)NumTripCountsNotComputed;
#if LLVM_ENABLE_STATS || !defined(NDEBUG)
  const SCEV *BEExact = Result.getExact(L, this);
  if (BEExact != getCouldNotCompute()) {
    assert(isLoopInvariant(BEExact, L) &&
           isLoopInvariant(Result.getMax(this), L) &&
           "Computed backedge-taken count isn't loop invariant for loop!");
    ++NumTripCountsComputed;
  }
  else if (Result.getMax(this) == getCouldNotCompute() &&
           isa<PHINode>(L->getHeader()->begin())) {
    // Only count loops that have phi nodes as not being computable.
    ++NumTripCountsNotComputed;
  }
#endif // LLVM_ENABLE_STATS || !defined(NDEBUG)

  // Now that we know more about the trip count for this loop, forget any
  // existing SCEV values for PHI nodes in this loop since they are only
  // conservative estimates made without the benefit of trip count
  // information. This is similar to the code in forgetLoop, except that
  // it handles SCEVUnknown PHI nodes specially.
  if (Result.hasAnyInfo()) {
    SmallVector<Instruction *, 16> Worklist;
    PushLoopPHIs(L, Worklist);

    SmallPtrSet<Instruction *, 8> Discovered;
    while (!Worklist.empty()) {
      Instruction *I = Worklist.pop_back_val();

      ValueExprMapType::iterator It =
          ValueExprMap.find_as(static_cast<Value *>(I));
      if (It != ValueExprMap.end()) {
        const SCEV *Old = It->second;

        // SCEVUnknown for a PHI either means that it has an unrecognized
        // structure, or it's a PHI that's in the process of being computed
        // by createNodeForPHI. In the former case, additional loop trip
        // count information isn't going to change anything. In the latter
        // case, createNodeForPHI will perform the necessary updates on its
        // own when it gets to that point.
        if (!isa<PHINode>(I) || !isa<SCEVUnknown>(Old)) {
          eraseValueFromMap(It->first);
          forgetMemoizedResults(Old);
        }
        if (PHINode *PN = dyn_cast<PHINode>(I))
          ConstantEvolutionLoopExitValue.erase(PN);
      }

      // Since we don't need to invalidate anything for correctness and we're
      // only invalidating to make SCEV's results more precise, we get to
      // stop early to avoid invalidating too much. This is especially
      // important in cases like:
      //
      //   %v = f(pn0, pn1) // pn0 and pn1 used through some other phi node
      //                    // in loop1
      //
      //   loop0:
      //     %pn0 = phi
      //     ...
      //
      //   loop1:
      //     %pn1 = phi
      //     ...
      //
      // where both loop0 and loop1's backedge taken count uses the SCEV
      // expression for %v. If we don't have the early stop below then in
      // cases like the above, getBackedgeTakenInfo(loop1) will clear out the
      // trip count for loop0 and getBackedgeTakenInfo(loop0) will clear out
      // the trip count for loop1, effectively nullifying SCEV's trip count
      // cache.
      for (auto *U : I->users())
        if (auto *I = dyn_cast<Instruction>(U)) {
          auto *LoopForUser = LI.getLoopFor(I->getParent());
          if (LoopForUser && L->contains(LoopForUser) &&
              Discovered.insert(I).second)
            Worklist.push_back(I);
        }
    }
  }

  // Re-lookup the insert position, since the call to
  // computeBackedgeTakenCount above could result in a
  // recursive call to getBackedgeTakenInfo (on a different
  // loop), which would invalidate the iterator computed
  // earlier.
  return BackedgeTakenCounts.find(L)->second = std::move(Result);
}
void ScalarEvolution::forgetAllLoops() {
  // This method is intended to forget all info about loops. It should
  // invalidate caches as if the following happened:
  // - The trip counts of all loops have changed arbitrarily
  // - Every llvm::Value has been updated in place to produce a different
  //   result.
  BackedgeTakenCounts.clear();
  PredicatedBackedgeTakenCounts.clear();
  LoopPropertiesCache.clear();
  ConstantEvolutionLoopExitValue.clear();
  ValueExprMap.clear();
  ValuesAtScopes.clear();
  LoopDispositions.clear();
  BlockDispositions.clear();
  UnsignedRanges.clear();
  SignedRanges.clear();
  ExprValueMap.clear();
  HasRecMap.clear();
  MinTrailingZerosCache.clear();
  PredicatedSCEVRewrites.clear();
}
void ScalarEvolution::forgetLoop(const Loop *L) {
  // Drop any stored trip count value.
  auto RemoveLoopFromBackedgeMap =
      [](DenseMap<const Loop *, BackedgeTakenInfo> &Map, const Loop *L) {
        auto BTCPos = Map.find(L);
        if (BTCPos != Map.end()) {
          BTCPos->second.clear();
          Map.erase(BTCPos);
        }
      };

  SmallVector<const Loop *, 16> LoopWorklist(1, L);
  SmallVector<Instruction *, 32> Worklist;
  SmallPtrSet<Instruction *, 16> Visited;

  // Iterate over all the loops and sub-loops to drop SCEV information.
  while (!LoopWorklist.empty()) {
    auto *CurrL = LoopWorklist.pop_back_val();

    RemoveLoopFromBackedgeMap(BackedgeTakenCounts, CurrL);
    RemoveLoopFromBackedgeMap(PredicatedBackedgeTakenCounts, CurrL);

    // Drop information about predicated SCEV rewrites for this loop.
    for (auto I = PredicatedSCEVRewrites.begin();
         I != PredicatedSCEVRewrites.end();) {
      std::pair<const SCEV *, const Loop *> Entry = I->first;
      if (Entry.second == CurrL)
        PredicatedSCEVRewrites.erase(I++);
      else
        ++I;
    }

    auto LoopUsersItr = LoopUsers.find(CurrL);
    if (LoopUsersItr != LoopUsers.end()) {
      for (auto *S : LoopUsersItr->second)
        forgetMemoizedResults(S);
      LoopUsers.erase(LoopUsersItr);
    }

    // Drop information about expressions based on loop-header PHIs.
    PushLoopPHIs(CurrL, Worklist);

    while (!Worklist.empty()) {
      Instruction *I = Worklist.pop_back_val();
      if (!Visited.insert(I).second)
        continue;

      ValueExprMapType::iterator It =
          ValueExprMap.find_as(static_cast<Value *>(I));
      if (It != ValueExprMap.end()) {
        eraseValueFromMap(It->first);
        forgetMemoizedResults(It->second);
        if (PHINode *PN = dyn_cast<PHINode>(I))
          ConstantEvolutionLoopExitValue.erase(PN);
      }

      PushDefUseChildren(I, Worklist);
    }

    LoopPropertiesCache.erase(CurrL);
    // Forget all contained loops too, to avoid dangling entries in the
    // ValuesAtScopes map.
    LoopWorklist.append(CurrL->begin(), CurrL->end());
  }
}
void ScalarEvolution::forgetTopmostLoop(const Loop *L) {
  while (Loop *Parent = L->getParentLoop())
    L = Parent;
  forgetLoop(L);
}

void ScalarEvolution::forgetValue(Value *V) {
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return;

  // Drop information about expressions based on loop-header PHIs.
  SmallVector<Instruction *, 16> Worklist;
  Worklist.push_back(I);

  SmallPtrSet<Instruction *, 8> Visited;
  while (!Worklist.empty()) {
    I = Worklist.pop_back_val();
    if (!Visited.insert(I).second)
      continue;

    ValueExprMapType::iterator It =
        ValueExprMap.find_as(static_cast<Value *>(I));
    if (It != ValueExprMap.end()) {
      eraseValueFromMap(It->first);
      forgetMemoizedResults(It->second);
      if (PHINode *PN = dyn_cast<PHINode>(I))
        ConstantEvolutionLoopExitValue.erase(PN);
    }

    PushDefUseChildren(I, Worklist);
  }
}
/// Get the exact loop backedge taken count considering all loop exits. A
/// computable result can only be returned for loops with all exiting blocks
/// dominating the latch. howFarToZero assumes that the limit of each loop
/// test is never skipped. This is a valid assumption as long as the loop
/// exits via that test. For precise results, it is the caller's
/// responsibility to specify the relevant loop exiting block using
/// getExact(ExitingBlock, SE).
const SCEV *
ScalarEvolution::BackedgeTakenInfo::getExact(const Loop *L, ScalarEvolution *SE,
                                             SCEVUnionPredicate *Preds) const {
  // If any exits were not computable, the loop is not computable.
  if (!isComplete() || ExitNotTaken.empty())
    return SE->getCouldNotCompute();

  const BasicBlock *Latch = L->getLoopLatch();
  // All exiting blocks we have collected must dominate the only backedge.
  if (!Latch)
    return SE->getCouldNotCompute();

  // All exiting blocks we have gathered dominate loop's latch, so exact trip
  // count is simply a minimum out of all these calculated exit counts.
  SmallVector<const SCEV *, 2> Ops;
  for (auto &ENT : ExitNotTaken) {
    const SCEV *BECount = ENT.ExactNotTaken;
    assert(BECount != SE->getCouldNotCompute() && "Bad exit SCEV!");
    assert(SE->DT.dominates(ENT.ExitingBlock, Latch) &&
           "We should only have known counts for exiting blocks that "
           "dominate latch!");

    Ops.push_back(BECount);

    if (Preds && !ENT.hasAlwaysTruePredicate())
      Preds->add(ENT.Predicate.get());

    assert((Preds || ENT.hasAlwaysTruePredicate()) &&
           "Predicate should be always true!");
  }

  return SE->getUMinFromMismatchedTypes(Ops);
}
/// Get the exact not taken count for this loop exit.
const SCEV *
ScalarEvolution::BackedgeTakenInfo::getExact(BasicBlock *ExitingBlock,
                                             ScalarEvolution *SE) const {
  for (auto &ENT : ExitNotTaken)
    if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate())
      return ENT.ExactNotTaken;

  return SE->getCouldNotCompute();
}

/// getMax - Get the max backedge taken count for the loop.
const SCEV *
ScalarEvolution::BackedgeTakenInfo::getMax(ScalarEvolution *SE) const {
  auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) {
    return !ENT.hasAlwaysTruePredicate();
  };

  if (any_of(ExitNotTaken, PredicateNotAlwaysTrue) || !getMax())
    return SE->getCouldNotCompute();

  assert((isa<SCEVCouldNotCompute>(getMax()) || isa<SCEVConstant>(getMax())) &&
         "No point in having a non-constant max backedge taken count!");
  return getMax();
}

bool ScalarEvolution::BackedgeTakenInfo::isMaxOrZero(ScalarEvolution *SE) const {
  auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) {
    return !ENT.hasAlwaysTruePredicate();
  };
  return MaxOrZero && !any_of(ExitNotTaken, PredicateNotAlwaysTrue);
}

bool ScalarEvolution::BackedgeTakenInfo::hasOperand(const SCEV *S,
                                                    ScalarEvolution *SE) const {
  if (getMax() && getMax() != SE->getCouldNotCompute() &&
      SE->hasOperand(getMax(), S))
    return true;

  for (auto &ENT : ExitNotTaken)
    if (ENT.ExactNotTaken != SE->getCouldNotCompute() &&
        SE->hasOperand(ENT.ExactNotTaken, S))
      return true;

  return false;
}
ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E)
    : ExactNotTaken(E), MaxNotTaken(E) {
  assert((isa<SCEVCouldNotCompute>(MaxNotTaken) ||
          isa<SCEVConstant>(MaxNotTaken)) &&
         "No point in having a non-constant max backedge taken count!");
}

ScalarEvolution::ExitLimit::ExitLimit(
    const SCEV *E, const SCEV *M, bool MaxOrZero,
    ArrayRef<const SmallPtrSetImpl<const SCEVPredicate *> *> PredSetList)
    : ExactNotTaken(E), MaxNotTaken(M), MaxOrZero(MaxOrZero) {
  assert((isa<SCEVCouldNotCompute>(ExactNotTaken) ||
          !isa<SCEVCouldNotCompute>(MaxNotTaken)) &&
         "Exact is not allowed to be less precise than Max");
  assert((isa<SCEVCouldNotCompute>(MaxNotTaken) ||
          isa<SCEVConstant>(MaxNotTaken)) &&
         "No point in having a non-constant max backedge taken count!");
  for (auto *PredSet : PredSetList)
    for (auto *P : *PredSet)
      addPredicate(P);
}

ScalarEvolution::ExitLimit::ExitLimit(
    const SCEV *E, const SCEV *M, bool MaxOrZero,
    const SmallPtrSetImpl<const SCEVPredicate *> &PredSet)
    : ExitLimit(E, M, MaxOrZero, {&PredSet}) {
  assert((isa<SCEVCouldNotCompute>(MaxNotTaken) ||
          isa<SCEVConstant>(MaxNotTaken)) &&
         "No point in having a non-constant max backedge taken count!");
}

ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E, const SCEV *M,
                                      bool MaxOrZero)
    : ExitLimit(E, M, MaxOrZero, None) {
  assert((isa<SCEVCouldNotCompute>(MaxNotTaken) ||
          isa<SCEVConstant>(MaxNotTaken)) &&
         "No point in having a non-constant max backedge taken count!");
}
/// Allocate memory for BackedgeTakenInfo and copy the not-taken count of
/// each computable exit into a persistent ExitNotTakenInfo array.
ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo(
    ArrayRef<ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo> ExitCounts,
    bool Complete, const SCEV *MaxCount, bool MaxOrZero)
    : MaxAndComplete(MaxCount, Complete), MaxOrZero(MaxOrZero) {
  using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;

  ExitNotTaken.reserve(ExitCounts.size());
  std::transform(
      ExitCounts.begin(), ExitCounts.end(), std::back_inserter(ExitNotTaken),
      [&](const EdgeExitInfo &EEI) {
        BasicBlock *ExitBB = EEI.first;
        const ExitLimit &EL = EEI.second;
        if (EL.Predicates.empty())
          return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, nullptr);

        std::unique_ptr<SCEVUnionPredicate> Predicate(new SCEVUnionPredicate);
        for (auto *Pred : EL.Predicates)
          Predicate->add(Pred);

        return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken,
                                std::move(Predicate));
      });
  assert((isa<SCEVCouldNotCompute>(MaxCount) || isa<SCEVConstant>(MaxCount)) &&
         "No point in having a non-constant max backedge taken count!");
}

/// Invalidate this result and free the ExitNotTakenInfo array.
void ScalarEvolution::BackedgeTakenInfo::clear() {
  ExitNotTaken.clear();
}
/// Compute the number of times the backedge of the specified loop will
/// execute.
ScalarEvolution::BackedgeTakenInfo
ScalarEvolution::computeBackedgeTakenCount(const Loop *L,
                                           bool AllowPredicates) {
  SmallVector<BasicBlock *, 8> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;

  SmallVector<EdgeExitInfo, 4> ExitCounts;
  bool CouldComputeBECount = true;
  BasicBlock *Latch = L->getLoopLatch(); // may be NULL.
  const SCEV *MustExitMaxBECount = nullptr;
  const SCEV *MayExitMaxBECount = nullptr;
  bool MustExitMaxOrZero = false;

  // Compute the ExitLimit for each loop exit. Use this to populate
  // ExitCounts and compute maxBECount.
  // Do a union of all the predicates here.
  for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
    BasicBlock *ExitBB = ExitingBlocks[i];
    ExitLimit EL = computeExitLimit(L, ExitBB, AllowPredicates);

    assert((AllowPredicates || EL.Predicates.empty()) &&
           "Predicated exit limit when predicates are not allowed!");

    // 1. For each exit that can be computed, add an entry to ExitCounts.
    // CouldComputeBECount is true only if all exits can be computed.
    if (EL.ExactNotTaken == getCouldNotCompute())
      // We couldn't compute an exact value for this exit, so
      // we won't be able to compute an exact value for the loop.
      CouldComputeBECount = false;
    else
      ExitCounts.emplace_back(ExitBB, EL);

    // 2. Derive the loop's MaxBECount from each exit's max number of
    // non-exiting iterations. Partition the loop exits into two kinds:
    // LoopMustExits and LoopMayExits.
    //
    // If the exit dominates the loop latch, it is a LoopMustExit otherwise
    // it is a LoopMayExit. If any computable LoopMustExit is found, then
    // MaxBECount is the minimum EL.MaxNotTaken of computable
    // LoopMustExits. Otherwise, MaxBECount is conservatively the maximum
    // EL.MaxNotTaken, where CouldNotCompute is considered greater than any
    // computable EL.MaxNotTaken.
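    //
    // E.g. (illustrative): with a computable must-exit of at most 10
    // iterations and a may-exit of at most 20, MaxBECount is 10; with only
    // the two may-exits, it is conservatively 20.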
    if (EL.MaxNotTaken != getCouldNotCompute() && Latch &&
        DT.dominates(ExitBB, Latch)) {
      if (!MustExitMaxBECount) {
        MustExitMaxBECount = EL.MaxNotTaken;
        MustExitMaxOrZero = EL.MaxOrZero;
      } else {
        MustExitMaxBECount =
            getUMinFromMismatchedTypes(MustExitMaxBECount, EL.MaxNotTaken);
      }
    } else if (MayExitMaxBECount != getCouldNotCompute()) {
      if (!MayExitMaxBECount || EL.MaxNotTaken == getCouldNotCompute())
        MayExitMaxBECount = EL.MaxNotTaken;
      else {
        MayExitMaxBECount =
            getUMaxFromMismatchedTypes(MayExitMaxBECount, EL.MaxNotTaken);
      }
    }
  }
  const SCEV *MaxBECount = MustExitMaxBECount ? MustExitMaxBECount :
    (MayExitMaxBECount ? MayExitMaxBECount : getCouldNotCompute());
  // The loop backedge will be taken the maximum or zero times if there's
  // a single exit that must be taken the maximum or zero times.
  bool MaxOrZero = (MustExitMaxOrZero && ExitingBlocks.size() == 1);
  return BackedgeTakenInfo(std::move(ExitCounts), CouldComputeBECount,
                           MaxBECount, MaxOrZero);
}
ScalarEvolution::ExitLimit
ScalarEvolution::computeExitLimit(const Loop *L, BasicBlock *ExitingBlock,
                                  bool AllowPredicates) {
  assert(L->contains(ExitingBlock) && "Exit count for non-loop block?");
  // If our exiting block does not dominate the latch, then its connection
  // with the loop's exit limit may be far from trivial.
  const BasicBlock *Latch = L->getLoopLatch();
  if (!Latch || !DT.dominates(ExitingBlock, Latch))
    return getCouldNotCompute();

  bool IsOnlyExit = (L->getExitingBlock() != nullptr);
  Instruction *Term = ExitingBlock->getTerminator();
  if (BranchInst *BI = dyn_cast<BranchInst>(Term)) {
    assert(BI->isConditional() && "If unconditional, it can't be in loop!");
    bool ExitIfTrue = !L->contains(BI->getSuccessor(0));
    assert(ExitIfTrue == L->contains(BI->getSuccessor(1)) &&
           "It should have one successor in loop and one exit block!");
    // Proceed to the next level to examine the exit condition expression.
    return computeExitLimitFromCond(
        L, BI->getCondition(), ExitIfTrue,
        /*ControlsExit=*/IsOnlyExit, AllowPredicates);
  }

  if (SwitchInst *SI = dyn_cast<SwitchInst>(Term)) {
    // For switch, make sure that there is a single exit from the loop.
    BasicBlock *Exit = nullptr;
    for (auto *SBB : successors(ExitingBlock))
      if (!L->contains(SBB)) {
        if (Exit) // Multiple exit successors.
          return getCouldNotCompute();
        Exit = SBB;
      }
    assert(Exit && "Exiting block must have at least one exit");
    return computeExitLimitFromSingleExitSwitch(L, SI, Exit,
                                                /*ControlsExit=*/IsOnlyExit);
  }

  return getCouldNotCompute();
}
ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCond(
    const Loop *L, Value *ExitCond, bool ExitIfTrue,
    bool ControlsExit, bool AllowPredicates) {
  ScalarEvolution::ExitLimitCacheTy Cache(L, ExitIfTrue, AllowPredicates);
  return computeExitLimitFromCondCached(Cache, L, ExitCond, ExitIfTrue,
                                        ControlsExit, AllowPredicates);
}

Optional<ScalarEvolution::ExitLimit>
ScalarEvolution::ExitLimitCache::find(const Loop *L, Value *ExitCond,
                                      bool ExitIfTrue, bool ControlsExit,
                                      bool AllowPredicates) {
  (void)this->L;
  (void)this->ExitIfTrue;
  (void)this->AllowPredicates;

  assert(this->L == L && this->ExitIfTrue == ExitIfTrue &&
         this->AllowPredicates == AllowPredicates &&
         "Variance in assumed invariant key components!");
  auto Itr = TripCountMap.find({ExitCond, ControlsExit});
  if (Itr == TripCountMap.end())
    return None;
  return Itr->second;
}

void ScalarEvolution::ExitLimitCache::insert(const Loop *L, Value *ExitCond,
                                             bool ExitIfTrue,
                                             bool ControlsExit,
                                             bool AllowPredicates,
                                             const ExitLimit &EL) {
  assert(this->L == L && this->ExitIfTrue == ExitIfTrue &&
         this->AllowPredicates == AllowPredicates &&
         "Variance in assumed invariant key components!");

  auto InsertResult = TripCountMap.insert({{ExitCond, ControlsExit}, EL});
  assert(InsertResult.second && "Expected successful insertion!");
  (void)InsertResult;
}
ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondCached(
    ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue,
    bool ControlsExit, bool AllowPredicates) {

  if (auto MaybeEL =
          Cache.find(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates))
    return *MaybeEL;

  ExitLimit EL = computeExitLimitFromCondImpl(Cache, L, ExitCond, ExitIfTrue,
                                              ControlsExit, AllowPredicates);
  Cache.insert(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates, EL);
  return EL;
}

ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondImpl(
    ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue,
    bool ControlsExit, bool AllowPredicates) {
  // Check if the controlling expression for this loop is an And or Or.
  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(ExitCond)) {
    if (BO->getOpcode() == Instruction::And) {
      // Recurse on the operands of the and.
      bool EitherMayExit = !ExitIfTrue;
      ExitLimit EL0 = computeExitLimitFromCondCached(
          Cache, L, BO->getOperand(0), ExitIfTrue,
          ControlsExit && !EitherMayExit, AllowPredicates);
      ExitLimit EL1 = computeExitLimitFromCondCached(
          Cache, L, BO->getOperand(1), ExitIfTrue,
          ControlsExit && !EitherMayExit, AllowPredicates);
      const SCEV *BECount = getCouldNotCompute();
      const SCEV *MaxBECount = getCouldNotCompute();
      if (EitherMayExit) {
        // Both conditions must be true for the loop to continue executing.
        // Choose the less conservative count.
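        //
        // E.g. (illustrative): for br i1 (and i1 %c0, %c1), %body, %exit
        // the loop is left as soon as either condition turns false, so the
        // exit count is the umin of the two operands' exit counts.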
        if (EL0.ExactNotTaken == getCouldNotCompute() ||
            EL1.ExactNotTaken == getCouldNotCompute())
          BECount = getCouldNotCompute();
        else
          BECount =
              getUMinFromMismatchedTypes(EL0.ExactNotTaken, EL1.ExactNotTaken);
        if (EL0.MaxNotTaken == getCouldNotCompute())
          MaxBECount = EL1.MaxNotTaken;
        else if (EL1.MaxNotTaken == getCouldNotCompute())
          MaxBECount = EL0.MaxNotTaken;
        else
          MaxBECount =
              getUMinFromMismatchedTypes(EL0.MaxNotTaken, EL1.MaxNotTaken);
      } else {
        // Both conditions must be true at the same time for the loop to
        // exit. For now, be conservative.
        if (EL0.MaxNotTaken == EL1.MaxNotTaken)
          MaxBECount = EL0.MaxNotTaken;
        if (EL0.ExactNotTaken == EL1.ExactNotTaken)
          BECount = EL0.ExactNotTaken;
      }

      // There are cases (e.g. PR26207) where computeExitLimitFromCond is
      // able to be more aggressive when computing BECount than when
      // computing MaxBECount. In these cases it is possible for
      // EL0.ExactNotTaken and EL1.ExactNotTaken to match, but for
      // EL0.MaxNotTaken and EL1.MaxNotTaken to not.
      if (isa<SCEVCouldNotCompute>(MaxBECount) &&
          !isa<SCEVCouldNotCompute>(BECount))
        MaxBECount = getConstant(getUnsignedRangeMax(BECount));

      return ExitLimit(BECount, MaxBECount, false,
                       {&EL0.Predicates, &EL1.Predicates});
    }
    if (BO->getOpcode() == Instruction::Or) {
      // Recurse on the operands of the or.
      bool EitherMayExit = ExitIfTrue;
      ExitLimit EL0 = computeExitLimitFromCondCached(
          Cache, L, BO->getOperand(0), ExitIfTrue,
          ControlsExit && !EitherMayExit, AllowPredicates);
      ExitLimit EL1 = computeExitLimitFromCondCached(
          Cache, L, BO->getOperand(1), ExitIfTrue,
          ControlsExit && !EitherMayExit, AllowPredicates);
      const SCEV *BECount = getCouldNotCompute();
      const SCEV *MaxBECount = getCouldNotCompute();
      if (EitherMayExit) {
        // Both conditions must be false for the loop to continue executing.
        // Choose the less conservative count.
        if (EL0.ExactNotTaken == getCouldNotCompute() ||
            EL1.ExactNotTaken == getCouldNotCompute())
          BECount = getCouldNotCompute();
        else
          BECount =
              getUMinFromMismatchedTypes(EL0.ExactNotTaken, EL1.ExactNotTaken);
        if (EL0.MaxNotTaken == getCouldNotCompute())
          MaxBECount = EL1.MaxNotTaken;
        else if (EL1.MaxNotTaken == getCouldNotCompute())
          MaxBECount = EL0.MaxNotTaken;
        else
          MaxBECount =
              getUMinFromMismatchedTypes(EL0.MaxNotTaken, EL1.MaxNotTaken);
      } else {
        // Both conditions must be false at the same time for the loop to
        // exit. For now, be conservative.
        if (EL0.MaxNotTaken == EL1.MaxNotTaken)
          MaxBECount = EL0.MaxNotTaken;
        if (EL0.ExactNotTaken == EL1.ExactNotTaken)
          BECount = EL0.ExactNotTaken;
      }

      // There are cases (e.g. PR26207) where computeExitLimitFromCond is
      // able to be more aggressive when computing BECount than when
      // computing MaxBECount. In these cases it is possible for
      // EL0.ExactNotTaken and EL1.ExactNotTaken to match, but for
      // EL0.MaxNotTaken and EL1.MaxNotTaken to not.
      if (isa<SCEVCouldNotCompute>(MaxBECount) &&
          !isa<SCEVCouldNotCompute>(BECount))
        MaxBECount = getConstant(getUnsignedRangeMax(BECount));

      return ExitLimit(BECount, MaxBECount, false,
                       {&EL0.Predicates, &EL1.Predicates});
    }
  }
  // With an icmp, it may be feasible to compute an exact backedge-taken
  // count. Proceed to the next level to examine the icmp.
  if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond)) {
    ExitLimit EL =
        computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit);
    if (EL.hasFullInfo() || !AllowPredicates)
      return EL;

    // Try again, but use SCEV predicates this time.
    return computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit,
                                    /*AllowPredicates=*/true);
  }

  // Check for a constant condition. These are normally stripped out by
  // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to
  // preserve the CFG and is temporarily leaving constant conditions
  // in place.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) {
    if (ExitIfTrue == !CI->getZExtValue())
      // The backedge is always taken.
      return getCouldNotCompute();
    else
      // The backedge is never taken.
      return getZero(CI->getType());
  }

  // If it's not an integer or pointer comparison then compute it the hard
  // way.
  return computeExitCountExhaustively(L, ExitCond, ExitIfTrue);
}
7319 ScalarEvolution::computeExitLimitFromICmp(const Loop
*L
,
7323 bool AllowPredicates
) {
7324 // If the condition was exit on true, convert the condition to exit on false
7325 ICmpInst::Predicate Pred
;
7327 Pred
= ExitCond
->getPredicate();
7329 Pred
= ExitCond
->getInversePredicate();
7330 const ICmpInst::Predicate OriginalPred
= Pred
;
7332 // Handle common loops like: for (X = "string"; *X; ++X)
7333 if (LoadInst
*LI
= dyn_cast
<LoadInst
>(ExitCond
->getOperand(0)))
7334 if (Constant
*RHS
= dyn_cast
<Constant
>(ExitCond
->getOperand(1))) {
7336 computeLoadConstantCompareExitLimit(LI
, RHS
, L
, Pred
);
7337 if (ItCnt
.hasAnyInfo())
7341 const SCEV
*LHS
= getSCEV(ExitCond
->getOperand(0));
7342 const SCEV
*RHS
= getSCEV(ExitCond
->getOperand(1));
7344 // Try to evaluate any dependencies out of the loop.
7345 LHS
= getSCEVAtScope(LHS
, L
);
7346 RHS
= getSCEVAtScope(RHS
, L
);
7348 // At this point, we would like to compute how many iterations of the
7349 // loop the predicate will return true for these inputs.
7350 if (isLoopInvariant(LHS
, L
) && !isLoopInvariant(RHS
, L
)) {
7351 // If there is a loop-invariant, force it into the RHS.
7352 std::swap(LHS
, RHS
);
7353 Pred
= ICmpInst::getSwappedPredicate(Pred
);
7356 // Simplify the operands before analyzing them.
7357 (void)SimplifyICmpOperands(Pred
, LHS
, RHS
);
7359 // If we have a comparison of a chrec against a constant, try to use value
7360 // ranges to answer this query.
7361 if (const SCEVConstant
*RHSC
= dyn_cast
<SCEVConstant
>(RHS
))
7362 if (const SCEVAddRecExpr
*AddRec
= dyn_cast
<SCEVAddRecExpr
>(LHS
))
7363 if (AddRec
->getLoop() == L
) {
7364 // Form the constant range.
7365 ConstantRange CompRange
=
7366 ConstantRange::makeExactICmpRegion(Pred
, RHSC
->getAPInt());
7368 const SCEV
*Ret
= AddRec
->getNumIterationsInRange(CompRange
, *this);
7369 if (!isa
<SCEVCouldNotCompute
>(Ret
)) return Ret
;
7373 case ICmpInst::ICMP_NE
: { // while (X != Y)
7374 // Convert to: while (X-Y != 0)
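    //
    // E.g. (illustrative): for (i = 0; i != n; ++i) compares {0,+,1} with
    // %n; howFarToZero is asked when ({0,+,1} - %n) first becomes zero,
    // giving a backedge-taken count of %n.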
    ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit,
                                AllowPredicates);
    if (EL.hasAnyInfo()) return EL;
    break;
  }
  case ICmpInst::ICMP_EQ: {                     // while (X == Y)
    // Convert to: while (X-Y == 0)
    ExitLimit EL = howFarToNonZero(getMinusSCEV(LHS, RHS), L);
    if (EL.hasAnyInfo()) return EL;
    break;
  }
  case ICmpInst::ICMP_SLT:
  case ICmpInst::ICMP_ULT: {                    // while (X < Y)
    bool IsSigned = Pred == ICmpInst::ICMP_SLT;
    ExitLimit EL = howManyLessThans(LHS, RHS, L, IsSigned, ControlsExit,
                                    AllowPredicates);
    if (EL.hasAnyInfo()) return EL;
    break;
  }
  case ICmpInst::ICMP_SGT:
  case ICmpInst::ICMP_UGT: {                    // while (X > Y)
    bool IsSigned = Pred == ICmpInst::ICMP_SGT;
    ExitLimit EL =
        howManyGreaterThans(LHS, RHS, L, IsSigned, ControlsExit,
                            AllowPredicates);
    if (EL.hasAnyInfo()) return EL;
    break;
  }
  default:
    break;
  }

  auto *ExhaustiveCount =
      computeExitCountExhaustively(L, ExitCond, ExitIfTrue);

  if (!isa<SCEVCouldNotCompute>(ExhaustiveCount))
    return ExhaustiveCount;

  return computeShiftCompareExitLimit(ExitCond->getOperand(0),
                                      ExitCond->getOperand(1), L,
                                      OriginalPred);
}
ScalarEvolution::ExitLimit
ScalarEvolution::computeExitLimitFromSingleExitSwitch(const Loop *L,
                                                      SwitchInst *Switch,
                                                      BasicBlock *ExitingBlock,
                                                      bool ControlsExit) {
  assert(!L->contains(ExitingBlock) && "Not an exiting block!");

  // Give up if the exit is the default dest of a switch.
  if (Switch->getDefaultDest() == ExitingBlock)
    return getCouldNotCompute();

  assert(L->contains(Switch->getDefaultDest()) &&
         "Default case must not exit the loop!");
  const SCEV *LHS = getSCEVAtScope(Switch->getCondition(), L);
  const SCEV *RHS = getConstant(Switch->findCaseDest(ExitingBlock));

  // while (X != Y) --> while (X-Y != 0)
  ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit);
  if (EL.hasAnyInfo())
    return EL;

  return getCouldNotCompute();
}

static ConstantInt *
EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C,
                                ScalarEvolution &SE) {
  const SCEV *InVal = SE.getConstant(C);
  const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE);
  assert(isa<SCEVConstant>(Val) &&
         "Evaluation of SCEV at constant didn't fold correctly?");
  return cast<SCEVConstant>(Val)->getValue();
}
/// Given an exit condition of 'icmp op load X, cst', try to see if we can
/// compute the backedge execution count.
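///
/// E.g. (illustrative): for
///   while (table[i] != 0) ++i;
/// where @table is a constant global array, the load below is folded for
/// successive values of i until the comparison is known to be false.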
ScalarEvolution::ExitLimit
ScalarEvolution::computeLoadConstantCompareExitLimit(
    LoadInst *LI,
    Constant *RHS,
    const Loop *L,
    ICmpInst::Predicate predicate) {
  if (LI->isVolatile()) return getCouldNotCompute();

  // Check to see if the loaded pointer is a getelementptr of a global.
  // TODO: Use SCEV instead of manually grubbing with GEPs.
  GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0));
  if (!GEP) return getCouldNotCompute();

  // Make sure that it is really a constant global we are gepping, with an
  // initializer, and make sure the first IDX is really 0.
  GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0));
  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
      GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) ||
      !cast<Constant>(GEP->getOperand(1))->isNullValue())
    return getCouldNotCompute();

  // Okay, we allow one non-constant index into the GEP instruction.
  Value *VarIdx = nullptr;
  std::vector<Constant *> Indexes;
  unsigned VarIdxNum = 0;
  for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i)
    if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
      Indexes.push_back(CI);
    } else if (!isa<ConstantInt>(GEP->getOperand(i))) {
      if (VarIdx) return getCouldNotCompute(); // Multiple non-constant idx's.
      VarIdx = GEP->getOperand(i);
      VarIdxNum = i - 2;
      Indexes.push_back(nullptr);
    }

  // Loop-invariant loads may be a byproduct of loop optimization. Skip them.
  if (!VarIdx)
    return getCouldNotCompute();

  // Okay, we know we have a (load (gep GV, 0, X)) comparison with a
  // constant. Check to see if X is a loop variant variable value now.
  const SCEV *Idx = getSCEV(VarIdx);
  Idx = getSCEVAtScope(Idx, L);

  // We can only recognize very limited forms of loop index expressions, in
  // particular, only affine AddRec's like {C1,+,C2}.
  const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx);
  if (!IdxExpr || !IdxExpr->isAffine() || isLoopInvariant(IdxExpr, L) ||
      !isa<SCEVConstant>(IdxExpr->getOperand(0)) ||
      !isa<SCEVConstant>(IdxExpr->getOperand(1)))
    return getCouldNotCompute();

  unsigned MaxSteps = MaxBruteForceIterations;
  for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) {
    ConstantInt *ItCst = ConstantInt::get(
        cast<IntegerType>(IdxExpr->getType()), IterationNum);
    ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this);

    // Form the GEP offset.
    Indexes[VarIdxNum] = Val;

    Constant *Result = ConstantFoldLoadThroughGEPIndices(GV->getInitializer(),
                                                         Indexes);
    if (!Result) break; // Cannot compute!

    // Evaluate the condition for this iteration.
    Result = ConstantExpr::getICmp(predicate, Result, RHS);
    if (!isa<ConstantInt>(Result)) break; // Couldn't decide for sure
    if (cast<ConstantInt>(Result)->getValue().isMinValue()) {
      ++NumArrayLenItCounts;
      return getConstant(ItCst); // Found terminating iteration!
    }
  }
  return getCouldNotCompute();
}
ScalarEvolution::ExitLimit ScalarEvolution::computeShiftCompareExitLimit(
    Value *LHS, Value *RHSV, const Loop *L, ICmpInst::Predicate Pred) {
  ConstantInt *RHS = dyn_cast<ConstantInt>(RHSV);
  if (!RHS)
    return getCouldNotCompute();

  const BasicBlock *Latch = L->getLoopLatch();
  if (!Latch)
    return getCouldNotCompute();

  const BasicBlock *Predecessor = L->getLoopPredecessor();
  if (!Predecessor)
    return getCouldNotCompute();

  // Return true if V is of the form "LHS `shift_op` <positive constant>".
  // Return LHS in OutLHS and shift_op in OutOpCode.
  auto MatchPositiveShift =
      [](Value *V, Value *&OutLHS, Instruction::BinaryOps &OutOpCode) {

    using namespace PatternMatch;

    ConstantInt *ShiftAmt;
    if (match(V, m_LShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
      OutOpCode = Instruction::LShr;
    else if (match(V, m_AShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
      OutOpCode = Instruction::AShr;
    else if (match(V, m_Shl(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
      OutOpCode = Instruction::Shl;
    else
      return false;

    return ShiftAmt->getValue().isStrictlyPositive();
  };

  // Recognize a "shift recurrence" either of the form %iv or of %iv.shifted
  // in the loop, of the form:
  //
  //   %iv = phi i32 [ %iv.shifted, %loop ], [ %val, %preheader ]
  //   %iv.shifted = lshr i32 %iv, <positive constant>
  //
  // Return true on a successful match. Return the corresponding PHI node
  // (%iv above) in PNOut and the opcode of the shift operation in OpCodeOut.
  auto MatchShiftRecurrence =
      [&](Value *V, PHINode *&PNOut, Instruction::BinaryOps &OpCodeOut) {
    Optional<Instruction::BinaryOps> PostShiftOpCode;

    {
      Instruction::BinaryOps OpC;
      Value *V;

      // If we encounter a shift instruction, "peel off" the shift operation,
      // and remember that we did so. Later when we inspect %iv's backedge
      // value, we will make sure that the backedge value uses the same
      // operation.
      //
      // Note: the peeled shift operation does not have to be the same
      // instruction as the one feeding into the PHI's backedge value. We
      // only really care about it being the same *kind* of shift
      // instruction -- that's all that is required for our later inferences
      // to hold.
      if (MatchPositiveShift(LHS, V, OpC)) {
        PostShiftOpCode = OpC;
        LHS = V;
      }
    }

    PNOut = dyn_cast<PHINode>(LHS);
    if (!PNOut || PNOut->getParent() != L->getHeader())
      return false;

    Value *BEValue = PNOut->getIncomingValueForBlock(Latch);
    Value *OpLHS;

    return
        // The backedge value for the PHI node must be a shift by a positive
        // amount
        MatchPositiveShift(BEValue, OpLHS, OpCodeOut) &&

        // of the PHI node itself
        OpLHS == PNOut &&

        // and the kind of shift should match the kind of shift we peeled
        // off, if any.
        (!PostShiftOpCode.hasValue() || *PostShiftOpCode == OpCodeOut);
  };

  PHINode *PN;
  Instruction::BinaryOps OpCode;
  if (!MatchShiftRecurrence(LHS, PN, OpCode))
    return getCouldNotCompute();

  const DataLayout &DL = getDataLayout();

  // The key rationale for this optimization is that for some kinds of shift
  // recurrences, the value of the recurrence "stabilizes" to either 0 or -1
  // within a finite number of iterations. If the condition guarding the
  // backedge (in the sense that the backedge is taken if the condition is
  // true) is false for the value the shift recurrence stabilizes to, then we
  // know that the backedge is taken only a finite number of times.
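  //
  // E.g. (illustrative): for
  //   %iv = phi i32 [ %k, %preheader ], [ %iv.shr, %loop ]
  //   %iv.shr = lshr i32 %iv, 1
  // with the backedge guarded by (%iv != 0), the recurrence stabilizes to 0
  // after at most 32 iterations, so 32 is a safe upper bound on the exit
  // count.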

  ConstantInt *StableValue = nullptr;
  switch (OpCode) {
  default:
    llvm_unreachable("Impossible case!");

  case Instruction::AShr: {
    // {K,ashr,<positive-constant>} stabilizes to signum(K) in at most
    // bitwidth(K) iterations.
    Value *FirstValue = PN->getIncomingValueForBlock(Predecessor);
    KnownBits Known = computeKnownBits(FirstValue, DL, 0, nullptr,
                                       Predecessor->getTerminator(), &DT);
    auto *Ty = cast<IntegerType>(RHS->getType());
    if (Known.isNonNegative())
      StableValue = ConstantInt::get(Ty, 0);
    else if (Known.isNegative())
      StableValue = ConstantInt::get(Ty, -1, true);
    else
      return getCouldNotCompute();

    break;
  }
  case Instruction::LShr:
  case Instruction::Shl:
    // Both {K,lshr,<positive-constant>} and {K,shl,<positive-constant>}
    // stabilize to 0 in at most bitwidth(K) iterations.
    StableValue = ConstantInt::get(cast<IntegerType>(RHS->getType()), 0);
    break;
  }

  auto *Result =
      ConstantFoldCompareInstOperands(Pred, StableValue, RHS, DL, &TLI);
  assert(Result->getType()->isIntegerTy(1) &&
         "Otherwise cannot be an operand to a branch instruction");

  if (Result->isZeroValue()) {
    unsigned BitWidth = getTypeSizeInBits(RHS->getType());
    const SCEV *UpperBound =
        getConstant(getEffectiveSCEVType(RHS->getType()), BitWidth);
    return ExitLimit(getCouldNotCompute(), UpperBound, false);
  }

  return getCouldNotCompute();
}
/// Return true if we can constant fold an instruction of the specified type,
/// assuming that all operands were constants.
static bool CanConstantFold(const Instruction *I) {
  if (isa<BinaryOperator>(I) || isa<CmpInst>(I) ||
      isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) ||
      isa<LoadInst>(I) || isa<ExtractValueInst>(I))
    return true;

  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (const Function *F = CI->getCalledFunction())
      return canConstantFoldCallTo(CI, F);
  return false;
}

/// Determine whether this instruction can constant evolve within this loop
/// assuming its operands can all constant evolve.
static bool canConstantEvolve(Instruction *I, const Loop *L) {
  // An instruction outside of the loop can't be derived from a loop PHI.
  if (!L->contains(I)) return false;

  if (isa<PHINode>(I)) {
    // We don't currently keep track of the control flow needed to evaluate
    // PHIs, so we cannot handle PHIs inside of loops.
    return L->getHeader() == I->getParent();
  }

  // If we won't be able to constant fold this expression even if the
  // operands are constants, bail early.
  return CanConstantFold(I);
}
/// getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by
/// recursing through each instruction operand until reaching a loop header
/// phi.
static PHINode *
getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L,
                               DenseMap<Instruction *, PHINode *> &PHIMap,
                               unsigned Depth) {
  if (Depth > MaxConstantEvolvingDepth)
    return nullptr;

  // Otherwise, we can evaluate this instruction if all of its operands are
  // constant or derived from a PHI node themselves.
  PHINode *PHI = nullptr;
  for (Value *Op : UseInst->operands()) {
    if (isa<Constant>(Op)) continue;

    Instruction *OpInst = dyn_cast<Instruction>(Op);
    if (!OpInst || !canConstantEvolve(OpInst, L)) return nullptr;

    PHINode *P = dyn_cast<PHINode>(OpInst);
    if (!P)
      // If this operand is already visited, reuse the prior result.
      // We may have P != PHI if this is the deepest point at which the
      // inconsistent paths meet.
      P = PHIMap.lookup(OpInst);
    if (!P) {
      // Recurse and memoize the results, whether a phi is found or not.
      // This recursive call invalidates pointers into PHIMap.
      P = getConstantEvolvingPHIOperands(OpInst, L, PHIMap, Depth + 1);
      PHIMap[OpInst] = P;
    }
    if (!P)
      return nullptr; // Not evolving from PHI
    if (PHI && PHI != P)
      return nullptr; // Evolving from multiple different PHIs.
    PHI = P;
  }
  // This is an expression evolving from a constant PHI!
  return PHI;
}
/// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI
/// node in the loop that V is derived from. We allow arbitrary operations
/// along the way, but the operands of an operation must either be constants
/// or a value derived from a constant PHI. If this expression does not fit
/// with these constraints, return null.
static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I || !canConstantEvolve(I, L)) return nullptr;

  if (PHINode *PN = dyn_cast<PHINode>(I))
    return PN;

  // Record non-constant instructions contained by the loop.
  DenseMap<Instruction *, PHINode *> PHIMap;
  return getConstantEvolvingPHIOperands(I, L, PHIMap, 0);
}
/// EvaluateExpression - Given an expression that passes the
/// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI
/// node in the loop has the value PHIVal. If we can't fold this expression
/// for some reason, return null.
static Constant *EvaluateExpression(Value *V, const Loop *L,
                                    DenseMap<Instruction *, Constant *> &Vals,
                                    const DataLayout &DL,
                                    const TargetLibraryInfo *TLI) {
  // Convenient constant check, but redundant for recursive calls.
  if (Constant *C = dyn_cast<Constant>(V)) return C;
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return nullptr;

  if (Constant *C = Vals.lookup(I)) return C;

  // An instruction inside the loop depends on a value outside the loop that
  // we weren't given a mapping for, or a value such as a call inside the
  // loop.
  if (!canConstantEvolve(I, L)) return nullptr;

  // An unmapped PHI can be due to a branch or another loop inside this loop,
  // or due to this not being the initial iteration through a loop where we
  // couldn't compute the evolution of this particular PHI last time.
  if (isa<PHINode>(I)) return nullptr;

  std::vector<Constant *> Operands(I->getNumOperands());

  for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
    Instruction *Operand = dyn_cast<Instruction>(I->getOperand(i));
    if (!Operand) {
      Operands[i] = dyn_cast<Constant>(I->getOperand(i));
      if (!Operands[i]) return nullptr;
      continue;
    }
    Constant *C = EvaluateExpression(Operand, L, Vals, DL, TLI);
    Vals[Operand] = C;
    if (!C) return nullptr;
    Operands[i] = C;
  }

  if (CmpInst *CI = dyn_cast<CmpInst>(I))
    return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
                                           Operands[1], DL, TLI);
  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    if (!LI->isVolatile())
      return ConstantFoldLoadFromConstPtr(Operands[0], LI->getType(), DL);
  }
  return ConstantFoldInstOperands(I, Operands, DL, TLI);
}
// If every incoming value to PN except the one for BB is a specific
// Constant, return that, else return nullptr.
static Constant *getOtherIncomingValue(PHINode *PN, BasicBlock *BB) {
  Constant *IncomingVal = nullptr;

  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    if (PN->getIncomingBlock(i) == BB)
      continue;

    auto *CurrentVal = dyn_cast<Constant>(PN->getIncomingValue(i));
    if (!CurrentVal)
      return nullptr;

    if (IncomingVal != CurrentVal) {
      if (IncomingVal)
        return nullptr;
      IncomingVal = CurrentVal;
    }
  }

  return IncomingVal;
}
7833 /// getConstantEvolutionLoopExitValue - If we know that the specified Phi is
7834 /// in the header of its containing loop, we know the loop executes a
7835 /// constant number of times, and the PHI node is just a recurrence
7836 /// involving constants, fold it.
Constant *
ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
                                                   const APInt &BEs,
                                                   const Loop *L) {
  auto I = ConstantEvolutionLoopExitValue.find(PN);
  if (I != ConstantEvolutionLoopExitValue.end())
    return I->second;

  if (BEs.ugt(MaxBruteForceIterations))
    return ConstantEvolutionLoopExitValue[PN] = nullptr; // Not going to evaluate it.

  Constant *&RetVal = ConstantEvolutionLoopExitValue[PN];

  DenseMap<Instruction *, Constant *> CurrentIterVals;
  BasicBlock *Header = L->getHeader();
  assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");

  BasicBlock *Latch = L->getLoopLatch();
  if (!Latch)
    return nullptr;

  for (PHINode &PHI : Header->phis()) {
    if (auto *StartCST = getOtherIncomingValue(&PHI, Latch))
      CurrentIterVals[&PHI] = StartCST;
  }
  if (!CurrentIterVals.count(PN))
    return RetVal = nullptr;

  Value *BEValue = PN->getIncomingValueForBlock(Latch);

  // Execute the loop symbolically to determine the exit value.
  assert(BEs.getActiveBits() < CHAR_BIT * sizeof(unsigned) &&
         "BEs is <= MaxBruteForceIterations which is an 'unsigned'!");

  unsigned NumIterations = BEs.getZExtValue(); // must be in range
  unsigned IterationNum = 0;
  const DataLayout &DL = getDataLayout();
  for (; ; ++IterationNum) {
    if (IterationNum == NumIterations)
      return RetVal = CurrentIterVals[PN]; // Got exit value!

    // Compute the value of the PHIs for the next iteration.
    // EvaluateExpression adds non-phi values to the CurrentIterVals map.
    DenseMap<Instruction *, Constant *> NextIterVals;
    Constant *NextPHI =
        EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
    if (!NextPHI)
      return nullptr; // Couldn't evaluate!
    NextIterVals[PN] = NextPHI;

    bool StoppedEvolving = NextPHI == CurrentIterVals[PN];

    // Also evaluate the other PHI nodes.  However, we don't get to stop if we
    // cease to be able to evaluate one of them or if they stop evolving,
    // because that doesn't necessarily prevent us from computing PN.
    SmallVector<std::pair<PHINode *, Constant *>, 8> PHIsToCompute;
    for (const auto &I : CurrentIterVals) {
      PHINode *PHI = dyn_cast<PHINode>(I.first);
      if (!PHI || PHI == PN || PHI->getParent() != Header) continue;
      PHIsToCompute.emplace_back(PHI, I.second);
    }
    // We use two distinct loops because EvaluateExpression may invalidate any
    // iterators into CurrentIterVals.
    for (const auto &I : PHIsToCompute) {
      PHINode *PHI = I.first;
      Constant *&NextPHI = NextIterVals[PHI];
      if (!NextPHI) { // Not already computed.
        Value *BEValue = PHI->getIncomingValueForBlock(Latch);
        NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
      }
      if (NextPHI != I.second)
        StoppedEvolving = false;
    }

    // If all entries in CurrentIterVals == NextIterVals then we can stop
    // iterating, the loop can't continue to change.
    if (StoppedEvolving)
      return RetVal = CurrentIterVals[PN];

    CurrentIterVals.swap(NextIterVals);
  }
}
const SCEV *ScalarEvolution::computeExitCountExhaustively(const Loop *L,
                                                          Value *Cond,
                                                          bool ExitWhen) {
  PHINode *PN = getConstantEvolvingPHI(Cond, L);
  if (!PN) return getCouldNotCompute();

  // If the loop is canonicalized, the PHI will have exactly two entries.
  // That's the only form we support here.
  if (PN->getNumIncomingValues() != 2) return getCouldNotCompute();

  DenseMap<Instruction *, Constant *> CurrentIterVals;
  BasicBlock *Header = L->getHeader();
  assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");

  BasicBlock *Latch = L->getLoopLatch();
  assert(Latch && "Should follow from NumIncomingValues == 2!");

  for (PHINode &PHI : Header->phis()) {
    if (auto *StartCST = getOtherIncomingValue(&PHI, Latch))
      CurrentIterVals[&PHI] = StartCST;
  }
  if (!CurrentIterVals.count(PN))
    return getCouldNotCompute();

  // Okay, we found a PHI node that defines the trip count of this loop.
  // Execute the loop symbolically to determine when the condition gets a
  // value of "ExitWhen".
  unsigned MaxIterations = MaxBruteForceIterations; // Limit analysis.
  const DataLayout &DL = getDataLayout();
  for (unsigned IterationNum = 0; IterationNum != MaxIterations; ++IterationNum) {
    auto *CondVal = dyn_cast_or_null<ConstantInt>(
        EvaluateExpression(Cond, L, CurrentIterVals, DL, &TLI));

    // Couldn't symbolically evaluate.
    if (!CondVal) return getCouldNotCompute();

    if (CondVal->getValue() == uint64_t(ExitWhen)) {
      ++NumBruteForceTripCountsComputed;
      return getConstant(Type::getInt32Ty(getContext()), IterationNum);
    }

    // Update all the PHI nodes for the next iteration.
    DenseMap<Instruction *, Constant *> NextIterVals;

    // Create a list of which PHIs we need to compute. We want to do this before
    // calling EvaluateExpression on them because that may invalidate iterators
    // into CurrentIterVals.
    SmallVector<PHINode *, 8> PHIsToCompute;
    for (const auto &I : CurrentIterVals) {
      PHINode *PHI = dyn_cast<PHINode>(I.first);
      if (!PHI || PHI->getParent() != Header) continue;
      PHIsToCompute.push_back(PHI);
    }
    for (PHINode *PHI : PHIsToCompute) {
      Constant *&NextPHI = NextIterVals[PHI];
      if (NextPHI) continue; // Already computed!

      Value *BEValue = PHI->getIncomingValueForBlock(Latch);
      NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
    }
    CurrentIterVals.swap(NextIterVals);
  }

  // Too many iterations were needed to evaluate.
  return getCouldNotCompute();
}
const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
  SmallVector<std::pair<const Loop *, const SCEV *>, 2> &Values =
      ValuesAtScopes[V];
  // Check to see if we've folded this expression at this loop before.
  for (auto &LS : Values)
    if (LS.first == L)
      return LS.second ? LS.second : V;

  Values.emplace_back(L, nullptr);

  // Otherwise compute it.
  const SCEV *C = computeSCEVAtScope(V, L);
  for (auto &LS : reverse(ValuesAtScopes[V]))
    if (LS.first == L) {
      LS.second = C;
      break;
    }
  return C;
}
/// This builds up a Constant using the ConstantExpr interface.  That way, we
/// will return Constants for objects which aren't represented by a
/// SCEVConstant, because SCEVConstant is restricted to ConstantInt.
/// Returns NULL if the SCEV isn't representable as a Constant.
static Constant *BuildConstantFromSCEV(const SCEV *V) {
  switch (static_cast<SCEVTypes>(V->getSCEVType())) {
    case scCouldNotCompute:
    case scAddRecExpr:
      break;
    case scConstant:
      return cast<SCEVConstant>(V)->getValue();
    case scUnknown:
      return dyn_cast<Constant>(cast<SCEVUnknown>(V)->getValue());
    case scSignExtend: {
      const SCEVSignExtendExpr *SS = cast<SCEVSignExtendExpr>(V);
      if (Constant *CastOp = BuildConstantFromSCEV(SS->getOperand()))
        return ConstantExpr::getSExt(CastOp, SS->getType());
      break;
    }
    case scZeroExtend: {
      const SCEVZeroExtendExpr *SZ = cast<SCEVZeroExtendExpr>(V);
      if (Constant *CastOp = BuildConstantFromSCEV(SZ->getOperand()))
        return ConstantExpr::getZExt(CastOp, SZ->getType());
      break;
    }
    case scTruncate: {
      const SCEVTruncateExpr *ST = cast<SCEVTruncateExpr>(V);
      if (Constant *CastOp = BuildConstantFromSCEV(ST->getOperand()))
        return ConstantExpr::getTrunc(CastOp, ST->getType());
      break;
    }
    case scAddExpr: {
      const SCEVAddExpr *SA = cast<SCEVAddExpr>(V);
      if (Constant *C = BuildConstantFromSCEV(SA->getOperand(0))) {
        if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) {
          unsigned AS = PTy->getAddressSpace();
          Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS);
          C = ConstantExpr::getBitCast(C, DestPtrTy);
        }
        for (unsigned i = 1, e = SA->getNumOperands(); i != e; ++i) {
          Constant *C2 = BuildConstantFromSCEV(SA->getOperand(i));
          if (!C2) return nullptr;

          // First pointer!
          if (!C->getType()->isPointerTy() && C2->getType()->isPointerTy()) {
            unsigned AS = C2->getType()->getPointerAddressSpace();
            std::swap(C, C2);
            Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS);
            // The offsets have been converted to bytes.  We can add bytes to an
            // i8* by GEP with the byte count in the first index.
            C = ConstantExpr::getBitCast(C, DestPtrTy);
          }

          // Don't bother trying to sum two pointers. We probably can't
          // statically compute a load that results from it anyway.
          if (C2->getType()->isPointerTy())
            return nullptr;

          if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) {
            if (PTy->getElementType()->isStructTy())
              C2 = ConstantExpr::getIntegerCast(
                  C2, Type::getInt32Ty(C->getContext()), true);
            C = ConstantExpr::getGetElementPtr(PTy->getElementType(), C, C2);
          } else
            C = ConstantExpr::getAdd(C, C2);
        }
        return C;
      }
      break;
    }
    case scMulExpr: {
      const SCEVMulExpr *SM = cast<SCEVMulExpr>(V);
      if (Constant *C = BuildConstantFromSCEV(SM->getOperand(0))) {
        // Don't bother with pointers at all.
        if (C->getType()->isPointerTy()) return nullptr;
        for (unsigned i = 1, e = SM->getNumOperands(); i != e; ++i) {
          Constant *C2 = BuildConstantFromSCEV(SM->getOperand(i));
          if (!C2 || C2->getType()->isPointerTy()) return nullptr;
          C = ConstantExpr::getMul(C, C2);
        }
        return C;
      }
      break;
    }
    case scUDivExpr: {
      const SCEVUDivExpr *SU = cast<SCEVUDivExpr>(V);
      if (Constant *LHS = BuildConstantFromSCEV(SU->getLHS()))
        if (Constant *RHS = BuildConstantFromSCEV(SU->getRHS()))
          if (LHS->getType() == RHS->getType())
            return ConstantExpr::getUDiv(LHS, RHS);
      break;
    }
    case scSMaxExpr:
    case scUMaxExpr:
    case scSMinExpr:
    case scUMinExpr:
      break; // TODO: smax, umax, smin, umin.
  }
  return nullptr;
}
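
// For example, an add SCEV whose operands fold to the constant 8 and a
// global @g is built, roughly, as a GEP adding 8 bytes to an i8* bitcast of
// @g -- a form SCEVConstant alone cannot express.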
const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
  if (isa<SCEVConstant>(V)) return V;

  // If this instruction is evolved from a constant-evolving PHI, compute the
  // exit value from the loop without using SCEVs.
  if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) {
    if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) {
      if (PHINode *PN = dyn_cast<PHINode>(I)) {
        const Loop *LI = this->LI[I->getParent()];
        // Looking for loop exit value.
        if (LI && LI->getParentLoop() == L &&
            PN->getParent() == LI->getHeader()) {
          // Okay, there is no closed form solution for the PHI node.  Check
          // to see if the loop that contains it has a known backedge-taken
          // count.  If so, we may be able to force computation of the exit
          // value.
          const SCEV *BackedgeTakenCount = getBackedgeTakenCount(LI);
          // This trivial case can show up in some degenerate cases where
          // the incoming IR has not yet been fully simplified.
          if (BackedgeTakenCount->isZero()) {
            Value *InitValue = nullptr;
            bool MultipleInitValues = false;
            for (unsigned i = 0; i < PN->getNumIncomingValues(); i++) {
              if (!LI->contains(PN->getIncomingBlock(i))) {
                if (!InitValue)
                  InitValue = PN->getIncomingValue(i);
                else if (InitValue != PN->getIncomingValue(i)) {
                  MultipleInitValues = true;
                  break;
                }
              }
            }
            if (!MultipleInitValues && InitValue)
              return getSCEV(InitValue);
          }
          // Do we have a loop invariant value flowing around the backedge
          // for a loop which must execute the backedge?
          if (!isa<SCEVCouldNotCompute>(BackedgeTakenCount) &&
              isKnownPositive(BackedgeTakenCount) &&
              PN->getNumIncomingValues() == 2) {
            unsigned InLoopPred = LI->contains(PN->getIncomingBlock(0)) ? 0 : 1;
            const SCEV *OnBackedge = getSCEV(PN->getIncomingValue(InLoopPred));
            if (IsAvailableOnEntry(LI, DT, OnBackedge, PN->getParent()))
              return OnBackedge;
          }
          if (auto *BTCC = dyn_cast<SCEVConstant>(BackedgeTakenCount)) {
            // Okay, we know how many times the containing loop executes.  If
            // this is a constant evolving PHI node, get the final value at
            // the specified iteration number.
            Constant *RV =
                getConstantEvolutionLoopExitValue(PN, BTCC->getAPInt(), LI);
            if (RV) return getSCEV(RV);
          }
        }

        // If there is a single-input Phi, evaluate it at our scope. If we can
        // prove that this replacement does not break LCSSA form, use new value.
        if (PN->getNumOperands() == 1) {
          const SCEV *Input = getSCEV(PN->getOperand(0));
          const SCEV *InputAtScope = getSCEVAtScope(Input, L);
          // TODO: We can generalize it using LI.replacementPreservesLCSSAForm,
          // for the simplest case just support constants.
          if (isa<SCEVConstant>(InputAtScope)) return InputAtScope;
        }
      }

      // Okay, this is an expression that we cannot symbolically evaluate
      // into a SCEV.  Check to see if it's possible to symbolically evaluate
      // the arguments into constants, and if so, try to constant propagate the
      // result.  This is particularly useful for computing loop exit values.
      if (CanConstantFold(I)) {
        SmallVector<Constant *, 4> Operands;
        bool MadeImprovement = false;
        for (Value *Op : I->operands()) {
          if (Constant *C = dyn_cast<Constant>(Op)) {
            Operands.push_back(C);
            continue;
          }

          // If any of the operands is non-constant and if they are
          // non-integer and non-pointer, don't even try to analyze them
          // with scev techniques.
          if (!isSCEVable(Op->getType()))
            return V;

          const SCEV *OrigV = getSCEV(Op);
          const SCEV *OpV = getSCEVAtScope(OrigV, L);
          MadeImprovement |= OrigV != OpV;

          Constant *C = BuildConstantFromSCEV(OpV);
          if (!C) return V;
          if (C->getType() != Op->getType())
            C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
                                                              Op->getType(),
                                                              false),
                                      C, Op->getType());
          Operands.push_back(C);
        }

        // Check to see if getSCEVAtScope actually made an improvement.
        if (MadeImprovement) {
          Constant *C = nullptr;
          const DataLayout &DL = getDataLayout();
          if (const CmpInst *CI = dyn_cast<CmpInst>(I))
            C = ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
                                                Operands[1], DL, &TLI);
          else if (const LoadInst *LI = dyn_cast<LoadInst>(I)) {
            if (!LI->isVolatile())
              C = ConstantFoldLoadFromConstPtr(Operands[0], LI->getType(), DL);
          } else
            C = ConstantFoldInstOperands(I, Operands, DL, &TLI);
          if (!C) return V;
          return getSCEV(C);
        }
      }
    }

    // This is some other type of SCEVUnknown, just return it.
    return V;
  }
  if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) {
    // Avoid performing the look-up in the common case where the specified
    // expression has no loop-variant portions.
    for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) {
      const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
      if (OpAtScope != Comm->getOperand(i)) {
        // Okay, at least one of these operands is loop variant but might be
        // foldable.  Build a new instance of the folded commutative expression.
        SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(),
                                            Comm->op_begin()+i);
        NewOps.push_back(OpAtScope);

        for (++i; i != e; ++i) {
          OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
          NewOps.push_back(OpAtScope);
        }
        if (isa<SCEVAddExpr>(Comm))
          return getAddExpr(NewOps, Comm->getNoWrapFlags());
        if (isa<SCEVMulExpr>(Comm))
          return getMulExpr(NewOps, Comm->getNoWrapFlags());
        if (isa<SCEVMinMaxExpr>(Comm))
          return getMinMaxExpr(Comm->getSCEVType(), NewOps);
        llvm_unreachable("Unknown commutative SCEV type!");
      }
    }
    // If we got here, all operands are loop invariant.
    return Comm;
  }
  if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) {
    const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L);
    const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L);
    if (LHS == Div->getLHS() && RHS == Div->getRHS())
      return Div; // must be loop invariant
    return getUDivExpr(LHS, RHS);
  }
  // If this is a loop recurrence for a loop that does not contain L, then we
  // are dealing with the final value computed by the loop.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
    // First, attempt to evaluate each operand.
    // Avoid performing the look-up in the common case where the specified
    // expression has no loop-variant portions.
    for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
      const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L);
      if (OpAtScope == AddRec->getOperand(i))
        continue;

      // Okay, at least one of these operands is loop variant but might be
      // foldable.  Build a new instance of the folded commutative expression.
      SmallVector<const SCEV *, 8> NewOps(AddRec->op_begin(),
                                          AddRec->op_begin()+i);
      NewOps.push_back(OpAtScope);
      for (++i; i != e; ++i)
        NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L));

      const SCEV *FoldedRec =
          getAddRecExpr(NewOps, AddRec->getLoop(),
                        AddRec->getNoWrapFlags(SCEV::FlagNW));
      AddRec = dyn_cast<SCEVAddRecExpr>(FoldedRec);
      // The addrec may be folded to a nonrecurrence, for example, if the
      // induction variable is multiplied by zero after constant folding. Go
      // ahead and return the folded value.
      if (!AddRec)
        return FoldedRec;
      break;
    }

    // If the scope is outside the addrec's loop, evaluate it by using the
    // loop exit value of the addrec.
    if (!AddRec->getLoop()->contains(L)) {
      // To evaluate this recurrence, we need to know how many times the AddRec
      // loop iterates.  Compute this now.
      const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop());
      if (BackedgeTakenCount == getCouldNotCompute()) return AddRec;

      // Then, evaluate the AddRec.
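      // For example, the final value of {0,+,2}<%L> for a loop with a
      // backedge-taken count of 9 is 0 + 2*9 = 18.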
      return AddRec->evaluateAtIteration(BackedgeTakenCount, *this);
    }

    return AddRec;
  }

  if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) {
    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
    if (Op == Cast->getOperand())
      return Cast; // must be loop invariant
    return getZeroExtendExpr(Op, Cast->getType());
  }

  if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) {
    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
    if (Op == Cast->getOperand())
      return Cast; // must be loop invariant
    return getSignExtendExpr(Op, Cast->getType());
  }

  if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) {
    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
    if (Op == Cast->getOperand())
      return Cast; // must be loop invariant
    return getTruncateExpr(Op, Cast->getType());
  }

  llvm_unreachable("Unknown SCEV type!");
}
const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) {
  return getSCEVAtScope(getSCEV(V), L);
}
const SCEV *ScalarEvolution::stripInjectiveFunctions(const SCEV *S) const {
  if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S))
    return stripInjectiveFunctions(ZExt->getOperand());
  if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S))
    return stripInjectiveFunctions(SExt->getOperand());
  return S;
}
/// Finds the minimum unsigned root of the following equation:
///
///     A * X = B (mod N)
///
/// where N = 2^BW and BW is the common bit width of A and B. The signedness of
/// A and B isn't important.
///
/// If the equation does not have a solution, SCEVCouldNotCompute is returned.
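///
/// For example, solving 4 * X = 12 (mod 16): D = gcd(4, 16) = 4 divides 12,
/// A/D = 1 is its own multiplicative inverse modulo N/D = 4, and the minimum
/// unsigned root is (1 * 12 mod 16) / 4 = 3; indeed 4 * 3 = 12.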
static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const SCEV *B,
                                                ScalarEvolution &SE) {
  uint32_t BW = A.getBitWidth();
  assert(BW == SE.getTypeSizeInBits(B->getType()));
  assert(A != 0 && "A must be non-zero.");

  // 1. D = gcd(A, N)
  //
  // The gcd of A and N may have only one prime factor: 2. The number of
  // trailing zeros in A is its multiplicity.
  uint32_t Mult2 = A.countTrailingZeros();
  // D = 2^Mult2

  // 2. Check if B is divisible by D.
  //
  // B is divisible by D if and only if the multiplicity of prime factor 2 for B
  // is not less than multiplicity of this prime factor for D.
  if (SE.GetMinTrailingZeros(B) < Mult2)
    return SE.getCouldNotCompute();

  // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic
  // modulo (N / D).
  //
  // If D == 1, (N / D) == N == 2^BW, so we need one extra bit to represent
  // (N / D) in general. The inverse itself always fits into BW bits, though,
  // so we immediately truncate it.
  APInt AD = A.lshr(Mult2).zext(BW + 1); // AD = A / D
  APInt Mod(BW + 1, 0);
  Mod.setBit(BW - Mult2);                // Mod = N / D
  APInt I = AD.multiplicativeInverse(Mod).trunc(BW);

  // 4. Compute the minimum unsigned root of the equation:
  //     I * (B / D) mod (N / D)
  // To simplify the computation, we factor out the divide by D:
  //     (I * B mod N) / D
  const SCEV *D = SE.getConstant(APInt::getOneBitSet(BW, Mult2));
  return SE.getUDivExactExpr(SE.getMulExpr(B, SE.getConstant(I)), D);
}
/// For a given quadratic addrec, generate coefficients of the corresponding
/// quadratic equation, multiplied by a common value to ensure that they are
/// integers.
/// The returned value is a tuple { A, B, C, M, BitWidth }, where
/// Ax^2 + Bx + C is the quadratic function, M is the value that A, B and C
/// were multiplied by, and BitWidth is the bit width of the original addrec
/// coefficients.
/// This function returns None if the addrec coefficients are not compile-
/// time constants.
static Optional<std::tuple<APInt, APInt, APInt, APInt, unsigned>>
GetQuadraticEquation(const SCEVAddRecExpr *AddRec) {
  assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!");
  const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0));
  const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1));
  const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2));
  LLVM_DEBUG(dbgs() << __func__ << ": analyzing quadratic addrec: "
                    << *AddRec << '\n');

  // We currently can only solve this if the coefficients are constants.
  if (!LC || !MC || !NC) {
    LLVM_DEBUG(dbgs() << __func__ << ": coefficients are not constant\n");
    return None;
  }

  APInt L = LC->getAPInt();
  APInt M = MC->getAPInt();
  APInt N = NC->getAPInt();
  assert(!N.isNullValue() && "This is not a quadratic addrec");

  unsigned BitWidth = LC->getAPInt().getBitWidth();
  unsigned NewWidth = BitWidth + 1;
  LLVM_DEBUG(dbgs() << __func__ << ": addrec coeff bw: "
                    << BitWidth << '\n');
  // The sign-extension (as opposed to a zero-extension) here matches the
  // extension used in SolveQuadraticEquationWrap (with the same motivation).
  N = N.sext(NewWidth);
  M = M.sext(NewWidth);
  L = L.sext(NewWidth);

  // The increments are M, M+N, M+2N, ..., so the accumulated values are
  //   L+M, (L+M)+(M+N), (L+M)+(M+N)+(M+2N), ..., that is,
  //   L+M, L+2M+N, L+3M+3N, ...
  // After n iterations the accumulated value Acc is L + nM + n(n-1)/2 N.
  //
  // The equation Acc = 0 is then
  //   L + nM + n(n-1)/2 N = 0,  or  2L + 2M n + n(n-1) N = 0.
  // In a quadratic form it becomes:
  //   N n^2 + (2M-N) n + 2L = 0.
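  //
  // For example, for the chrec {0,+,1,+,2} (L = 0, M = 1, N = 2), Acc after
  // n iterations is n + n(n-1) = n^2, and the generated equation
  // 2n^2 + 0n + 0 = 0 is the accumulated value multiplied by T = 2.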
  APInt A = N;
  APInt B = 2 * M - A;
  APInt C = 2 * L;
  APInt T = APInt(NewWidth, 2);
  LLVM_DEBUG(dbgs() << __func__ << ": equation " << A << "x^2 + " << B
                    << "x + " << C << ", coeff bw: " << NewWidth
                    << ", multiplied by " << T << '\n');
  return std::make_tuple(A, B, C, T, BitWidth);
}
/// Helper function to compare optional APInts:
/// (a) if X and Y both exist, return min(X, Y),
/// (b) if neither X nor Y exist, return None,
/// (c) if exactly one of X and Y exists, return that value.
static Optional<APInt> MinOptional(Optional<APInt> X, Optional<APInt> Y) {
  if (X.hasValue() && Y.hasValue()) {
    unsigned W = std::max(X->getBitWidth(), Y->getBitWidth());
    APInt XW = X->sextOrSelf(W);
    APInt YW = Y->sextOrSelf(W);
    return XW.slt(YW) ? *X : *Y;
  }
  if (!X.hasValue() && !Y.hasValue())
    return None;
  return X.hasValue() ? *X : *Y;
}
/// Helper function to truncate an optional APInt to a given BitWidth.
/// When solving addrec-related equations, it is preferable to return a value
/// that has the same bit width as the original addrec's coefficients. If the
/// solution fits in the original bit width, truncate it (except for i1).
/// Returning a value of a different bit width may inhibit some optimizations.
///
/// In general, a solution to a quadratic equation generated from an addrec
/// may require BW+1 bits, where BW is the bit width of the addrec's
/// coefficients. The reason is that the coefficients of the quadratic
/// equation are BW+1 bits wide (to avoid truncation when converting from
/// the addrec to the equation).
static Optional<APInt> TruncIfPossible(Optional<APInt> X, unsigned BitWidth) {
  if (!X.hasValue())
    return None;

  unsigned W = X->getBitWidth();
  if (BitWidth > 1 && BitWidth < W && X->isIntN(BitWidth))
    return X->trunc(BitWidth);

  return X;
}
/// Let c(n) be the value of the quadratic chrec {L,+,M,+,N} after n
/// iterations. The values L, M, N are assumed to be signed, and they
/// should all have the same bit widths.
/// Find the least n >= 0 such that c(n) = 0 in the arithmetic modulo 2^BW,
/// where BW is the bit width of the addrec's coefficients.
/// If the calculated value is a BW-bit integer (for BW > 1), it will be
/// returned as such, otherwise the bit width of the returned value may
/// be greater than BW.
///
/// This function returns None if
/// (a) the addrec coefficients are not constant, or
/// (b) SolveQuadraticEquationWrap was unable to find a solution. For cases
///     like x^2 = 5, no integer solutions exist, in other cases an integer
///     solution may exist, but SolveQuadraticEquationWrap may fail to find it.
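///
/// For example, for the chrec {-4,+,1,+,2}, c(n) = n^2 - 4, and the least
/// n >= 0 with c(n) = 0 is n = 2.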
static Optional<APInt>
SolveQuadraticAddRecExact(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
  APInt A, B, C, M;
  unsigned BitWidth;
  auto T = GetQuadraticEquation(AddRec);
  if (!T.hasValue())
    return None;

  std::tie(A, B, C, M, BitWidth) = *T;
  LLVM_DEBUG(dbgs() << __func__ << ": solving for unsigned overflow\n");
  Optional<APInt> X = APIntOps::SolveQuadraticEquationWrap(A, B, C, BitWidth+1);
  if (!X.hasValue())
    return None;

  ConstantInt *CX = ConstantInt::get(SE.getContext(), *X);
  ConstantInt *V = EvaluateConstantChrecAtConstant(AddRec, CX, SE);
  if (!V->isZero())
    return None;

  return TruncIfPossible(X, BitWidth);
}
/// Let c(n) be the value of the quadratic chrec {0,+,M,+,N} after n
/// iterations. The values M, N are assumed to be signed, and they
/// should all have the same bit widths.
/// Find the least n such that c(n) does not belong to the given range,
/// while c(n-1) does.
///
/// This function returns None if
/// (a) the addrec coefficients are not constant, or
/// (b) SolveQuadraticEquationWrap was unable to find a solution for the
///     bounds of the range.
static Optional<APInt>
SolveQuadraticAddRecRange(const SCEVAddRecExpr *AddRec,
                          const ConstantRange &Range, ScalarEvolution &SE) {
  assert(AddRec->getOperand(0)->isZero() &&
         "Starting value of addrec should be 0");
  LLVM_DEBUG(dbgs() << __func__ << ": solving boundary crossing for range "
                    << Range << ", addrec " << *AddRec << '\n');
  // This case is handled in getNumIterationsInRange. Here we can assume that
  // we start in the range.
  assert(Range.contains(APInt(SE.getTypeSizeInBits(AddRec->getType()), 0)) &&
         "Addrec's initial value should be in range");

  APInt A, B, C, M;
  unsigned BitWidth;
  auto T = GetQuadraticEquation(AddRec);
  if (!T.hasValue())
    return None;

  // Be careful about the return value: there can be two reasons for not
  // returning an actual number. First, if no solutions to the equations
  // were found, and second, if the solutions don't leave the given range.
  // The first case means that the actual solution is "unknown", the second
  // means that it's known, but not valid. If the solution is unknown, we
  // cannot make any conclusions.
  // Return a pair: the optional solution and a flag indicating if the
  // solution was found.
  auto SolveForBoundary = [&](APInt Bound) -> std::pair<Optional<APInt>,bool> {
    // Solve for signed overflow and unsigned overflow, pick the lower
    // solution.
    LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: checking boundary "
                      << Bound << " (before multiplying by " << M << ")\n");
    Bound *= M; // The quadratic equation multiplier.

    Optional<APInt> SO = None;
    if (BitWidth > 1) {
      LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for "
                           "signed overflow\n");
      SO = APIntOps::SolveQuadraticEquationWrap(A, B, -Bound, BitWidth);
    }
    LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for "
                         "unsigned overflow\n");
    Optional<APInt> UO = APIntOps::SolveQuadraticEquationWrap(A, B, -Bound,
                                                              BitWidth + 1);

    auto LeavesRange = [&] (const APInt &X) {
      ConstantInt *C0 = ConstantInt::get(SE.getContext(), X);
      ConstantInt *V0 = EvaluateConstantChrecAtConstant(AddRec, C0, SE);
      if (Range.contains(V0->getValue()))
        return false;
      // X should be at least 1, so X-1 is non-negative.
      ConstantInt *C1 = ConstantInt::get(SE.getContext(), X-1);
      ConstantInt *V1 = EvaluateConstantChrecAtConstant(AddRec, C1, SE);
      if (Range.contains(V1->getValue()))
        return true;
      return false;
    };

    // If SolveQuadraticEquationWrap returns None, it means that there can
    // be a solution, but the function failed to find it. We cannot treat it
    // as "no solution".
    if (!SO.hasValue() || !UO.hasValue())
      return { None, false };

    // Check the smaller value first to see if it leaves the range.
    // At this point, both SO and UO must have values.
    Optional<APInt> Min = MinOptional(SO, UO);
    if (LeavesRange(*Min))
      return { Min, true };
    Optional<APInt> Max = Min == SO ? UO : SO;
    if (LeavesRange(*Max))
      return { Max, true };

    // Solutions were found, but were eliminated, hence the "true".
    return { None, true };
  };
  std::tie(A, B, C, M, BitWidth) = *T;
  // Lower bound is inclusive, subtract 1 to represent the exiting value.
  APInt Lower = Range.getLower().sextOrSelf(A.getBitWidth()) - 1;
  APInt Upper = Range.getUpper().sextOrSelf(A.getBitWidth());
  auto SL = SolveForBoundary(Lower);
  auto SU = SolveForBoundary(Upper);
  // If any of the solutions was unknown, no meaningful conclusions can
  // be made.
  if (!SL.second || !SU.second)
    return None;

  // Claim: The correct solution is not some value between Min and Max.
  //
  // Justification: Assuming that Min and Max are different values, one of
  // them is when the first signed overflow happens, the other is when the
  // first unsigned overflow happens. Crossing the range boundary is only
  // possible via an overflow (treating 0 as a special case of it, modeling
  // an overflow as crossing k*2^W for some k).
  //
  // The interesting case here is when Min was eliminated as an invalid
  // solution, but Max was not. The argument is that if there was another
  // overflow between Min and Max, it would also have been eliminated if
  // it was considered.
  //
  // For a given boundary, it is possible to have two overflows of the same
  // type (signed/unsigned) without having the other type in between: this
  // can happen when the vertex of the parabola is between the iterations
  // corresponding to the overflows. This is only possible when the two
  // overflows cross k*2^W for the same k. In such case, if the second one
  // left the range (and was the first one to do so), the first overflow
  // would have to enter the range, which would mean that either we had left
  // the range before or that we started outside of it. Both of these cases
  // are contradictions.
  //
  // Claim: In the case where SolveForBoundary returns None, the correct
  // solution is not some value between the Max for this boundary and the
  // Min of the other boundary.
  //
  // Justification: Assume that we had such Max_A and Min_B corresponding
  // to range boundaries A and B and such that Max_A < Min_B. If there was
  // a solution between Max_A and Min_B, it would have to be caused by an
  // overflow corresponding to either A or B. It cannot correspond to B,
  // since Min_B is the first occurrence of such an overflow. If it
  // corresponded to A, it would have to be either a signed or an unsigned
  // overflow that is larger than both eliminated overflows for A. But
  // between the eliminated overflows and this overflow, the values would
  // cover the entire value space, thus crossing the other boundary, which
  // is a contradiction.
  //
  return TruncIfPossible(MinOptional(SL.first, SU.first), BitWidth);
}
ScalarEvolution::ExitLimit
ScalarEvolution::howFarToZero(const SCEV *V, const Loop *L, bool ControlsExit,
                              bool AllowPredicates) {

  // This is only used for loops with a "x != y" exit test. The exit condition
  // is now expressed as a single expression, V = x-y. So the exit test is
  // effectively V != 0. We know and take advantage of the fact that this
  // expression is only used in a comparison-with-zero context.

  SmallPtrSet<const SCEVPredicate *, 4> Predicates;
  // If the value is a constant
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
    // If the value is already zero, the branch will execute zero times.
    if (C->getValue()->isZero()) return C;
    return getCouldNotCompute(); // Otherwise it will loop infinitely.
  }
  const SCEVAddRecExpr *AddRec =
      dyn_cast<SCEVAddRecExpr>(stripInjectiveFunctions(V));

  if (!AddRec && AllowPredicates)
    // Try to make this an AddRec using runtime tests, in the first X
    // iterations of this loop, where X is the SCEV expression found by the
    // algorithm below.
    AddRec = convertSCEVToAddRecWithPredicates(V, L, Predicates);

  if (!AddRec || AddRec->getLoop() != L)
    return getCouldNotCompute();

  // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
  // the quadratic equation to solve it.
  if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) {
    // We can only use this value if the chrec ends up with an exact zero
    // value at this index.  When solving for "X*X != 5", for example, we
    // should not accept a root of 2.
    if (auto S = SolveQuadraticAddRecExact(AddRec, *this)) {
      const auto *R = cast<SCEVConstant>(getConstant(S.getValue()));
      return ExitLimit(R, R, false, Predicates);
    }
    return getCouldNotCompute();
  }

  // Otherwise we can only handle this if it is affine.
  if (!AddRec->isAffine())
    return getCouldNotCompute();
  // If this is an affine expression, the execution count of this branch is
  // the minimum unsigned root of the following equation:
  //
  //     Start + Step*N = 0 (mod 2^BW)
  //
  // equivalent to:
  //
  //             Step*N = -Start (mod 2^BW)
  //
  // where BW is the common bit width of Start and Step.
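  //
  // For example, for {3,+,5} over i8, solving 5*N = -3 (mod 256) gives
  // N = 153, the unique solution modulo 256, since 3 + 5*153 = 768 = 3*256.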

  // Get the initial value for the loop.
  const SCEV *Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop());
  const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop());

  // For now we handle only constant steps.
  //
  // TODO: Handle a nonconstant Step given AddRec<NUW>. If the
  // AddRec is NUW, then (in an unsigned sense) it cannot be counting up to wrap
  // to 0, it must be counting down to equal 0. Consequently, N = Start / -Step.
  // We have not yet seen any such cases.
  const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step);
  if (!StepC || StepC->getValue()->isZero())
    return getCouldNotCompute();

  // For positive steps (counting up until unsigned overflow):
  //   N = -Start/Step (as unsigned)
  // For negative steps (counting down to zero):
  //   N = Start/-Step (as unsigned)
  // First compute the unsigned distance from zero in the direction of Step.
  bool CountDown = StepC->getAPInt().isNegative();
  const SCEV *Distance = CountDown ? Start : getNegativeSCEV(Start);

  // Handle unitary steps, which cannot wraparound.
  // 1*N = -Start; -1*N = Start (mod 2^BW), so:
  //   N = Distance (as unsigned)
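  //
  // For example, {10,+,-1} counts 10, 9, ..., 1, 0, reaching zero after
  // exactly Distance = 10 iterations.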
  if (StepC->getValue()->isOne() || StepC->getValue()->isMinusOne()) {
    APInt MaxBECount = getUnsignedRangeMax(Distance);

    // When a loop like "for (int i = 0; i != n; ++i) { /* body */ }" is
    // rotated, we end up with a loop whose backedge-taken count is n - 1.
    // Detect this case, and see if we can improve the bound.
    //
    // Explicitly handling this here is necessary because getUnsignedRange
    // isn't context-sensitive; it doesn't know that we only care about the
    // range inside the loop.
    const SCEV *Zero = getZero(Distance->getType());
    const SCEV *One = getOne(Distance->getType());
    const SCEV *DistancePlusOne = getAddExpr(Distance, One);
    if (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_NE, DistancePlusOne, Zero)) {
      // If Distance + 1 doesn't overflow, we can compute the maximum distance
      // as "unsigned_max(Distance + 1) - 1".
      ConstantRange CR = getUnsignedRange(DistancePlusOne);
      MaxBECount = APIntOps::umin(MaxBECount, CR.getUnsignedMax() - 1);
    }

    return ExitLimit(Distance, getConstant(MaxBECount), false, Predicates);
  }

  // If the condition controls loop exit (the loop exits only if the expression
  // is true) and the addition is no-wrap we can use unsigned divide to
  // compute the backedge count.  In this case, the step may not divide the
  // distance, but we don't care because if the condition is "missed" the loop
  // will have undefined behavior due to wrapping.
  if (ControlsExit && AddRec->hasNoSelfWrap() &&
      loopHasNoAbnormalExits(AddRec->getLoop())) {
    const SCEV *Exact =
        getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step);
    const SCEV *Max = Exact == getCouldNotCompute()
                          ? Exact
                          : getConstant(getUnsignedRangeMax(Exact));
    return ExitLimit(Exact, Max, false, Predicates);
  }

  // Solve the general equation.
  const SCEV *E = SolveLinEquationWithOverflow(StepC->getAPInt(),
                                               getNegativeSCEV(Start), *this);
  const SCEV *M = E == getCouldNotCompute()
                      ? E
                      : getConstant(getUnsignedRangeMax(E));
  return ExitLimit(E, M, false, Predicates);
}
ScalarEvolution::ExitLimit
ScalarEvolution::howFarToNonZero(const SCEV *V, const Loop *L) {
  // Loops that look like: while (X == 0) are very strange indeed.  We don't
  // handle them yet except for the trivial case.  This could be expanded in the
  // future as needed.

  // If the value is a constant, check to see if it is known to be non-zero
  // already.  If so, the backedge will execute zero times.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
    if (!C->getValue()->isZero())
      return getZero(C->getType());
    return getCouldNotCompute(); // Otherwise it will loop infinitely.
  }

  // We could implement others, but I really doubt anyone writes loops like
  // this, and if they did, they would already be constant folded.
  return getCouldNotCompute();
}
std::pair<BasicBlock *, BasicBlock *>
ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) {
  // If the block has a unique predecessor, then there is no path from the
  // predecessor to the block that does not go through the direct edge
  // from the predecessor to the block.
  if (BasicBlock *Pred = BB->getSinglePredecessor())
    return {Pred, BB};

  // A loop's header is defined to be a block that dominates the loop.
  // If the header has a unique predecessor outside the loop, it must be
  // a block that has exactly one successor that can reach the loop.
  if (Loop *L = LI.getLoopFor(BB))
    return {L->getLoopPredecessor(), L->getHeader()};

  return {nullptr, nullptr};
}
/// SCEV structural equivalence is usually sufficient for testing whether two
/// expressions are equal, however for the purposes of looking for a condition
/// guarding a loop, it can be useful to be a little more general, since a
/// front-end may have replicated the controlling expression.
static bool HasSameValue(const SCEV *A, const SCEV *B) {
  // Quick check to see if they are the same SCEV.
  if (A == B) return true;

  auto ComputesEqualValues = [](const Instruction *A, const Instruction *B) {
    // Not all instructions that are "identical" compute the same value.  For
    // instance, two distinct alloca instructions allocating the same type are
    // identical and do not read memory; but compute distinct values.
    return A->isIdenticalTo(B) &&
           (isa<BinaryOperator>(A) || isa<GetElementPtrInst>(A));
  };

  // Otherwise, if they're both SCEVUnknown, it's possible that they hold
  // two different instructions with the same value. Check for this case.
  if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A))
    if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B))
      if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue()))
        if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue()))
          if (ComputesEqualValues(AI, BI))
            return true;

  // Otherwise assume they may have a different value.
  return false;
}
bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred,
                                           const SCEV *&LHS, const SCEV *&RHS,
                                           unsigned Depth) {
  bool Changed = false;
  // Simplifies ICMP to trivial true or false by turning it into '0 == 0' or
  // '0 != 0'.
  auto TrivialCase = [&](bool TriviallyTrue) {
    LHS = RHS = getConstant(ConstantInt::getFalse(getContext()));
    Pred = TriviallyTrue ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE;
    return true;
  };
  // If we hit the max recursion limit bail out.
  if (Depth >= 3)
    return false;
8868 // If we hit the max recursion limit bail out.
8872 // Canonicalize a constant to the right side.
8873 if (const SCEVConstant
*LHSC
= dyn_cast
<SCEVConstant
>(LHS
)) {
8874 // Check for both operands constant.
8875 if (const SCEVConstant
*RHSC
= dyn_cast
<SCEVConstant
>(RHS
)) {
8876 if (ConstantExpr::getICmp(Pred
,
8878 RHSC
->getValue())->isNullValue())
8879 return TrivialCase(false);
8881 return TrivialCase(true);
8883 // Otherwise swap the operands to put the constant on the right.
8884 std::swap(LHS
, RHS
);
8885 Pred
= ICmpInst::getSwappedPredicate(Pred
);
8889 // If we're comparing an addrec with a value which is loop-invariant in the
8890 // addrec's loop, put the addrec on the left. Also make a dominance check,
8891 // as both operands could be addrecs loop-invariant in each other's loop.
8892 if (const SCEVAddRecExpr
*AR
= dyn_cast
<SCEVAddRecExpr
>(RHS
)) {
8893 const Loop
*L
= AR
->getLoop();
8894 if (isLoopInvariant(LHS
, L
) && properlyDominates(LHS
, L
->getHeader())) {
8895 std::swap(LHS
, RHS
);
8896 Pred
= ICmpInst::getSwappedPredicate(Pred
);

  // If there's a constant operand, canonicalize comparisons with boundary
  // cases, and canonicalize *-or-equal comparisons to regular comparisons.
  if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) {
    const APInt &RA = RC->getAPInt();

    bool SimplifiedByConstantRange = false;

    if (!ICmpInst::isEquality(Pred)) {
      ConstantRange ExactCR = ConstantRange::makeExactICmpRegion(Pred, RA);
      if (ExactCR.isFullSet())
        return TrivialCase(true);
      else if (ExactCR.isEmptySet())
        return TrivialCase(false);

      APInt NewRHS;
      CmpInst::Predicate NewPred;
      if (ExactCR.getEquivalentICmp(NewPred, NewRHS) &&
          ICmpInst::isEquality(NewPred)) {
        // We were able to convert an inequality to an equality.
        Pred = NewPred;
        RHS = getConstant(NewRHS);
        Changed = SimplifiedByConstantRange = true;
      }
    }

    if (!SimplifiedByConstantRange) {
      switch (Pred) {
      default:
        break;
      case ICmpInst::ICMP_EQ:
      case ICmpInst::ICMP_NE:
        // Fold ((-1) * %a) + %b == 0 (equivalent to %b-%a == 0) into %a == %b.
        if (!RA)
          if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(LHS))
            if (const SCEVMulExpr *ME =
                    dyn_cast<SCEVMulExpr>(AE->getOperand(0)))
              if (AE->getNumOperands() == 2 && ME->getNumOperands() == 2 &&
                  ME->getOperand(0)->isAllOnesValue()) {
                RHS = AE->getOperand(1);
                LHS = ME->getOperand(1);
                Changed = true;
              }
        break;


        // The "Should have been caught earlier!" messages refer to the fact
        // that the ExactCR.isFullSet() or ExactCR.isEmptySet() check above
        // should have fired on the corresponding cases, and canonicalized the
        // check to trivial case.

      case ICmpInst::ICMP_UGE:
        assert(!RA.isMinValue() && "Should have been caught earlier!");
        Pred = ICmpInst::ICMP_UGT;
        RHS = getConstant(RA - 1);
        Changed = true;
        break;
      case ICmpInst::ICMP_ULE:
        assert(!RA.isMaxValue() && "Should have been caught earlier!");
        Pred = ICmpInst::ICMP_ULT;
        RHS = getConstant(RA + 1);
        Changed = true;
        break;
      case ICmpInst::ICMP_SGE:
        assert(!RA.isMinSignedValue() && "Should have been caught earlier!");
        Pred = ICmpInst::ICMP_SGT;
        RHS = getConstant(RA - 1);
        Changed = true;
        break;
      case ICmpInst::ICMP_SLE:
        assert(!RA.isMaxSignedValue() && "Should have been caught earlier!");
        Pred = ICmpInst::ICMP_SLT;
        RHS = getConstant(RA + 1);
        Changed = true;
        break;
      }
    }
  }

  // Check for obvious equality.
  if (HasSameValue(LHS, RHS)) {
    if (ICmpInst::isTrueWhenEqual(Pred))
      return TrivialCase(true);
    if (ICmpInst::isFalseWhenEqual(Pred))
      return TrivialCase(false);
  }

  // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by
  // adding or subtracting 1 from one of the operands.
  switch (Pred) {
  case ICmpInst::ICMP_SLE:
    if (!getSignedRangeMax(RHS).isMaxSignedValue()) {
      RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
                       SCEV::FlagNSW);
      Pred = ICmpInst::ICMP_SLT;
      Changed = true;
    } else if (!getSignedRangeMin(LHS).isMinSignedValue()) {
      LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS,
                       SCEV::FlagNSW);
      Pred = ICmpInst::ICMP_SLT;
      Changed = true;
    }
    break;
  case ICmpInst::ICMP_SGE:
    if (!getSignedRangeMin(RHS).isMinSignedValue()) {
      RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS,
                       SCEV::FlagNSW);
      Pred = ICmpInst::ICMP_SGT;
      Changed = true;
    } else if (!getSignedRangeMax(LHS).isMaxSignedValue()) {
      LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
                       SCEV::FlagNSW);
      Pred = ICmpInst::ICMP_SGT;
      Changed = true;
    }
    break;
  case ICmpInst::ICMP_ULE:
    if (!getUnsignedRangeMax(RHS).isMaxValue()) {
      RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
                       SCEV::FlagNUW);
      Pred = ICmpInst::ICMP_ULT;
      Changed = true;
    } else if (!getUnsignedRangeMin(LHS).isMinValue()) {
      LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS);
      Pred = ICmpInst::ICMP_ULT;
      Changed = true;
    }
    break;
  case ICmpInst::ICMP_UGE:
    if (!getUnsignedRangeMin(RHS).isMinValue()) {
      RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS);
      Pred = ICmpInst::ICMP_UGT;
      Changed = true;
    } else if (!getUnsignedRangeMax(LHS).isMaxValue()) {
      LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
                       SCEV::FlagNUW);
      Pred = ICmpInst::ICMP_UGT;
      Changed = true;
    }
    break;
  default:
    break;
  }

  // TODO: More simplifications are possible here.

  // Recursively simplify until we either hit a recursion limit or nothing
  // changes.
  if (Changed)
    return SimplifyICmpOperands(Pred, LHS, RHS, Depth+1);

  return Changed;
}
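
// For example, "%n u<= 5" is canonicalized to "%n u< 6", and "5 s< %n" is
// swapped to "%n s> 5" so the constant ends up on the right-hand side.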
bool ScalarEvolution::isKnownNegative(const SCEV *S) {
  return getSignedRangeMax(S).isNegative();
}

bool ScalarEvolution::isKnownPositive(const SCEV *S) {
  return getSignedRangeMin(S).isStrictlyPositive();
}

bool ScalarEvolution::isKnownNonNegative(const SCEV *S) {
  return !getSignedRangeMin(S).isNegative();
}

bool ScalarEvolution::isKnownNonPositive(const SCEV *S) {
  return !getSignedRangeMax(S).isStrictlyPositive();
}

bool ScalarEvolution::isKnownNonZero(const SCEV *S) {
  return isKnownNegative(S) || isKnownPositive(S);
}
std::pair<const SCEV *, const SCEV *>
ScalarEvolution::SplitIntoInitAndPostInc(const Loop *L, const SCEV *S) {
  // Compute SCEV on entry of loop L.
  const SCEV *Start = SCEVInitRewriter::rewrite(S, L, *this);
  if (Start == getCouldNotCompute())
    return { Start, Start };
  // Compute post increment SCEV for loop L.
  const SCEV *PostInc = SCEVPostIncRewriter::rewrite(S, L, *this);
  assert(PostInc != getCouldNotCompute() && "Unexpected could not compute");
  return { Start, PostInc };
}
bool ScalarEvolution::isKnownViaInduction(ICmpInst::Predicate Pred,
                                          const SCEV *LHS, const SCEV *RHS) {
  // First collect all loops.
  SmallPtrSet<const Loop *, 8> LoopsUsed;
  getUsedLoops(LHS, LoopsUsed);
  getUsedLoops(RHS, LoopsUsed);

  if (LoopsUsed.empty())
    return false;

  // Domination relationship must be a linear order on collected loops.
#ifndef NDEBUG
  for (auto *L1 : LoopsUsed)
    for (auto *L2 : LoopsUsed)
      assert((DT.dominates(L1->getHeader(), L2->getHeader()) ||
              DT.dominates(L2->getHeader(), L1->getHeader())) &&
             "Domination relationship is not a linear order");
#endif

  const Loop *MDL =
      *std::max_element(LoopsUsed.begin(), LoopsUsed.end(),
                        [&](const Loop *L1, const Loop *L2) {
         return DT.properlyDominates(L1->getHeader(), L2->getHeader());
       });

  // Get init and post increment value for LHS.
  auto SplitLHS = SplitIntoInitAndPostInc(MDL, LHS);
  // If LHS contains an unknown non-invariant SCEV then bail out.
  if (SplitLHS.first == getCouldNotCompute())
    return false;
  assert (SplitLHS.second != getCouldNotCompute() && "Unexpected CNC");
  // Get init and post increment value for RHS.
  auto SplitRHS = SplitIntoInitAndPostInc(MDL, RHS);
  // If RHS contains an unknown non-invariant SCEV then bail out.
  if (SplitRHS.first == getCouldNotCompute())
    return false;
  assert (SplitRHS.second != getCouldNotCompute() && "Unexpected CNC");
  // It is possible that init SCEV contains an invariant load but it does
  // not dominate MDL and is not available at MDL loop entry, so we should
  // check it here.
  if (!isAvailableAtLoopEntry(SplitLHS.first, MDL) ||
      !isAvailableAtLoopEntry(SplitRHS.first, MDL))
    return false;

  return isLoopEntryGuardedByCond(MDL, Pred, SplitLHS.first, SplitRHS.first) &&
         isLoopBackedgeGuardedByCond(MDL, Pred, SplitLHS.second,
                                     SplitRHS.second);
}
bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred,
                                       const SCEV *LHS, const SCEV *RHS) {
  // Canonicalize the inputs first.
  (void)SimplifyICmpOperands(Pred, LHS, RHS);

  if (isKnownViaInduction(Pred, LHS, RHS))
    return true;

  if (isKnownPredicateViaSplitting(Pred, LHS, RHS))
    return true;

  // Otherwise see what can be done with some simple reasoning.
  return isKnownViaNonRecursiveReasoning(Pred, LHS, RHS);
}
bool ScalarEvolution::isKnownOnEveryIteration(ICmpInst::Predicate Pred,
                                              const SCEVAddRecExpr *LHS,
                                              const SCEV *RHS) {
  const Loop *L = LHS->getLoop();
  return isLoopEntryGuardedByCond(L, Pred, LHS->getStart(), RHS) &&
         isLoopBackedgeGuardedByCond(L, Pred, LHS->getPostIncExpr(*this), RHS);
}
bool ScalarEvolution::isMonotonicPredicate(const SCEVAddRecExpr *LHS,
                                           ICmpInst::Predicate Pred,
                                           bool &Increasing) {
  bool Result = isMonotonicPredicateImpl(LHS, Pred, Increasing);

#ifndef NDEBUG
  // Verify an invariant: inverting the predicate should turn a monotonically
  // increasing change to a monotonically decreasing one, and vice versa.
  bool IncreasingSwapped;
  bool ResultSwapped = isMonotonicPredicateImpl(
      LHS, ICmpInst::getSwappedPredicate(Pred), IncreasingSwapped);

  assert(Result == ResultSwapped && "should be able to analyze both!");
  if (ResultSwapped)
    assert(Increasing == !IncreasingSwapped &&
           "monotonicity should flip as we flip the predicate");
#endif

  return Result;
}
bool ScalarEvolution::isMonotonicPredicateImpl(const SCEVAddRecExpr *LHS,
                                               ICmpInst::Predicate Pred,
                                               bool &Increasing) {

  // A zero step value for LHS means the induction variable is essentially a
  // loop invariant value. We don't really depend on the predicate actually
  // flipping from false to true (for increasing predicates, and the other way
  // around for decreasing predicates), all we care about is that *if* the
  // predicate changes then it only changes from false to true.
  //
  // A zero step value in itself is not very useful, but there may be places
  // where SCEV can prove X >= 0 but not prove X > 0, so it is helpful to be
  // as general as possible.

  switch (Pred) {
  default:
    return false; // Conservative answer

  case ICmpInst::ICMP_UGT:
  case ICmpInst::ICMP_UGE:
  case ICmpInst::ICMP_ULT:
  case ICmpInst::ICMP_ULE:
    if (!LHS->hasNoUnsignedWrap())
      return false;

    Increasing = Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE;
    return true;

  case ICmpInst::ICMP_SGT:
  case ICmpInst::ICMP_SGE:
  case ICmpInst::ICMP_SLT:
  case ICmpInst::ICMP_SLE: {
    if (!LHS->hasNoSignedWrap())
      return false;

    const SCEV *Step = LHS->getStepRecurrence(*this);

    if (isKnownNonNegative(Step)) {
      Increasing = Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE;
      return true;
    }

    if (isKnownNonPositive(Step)) {
      Increasing = Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE;
      return true;
    }

    return false;
  }
  }

  llvm_unreachable("switch has default clause!");
}
bool ScalarEvolution::isLoopInvariantPredicate(
    ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L,
    ICmpInst::Predicate &InvariantPred, const SCEV *&InvariantLHS,
    const SCEV *&InvariantRHS) {

  // If there is a loop-invariant, force it into the RHS, otherwise bail out.
  if (!isLoopInvariant(RHS, L)) {
    if (!isLoopInvariant(LHS, L))
      return false;

    std::swap(LHS, RHS);
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }

  const SCEVAddRecExpr *ArLHS = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!ArLHS || ArLHS->getLoop() != L)
    return false;

  bool Increasing;
  if (!isMonotonicPredicate(ArLHS, Pred, Increasing))
    return false;

  // If the predicate "ArLHS `Pred` RHS" monotonically increases from false to
  // true as the loop iterates, and the backedge is control dependent on
  // "ArLHS `Pred` RHS" == true then we can reason as follows:
  //
  //   - If the predicate was false in the first iteration then the predicate
  //     is never evaluated again, since the loop exits without taking the
  //     backedge.
  //   - If the predicate was true in the first iteration then it will
  //     continue to be true for all future iterations since it is
  //     monotonically increasing.
  //
  // For both the above possibilities, we can replace the loop varying
  // predicate with its value on the first iteration of the loop (which is
  // loop invariant).
  //
  // A similar reasoning applies for a monotonically decreasing predicate, by
  // replacing true with false and false with true in the above two bullets.

  auto P = Increasing ? Pred : ICmpInst::getInversePredicate(Pred);

  if (!isLoopBackedgeGuardedByCond(L, P, LHS, RHS))
    return false;

  InvariantPred = Pred;
  InvariantLHS = ArLHS->getStart();
  InvariantRHS = RHS;
  return true;
}
bool ScalarEvolution::isKnownPredicateViaConstantRanges(
    ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) {
  if (HasSameValue(LHS, RHS))
    return ICmpInst::isTrueWhenEqual(Pred);

  // This code is split out from isKnownPredicate because it is called from
  // within isLoopEntryGuardedByCond.

  auto CheckRanges =
      [&](const ConstantRange &RangeLHS, const ConstantRange &RangeRHS) {
    return ConstantRange::makeSatisfyingICmpRegion(Pred, RangeRHS)
        .contains(RangeLHS);
  };

  // The check at the top of the function catches the case where the values are
  // known to be equal.
  if (Pred == CmpInst::ICMP_EQ)
    return false;

  if (Pred == CmpInst::ICMP_NE)
    return CheckRanges(getSignedRange(LHS), getSignedRange(RHS)) ||
           CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS)) ||
           isKnownNonZero(getMinusSCEV(LHS, RHS));

  if (CmpInst::isSigned(Pred))
    return CheckRanges(getSignedRange(LHS), getSignedRange(RHS));

  return CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS));
}
bool ScalarEvolution::isKnownPredicateViaNoOverflow(ICmpInst::Predicate Pred,
                                                    const SCEV *LHS,
                                                    const SCEV *RHS) {
  // Match Result to (X + Y)<ExpectedFlags> where Y is a constant integer.
  // Return Y via OutY.
  auto MatchBinaryAddToConst =
      [this](const SCEV *Result, const SCEV *X, APInt &OutY,
             SCEV::NoWrapFlags ExpectedFlags) {
    const SCEV *NonConstOp, *ConstOp;
    SCEV::NoWrapFlags FlagsPresent;

    if (!splitBinaryAdd(Result, ConstOp, NonConstOp, FlagsPresent) ||
        !isa<SCEVConstant>(ConstOp) || NonConstOp != X)
      return false;

    OutY = cast<SCEVConstant>(ConstOp)->getAPInt();
    return (FlagsPresent & ExpectedFlags) == ExpectedFlags;
  };

  APInt C;

  switch (Pred) {
  default:
    break;

  case ICmpInst::ICMP_SGE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_SLE:
    // X s<= (X + C)<nsw> if C >= 0
    if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNSW) && C.isNonNegative())
      return true;

    // (X + C)<nsw> s<= X if C <= 0
    if (MatchBinaryAddToConst(LHS, RHS, C, SCEV::FlagNSW) &&
        !C.isStrictlyPositive())
      return true;
    break;

  case ICmpInst::ICMP_SGT:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_SLT:
    // X s< (X + C)<nsw> if C > 0
    if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNSW) &&
        C.isStrictlyPositive())
      return true;

    // (X + C)<nsw> s< X if C < 0
    if (MatchBinaryAddToConst(LHS, RHS, C, SCEV::FlagNSW) && C.isNegative())
      return true;
    break;
  }

  return false;
}
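
// For example, X s< (X + 1)<nsw> always holds, because the nsw flag rules
// out the signed wrap that could otherwise make X + 1 smaller than X.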
bool ScalarEvolution::isKnownPredicateViaSplitting(ICmpInst::Predicate Pred,
                                                   const SCEV *LHS,
                                                   const SCEV *RHS) {
  if (Pred != ICmpInst::ICMP_ULT || ProvingSplitPredicate)
    return false;

  // Allowing arbitrary number of activations of isKnownPredicateViaSplitting on
  // the stack can result in exponential time complexity.
  SaveAndRestore<bool> Restore(ProvingSplitPredicate, true);

  // If L >= 0 then I `ult` L <=> I >= 0 && I `slt` L
  //
  // To prove L >= 0 we use isKnownNonNegative whereas to prove I >= 0 we use
  // isKnownPredicate.  isKnownPredicate is more powerful, but also more
  // expensive; and using isKnownNonNegative(RHS) is sufficient for most of the
  // interesting cases seen in practice.  We can consider "upgrading" L >= 0 to
  // use isKnownPredicate later if needed.
  return isKnownNonNegative(RHS) &&
         isKnownPredicate(CmpInst::ICMP_SGE, LHS, getZero(LHS->getType())) &&
         isKnownPredicate(CmpInst::ICMP_SLT, LHS, RHS);
}
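
// For example, proving 0 s<= I, I s< L, and 0 <= L establishes I u< L,
// because non-negative signed values order the same way as unsigned ones.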
bool ScalarEvolution::isImpliedViaGuard(BasicBlock *BB,
                                        ICmpInst::Predicate Pred,
                                        const SCEV *LHS, const SCEV *RHS) {
  // No need to even try if we know the module has no guards.
  if (!HasGuards)
    return false;

  return any_of(*BB, [&](Instruction &I) {
    using namespace llvm::PatternMatch;

    Value *Condition;
    return match(&I, m_Intrinsic<Intrinsic::experimental_guard>(
                         m_Value(Condition))) &&
           isImpliedCond(Pred, LHS, RHS, Condition, false);
  });
}
/// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is
/// protected by a conditional between LHS and RHS.  This is used to
/// eliminate casts.
bool
ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
                                             ICmpInst::Predicate Pred,
                                             const SCEV *LHS, const SCEV *RHS) {
  // Interpret a null as meaning no loop, where there is obviously no guard
  // (interprocedural conditions notwithstanding).
  if (!L) return true;

  if (VerifyIR)
    assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()) &&
           "This cannot be done on broken IR!");

  if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
    return true;

  BasicBlock *Latch = L->getLoopLatch();
  if (!Latch)
    return false;

  BranchInst *LoopContinuePredicate =
      dyn_cast<BranchInst>(Latch->getTerminator());
  if (LoopContinuePredicate && LoopContinuePredicate->isConditional() &&
      isImpliedCond(Pred, LHS, RHS,
                    LoopContinuePredicate->getCondition(),
                    LoopContinuePredicate->getSuccessor(0) != L->getHeader()))
    return true;

  // We don't want more than one activation of the following loops on the stack
  // -- that can lead to O(n!) time complexity.
  if (WalkingBEDominatingConds)
    return false;

  SaveAndRestore<bool> ClearOnExit(WalkingBEDominatingConds, true);

  // See if we can exploit a trip count to prove the predicate.
  const auto &BETakenInfo = getBackedgeTakenInfo(L);
  const SCEV *LatchBECount = BETakenInfo.getExact(Latch, this);
  if (LatchBECount != getCouldNotCompute()) {
    // We know that Latch branches back to the loop header exactly
    // LatchBECount times.  This means the backedge condition at Latch is
    // equivalent to "{0,+,1} u< LatchBECount".
    Type *Ty = LatchBECount->getType();
    auto NoWrapFlags = SCEV::NoWrapFlags(SCEV::FlagNUW | SCEV::FlagNW);
    const SCEV *LoopCounter =
        getAddRecExpr(getZero(Ty), getOne(Ty), L, NoWrapFlags);
    if (isImpliedCond(Pred, LHS, RHS, ICmpInst::ICMP_ULT, LoopCounter,
                      LatchBECount))
      return true;
  }

  // Check conditions due to any @llvm.assume intrinsics.
  for (auto &AssumeVH : AC.assumptions()) {
    if (!AssumeVH)
      continue;
    auto *CI = cast<CallInst>(AssumeVH);
    if (!DT.dominates(CI, Latch->getTerminator()))
      continue;

    if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false))
      return true;
  }

  // If the loop is not reachable from the entry block, we risk running into an
  // infinite loop as we walk up into the dom tree.  These loops do not matter
  // anyway, so we just return a conservative answer when we see them.
  if (!DT.isReachableFromEntry(L->getHeader()))
    return false;

  if (isImpliedViaGuard(Latch, Pred, LHS, RHS))
    return true;

  for (DomTreeNode *DTN = DT[Latch], *HeaderDTN = DT[L->getHeader()];
       DTN != HeaderDTN; DTN = DTN->getIDom()) {
    assert(DTN && "should reach the loop header before reaching the root!");

    BasicBlock *BB = DTN->getBlock();
    if (isImpliedViaGuard(BB, Pred, LHS, RHS))
      return true;

    BasicBlock *PBB = BB->getSinglePredecessor();
    if (!PBB)
      continue;

    BranchInst *ContinuePredicate = dyn_cast<BranchInst>(PBB->getTerminator());
    if (!ContinuePredicate || !ContinuePredicate->isConditional())
      continue;

    Value *Condition = ContinuePredicate->getCondition();

    // If we have an edge `E` within the loop body that dominates the only
    // latch, the condition guarding `E` also guards the backedge.  This
    // reasoning works only for loops with a single latch.

    BasicBlockEdge DominatingEdge(PBB, BB);
    if (DominatingEdge.isSingleEdge()) {
      // We're constructively (and conservatively) enumerating edges within the
      // loop body that dominate the latch.  The dominator tree better agree
      // with us on this:
      assert(DT.dominates(DominatingEdge, Latch) && "should be!");

      if (isImpliedCond(Pred, LHS, RHS, Condition,
                        BB != ContinuePredicate->getSuccessor(0)))
        return true;
    }
  }

  return false;
}
bool
ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L,
                                          ICmpInst::Predicate Pred,
                                          const SCEV *LHS, const SCEV *RHS) {
  // Interpret a null as meaning no loop, where there is obviously no guard
  // (interprocedural conditions notwithstanding).
  if (!L) return false;

  if (VerifyIR)
    assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()) &&
           "This cannot be done on broken IR!");

  // Both LHS and RHS must be available at loop entry.
  assert(isAvailableAtLoopEntry(LHS, L) &&
         "LHS is not available at Loop Entry");
  assert(isAvailableAtLoopEntry(RHS, L) &&
         "RHS is not available at Loop Entry");

  if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
    return true;

  // If we cannot prove strict comparison (e.g. a > b), maybe we can prove
  // the facts (a >= b && a != b) separately. A typical situation is when the
  // non-strict comparison is known from ranges and non-equality is known from
  // dominating predicates. If we are proving strict comparison, we always try
  // to prove non-equality and non-strict comparison separately.
  auto NonStrictPredicate = ICmpInst::getNonStrictPredicate(Pred);
  const bool ProvingStrictComparison = (Pred != NonStrictPredicate);
  bool ProvedNonStrictComparison = false;
  bool ProvedNonEquality = false;

  if (ProvingStrictComparison) {
    ProvedNonStrictComparison =
        isKnownViaNonRecursiveReasoning(NonStrictPredicate, LHS, RHS);
    ProvedNonEquality =
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_NE, LHS, RHS);
    if (ProvedNonStrictComparison && ProvedNonEquality)
      return true;
  }

  // Try to prove (Pred, LHS, RHS) using isImpliedViaGuard.
  auto ProveViaGuard = [&](BasicBlock *Block) {
    if (isImpliedViaGuard(Block, Pred, LHS, RHS))
      return true;
    if (ProvingStrictComparison) {
      if (!ProvedNonStrictComparison)
        ProvedNonStrictComparison =
            isImpliedViaGuard(Block, NonStrictPredicate, LHS, RHS);
      if (!ProvedNonEquality)
        ProvedNonEquality =
            isImpliedViaGuard(Block, ICmpInst::ICMP_NE, LHS, RHS);
      if (ProvedNonStrictComparison && ProvedNonEquality)
        return true;
    }
    return false;
  };

  // Try to prove (Pred, LHS, RHS) using isImpliedCond.
  auto ProveViaCond = [&](Value *Condition, bool Inverse) {
    if (isImpliedCond(Pred, LHS, RHS, Condition, Inverse))
      return true;
    if (ProvingStrictComparison) {
      if (!ProvedNonStrictComparison)
        ProvedNonStrictComparison =
            isImpliedCond(NonStrictPredicate, LHS, RHS, Condition, Inverse);
      if (!ProvedNonEquality)
        ProvedNonEquality =
            isImpliedCond(ICmpInst::ICMP_NE, LHS, RHS, Condition, Inverse);
      if (ProvedNonStrictComparison && ProvedNonEquality)
        return true;
    }
    return false;
  };

  // Starting at the loop predecessor, climb up the predecessor chain, as long
  // as there are predecessors that can be found that have unique successors
  // leading to the original header.
  for (std::pair<BasicBlock *, BasicBlock *>
         Pair(L->getLoopPredecessor(), L->getHeader());
       Pair.first;
       Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {

    if (ProveViaGuard(Pair.first))
      return true;

    BranchInst *LoopEntryPredicate =
        dyn_cast<BranchInst>(Pair.first->getTerminator());
    if (!LoopEntryPredicate ||
        LoopEntryPredicate->isUnconditional())
      continue;

    if (ProveViaCond(LoopEntryPredicate->getCondition(),
                     LoopEntryPredicate->getSuccessor(0) != Pair.second))
      return true;
  }

  // Check conditions due to any @llvm.assume intrinsics.
  for (auto &AssumeVH : AC.assumptions()) {
    if (!AssumeVH)
      continue;
    auto *CI = cast<CallInst>(AssumeVH);
    if (!DT.dominates(CI, L->getHeader()))
      continue;

    if (ProveViaCond(CI->getArgOperand(0), false))
      return true;
  }

  return false;
}
bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred,
                                    const SCEV *LHS, const SCEV *RHS,
                                    Value *FoundCondValue,
                                    bool Inverse) {
  if (!PendingLoopPredicates.insert(FoundCondValue).second)
    return false;

  auto ClearOnExit =
      make_scope_exit([&]() { PendingLoopPredicates.erase(FoundCondValue); });

  // Recursively handle And and Or conditions.
  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(FoundCondValue)) {
    if (BO->getOpcode() == Instruction::And) {
      if (!Inverse)
        return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
               isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
    } else if (BO->getOpcode() == Instruction::Or) {
      if (Inverse)
        return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
               isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
    }
  }

  ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue);
  if (!ICI) return false;

  // We found a conditional branch that dominates the loop or controls the
  // loop latch.  Check to see if it is the comparison we are looking for.
  ICmpInst::Predicate FoundPred;
  if (Inverse)
    FoundPred = ICI->getInversePredicate();
  else
    FoundPred = ICI->getPredicate();

  const SCEV *FoundLHS = getSCEV(ICI->getOperand(0));
  const SCEV *FoundRHS = getSCEV(ICI->getOperand(1));

  return isImpliedCond(Pred, LHS, RHS, FoundPred, FoundLHS, FoundRHS);
}
bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS,
                                    const SCEV *RHS,
                                    ICmpInst::Predicate FoundPred,
                                    const SCEV *FoundLHS,
                                    const SCEV *FoundRHS) {
  // Balance the types.
  if (getTypeSizeInBits(LHS->getType()) <
      getTypeSizeInBits(FoundLHS->getType())) {
    if (CmpInst::isSigned(Pred)) {
      LHS = getSignExtendExpr(LHS, FoundLHS->getType());
      RHS = getSignExtendExpr(RHS, FoundLHS->getType());
    } else {
      LHS = getZeroExtendExpr(LHS, FoundLHS->getType());
      RHS = getZeroExtendExpr(RHS, FoundLHS->getType());
    }
  } else if (getTypeSizeInBits(LHS->getType()) >
             getTypeSizeInBits(FoundLHS->getType())) {
    if (CmpInst::isSigned(FoundPred)) {
      FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType());
      FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType());
    } else {
      FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType());
      FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType());
    }
  }

  // Canonicalize the query to match the way instcombine will have
  // canonicalized the comparison.
  if (SimplifyICmpOperands(Pred, LHS, RHS))
    if (LHS == RHS)
      return CmpInst::isTrueWhenEqual(Pred);
  if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS))
    if (FoundLHS == FoundRHS)
      return CmpInst::isFalseWhenEqual(FoundPred);

  // Check to see if we can make the LHS or RHS match.
  if (LHS == FoundRHS || RHS == FoundLHS) {
    if (isa<SCEVConstant>(RHS)) {
      std::swap(FoundLHS, FoundRHS);
      FoundPred = ICmpInst::getSwappedPredicate(FoundPred);
    } else {
      std::swap(LHS, RHS);
      Pred = ICmpInst::getSwappedPredicate(Pred);
    }
  }

  // Check whether the found predicate is the same as the desired predicate.
  if (FoundPred == Pred)
    return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS);

  // Check whether swapping the found predicate makes it the same as the
  // desired predicate.
  if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) {
    if (isa<SCEVConstant>(RHS))
      return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS);
    else
      return isImpliedCondOperands(ICmpInst::getSwappedPredicate(Pred),
                                   RHS, LHS, FoundLHS, FoundRHS);
  }

  // Unsigned comparison is the same as signed comparison when both operands
  // are non-negative.
  if (CmpInst::isUnsigned(FoundPred) &&
      CmpInst::getSignedPredicate(FoundPred) == Pred &&
      isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS))
    return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS);

  // Check if we can make progress by sharpening ranges.
  if (FoundPred == ICmpInst::ICMP_NE &&
      (isa<SCEVConstant>(FoundLHS) || isa<SCEVConstant>(FoundRHS))) {

    const SCEVConstant *C = nullptr;
    const SCEV *V = nullptr;

    if (isa<SCEVConstant>(FoundLHS)) {
      C = cast<SCEVConstant>(FoundLHS);
      V = FoundRHS;
    } else {
      C = cast<SCEVConstant>(FoundRHS);
      V = FoundLHS;
    }

    // The guarding predicate tells us that C != V. If the known range
    // of V is [C, t), we can sharpen the range to [C + 1, t).  The
    // range we consider has to correspond to same signedness as the
    // predicate we're interested in folding.

    APInt Min = ICmpInst::isSigned(Pred) ?
        getSignedRangeMin(V) : getUnsignedRangeMin(V);

    if (Min == C->getAPInt()) {
      // Given (V >= Min && V != Min) we conclude V >= (Min + 1).
      // This is true even if (Min + 1) wraps around -- in case of
      // wraparound, (Min + 1) < Min, so (V >= Min => V >= (Min + 1)).

      APInt SharperMin = Min + 1;

      switch (Pred) {
      case ICmpInst::ICMP_SGE:
      case ICmpInst::ICMP_UGE:
        // We know V `Pred` SharperMin.  If this implies LHS `Pred`
        // RHS, we're done.
        if (isImpliedCondOperands(Pred, LHS, RHS, V,
                                  getConstant(SharperMin)))
          return true;
        LLVM_FALLTHROUGH;

      case ICmpInst::ICMP_SGT:
      case ICmpInst::ICMP_UGT:
        // We know from the range information that (V `Pred` Min ||
        // V == Min).  We know from the guarding condition that !(V
        // == Min).  This gives us
        //
        //       V `Pred` Min || V == Min && !(V == Min)
        //   =>  V `Pred` Min
        //
        // If V `Pred` Min implies LHS `Pred` RHS, we're done.
        if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(Min)))
          return true;
        LLVM_FALLTHROUGH;

      default:
        // No change.
        break;
      }
    }
  }

  // Check whether the actual condition is beyond sufficient.
  if (FoundPred == ICmpInst::ICMP_EQ)
    if (ICmpInst::isTrueWhenEqual(Pred))
      if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS))
        return true;
  if (Pred == ICmpInst::ICMP_NE)
    if (!ICmpInst::isTrueWhenEqual(FoundPred))
      if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS))
        return true;

  // Otherwise assume the worst.
  return false;
}
bool ScalarEvolution::splitBinaryAdd(const SCEV *Expr,
                                     const SCEV *&L, const SCEV *&R,
                                     SCEV::NoWrapFlags &Flags) {
  const auto *AE = dyn_cast<SCEVAddExpr>(Expr);
  if (!AE || AE->getNumOperands() != 2)
    return false;

  L = AE->getOperand(0);
  R = AE->getOperand(1);
  Flags = AE->getNoWrapFlags();
  return true;
}
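// Illustration (hypothetical expression): for Expr == (3 + %x)<nsw>,
// splitBinaryAdd sets L = 3, R = %x and Flags = FlagNSW.  SCEV sorts the
// constant operand of an add first, which is why callers check
// isa<SCEVConstant> on the first output operand.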
Optional<APInt> ScalarEvolution::computeConstantDifference(const SCEV *More,
                                                           const SCEV *Less) {
  // We avoid subtracting expressions here because this function is usually
  // fairly deep in the call stack (i.e. is called many times).

  if (isa<SCEVAddRecExpr>(Less) && isa<SCEVAddRecExpr>(More)) {
    const auto *LAR = cast<SCEVAddRecExpr>(Less);
    const auto *MAR = cast<SCEVAddRecExpr>(More);

    if (LAR->getLoop() != MAR->getLoop())
      return None;

    // We look at affine expressions only; not for correctness but to keep
    // getStepRecurrence cheap.
    if (!LAR->isAffine() || !MAR->isAffine())
      return None;

    if (LAR->getStepRecurrence(*this) != MAR->getStepRecurrence(*this))
      return None;

    Less = LAR->getStart();
    More = MAR->getStart();

    // Fall through.
  }

  if (isa<SCEVConstant>(Less) && isa<SCEVConstant>(More)) {
    const auto &M = cast<SCEVConstant>(More)->getAPInt();
    const auto &L = cast<SCEVConstant>(Less)->getAPInt();
    return M - L;
  }

  SCEV::NoWrapFlags Flags;
  const SCEV *LLess = nullptr, *RLess = nullptr;
  const SCEV *LMore = nullptr, *RMore = nullptr;
  const SCEVConstant *C1 = nullptr, *C2 = nullptr;

  // Compare (X + C1) vs X.
  if (splitBinaryAdd(Less, LLess, RLess, Flags))
    if ((C1 = dyn_cast<SCEVConstant>(LLess)))
      if (RLess == More)
        return -(C1->getAPInt());

  // Compare X vs (X + C2).
  if (splitBinaryAdd(More, LMore, RMore, Flags))
    if ((C2 = dyn_cast<SCEVConstant>(LMore)))
      if (RMore == Less)
        return C2->getAPInt();

  // Compare (X + C1) vs (X + C2).
  if (C1 && C2 && RLess == RMore)
    return C2->getAPInt() - C1->getAPInt();

  return None;
}
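// Illustration (hypothetical expressions): computeConstantDifference returns
// 4 for More == (10 + %x) and Less == (6 + %x), and likewise for the addrec
// pair More == {(10 + %x),+,1} and Less == {(6 + %x),+,1} over the same loop,
// since the recurrences agree everywhere except in their starts.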
bool ScalarEvolution::isImpliedCondOperandsViaNoOverflow(
    ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS,
    const SCEV *FoundLHS, const SCEV *FoundRHS) {
  if (Pred != CmpInst::ICMP_SLT && Pred != CmpInst::ICMP_ULT)
    return false;

  const auto *AddRecLHS = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!AddRecLHS)
    return false;

  const auto *AddRecFoundLHS = dyn_cast<SCEVAddRecExpr>(FoundLHS);
  if (!AddRecFoundLHS)
    return false;

  // We'd like to let SCEV reason about control dependencies, so we constrain
  // both the inequalities to be about add recurrences on the same loop.  This
  // way we can use isLoopEntryGuardedByCond later.

  const Loop *L = AddRecFoundLHS->getLoop();
  if (L != AddRecLHS->getLoop())
    return false;

  //  FoundLHS u< FoundRHS u< -C =>  (FoundLHS + C) u< (FoundRHS + C) ... (1)
  //
  //  FoundLHS s< FoundRHS s< INT_MIN - C => (FoundLHS + C) s< (FoundRHS + C)
  //                                                                  ... (2)
  //
  // Informal proof for (2), assuming (1) [*]:
  //
  // We'll also assume (A s< B) <=> ((A + INT_MIN) u< (B + INT_MIN)) ... (3)[**]
  //
  // Then
  //
  //       FoundLHS s< FoundRHS s< INT_MIN - C
  //  <=>  (FoundLHS + INT_MIN) u< (FoundRHS + INT_MIN) u< -C   [ using (3) ]
  //  <=>  (FoundLHS + INT_MIN + C) u< (FoundRHS + INT_MIN + C) [ using (1) ]
  //  <=>  (FoundLHS + INT_MIN + C + INT_MIN) s<
  //                    (FoundRHS + INT_MIN + C + INT_MIN)      [ using (3) ]
  //  <=>  FoundLHS + C s< FoundRHS + C
  //
  // [*]: (1) can be proved by ruling out overflow.
  //
  // [**]: This can be proved by analyzing all the four possibilities:
  //    (A s< 0, B s< 0), (A s< 0, B s>= 0), (A s>= 0, B s< 0) and
  //    (A s>= 0, B s>= 0).
  //
  // Note:
  // Despite (2), "FoundRHS s< INT_MIN - C" does not mean that "FoundRHS + C"
  // will not sign underflow.  For instance, say FoundLHS = (i8 -128), FoundRHS
  // = (i8 -127) and C = (i8 -100).  Then INT_MIN - C = (i8 -28), and FoundRHS
  // s< (INT_MIN - C).  Lack of sign overflow / underflow in "FoundRHS + C" is
  // neither necessary nor sufficient to prove "(FoundLHS + C) s< (FoundRHS +
  // C)".

  Optional<APInt> LDiff = computeConstantDifference(LHS, FoundLHS);
  Optional<APInt> RDiff = computeConstantDifference(RHS, FoundRHS);
  if (!LDiff || !RDiff || *LDiff != *RDiff)
    return false;

  if (LDiff->isMinValue())
    return true;

  APInt FoundRHSLimit;

  if (Pred == CmpInst::ICMP_ULT) {
    FoundRHSLimit = -(*RDiff);
  } else {
    assert(Pred == CmpInst::ICMP_SLT && "Checked above!");
    FoundRHSLimit =
        APInt::getSignedMinValue(getTypeSizeInBits(RHS->getType())) - *RDiff;
  }

  // Try to prove (1) or (2), as needed.
  return isAvailableAtLoopEntry(FoundRHS, L) &&
         isLoopEntryGuardedByCond(L, Pred, FoundRHS,
                                  getConstant(FoundRHSLimit));
}
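// Illustration of rule (1) (i8, hypothetical values): with C == 3, -C is 253,
// so FoundLHS u< FoundRHS u< 253 guarantees that neither side exceeds 252;
// adding 3 therefore cannot wrap, and (FoundLHS + 3) u< (FoundRHS + 3)
// follows.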
bool ScalarEvolution::isImpliedViaMerge(ICmpInst::Predicate Pred,
                                        const SCEV *LHS, const SCEV *RHS,
                                        const SCEV *FoundLHS,
                                        const SCEV *FoundRHS, unsigned Depth) {
  const PHINode *LPhi = nullptr, *RPhi = nullptr;

  auto ClearOnExit = make_scope_exit([&]() {
    if (LPhi) {
      bool Erased = PendingMerges.erase(LPhi);
      assert(Erased && "Failed to erase LPhi!");
      (void)Erased;
    }
    if (RPhi) {
      bool Erased = PendingMerges.erase(RPhi);
      assert(Erased && "Failed to erase RPhi!");
      (void)Erased;
    }
  });

  // Find the respective Phis and check that they are not pending.
  if (const SCEVUnknown *LU = dyn_cast<SCEVUnknown>(LHS))
    if (auto *Phi = dyn_cast<PHINode>(LU->getValue())) {
      if (!PendingMerges.insert(Phi).second)
        return false;
      LPhi = Phi;
    }
  if (const SCEVUnknown *RU = dyn_cast<SCEVUnknown>(RHS))
    if (auto *Phi = dyn_cast<PHINode>(RU->getValue())) {
      // If we detect a loop of Phi nodes being processed by this method, for
      // example:
      //
      //   %a = phi i32 [ %some1, %preheader ], [ %b, %latch ]
      //   %b = phi i32 [ %some2, %preheader ], [ %a, %latch ]
      //
      // we don't want to deal with a case that complex, so return a
      // conservative answer.
      if (!PendingMerges.insert(Phi).second)
        return false;
      RPhi = Phi;
    }

  // If none of LHS, RHS is a Phi, nothing to do here.
  if (!LPhi && !RPhi)
    return false;

  // If there is a SCEVUnknown Phi we are interested in, make it left.
  if (!LPhi) {
    std::swap(LHS, RHS);
    std::swap(FoundLHS, FoundRHS);
    std::swap(LPhi, RPhi);
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }

  assert(LPhi && "LPhi should definitely be a SCEVUnknown Phi!");
  const BasicBlock *LBB = LPhi->getParent();
  const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS);

  auto ProvedEasily = [&](const SCEV *S1, const SCEV *S2) {
    return isKnownViaNonRecursiveReasoning(Pred, S1, S2) ||
           isImpliedCondOperandsViaRanges(Pred, S1, S2, FoundLHS, FoundRHS) ||
           isImpliedViaOperations(Pred, S1, S2, FoundLHS, FoundRHS, Depth);
  };

  if (RPhi && RPhi->getParent() == LBB) {
    // Case one: RHS is also a SCEVUnknown Phi from the same basic block.
    // If we compare two Phis from the same block, and for each entry block
    // the predicate is true for incoming values from this block, then the
    // predicate is also true for the Phis.
    for (const BasicBlock *IncBB : predecessors(LBB)) {
      const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB));
      const SCEV *R = getSCEV(RPhi->getIncomingValueForBlock(IncBB));
      if (!ProvedEasily(L, R))
        return false;
    }
  } else if (RAR && RAR->getLoop()->getHeader() == LBB) {
    // Case two: RHS is also a Phi from the same basic block, and it is an
    // AddRec. It means that there is a loop which has both an AddRec and an
    // Unknown PHI; for it, we can compare the incoming values of the AddRec
    // from above the loop and from the latch with the respective incoming
    // values of LPhi.
    // TODO: Generalize to handle loops with many inputs in a header.
    if (LPhi->getNumIncomingValues() != 2) return false;

    auto *RLoop = RAR->getLoop();
    auto *Predecessor = RLoop->getLoopPredecessor();
    assert(Predecessor && "Loop with AddRec with no predecessor?");
    const SCEV *L1 = getSCEV(LPhi->getIncomingValueForBlock(Predecessor));
    if (!ProvedEasily(L1, RAR->getStart()))
      return false;
    auto *Latch = RLoop->getLoopLatch();
    assert(Latch && "Loop with AddRec with no latch?");
    const SCEV *L2 = getSCEV(LPhi->getIncomingValueForBlock(Latch));
    if (!ProvedEasily(L2, RAR->getPostIncExpr(*this)))
      return false;
  } else {
    // In all other cases go over inputs of LHS and compare each of them to
    // RHS; the predicate is true for (LHS, RHS) if it is true for all such
    // pairs.  At this point RHS is either a non-Phi, or it is a Phi from some
    // block different from LBB.
    for (const BasicBlock *IncBB : predecessors(LBB)) {
      // Check that RHS is available in this block.
      if (!dominates(RHS, IncBB))
        return false;
      const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB));
      if (!ProvedEasily(L, RHS))
        return false;
    }
  }
  return true;
}
bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred,
                                            const SCEV *LHS, const SCEV *RHS,
                                            const SCEV *FoundLHS,
                                            const SCEV *FoundRHS) {
  if (isImpliedCondOperandsViaRanges(Pred, LHS, RHS, FoundLHS, FoundRHS))
    return true;

  if (isImpliedCondOperandsViaNoOverflow(Pred, LHS, RHS, FoundLHS, FoundRHS))
    return true;

  return isImpliedCondOperandsHelper(Pred, LHS, RHS,
                                     FoundLHS, FoundRHS) ||
         // ~x < ~y --> x > y
         isImpliedCondOperandsHelper(Pred, LHS, RHS,
                                     getNotSCEV(FoundRHS),
                                     getNotSCEV(FoundLHS));
}
/// Is MaybeMinMaxExpr an (U|S)(Min|Max) of Candidate and some other values?
template <typename MinMaxExprType>
static bool IsMinMaxConsistingOf(const SCEV *MaybeMinMaxExpr,
                                 const SCEV *Candidate) {
  const MinMaxExprType *MinMaxExpr = dyn_cast<MinMaxExprType>(MaybeMinMaxExpr);
  if (!MinMaxExpr)
    return false;

  return find(MinMaxExpr->operands(), Candidate) != MinMaxExpr->op_end();
}
static bool IsKnownPredicateViaAddRecStart(ScalarEvolution &SE,
                                           ICmpInst::Predicate Pred,
                                           const SCEV *LHS, const SCEV *RHS) {
  // If both sides are affine addrecs for the same loop, with equal
  // steps, and we know the recurrences don't wrap, then we only
  // need to check the predicate on the starting values.

  if (!ICmpInst::isRelational(Pred))
    return false;

  const SCEVAddRecExpr *LAR = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!LAR)
    return false;
  const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS);
  if (!RAR)
    return false;
  if (LAR->getLoop() != RAR->getLoop())
    return false;
  if (!LAR->isAffine() || !RAR->isAffine())
    return false;

  if (LAR->getStepRecurrence(SE) != RAR->getStepRecurrence(SE))
    return false;

  SCEV::NoWrapFlags NW = ICmpInst::isSigned(Pred) ?
                         SCEV::FlagNSW : SCEV::FlagNUW;
  if (!LAR->getNoWrapFlags(NW) || !RAR->getNoWrapFlags(NW))
    return false;

  return SE.isKnownPredicate(Pred, LAR->getStart(), RAR->getStart());
}
/// Is LHS `Pred` RHS true by virtue of LHS or RHS being a Min or Max
/// expression?
static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE,
                                        ICmpInst::Predicate Pred,
                                        const SCEV *LHS, const SCEV *RHS) {
  switch (Pred) {
  default:
    return false;

  case ICmpInst::ICMP_SGE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_SLE:
    return
        // min(A, ...) <= A
        IsMinMaxConsistingOf<SCEVSMinExpr>(LHS, RHS) ||
        // A <= max(A, ...)
        IsMinMaxConsistingOf<SCEVSMaxExpr>(RHS, LHS);

  case ICmpInst::ICMP_UGE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_ULE:
    return
        // min(A, ...) <= A
        IsMinMaxConsistingOf<SCEVUMinExpr>(LHS, RHS) ||
        // A <= max(A, ...)
        IsMinMaxConsistingOf<SCEVUMaxExpr>(RHS, LHS);
  }

  llvm_unreachable("covered switch fell through?!");
}
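// Illustration (hypothetical operands): for Pred == ICMP_SLE,
// LHS == smin(%a, %b) and RHS == %a, IsMinMaxConsistingOf<SCEVSMinExpr>
// finds %a among the smin operands, so smin(%a, %b) s<= %a is known.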
bool ScalarEvolution::isImpliedViaOperations(ICmpInst::Predicate Pred,
                                             const SCEV *LHS, const SCEV *RHS,
                                             const SCEV *FoundLHS,
                                             const SCEV *FoundRHS,
                                             unsigned Depth) {
  assert(getTypeSizeInBits(LHS->getType()) ==
             getTypeSizeInBits(RHS->getType()) &&
         "LHS and RHS have different sizes?");
  assert(getTypeSizeInBits(FoundLHS->getType()) ==
             getTypeSizeInBits(FoundRHS->getType()) &&
         "FoundLHS and FoundRHS have different sizes?");
  // We want to avoid hurting the compile time with analysis of too big trees.
  if (Depth > MaxSCEVOperationsImplicationDepth)
    return false;
  // We only want to work with the ICMP_SGT comparison so far.
  // TODO: Extend to ICMP_UGT?
  if (Pred == ICmpInst::ICMP_SLT) {
    Pred = ICmpInst::ICMP_SGT;
    std::swap(LHS, RHS);
    std::swap(FoundLHS, FoundRHS);
  }
  if (Pred != ICmpInst::ICMP_SGT)
    return false;

  auto GetOpFromSExt = [&](const SCEV *S) {
    if (auto *Ext = dyn_cast<SCEVSignExtendExpr>(S))
      return Ext->getOperand();
    // TODO: If S is a SCEVConstant then you can cheaply "strip" the sext off
    // the constant in some cases.
    return S;
  };

  // Acquire values from extensions.
  auto *OrigLHS = LHS;
  auto *OrigFoundLHS = FoundLHS;
  LHS = GetOpFromSExt(LHS);
  FoundLHS = GetOpFromSExt(FoundLHS);

  // Checks whether the SGT predicate can be proved trivially or using the
  // found context.
  auto IsSGTViaContext = [&](const SCEV *S1, const SCEV *S2) {
    return isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGT, S1, S2) ||
           isImpliedViaOperations(ICmpInst::ICMP_SGT, S1, S2, OrigFoundLHS,
                                  FoundRHS, Depth + 1);
  };

  if (auto *LHSAddExpr = dyn_cast<SCEVAddExpr>(LHS)) {
    // We want to avoid creation of any new non-constant SCEV. Since we are
    // going to compare the operands to RHS, we should be certain that we don't
    // need any size extensions for this. So let's decline all cases when the
    // sizes of types of LHS and RHS do not match.
    // TODO: Maybe try to get RHS from sext to catch more cases?
    if (getTypeSizeInBits(LHS->getType()) != getTypeSizeInBits(RHS->getType()))
      return false;

    // Should not overflow.
    if (!LHSAddExpr->hasNoSignedWrap())
      return false;

    auto *LL = LHSAddExpr->getOperand(0);
    auto *LR = LHSAddExpr->getOperand(1);
    auto *MinusOne = getNegativeSCEV(getOne(RHS->getType()));

    // Checks that S1 >= 0 && S2 > RHS, trivially or using the found context.
    auto IsSumGreaterThanRHS = [&](const SCEV *S1, const SCEV *S2) {
      return IsSGTViaContext(S1, MinusOne) && IsSGTViaContext(S2, RHS);
    };
    // Try to prove the following rule:
    // (LHS = LL + LR) && (LL >= 0) && (LR > RHS) => (LHS > RHS).
    // (LHS = LL + LR) && (LR >= 0) && (LL > RHS) => (LHS > RHS).
    if (IsSumGreaterThanRHS(LL, LR) || IsSumGreaterThanRHS(LR, LL))
      return true;
  } else if (auto *LHSUnknownExpr = dyn_cast<SCEVUnknown>(LHS)) {
    Value *LL, *LR;
    // FIXME: Once we have SDiv implemented, we can get rid of this matching.

    using namespace llvm::PatternMatch;

    if (match(LHSUnknownExpr->getValue(), m_SDiv(m_Value(LL), m_Value(LR)))) {
      // Rules for division.
      // We are going to perform some comparisons with Denominator and its
      // derivative expressions. In the general case, creating a SCEV for it
      // may lead to a complex analysis of the entire graph, and in particular
      // it can request trip count recalculation for the same loop. This would
      // cache as SCEVCouldNotCompute to avoid the infinite recursion. To avoid
      // this, we only want to create SCEVs that are constants in this section.
      // So we bail if Denominator is not a constant.
      if (!isa<ConstantInt>(LR))
        return false;

      auto *Denominator = cast<SCEVConstant>(getSCEV(LR));

      // We want to make sure that LHS = FoundLHS / Denominator. If it is so,
      // then a SCEV for the numerator already exists and matches with
      // FoundLHS.
      auto *Numerator = getExistingSCEV(LL);
      if (!Numerator || Numerator->getType() != FoundLHS->getType())
        return false;

      // Make sure that the numerator matches with FoundLHS and the denominator
      // is positive.
      if (!HasSameValue(Numerator, FoundLHS) || !isKnownPositive(Denominator))
        return false;

      auto *DTy = Denominator->getType();
      auto *FRHSTy = FoundRHS->getType();
      if (DTy->isPointerTy() != FRHSTy->isPointerTy())
        // One of the types is a pointer and another one is not. We cannot
        // extend them properly to a wider type, so let us just reject this
        // case.
        // TODO: Usage of getEffectiveSCEVType for DTy, FRHSTy etc should help
        // to avoid this check.
        return false;

      // Given that:
      // FoundLHS > FoundRHS, LHS = FoundLHS / Denominator, Denominator > 0.
      auto *WTy = getWiderType(DTy, FRHSTy);
      auto *DenominatorExt = getNoopOrSignExtend(Denominator, WTy);
      auto *FoundRHSExt = getNoopOrSignExtend(FoundRHS, WTy);

      // Try to prove the following rule:
      // (FoundRHS > Denominator - 2) && (RHS <= 0) => (LHS > RHS).
      // For example, given that FoundLHS > 2. It means that FoundLHS is at
      // least 3. If we divide it by Denominator < 4, we will have at least 1.
      auto *DenomMinusTwo = getMinusSCEV(DenominatorExt, getConstant(WTy, 2));
      if (isKnownNonPositive(RHS) &&
          IsSGTViaContext(FoundRHSExt, DenomMinusTwo))
        return true;

      // Try to prove the following rule:
      // (FoundRHS > -1 - Denominator) && (RHS < 0) => (LHS > RHS).
      // For example, given that FoundLHS > -3. Then FoundLHS is at least -2.
      // If we divide it by Denominator > 2, then:
      // 1. If FoundLHS is negative, then the result is 0.
      // 2. If FoundLHS is non-negative, then the result is non-negative.
      // Anyways, the result is non-negative.
      auto *MinusOne = getNegativeSCEV(getOne(WTy));
      auto *NegDenomMinusOne = getMinusSCEV(MinusOne, DenominatorExt);
      if (isKnownNegative(RHS) &&
          IsSGTViaContext(FoundRHSExt, NegDenomMinusOne))
        return true;
    }
  }

  // If our expression contained SCEVUnknown Phis, and we split it down and now
  // need to prove something for them, try to prove the predicate for every
  // possible incoming value of those Phis.
  if (isImpliedViaMerge(Pred, OrigLHS, RHS, OrigFoundLHS, FoundRHS, Depth + 1))
    return true;

  return false;
}
bool
ScalarEvolution::isKnownViaNonRecursiveReasoning(ICmpInst::Predicate Pred,
                                                 const SCEV *LHS,
                                                 const SCEV *RHS) {
  return isKnownPredicateViaConstantRanges(Pred, LHS, RHS) ||
         IsKnownPredicateViaMinOrMax(*this, Pred, LHS, RHS) ||
         IsKnownPredicateViaAddRecStart(*this, Pred, LHS, RHS) ||
         isKnownPredicateViaNoOverflow(Pred, LHS, RHS);
}
bool
ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
                                             const SCEV *LHS, const SCEV *RHS,
                                             const SCEV *FoundLHS,
                                             const SCEV *FoundRHS) {
  switch (Pred) {
  default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
  case ICmpInst::ICMP_EQ:
  case ICmpInst::ICMP_NE:
    if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_SLT:
  case ICmpInst::ICMP_SLE:
    if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_SGT:
  case ICmpInst::ICMP_SGE:
    if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_ULT:
  case ICmpInst::ICMP_ULE:
    if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_UGT:
  case ICmpInst::ICMP_UGE:
    if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, RHS, FoundRHS))
      return true;
    break;
  }

  // Maybe it can be proved via operations?
  if (isImpliedViaOperations(Pred, LHS, RHS, FoundLHS, FoundRHS))
    return true;

  return false;
}
bool ScalarEvolution::isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred,
                                                     const SCEV *LHS,
                                                     const SCEV *RHS,
                                                     const SCEV *FoundLHS,
                                                     const SCEV *FoundRHS) {
  if (!isa<SCEVConstant>(RHS) || !isa<SCEVConstant>(FoundRHS))
    // The restriction on `FoundRHS` can be lifted easily -- it exists only to
    // reduce the compile time impact of this optimization.
    return false;

  Optional<APInt> Addend = computeConstantDifference(LHS, FoundLHS);
  if (!Addend)
    return false;

  const APInt &ConstFoundRHS = cast<SCEVConstant>(FoundRHS)->getAPInt();

  // `FoundLHSRange` is the range we know `FoundLHS` to be in by virtue of the
  // antecedent "`FoundLHS` `Pred` `FoundRHS`".
  ConstantRange FoundLHSRange =
      ConstantRange::makeAllowedICmpRegion(Pred, ConstFoundRHS);

  // Since `LHS` is `FoundLHS` + `Addend`, we can compute a range for `LHS`:
  ConstantRange LHSRange = FoundLHSRange.add(ConstantRange(*Addend));

  // We can also compute the range of values for `LHS` that satisfy the
  // consequent, "`LHS` `Pred` `RHS`":
  const APInt &ConstRHS = cast<SCEVConstant>(RHS)->getAPInt();
  ConstantRange SatisfyingLHSRange =
      ConstantRange::makeSatisfyingICmpRegion(Pred, ConstRHS);

  // The antecedent implies the consequent if every value of `LHS` that
  // satisfies the antecedent also satisfies the consequent.
  return SatisfyingLHSRange.contains(LHSRange);
}
bool ScalarEvolution::doesIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride,
                                         bool IsSigned, bool NoWrap) {
  assert(isKnownPositive(Stride) && "Positive stride expected!");

  if (NoWrap) return false;

  unsigned BitWidth = getTypeSizeInBits(RHS->getType());
  const SCEV *One = getOne(Stride->getType());

  if (IsSigned) {
    APInt MaxRHS = getSignedRangeMax(RHS);
    APInt MaxValue = APInt::getSignedMaxValue(BitWidth);
    APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One));

    // SMaxRHS + SMaxStrideMinusOne > SMaxValue => overflow!
    return (std::move(MaxValue) - MaxStrideMinusOne).slt(MaxRHS);
  }

  APInt MaxRHS = getUnsignedRangeMax(RHS);
  APInt MaxValue = APInt::getMaxValue(BitWidth);
  APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One));

  // UMaxRHS + UMaxStrideMinusOne > UMaxValue => overflow!
  return (std::move(MaxValue) - MaxStrideMinusOne).ult(MaxRHS);
}
bool ScalarEvolution::doesIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride,
                                         bool IsSigned, bool NoWrap) {
  if (NoWrap) return false;

  unsigned BitWidth = getTypeSizeInBits(RHS->getType());
  const SCEV *One = getOne(Stride->getType());

  if (IsSigned) {
    APInt MinRHS = getSignedRangeMin(RHS);
    APInt MinValue = APInt::getSignedMinValue(BitWidth);
    APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One));

    // SMinRHS - SMaxStrideMinusOne < SMinValue => overflow!
    return (std::move(MinValue) + MaxStrideMinusOne).sgt(MinRHS);
  }

  APInt MinRHS = getUnsignedRangeMin(RHS);
  APInt MinValue = APInt::getMinValue(BitWidth);
  APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One));

  // UMinRHS - UMaxStrideMinusOne < UMinValue => overflow!
  return (std::move(MinValue) + MaxStrideMinusOne).ugt(MinRHS);
}
const SCEV *ScalarEvolution::computeBECount(const SCEV *Delta,
                                            const SCEV *Step, bool Equality) {
  const SCEV *One = getOne(Step->getType());
  Delta = Equality ? getAddExpr(Delta, Step)
                   : getAddExpr(Delta, getMinusSCEV(Step, One));
  return getUDivExpr(Delta, Step);
}
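// Illustration (hypothetical values): computeBECount(10, 3, false) forms
// (10 + (3 - 1)) /u 3 == 4, i.e. the backedge-taken count of an IV that must
// advance 10 units in steps of 3, rounded up.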
const SCEV *ScalarEvolution::computeMaxBECountForLT(const SCEV *Start,
                                                    const SCEV *Stride,
                                                    const SCEV *End,
                                                    unsigned BitWidth,
                                                    bool IsSigned) {
  assert(!isKnownNonPositive(Stride) &&
         "Stride is expected strictly positive!");
  // Calculate the maximum backedge count based on the range of values
  // permitted by Start, End, and Stride.
  const SCEV *MaxBECount;
  APInt MinStart =
      IsSigned ? getSignedRangeMin(Start) : getUnsignedRangeMin(Start);

  APInt StrideForMaxBECount =
      IsSigned ? getSignedRangeMin(Stride) : getUnsignedRangeMin(Stride);

  // We already know that the stride is positive, so we paper over conservatism
  // in our range computation by forcing StrideForMaxBECount to be at least
  // one. In theory this is unnecessary, but we expect MaxBECount to be a
  // SCEVConstant, and (udiv <constant> 0) is not constant folded by SCEV
  // (there is nothing to constant fold it to).
  APInt One(BitWidth, 1, IsSigned);
  StrideForMaxBECount = APIntOps::smax(One, StrideForMaxBECount);

  APInt MaxValue = IsSigned ? APInt::getSignedMaxValue(BitWidth)
                            : APInt::getMaxValue(BitWidth);
  APInt Limit = MaxValue - (StrideForMaxBECount - 1);

  // Although End can be a MAX expression we estimate MaxEnd considering only
  // the case End = RHS of the loop termination condition. This is safe because
  // in the other case (End - Start) is zero, leading to a zero maximum
  // backedge taken count.
  APInt MaxEnd = IsSigned ? APIntOps::smin(getSignedRangeMax(End), Limit)
                          : APIntOps::umin(getUnsignedRangeMax(End), Limit);

  MaxBECount = computeBECount(getConstant(MaxEnd - MinStart) /* Delta */,
                              getConstant(StrideForMaxBECount) /* Step */,
                              false /* Equality */);

  return MaxBECount;
}
ScalarEvolution::ExitLimit
ScalarEvolution::howManyLessThans(const SCEV *LHS, const SCEV *RHS,
                                  const Loop *L, bool IsSigned,
                                  bool ControlsExit, bool AllowPredicates) {
  SmallPtrSet<const SCEVPredicate *, 4> Predicates;

  const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
  bool PredicatedIV = false;

  if (!IV && AllowPredicates) {
    // Try to make this an AddRec using runtime tests, in the first X
    // iterations of this loop, where X is the SCEV expression found by the
    // algorithm below.
    IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates);
    PredicatedIV = true;
  }

  // Avoid weird loops.
  if (!IV || IV->getLoop() != L || !IV->isAffine())
    return getCouldNotCompute();

  bool NoWrap = ControlsExit &&
                IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW);

  const SCEV *Stride = IV->getStepRecurrence(*this);

  bool PositiveStride = isKnownPositive(Stride);

  // Avoid negative or zero stride values.
  if (!PositiveStride) {
    // We can compute the correct backedge taken count for loops with unknown
    // strides if we can prove that the loop is not an infinite loop with side
    // effects. Here's the loop structure we are trying to handle -
    //
    // i = start
    // do {
    //   A[i] = i;
    //   i += s;
    // } while (i < end);
    //
    // The backedge taken count for such loops is evaluated as -
    // (max(end, start + stride) - start - 1) /u stride
    //
    // The additional preconditions that we need to check to prove correctness
    // of the above formula are as follows -
    //
    // a) IV is either nuw or nsw depending upon signedness (indicated by the
    //    NoWrap flag).
    // b) The loop is single exit with no side effects.
    //
    // Precondition a) implies that if the stride is negative, this is a single
    // trip loop. The backedge taken count formula reduces to zero in this
    // case.
    //
    // Precondition b) implies that the unknown stride cannot be zero,
    // otherwise we have UB.
    //
    // The positive stride case is the same as isKnownPositive(Stride)
    // returning true (original behavior of the function).
    //
    // We want to make sure that the stride is truly unknown as there are edge
    // cases where ScalarEvolution propagates no wrap flags to the
    // post-increment/decrement IV even though the increment/decrement
    // operation itself is wrapping. The computed backedge taken count may be
    // wrong in such cases. This is prevented by checking that the stride is
    // not known to be either positive or non-positive. For example, no wrap
    // flags are propagated to the post-increment IV of this loop with a trip
    // count of 2 -
    //
    // unsigned char i;
    // for (i = 127; i < 128; i += 129)
    //   A[i] = i;
    //
    if (PredicatedIV || !NoWrap || isKnownNonPositive(Stride) ||
        !loopHasNoSideEffects(L))
      return getCouldNotCompute();
  } else if (!Stride->isOne() &&
             doesIVOverflowOnLT(RHS, Stride, IsSigned, NoWrap))
    // Avoid proven overflow cases: this will ensure that the backedge taken
    // count will not generate any unsigned overflow. Relaxed no-overflow
    // conditions exploit NoWrapFlags, allowing to optimize in presence of
    // undefined behaviors like the case of C language.
    return getCouldNotCompute();

  ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SLT
                                      : ICmpInst::ICMP_ULT;
  const SCEV *Start = IV->getStart();
  const SCEV *End = RHS;
  // When the RHS is not invariant, we do not know the end bound of the loop
  // and cannot calculate the ExactBECount needed by ExitLimit. However, we can
  // calculate the MaxBECount, given the start, stride and max value for the
  // end bound of the loop (RHS), and the fact that IV does not overflow (which
  // is checked above).
  if (!isLoopInvariant(RHS, L)) {
    const SCEV *MaxBECount = computeMaxBECountForLT(
        Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned);
    return ExitLimit(getCouldNotCompute() /* ExactNotTaken */, MaxBECount,
                     false /*MaxOrZero*/, Predicates);
  }
  // If the backedge is taken at least once, then it will be taken
  // (End-Start)/Stride times (rounded up to a multiple of Stride), where Start
  // is the LHS value of the less-than comparison the first time it is
  // evaluated and End is the RHS.
  const SCEV *BECountIfBackedgeTaken =
      computeBECount(getMinusSCEV(End, Start), Stride, false);
  // If the loop entry is guarded by the result of the backedge test of the
  // first loop iteration, then we know the backedge will be taken at least
  // once and so the backedge taken count is as above. If not then we use the
  // expression (max(End,Start)-Start)/Stride to describe the backedge count,
  // as if the backedge is taken at least once max(End,Start) is End and so the
  // result is as above, and if not max(End,Start) is Start so we get a
  // backedge count of zero.
  const SCEV *BECount;
  if (isLoopEntryGuardedByCond(L, Cond, getMinusSCEV(Start, Stride), RHS))
    BECount = BECountIfBackedgeTaken;
  else {
    End = IsSigned ? getSMaxExpr(RHS, Start) : getUMaxExpr(RHS, Start);
    BECount = computeBECount(getMinusSCEV(End, Start), Stride, false);
  }

  const SCEV *MaxBECount;
  bool MaxOrZero = false;
  if (isa<SCEVConstant>(BECount))
    MaxBECount = BECount;
  else if (isa<SCEVConstant>(BECountIfBackedgeTaken)) {
    // If we know exactly how many times the backedge will be taken if it's
    // taken at least once, then the backedge count will either be that or
    // zero.
    MaxBECount = BECountIfBackedgeTaken;
    MaxOrZero = true;
  } else {
    MaxBECount = computeMaxBECountForLT(
        Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned);
  }

  if (isa<SCEVCouldNotCompute>(MaxBECount) &&
      !isa<SCEVCouldNotCompute>(BECount))
    MaxBECount = getConstant(getUnsignedRangeMax(BECount));

  return ExitLimit(BECount, MaxBECount, MaxOrZero, Predicates);
}
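// Illustration (hypothetical loop): for IV == {0,+,2} exiting on IV u< 9 with
// the loop entry guarded, BECount == (9 - 0 + 2 - 1) /u 2 == 5: the backedge
// is taken for IV == 0, 2, 4, 6, 8, and the loop exits once IV reaches 10.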
ScalarEvolution::ExitLimit
ScalarEvolution::howManyGreaterThans(const SCEV *LHS, const SCEV *RHS,
                                     const Loop *L, bool IsSigned,
                                     bool ControlsExit, bool AllowPredicates) {
  SmallPtrSet<const SCEVPredicate *, 4> Predicates;
  // We handle only IV > Invariant.
  if (!isLoopInvariant(RHS, L))
    return getCouldNotCompute();

  const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!IV && AllowPredicates)
    // Try to make this an AddRec using runtime tests, in the first X
    // iterations of this loop, where X is the SCEV expression found by the
    // algorithm below.
    IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates);

  // Avoid weird loops.
  if (!IV || IV->getLoop() != L || !IV->isAffine())
    return getCouldNotCompute();

  bool NoWrap = ControlsExit &&
                IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW);

  const SCEV *Stride = getNegativeSCEV(IV->getStepRecurrence(*this));

  // Avoid negative or zero stride values.
  if (!isKnownPositive(Stride))
    return getCouldNotCompute();

  // Avoid proven overflow cases: this will ensure that the backedge taken
  // count will not generate any unsigned overflow. Relaxed no-overflow
  // conditions exploit NoWrapFlags, allowing to optimize in presence of
  // undefined behaviors like the case of C language.
  if (!Stride->isOne() && doesIVOverflowOnGT(RHS, Stride, IsSigned, NoWrap))
    return getCouldNotCompute();

  ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SGT
                                      : ICmpInst::ICMP_UGT;

  const SCEV *Start = IV->getStart();
  const SCEV *End = RHS;
  if (!isLoopEntryGuardedByCond(L, Cond, getAddExpr(Start, Stride), RHS))
    End = IsSigned ? getSMinExpr(RHS, Start) : getUMinExpr(RHS, Start);

  const SCEV *BECount = computeBECount(getMinusSCEV(Start, End), Stride,
                                       false);

  APInt MaxStart = IsSigned ? getSignedRangeMax(Start)
                            : getUnsignedRangeMax(Start);

  APInt MinStride = IsSigned ? getSignedRangeMin(Stride)
                             : getUnsignedRangeMin(Stride);

  unsigned BitWidth = getTypeSizeInBits(LHS->getType());
  APInt Limit = IsSigned ? APInt::getSignedMinValue(BitWidth) + (MinStride - 1)
                         : APInt::getMinValue(BitWidth) + (MinStride - 1);

  // Although End can be a MIN expression we estimate MinEnd considering only
  // the case End = RHS. This is safe because in the other case (Start - End)
  // is zero, leading to a zero maximum backedge taken count.
  APInt MinEnd =
      IsSigned ? APIntOps::smax(getSignedRangeMin(RHS), Limit)
               : APIntOps::umax(getUnsignedRangeMin(RHS), Limit);

  const SCEV *MaxBECount = isa<SCEVConstant>(BECount)
                               ? BECount
                               : computeBECount(getConstant(MaxStart - MinEnd),
                                                getConstant(MinStride), false);

  if (isa<SCEVCouldNotCompute>(MaxBECount))
    MaxBECount = BECount;

  return ExitLimit(BECount, MaxBECount, false, Predicates);
}
const SCEV *SCEVAddRecExpr::getNumIterationsInRange(const ConstantRange &Range,
                                                    ScalarEvolution &SE) const {
  if (Range.isFullSet())  // Infinite loop.
    return SE.getCouldNotCompute();

  // If the start is a non-zero constant, shift the range to simplify things.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
    if (!SC->getValue()->isZero()) {
      SmallVector<const SCEV *, 4> Operands(op_begin(), op_end());
      Operands[0] = SE.getZero(SC->getType());
      const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop(),
                                             getNoWrapFlags(FlagNW));
      if (const auto *ShiftedAddRec = dyn_cast<SCEVAddRecExpr>(Shifted))
        return ShiftedAddRec->getNumIterationsInRange(
            Range.subtract(SC->getAPInt()), SE);
      // This is strange and shouldn't happen.
      return SE.getCouldNotCompute();
    }

  // The only time we can solve this is when we have all constant indices.
  // Otherwise, we cannot determine the overflow conditions.
  if (any_of(operands(), [](const SCEV *Op) { return !isa<SCEVConstant>(Op); }))
    return SE.getCouldNotCompute();

  // Okay at this point we know that all elements of the chrec are constants
  // and that the start element is zero.

  // First check to see if the range contains zero.  If not, the first
  // iteration exits.
  unsigned BitWidth = SE.getTypeSizeInBits(getType());
  if (!Range.contains(APInt(BitWidth, 0)))
    return SE.getZero(getType());

  if (isAffine()) {
    // If this is an affine expression then we have this situation:
    //   Solve {0,+,A} in Range  ===  Ax in Range
    //
    // We know that zero is in the range.  If A is positive then we know that
    // the upper value of the range must be the first possible exit value.
    // If A is negative then the lower of the range is the last possible loop
    // value.  Also note that we already checked for a full range.
    APInt A = cast<SCEVConstant>(getOperand(1))->getAPInt();
    APInt End = A.sge(1) ? (Range.getUpper() - 1) : Range.getLower();

    // The exit value should be (End+A)/A.
    APInt ExitVal = (End + A).udiv(A);
    ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal);

    // Evaluate at the exit value.  If we really did fall out of the valid
    // range, then we computed our trip count, otherwise wrap around or other
    // things must have happened.
    ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE);
    if (Range.contains(Val->getValue()))
      return SE.getCouldNotCompute();  // Something strange happened.

    // Ensure that the previous value is in the range.  This is a sanity check.
    assert(Range.contains(
           EvaluateConstantChrecAtConstant(this,
           ConstantInt::get(SE.getContext(), ExitVal - 1), SE)->getValue()) &&
           "Linear scev computation is off in a bad way!");
    return SE.getConstant(ExitValue);
  }

  if (isQuadratic()) {
    if (auto S = SolveQuadraticAddRecRange(this, Range, SE))
      return SE.getConstant(S.getValue());
  }

  return SE.getCouldNotCompute();
}
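// Illustration (hypothetical chrec): for {0,+,4} and Range == [0, 10),
// A == 4 and End == 9, so ExitVal == (9 + 4) /u 4 == 3; the value at
// iteration 3 is 12, outside the range, while iteration 2 yields 8 inside
// it -- three iterations execute within the range.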
const SCEVAddRecExpr *
SCEVAddRecExpr::getPostIncExpr(ScalarEvolution &SE) const {
  assert(getNumOperands() > 1 && "AddRec with zero step?");
  // There is a temptation to just call getAddExpr(this, getStepRecurrence(SE)),
  // but in this case we cannot guarantee that the value returned will be an
  // AddRec because SCEV does not have a fixed point where it stops
  // simplification: it is legal to return ({rec1} + {rec2}). For example, it
  // may happen if we reach the arithmetic depth limit while simplifying. So we
  // construct the returned value explicitly.
  SmallVector<const SCEV *, 3> Ops;
  // If this is {A,+,B,+,C,...,+,N}, then its step is {B,+,C,+,...,+,N}, and
  // (this + Step) is {A+B,+,B+C,+...,+,N}.
  for (unsigned i = 0, e = getNumOperands() - 1; i < e; ++i)
    Ops.push_back(SE.getAddExpr(getOperand(i), getOperand(i + 1)));
  // We know that the last operand is not a constant zero (otherwise it would
  // have been popped out earlier). This guarantees us that if the result has
  // the same last operand, then it will also not be popped out, meaning that
  // the returned value will be an AddRec.
  const SCEV *Last = getOperand(getNumOperands() - 1);
  assert(!Last->isZero() && "Recurrency with zero step?");
  Ops.push_back(Last);
  return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, getLoop(),
                                               SCEV::FlagAnyWrap));
}
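// Illustration: the post-increment form of {0,+,1} is {1,+,1}, and that of
// {A,+,B,+,C} is {A+B,+,B+C,+,C}, per the pairwise sums constructed above.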
// Return true when S contains at least one undef value.
static inline bool containsUndefs(const SCEV *S) {
  return SCEVExprContains(S, [](const SCEV *S) {
    if (const auto *SU = dyn_cast<SCEVUnknown>(S))
      return isa<UndefValue>(SU->getValue());
    return false;
  });
}
namespace {

// Collect all steps of SCEV expressions.
struct SCEVCollectStrides {
  ScalarEvolution &SE;
  SmallVectorImpl<const SCEV *> &Strides;

  SCEVCollectStrides(ScalarEvolution &SE, SmallVectorImpl<const SCEV *> &S)
      : SE(SE), Strides(S) {}

  bool follow(const SCEV *S) {
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
      Strides.push_back(AR->getStepRecurrence(SE));
    return true;
  }

  bool isDone() const { return false; }
};

// Collect all SCEVUnknown and SCEVMulExpr expressions.
struct SCEVCollectTerms {
  SmallVectorImpl<const SCEV *> &Terms;

  SCEVCollectTerms(SmallVectorImpl<const SCEV *> &T) : Terms(T) {}

  bool follow(const SCEV *S) {
    if (isa<SCEVUnknown>(S) || isa<SCEVMulExpr>(S) ||
        isa<SCEVSignExtendExpr>(S)) {
      if (!containsUndefs(S))
        Terms.push_back(S);

      // Stop recursion: once we collected a term, do not walk its operands.
      return false;
    }

    // Keep looking.
    return true;
  }

  bool isDone() const { return false; }
};

// Check if a SCEV contains an AddRecExpr.
struct SCEVHasAddRec {
  bool &ContainsAddRec;

  SCEVHasAddRec(bool &ContainsAddRec) : ContainsAddRec(ContainsAddRec) {
    ContainsAddRec = false;
  }

  bool follow(const SCEV *S) {
    if (isa<SCEVAddRecExpr>(S)) {
      ContainsAddRec = true;

      // Stop recursion: once we found an AddRec, do not walk its operands.
      return false;
    }

    // Keep looking.
    return true;
  }

  bool isDone() const { return false; }
};

// Find factors that are multiplied with an expression that (possibly as a
// subexpression) contains an AddRecExpr. In the expression:
//
//   8 * (100 +  %p * %q * (%a + {0, +, 1}_loop))
//
// "%p * %q" are factors multiplied by the expression "(%a + {0, +, 1}_loop)"
// that contains the AddRec {0, +, 1}_loop. %p * %q are likely to be array
// size parameters as they form a product with an induction variable.
//
// This collector expects all array size parameters to be in the same MulExpr.
// It might be necessary to later add support for collecting parameters that
// are spread over different nested MulExpr.
struct SCEVCollectAddRecMultiplies {
  SmallVectorImpl<const SCEV *> &Terms;
  ScalarEvolution &SE;

  SCEVCollectAddRecMultiplies(SmallVectorImpl<const SCEV *> &T,
                              ScalarEvolution &SE)
      : Terms(T), SE(SE) {}

  bool follow(const SCEV *S) {
    if (auto *Mul = dyn_cast<SCEVMulExpr>(S)) {
      bool HasAddRec = false;
      SmallVector<const SCEV *, 0> Operands;
      for (auto Op : Mul->operands()) {
        const SCEVUnknown *Unknown = dyn_cast<SCEVUnknown>(Op);
        if (Unknown && !isa<CallInst>(Unknown->getValue())) {
          Operands.push_back(Op);
        } else if (Unknown) {
          HasAddRec = true;
        } else {
          bool ContainsAddRec;
          SCEVHasAddRec ContainsAddRecVisitor(ContainsAddRec);
          visitAll(Op, ContainsAddRecVisitor);
          HasAddRec |= ContainsAddRec;
        }
      }
      if (Operands.size() == 0)
        return true;

      if (!HasAddRec)
        return false;

      Terms.push_back(SE.getMulExpr(Operands));
      // Stop recursion: once we collected a term, do not walk its operands.
      return false;
    }

    // Keep looking.
    return true;
  }

  bool isDone() const { return false; }
};

} // end anonymous namespace
/// Find parametric terms in this SCEVAddRecExpr. We look for parameters in
/// two places:
///   1) The strides of AddRec expressions.
///   2) Unknowns that are multiplied with AddRec expressions.
void ScalarEvolution::collectParametricTerms(const SCEV *Expr,
                                             SmallVectorImpl<const SCEV *>
                                                 &Terms) {
  SmallVector<const SCEV *, 4> Strides;
  SCEVCollectStrides StrideCollector(*this, Strides);
  visitAll(Expr, StrideCollector);

  LLVM_DEBUG({
    dbgs() << "Strides:\n";
    for (const SCEV *S : Strides)
      dbgs() << *S << "\n";
  });

  for (const SCEV *S : Strides) {
    SCEVCollectTerms TermCollector(Terms);
    visitAll(S, TermCollector);
  }

  LLVM_DEBUG({
    dbgs() << "Terms:\n";
    for (const SCEV *T : Terms)
      dbgs() << *T << "\n";
  });

  SCEVCollectAddRecMultiplies MulCollector(Terms, *this);
  visitAll(Expr, MulCollector);
}
static bool findArrayDimensionsRec(ScalarEvolution &SE,
                                   SmallVectorImpl<const SCEV *> &Terms,
                                   SmallVectorImpl<const SCEV *> &Sizes) {
  int Last = Terms.size() - 1;
  const SCEV *Step = Terms[Last];

  // End of recursion.
  if (Last == 0) {
    if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Step)) {
      SmallVector<const SCEV *, 2> Qs;
      for (const SCEV *Op : M->operands())
        if (!isa<SCEVConstant>(Op))
          Qs.push_back(Op);

      Step = SE.getMulExpr(Qs);
    }

    Sizes.push_back(Step);
    return true;
  }

  for (const SCEV *&Term : Terms) {
    // Normalize the terms before the next call to findArrayDimensionsRec.
    const SCEV *Q, *R;
    SCEVDivision::divide(SE, Term, Step, &Q, &R);

    // Bail out when GCD does not evenly divide one of the terms.
    if (!R->isZero())
      return false;

    Term = Q;
  }

  // Remove all SCEVConstants.
  Terms.erase(
      remove_if(Terms, [](const SCEV *E) { return isa<SCEVConstant>(E); }),
      Terms.end());

  if (Terms.size() > 0)
    if (!findArrayDimensionsRec(SE, Terms, Sizes))
      return false;

  Sizes.push_back(Step);
  return true;
}
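
// As a worked example of the recursion above: starting from the collected
// terms {%m * %o, %o} of a two-dimensional access, the last term %o is taken
// as the current Step, every term is divided by it (giving {%m, 1}), the
// constants are removed, and the recursion continues on {%m}. Unwinding the
// recursion pushes %m and then %o into Sizes, recovering the dimensions with
// the innermost size last. Had any division left a remainder, the GCD
// assumption would be violated and the whole computation would bail out.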
// Returns true when one of the SCEVs of Terms contains a SCEVUnknown
// parameter.
static inline bool containsParameters(SmallVectorImpl<const SCEV *> &Terms) {
  for (const SCEV *T : Terms)
    if (SCEVExprContains(T, isa<SCEVUnknown, const SCEV *>))
      return true;
  return false;
}

// Return the number of product terms in S.
static inline int numberOfTerms(const SCEV *S) {
  if (const SCEVMulExpr *Expr = dyn_cast<SCEVMulExpr>(S))
    return Expr->getNumOperands();
  return 1;
}

static const SCEV *removeConstantFactors(ScalarEvolution &SE, const SCEV *T) {
  if (isa<SCEVConstant>(T))
    return nullptr;

  if (isa<SCEVUnknown>(T))
    return T;

  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(T)) {
    SmallVector<const SCEV *, 2> Factors;
    for (const SCEV *Op : M->operands())
      if (!isa<SCEVConstant>(Op))
        Factors.push_back(Op);

    return SE.getMulExpr(Factors);
  }

  return T;
}
/// Return the size of an element read or written by Inst.
const SCEV *ScalarEvolution::getElementSize(Instruction *Inst) {
  Type *Ty;
  if (StoreInst *Store = dyn_cast<StoreInst>(Inst))
    Ty = Store->getValueOperand()->getType();
  else if (LoadInst *Load = dyn_cast<LoadInst>(Inst))
    Ty = Load->getType();
  else
    return nullptr;

  Type *ETy = getEffectiveSCEVType(PointerType::getUnqual(Ty));
  return getSizeOfExpr(ETy, Ty);
}
void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms,
                                          SmallVectorImpl<const SCEV *> &Sizes,
                                          const SCEV *ElementSize) {
  if (Terms.size() < 1 || !ElementSize)
    return;

  // Early return when Terms do not contain parameters: we do not delinearize
  // non parametric SCEVs.
  if (!containsParameters(Terms))
    return;

  LLVM_DEBUG({
    dbgs() << "Terms:\n";
    for (const SCEV *T : Terms)
      dbgs() << *T << "\n";
  });

  // Remove duplicates.
  array_pod_sort(Terms.begin(), Terms.end());
  Terms.erase(std::unique(Terms.begin(), Terms.end()), Terms.end());

  // Put larger terms first.
  llvm::sort(Terms, [](const SCEV *LHS, const SCEV *RHS) {
    return numberOfTerms(LHS) > numberOfTerms(RHS);
  });

  // Try to divide all terms by the element size. If term is not divisible by
  // element size, proceed with the original term.
  for (const SCEV *&Term : Terms) {
    const SCEV *Q, *R;
    SCEVDivision::divide(*this, Term, ElementSize, &Q, &R);
    if (!Q->isZero())
      Term = Q;
  }

  SmallVector<const SCEV *, 4> NewTerms;

  // Remove constant factors.
  for (const SCEV *T : Terms)
    if (const SCEV *NewT = removeConstantFactors(*this, T))
      NewTerms.push_back(NewT);

  LLVM_DEBUG({
    dbgs() << "Terms after sorting:\n";
    for (const SCEV *T : NewTerms)
      dbgs() << *T << "\n";
  });

  if (NewTerms.empty() || !findArrayDimensionsRec(*this, NewTerms, Sizes)) {
    Sizes.clear();
    return;
  }

  // The last element to be pushed into Sizes is the size of an element.
  Sizes.push_back(ElementSize);

  LLVM_DEBUG({
    dbgs() << "Sizes:\n";
    for (const SCEV *S : Sizes)
      dbgs() << *S << "\n";
  });
}
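
// Continuing the delinearization example documented before delinearize()
// below: for an access with strides 8 * %m * %o, 8 * %o and 8 over an array
// of doubles, the collected terms divided by the element size 8 become
// {%m * %o, %o}. findArrayDimensionsRec then recovers Sizes = {%m, %o}, and
// the element size is appended last, matching the printed form
// ArrayDecl[UnknownSize][%m][%o] with elements of sizeof(double) bytes.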
void ScalarEvolution::computeAccessFunctions(
    const SCEV *Expr, SmallVectorImpl<const SCEV *> &Subscripts,
    SmallVectorImpl<const SCEV *> &Sizes) {
  // Early exit in case this SCEV is not an affine multivariate function.
  if (Sizes.empty())
    return;

  if (auto *AR = dyn_cast<SCEVAddRecExpr>(Expr))
    if (!AR->isAffine())
      return;

  const SCEV *Res = Expr;
  int Last = Sizes.size() - 1;
  for (int i = Last; i >= 0; i--) {
    const SCEV *Q, *R;
    SCEVDivision::divide(*this, Res, Sizes[i], &Q, &R);

    LLVM_DEBUG({
      dbgs() << "Res: " << *Res << "\n";
      dbgs() << "Sizes[i]: " << *Sizes[i] << "\n";
      dbgs() << "Res divided by Sizes[i]:\n";
      dbgs() << "Quotient: " << *Q << "\n";
      dbgs() << "Remainder: " << *R << "\n";
    });

    Res = Q;

    // Do not record the last subscript corresponding to the size of elements
    // in the array.
    if (i == Last) {
      // Bail out if the remainder is too complex.
      if (isa<SCEVAddRecExpr>(R)) {
        Subscripts.clear();
        Sizes.clear();
        return;
      }

      continue;
    }

    // Record the access function for the current subscript.
    Subscripts.push_back(R);
  }

  // Also push in last position the remainder of the last division: it will be
  // the access function of the innermost dimension.
  Subscripts.push_back(Res);

  std::reverse(Subscripts.begin(), Subscripts.end());

  LLVM_DEBUG({
    dbgs() << "Subscripts:\n";
    for (const SCEV *S : Subscripts)
      dbgs() << *S << "\n";
  });
}
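
// For the same example, the access functions fall out of the chain of
// divisions: Res is divided by each entry of Sizes from innermost to
// outermost, the remainder of each division after the one by the element
// size is recorded as the subscript of that dimension, and the final
// quotient becomes the outermost subscript. Reversing the vector then yields
// ArrayRef[{0,+,1}<%for.i>][{0,+,1}<%for.j>][{0,+,1}<%for.k>].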
/// Splits the SCEV into two vectors of SCEVs representing the subscripts and
/// sizes of an array access. Returns the remainder of the delinearization
/// that is the offset start of the array. The SCEV->delinearize algorithm
/// computes the multiples of SCEV coefficients: that is a pattern matching of
/// sub expressions in the stride and base of a SCEV corresponding to the
/// computation of a GCD (greatest common divisor) of base and stride. When
/// SCEV->delinearize fails, it returns the SCEV unchanged.
///
/// For example: when analyzing the memory access A[i][j][k] in this loop nest
///
///  void foo(long n, long m, long o, double A[n][m][o]) {
///
///    for (long i = 0; i < n; i++)
///      for (long j = 0; j < m; j++)
///        for (long k = 0; k < o; k++)
///          A[i][j][k] = 1.0;
///  }
///
/// the delinearization input is the following AddRec SCEV:
///
///  AddRec: {{{%A,+,(8 * %m * %o)}<%for.i>,+,(8 * %o)}<%for.j>,+,8}<%for.k>
///
/// From this SCEV, we are able to say that the base offset of the access is
/// %A because it appears as an offset that does not divide any of the strides
/// in the loops:
///
///  CHECK: Base offset: %A
///
/// and then SCEV->delinearize determines the size of some of the dimensions
/// of the array as these are the multiples by which the strides are
/// happening:
///
///  CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of sizeof(double)
///  bytes.
///
/// Note that the outermost dimension remains of UnknownSize because there are
/// no strides that would help identifying the size of the last dimension:
/// when the array has been statically allocated, one could compute the size
/// of that dimension by dividing the overall size of the array by the size of
/// the known dimensions: %m * %o * 8.
///
/// Finally delinearize provides the access functions for the array reference
/// that does correspond to A[i][j][k] of the above C testcase:
///
///  CHECK: ArrayRef[{0,+,1}<%for.i>][{0,+,1}<%for.j>][{0,+,1}<%for.k>]
///
/// The testcases are checking the output of a function pass:
/// DelinearizationPass that walks through all loads and stores of a function
/// asking for the SCEV of the memory access with respect to all enclosing
/// loops, calling SCEV->delinearize on that and printing the results.
void ScalarEvolution::delinearize(const SCEV *Expr,
                                  SmallVectorImpl<const SCEV *> &Subscripts,
                                  SmallVectorImpl<const SCEV *> &Sizes,
                                  const SCEV *ElementSize) {
  // First step: collect parametric terms.
  SmallVector<const SCEV *, 4> Terms;
  collectParametricTerms(Expr, Terms);

  if (Terms.empty())
    return;

  // Second step: find subscript sizes.
  findArrayDimensions(Terms, Sizes, ElementSize);

  if (Sizes.empty())
    return;

  // Third step: compute the access functions for each subscript.
  computeAccessFunctions(Expr, Subscripts, Sizes);

  if (Subscripts.empty())
    return;

  LLVM_DEBUG({
    dbgs() << "succeeded to delinearize " << *Expr << "\n";
    dbgs() << "ArrayDecl[UnknownSize]";
    for (const SCEV *S : Sizes)
      dbgs() << "[" << *S << "]";

    dbgs() << "\nArrayRef";
    for (const SCEV *S : Subscripts)
      dbgs() << "[" << *S << "]";
    dbgs() << "\n";
  });
}
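
// A minimal usage sketch of the three-step pipeline above, roughly how a
// client pass would invoke it (illustrative only; `MemInst` stands for some
// load or store instruction the caller has already picked out inside loop L):
//
//   const SCEV *AccessFn = SE.getSCEVAtScope(getPointerOperand(MemInst), L);
//   const SCEV *BasePtr = SE.getPointerBase(AccessFn);
//   AccessFn = SE.getMinusSCEV(AccessFn, BasePtr);
//   SmallVector<const SCEV *, 3> Subscripts, Sizes;
//   SE.delinearize(AccessFn, Subscripts, Sizes, SE.getElementSize(MemInst));
//   // On failure both vectors are left empty, so callers must check:
//   if (Subscripts.empty() || Sizes.empty())
//     return;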
//===----------------------------------------------------------------------===//
//                   SCEVCallbackVH Class Implementation
//===----------------------------------------------------------------------===//

void ScalarEvolution::SCEVCallbackVH::deleted() {
  assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
  if (PHINode *PN = dyn_cast<PHINode>(getValPtr()))
    SE->ConstantEvolutionLoopExitValue.erase(PN);
  SE->eraseValueFromMap(getValPtr());
  // this now dangles!
}

void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) {
  assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");

  // Forget all the expressions associated with users of the old value,
  // so that future queries will recompute the expressions using the new
  // value.
  Value *Old = getValPtr();
  SmallVector<User *, 16> Worklist(Old->user_begin(), Old->user_end());
  SmallPtrSet<User *, 8> Visited;
  while (!Worklist.empty()) {
    User *U = Worklist.pop_back_val();
    // Deleting the Old value will cause this to dangle. Postpone
    // that until everything else is done.
    if (U == Old)
      continue;
    if (!Visited.insert(U).second)
      continue;
    if (PHINode *PN = dyn_cast<PHINode>(U))
      SE->ConstantEvolutionLoopExitValue.erase(PN);
    SE->eraseValueFromMap(U);
    Worklist.insert(Worklist.end(), U->user_begin(), U->user_end());
  }
  // Delete the Old value.
  if (PHINode *PN = dyn_cast<PHINode>(Old))
    SE->ConstantEvolutionLoopExitValue.erase(PN);
  SE->eraseValueFromMap(Old);
  // this now dangles!
}

ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se)
    : CallbackVH(V), SE(se) {}
//===----------------------------------------------------------------------===//
//                   ScalarEvolution Class Implementation
//===----------------------------------------------------------------------===//

ScalarEvolution::ScalarEvolution(Function &F, TargetLibraryInfo &TLI,
                                 AssumptionCache &AC, DominatorTree &DT,
                                 LoopInfo &LI)
    : F(F), TLI(TLI), AC(AC), DT(DT), LI(LI),
      CouldNotCompute(new SCEVCouldNotCompute()), ValuesAtScopes(64),
      LoopDispositions(64), BlockDispositions(64) {
  // To use guards for proving predicates, we need to scan every instruction
  // in relevant basic blocks, and not just terminators.  Doing this is a
  // waste of time if the IR does not actually contain any calls to
  // @llvm.experimental.guard, so do a quick check and remember this
  // beforehand.
  //
  // This pessimizes the case where a pass that preserves ScalarEvolution
  // wants to _add_ guards to the module when there weren't any before, and
  // wants ScalarEvolution to optimize based on those guards.  For now we
  // prefer to be efficient in lieu of being smart in that rather obscure
  // case.

  auto *GuardDecl = F.getParent()->getFunction(
      Intrinsic::getName(Intrinsic::experimental_guard));
  HasGuards = GuardDecl && !GuardDecl->use_empty();
}

ScalarEvolution::ScalarEvolution(ScalarEvolution &&Arg)
    : F(Arg.F), HasGuards(Arg.HasGuards), TLI(Arg.TLI), AC(Arg.AC), DT(Arg.DT),
      LI(Arg.LI), CouldNotCompute(std::move(Arg.CouldNotCompute)),
      ValueExprMap(std::move(Arg.ValueExprMap)),
      PendingLoopPredicates(std::move(Arg.PendingLoopPredicates)),
      PendingPhiRanges(std::move(Arg.PendingPhiRanges)),
      PendingMerges(std::move(Arg.PendingMerges)),
      MinTrailingZerosCache(std::move(Arg.MinTrailingZerosCache)),
      BackedgeTakenCounts(std::move(Arg.BackedgeTakenCounts)),
      PredicatedBackedgeTakenCounts(
          std::move(Arg.PredicatedBackedgeTakenCounts)),
      ConstantEvolutionLoopExitValue(
          std::move(Arg.ConstantEvolutionLoopExitValue)),
      ValuesAtScopes(std::move(Arg.ValuesAtScopes)),
      LoopDispositions(std::move(Arg.LoopDispositions)),
      LoopPropertiesCache(std::move(Arg.LoopPropertiesCache)),
      BlockDispositions(std::move(Arg.BlockDispositions)),
      UnsignedRanges(std::move(Arg.UnsignedRanges)),
      SignedRanges(std::move(Arg.SignedRanges)),
      UniqueSCEVs(std::move(Arg.UniqueSCEVs)),
      UniquePreds(std::move(Arg.UniquePreds)),
      SCEVAllocator(std::move(Arg.SCEVAllocator)),
      LoopUsers(std::move(Arg.LoopUsers)),
      PredicatedSCEVRewrites(std::move(Arg.PredicatedSCEVRewrites)),
      FirstUnknown(Arg.FirstUnknown) {
  Arg.FirstUnknown = nullptr;
}
ScalarEvolution::~ScalarEvolution() {
  // Iterate through all the SCEVUnknown instances and call their
  // destructors, so that they release their references to their values.
  for (SCEVUnknown *U = FirstUnknown; U;) {
    SCEVUnknown *Tmp = U;
    U = U->Next;
    Tmp->~SCEVUnknown();
  }
  FirstUnknown = nullptr;

  ExprValueMap.clear();
  ValueExprMap.clear();
  HasRecMap.clear();

  // Free any extra memory created for ExitNotTakenInfo in the unlikely event
  // that a loop had multiple computable exits.
  for (auto &BTCI : BackedgeTakenCounts)
    BTCI.second.clear();
  for (auto &BTCI : PredicatedBackedgeTakenCounts)
    BTCI.second.clear();

  assert(PendingLoopPredicates.empty() && "isImpliedCond garbage");
  assert(PendingPhiRanges.empty() && "getRangeRef garbage");
  assert(PendingMerges.empty() && "isImpliedViaMerge garbage");
  assert(!WalkingBEDominatingConds && "isLoopBackedgeGuardedByCond garbage!");
  assert(!ProvingSplitPredicate && "ProvingSplitPredicate garbage!");
}
bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) {
  return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L));
}
static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE,
                          const Loop *L) {
  // Print all inner loops first
  for (Loop *I : *L)
    PrintLoopInfo(OS, SE, I);

  OS << "Loop ";
  L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
  OS << ": ";

  SmallVector<BasicBlock *, 8> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);
  if (ExitingBlocks.size() != 1)
    OS << "<multiple exits> ";

  if (SE->hasLoopInvariantBackedgeTakenCount(L))
    OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L) << "\n";
  else
    OS << "Unpredictable backedge-taken count.\n";

  if (ExitingBlocks.size() > 1)
    for (BasicBlock *ExitingBlock : ExitingBlocks) {
      OS << "  exit count for " << ExitingBlock->getName() << ": "
         << *SE->getExitCount(L, ExitingBlock) << "\n";
    }

  OS << "Loop ";
  L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
  OS << ": ";

  if (!isa<SCEVCouldNotCompute>(SE->getMaxBackedgeTakenCount(L))) {
    OS << "max backedge-taken count is " << *SE->getMaxBackedgeTakenCount(L);
    if (SE->isBackedgeTakenCountMaxOrZero(L))
      OS << ", actual taken count either this or zero.";
  } else {
    OS << "Unpredictable max backedge-taken count. ";
  }

  OS << "\n"
        "Loop ";
  L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
  OS << ": ";

  SCEVUnionPredicate Pred;
  auto PBT = SE->getPredicatedBackedgeTakenCount(L, Pred);
  if (!isa<SCEVCouldNotCompute>(PBT)) {
    OS << "Predicated backedge-taken count is " << *PBT << "\n";
    OS << " Predicates:\n";
    Pred.print(OS, 4);
  } else {
    OS << "Unpredictable predicated backedge-taken count. ";
  }
  OS << "\n";

  if (SE->hasLoopInvariantBackedgeTakenCount(L)) {
    OS << "Loop ";
    L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
    OS << ": ";
    OS << "Trip multiple is " << SE->getSmallConstantTripMultiple(L) << "\n";
  }
}
static StringRef loopDispositionToStr(ScalarEvolution::LoopDisposition LD) {
  switch (LD) {
  case ScalarEvolution::LoopVariant:
    return "Variant";
  case ScalarEvolution::LoopInvariant:
    return "Invariant";
  case ScalarEvolution::LoopComputable:
    return "Computable";
  }
  llvm_unreachable("Unknown ScalarEvolution::LoopDisposition kind!");
}
void ScalarEvolution::print(raw_ostream &OS) const {
  // ScalarEvolution's implementation of the print method is to print
  // out SCEV values of all instructions that are interesting. Doing
  // this potentially causes it to create new SCEV objects though,
  // which technically conflicts with the const qualifier. This isn't
  // observable from outside the class though, so casting away the
  // const isn't dangerous.
  ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);

  OS << "Classifying expressions for: ";
  F.printAsOperand(OS, /*PrintType=*/false);
  OS << "\n";
  for (Instruction &I : instructions(F))
    if (isSCEVable(I.getType()) && !isa<CmpInst>(I)) {
      OS << I << '\n';
      OS << "  -->  ";
      const SCEV *SV = SE.getSCEV(&I);
      SV->print(OS);
      if (!isa<SCEVCouldNotCompute>(SV)) {
        OS << " U: ";
        SE.getUnsignedRange(SV).print(OS);
        OS << " S: ";
        SE.getSignedRange(SV).print(OS);
      }

      const Loop *L = LI.getLoopFor(I.getParent());

      const SCEV *AtUse = SE.getSCEVAtScope(SV, L);
      if (AtUse != SV) {
        OS << "  -->  ";
        AtUse->print(OS);
        if (!isa<SCEVCouldNotCompute>(AtUse)) {
          OS << " U: ";
          SE.getUnsignedRange(AtUse).print(OS);
          OS << " S: ";
          SE.getSignedRange(AtUse).print(OS);
        }
      }

      if (L) {
        OS << "\t\t" "Exits: ";
        const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop());
        if (!SE.isLoopInvariant(ExitValue, L)) {
          OS << "<<Unknown>>";
        } else {
          OS << *ExitValue;
        }

        bool First = true;
        for (auto *Iter = L; Iter; Iter = Iter->getParentLoop()) {
          if (First) {
            OS << "\t\t" "LoopDispositions: { ";
            First = false;
          } else {
            OS << ", ";
          }

          Iter->getHeader()->printAsOperand(OS, /*PrintType=*/false);
          OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, Iter));
        }

        for (auto *InnerL : depth_first(L)) {
          if (InnerL == L)
            continue;
          if (First) {
            OS << "\t\t" "LoopDispositions: { ";
            First = false;
          } else {
            OS << ", ";
          }

          InnerL->getHeader()->printAsOperand(OS, /*PrintType=*/false);
          OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, InnerL));
        }

        OS << " }";
      }

      OS << "\n";
    }

  OS << "Determining loop execution counts for: ";
  F.printAsOperand(OS, /*PrintType=*/false);
  OS << "\n";
  for (Loop *I : LI)
    PrintLoopInfo(OS, &SE, I);
}
ScalarEvolution::LoopDisposition
ScalarEvolution::getLoopDisposition(const SCEV *S, const Loop *L) {
  auto &Values = LoopDispositions[S];
  for (auto &V : Values) {
    if (V.getPointer() == L)
      return V.getInt();
  }
  Values.emplace_back(L, LoopVariant);
  LoopDisposition D = computeLoopDisposition(S, L);
  auto &Values2 = LoopDispositions[S];
  for (auto &V : make_range(Values2.rbegin(), Values2.rend())) {
    if (V.getPointer() == L) {
      V.setInt(D);
      break;
    }
  }
  return D;
}
ScalarEvolution::LoopDisposition
ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) {
  switch (static_cast<SCEVTypes>(S->getSCEVType())) {
  case scConstant:
    return LoopInvariant;
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return getLoopDisposition(cast<SCEVCastExpr>(S)->getOperand(), L);
  case scAddRecExpr: {
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);

    // If L is the addrec's loop, it's computable.
    if (AR->getLoop() == L)
      return LoopComputable;

    // Add recurrences are never invariant in the function-body (null loop).
    if (!L)
      return LoopVariant;

    // Everything that is not defined at loop entry is variant.
    if (DT.dominates(L->getHeader(), AR->getLoop()->getHeader()))
      return LoopVariant;
    assert(!L->contains(AR->getLoop()) && "Containing loop's header does not"
           " dominate the contained loop's header?");

    // This recurrence is invariant w.r.t. L if AR's loop contains L.
    if (AR->getLoop()->contains(L))
      return LoopInvariant;

    // This recurrence is variant w.r.t. L if any of its operands
    // are variant.
    for (auto *Op : AR->operands())
      if (!isLoopInvariant(Op, L))
        return LoopVariant;

    // Otherwise it's loop-invariant.
    return LoopInvariant;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr: {
    bool HasVarying = false;
    for (auto *Op : cast<SCEVNAryExpr>(S)->operands()) {
      LoopDisposition D = getLoopDisposition(Op, L);
      if (D == LoopVariant)
        return LoopVariant;
      if (D == LoopComputable)
        HasVarying = true;
    }
    return HasVarying ? LoopComputable : LoopInvariant;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
    LoopDisposition LD = getLoopDisposition(UDiv->getLHS(), L);
    if (LD == LoopVariant)
      return LoopVariant;
    LoopDisposition RD = getLoopDisposition(UDiv->getRHS(), L);
    if (RD == LoopVariant)
      return LoopVariant;
    return (LD == LoopInvariant && RD == LoopInvariant) ?
           LoopInvariant : LoopComputable;
  }
  case scUnknown:
    // All non-instruction values are loop invariant.  All instructions are
    // loop invariant if they are not contained in the specified loop.
    // Instructions are never considered invariant in the function body
    // (null loop) because they are defined within the "loop".
    if (auto *I = dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue()))
      return (L && !L->contains(I)) ? LoopInvariant : LoopVariant;
    return LoopInvariant;
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}
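
// To make the rules above concrete: given a hypothetical nest where `Outer`
// contains `Inner`, the recurrence {0,+,1}<Outer> is LoopComputable w.r.t.
// Outer, and LoopInvariant w.r.t. Inner because its defining loop contains
// Inner, so its value is fixed throughout any one Outer iteration. By
// contrast, {0,+,1}<Inner> is LoopVariant w.r.t. Outer since it takes a fresh
// sequence of values on every Outer iteration.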
bool ScalarEvolution::isLoopInvariant(const SCEV *S, const Loop *L) {
  return getLoopDisposition(S, L) == LoopInvariant;
}

bool ScalarEvolution::hasComputableLoopEvolution(const SCEV *S,
                                                 const Loop *L) {
  return getLoopDisposition(S, L) == LoopComputable;
}
ScalarEvolution::BlockDisposition
ScalarEvolution::getBlockDisposition(const SCEV *S, const BasicBlock *BB) {
  auto &Values = BlockDispositions[S];
  for (auto &V : Values) {
    if (V.getPointer() == BB)
      return V.getInt();
  }
  Values.emplace_back(BB, DoesNotDominateBlock);
  BlockDisposition D = computeBlockDisposition(S, BB);
  auto &Values2 = BlockDispositions[S];
  for (auto &V : make_range(Values2.rbegin(), Values2.rend())) {
    if (V.getPointer() == BB) {
      V.setInt(D);
      break;
    }
  }
  return D;
}
ScalarEvolution::BlockDisposition
ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) {
  switch (static_cast<SCEVTypes>(S->getSCEVType())) {
  case scConstant:
    return ProperlyDominatesBlock;
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return getBlockDisposition(cast<SCEVCastExpr>(S)->getOperand(), BB);
  case scAddRecExpr: {
    // This uses a "dominates" query instead of "properly dominates" query
    // to test for proper dominance too, because the instruction which
    // produces the addrec's value is a PHI, and a PHI effectively properly
    // dominates its entire containing block.
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);
    if (!DT.dominates(AR->getLoop()->getHeader(), BB))
      return DoesNotDominateBlock;

    // Fall through into SCEVNAryExpr handling.
    LLVM_FALLTHROUGH;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr: {
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S);
    bool Proper = true;
    for (const SCEV *NAryOp : NAry->operands()) {
      BlockDisposition D = getBlockDisposition(NAryOp, BB);
      if (D == DoesNotDominateBlock)
        return DoesNotDominateBlock;
      if (D == DominatesBlock)
        Proper = false;
    }
    return Proper ? ProperlyDominatesBlock : DominatesBlock;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
    const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS();
    BlockDisposition LD = getBlockDisposition(LHS, BB);
    if (LD == DoesNotDominateBlock)
      return DoesNotDominateBlock;
    BlockDisposition RD = getBlockDisposition(RHS, BB);
    if (RD == DoesNotDominateBlock)
      return DoesNotDominateBlock;
    return (LD == ProperlyDominatesBlock && RD == ProperlyDominatesBlock) ?
           ProperlyDominatesBlock : DominatesBlock;
  }
  case scUnknown:
    if (Instruction *I =
            dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) {
      if (I->getParent() == BB)
        return DominatesBlock;
      if (DT.properlyDominates(I->getParent(), BB))
        return ProperlyDominatesBlock;
      return DoesNotDominateBlock;
    }
    return ProperlyDominatesBlock;
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}
bool ScalarEvolution::dominates(const SCEV *S, const BasicBlock *BB) {
  return getBlockDisposition(S, BB) >= DominatesBlock;
}

bool ScalarEvolution::properlyDominates(const SCEV *S, const BasicBlock *BB) {
  return getBlockDisposition(S, BB) == ProperlyDominatesBlock;
}

bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const {
  return SCEVExprContains(S, [&](const SCEV *Expr) { return Expr == Op; });
}

bool ScalarEvolution::ExitLimit::hasOperand(const SCEV *S) const {
  auto IsS = [&](const SCEV *X) { return S == X; };
  auto ContainsS = [&](const SCEV *X) {
    return !isa<SCEVCouldNotCompute>(X) && SCEVExprContains(X, IsS);
  };
  return ContainsS(ExactNotTaken) || ContainsS(MaxNotTaken);
}
void
ScalarEvolution::forgetMemoizedResults(const SCEV *S) {
  ValuesAtScopes.erase(S);
  LoopDispositions.erase(S);
  BlockDispositions.erase(S);
  UnsignedRanges.erase(S);
  SignedRanges.erase(S);
  ExprValueMap.erase(S);
  HasRecMap.erase(S);
  MinTrailingZerosCache.erase(S);

  for (auto I = PredicatedSCEVRewrites.begin();
       I != PredicatedSCEVRewrites.end();) {
    std::pair<const SCEV *, const Loop *> Entry = I->first;
    if (Entry.first == S)
      PredicatedSCEVRewrites.erase(I++);
    else
      ++I;
  }

  auto RemoveSCEVFromBackedgeMap =
      [S, this](DenseMap<const Loop *, BackedgeTakenInfo> &Map) {
        for (auto I = Map.begin(), E = Map.end(); I != E;) {
          BackedgeTakenInfo &BEInfo = I->second;
          if (BEInfo.hasOperand(S, this)) {
            BEInfo.clear();
            Map.erase(I++);
          } else
            ++I;
        }
      };

  RemoveSCEVFromBackedgeMap(BackedgeTakenCounts);
  RemoveSCEVFromBackedgeMap(PredicatedBackedgeTakenCounts);
}
void
ScalarEvolution::getUsedLoops(const SCEV *S,
                              SmallPtrSetImpl<const Loop *> &LoopsUsed) {
  struct FindUsedLoops {
    FindUsedLoops(SmallPtrSetImpl<const Loop *> &LoopsUsed)
        : LoopsUsed(LoopsUsed) {}
    SmallPtrSetImpl<const Loop *> &LoopsUsed;
    bool follow(const SCEV *S) {
      if (auto *AR = dyn_cast<SCEVAddRecExpr>(S))
        LoopsUsed.insert(AR->getLoop());
      return true;
    }

    bool isDone() const { return false; }
  };

  FindUsedLoops F(LoopsUsed);
  SCEVTraversal<FindUsedLoops>(F).visitAll(S);
}
void ScalarEvolution::addToLoopUseLists(const SCEV *S) {
  SmallPtrSet<const Loop *, 8> LoopsUsed;
  getUsedLoops(S, LoopsUsed);
  for (auto *L : LoopsUsed)
    LoopUsers[L].push_back(S);
}
void ScalarEvolution::verify() const {
  ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
  ScalarEvolution SE2(F, TLI, AC, DT, LI);

  SmallVector<Loop *, 8> LoopStack(LI.begin(), LI.end());

  // Maps SCEV expressions from one ScalarEvolution "universe" to another.
  struct SCEVMapper : public SCEVRewriteVisitor<SCEVMapper> {
    SCEVMapper(ScalarEvolution &SE) : SCEVRewriteVisitor<SCEVMapper>(SE) {}

    const SCEV *visitConstant(const SCEVConstant *Constant) {
      return SE.getConstant(Constant->getAPInt());
    }

    const SCEV *visitUnknown(const SCEVUnknown *Expr) {
      return SE.getUnknown(Expr->getValue());
    }

    const SCEV *visitCouldNotCompute(const SCEVCouldNotCompute *Expr) {
      return SE.getCouldNotCompute();
    }
  };

  SCEVMapper SCM(SE2);

  while (!LoopStack.empty()) {
    auto *L = LoopStack.pop_back_val();
    LoopStack.insert(LoopStack.end(), L->begin(), L->end());

    auto *CurBECount = SCM.visit(
        const_cast<ScalarEvolution *>(this)->getBackedgeTakenCount(L));
    auto *NewBECount = SE2.getBackedgeTakenCount(L);

    if (CurBECount == SE2.getCouldNotCompute() ||
        NewBECount == SE2.getCouldNotCompute()) {
      // NB! This situation is legal, but is very suspicious -- whatever pass
      // changes the loop to make a trip count go from could not compute to
      // computable or vice-versa *should have* invalidated SCEV.  However, we
      // choose not to assert here (for now) since we don't want false
      // positives.
      continue;
    }

    if (containsUndefs(CurBECount) || containsUndefs(NewBECount)) {
      // SCEV treats "undef" as an unknown but consistent value (i.e. it does
      // not propagate undef aggressively).  This means we can (and do) fail
      // verification in cases where a transform makes the trip count of a
      // loop go from "undef" to "undef+1" (say).  The transform is fine,
      // since in both cases the loop iterates "undef" times, but SCEV thinks
      // we increased the trip count of the loop by 1 incorrectly.
      continue;
    }

    if (SE.getTypeSizeInBits(CurBECount->getType()) >
        SE.getTypeSizeInBits(NewBECount->getType()))
      NewBECount = SE2.getZeroExtendExpr(NewBECount, CurBECount->getType());
    else if (SE.getTypeSizeInBits(CurBECount->getType()) <
             SE.getTypeSizeInBits(NewBECount->getType()))
      CurBECount = SE2.getZeroExtendExpr(CurBECount, NewBECount->getType());

    auto *ConstantDelta =
        dyn_cast<SCEVConstant>(SE2.getMinusSCEV(CurBECount, NewBECount));

    if (ConstantDelta && ConstantDelta->getAPInt() != 0) {
      dbgs() << "Trip Count Changed!\n";
      dbgs() << "Old: " << *CurBECount << "\n";
      dbgs() << "New: " << *NewBECount << "\n";
      dbgs() << "Delta: " << *ConstantDelta << "\n";
      std::abort();
    }
  }
}
bool ScalarEvolution::invalidate(
    Function &F, const PreservedAnalyses &PA,
    FunctionAnalysisManager::Invalidator &Inv) {
  // Invalidate the ScalarEvolution object whenever it isn't preserved or one
  // of its dependencies is invalidated.
  auto PAC = PA.getChecker<ScalarEvolutionAnalysis>();
  return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) ||
         Inv.invalidate<AssumptionAnalysis>(F, PA) ||
         Inv.invalidate<DominatorTreeAnalysis>(F, PA) ||
         Inv.invalidate<LoopAnalysis>(F, PA);
}
AnalysisKey ScalarEvolutionAnalysis::Key;

ScalarEvolution ScalarEvolutionAnalysis::run(Function &F,
                                             FunctionAnalysisManager &AM) {
  return ScalarEvolution(F, AM.getResult<TargetLibraryAnalysis>(F),
                         AM.getResult<AssumptionAnalysis>(F),
                         AM.getResult<DominatorTreeAnalysis>(F),
                         AM.getResult<LoopAnalysis>(F));
}

PreservedAnalyses
ScalarEvolutionPrinterPass::run(Function &F, FunctionAnalysisManager &AM) {
  AM.getResult<ScalarEvolutionAnalysis>(F).print(OS);
  return PreservedAnalyses::all();
}
INITIALIZE_PASS_BEGIN(ScalarEvolutionWrapperPass, "scalar-evolution",
                      "Scalar Evolution Analysis", false, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(ScalarEvolutionWrapperPass, "scalar-evolution",
                    "Scalar Evolution Analysis", false, true)

char ScalarEvolutionWrapperPass::ID = 0;

ScalarEvolutionWrapperPass::ScalarEvolutionWrapperPass() : FunctionPass(ID) {
  initializeScalarEvolutionWrapperPassPass(*PassRegistry::getPassRegistry());
}
bool ScalarEvolutionWrapperPass::runOnFunction(Function &F) {
  SE.reset(new ScalarEvolution(
      F, getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(),
      getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F),
      getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
      getAnalysis<LoopInfoWrapperPass>().getLoopInfo()));
  return false;
}

void ScalarEvolutionWrapperPass::releaseMemory() { SE.reset(); }

void ScalarEvolutionWrapperPass::print(raw_ostream &OS, const Module *) const {
  SE->print(OS);
}

void ScalarEvolutionWrapperPass::verifyAnalysis() const {
  if (!VerifySCEV)
    return;

  SE->verify();
}

void ScalarEvolutionWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<AssumptionCacheTracker>();
  AU.addRequiredTransitive<LoopInfoWrapperPass>();
  AU.addRequiredTransitive<DominatorTreeWrapperPass>();
  AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
}
const SCEVPredicate *ScalarEvolution::getEqualPredicate(const SCEV *LHS,
                                                        const SCEV *RHS) {
  FoldingSetNodeID ID;
  assert(LHS->getType() == RHS->getType() &&
         "Type mismatch between LHS and RHS");
  // Unique this node based on the arguments
  ID.AddInteger(SCEVPredicate::P_Equal);
  ID.AddPointer(LHS);
  ID.AddPointer(RHS);
  void *IP = nullptr;
  if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP))
    return S;
  SCEVEqualPredicate *Eq = new (SCEVAllocator)
      SCEVEqualPredicate(ID.Intern(SCEVAllocator), LHS, RHS);
  UniquePreds.InsertNode(Eq, IP);
  return Eq;
}
*ScalarEvolution::getWrapPredicate(
12009 const SCEVAddRecExpr
*AR
,
12010 SCEVWrapPredicate::IncrementWrapFlags AddedFlags
) {
12011 FoldingSetNodeID ID
;
12012 // Unique this node based on the arguments
12013 ID
.AddInteger(SCEVPredicate::P_Wrap
);
12015 ID
.AddInteger(AddedFlags
);
12016 void *IP
= nullptr;
12017 if (const auto *S
= UniquePreds
.FindNodeOrInsertPos(ID
, IP
))
12019 auto *OF
= new (SCEVAllocator
)
12020 SCEVWrapPredicate(ID
.Intern(SCEVAllocator
), AR
, AddedFlags
);
12021 UniquePreds
.InsertNode(OF
, IP
);
namespace {

class SCEVPredicateRewriter
    : public SCEVRewriteVisitor<SCEVPredicateRewriter> {
public:
  /// Rewrites \p S in the context of a loop L and the SCEV predication
  /// infrastructure.
  ///
  /// If \p Pred is non-null, the SCEV expression is rewritten to respect the
  /// equivalences present in \p Pred.
  ///
  /// If \p NewPreds is non-null, rewrite is free to add further predicates to
  /// \p NewPreds such that the result will be an AddRecExpr.
  static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE,
                             SmallPtrSetImpl<const SCEVPredicate *> *NewPreds,
                             SCEVUnionPredicate *Pred) {
    SCEVPredicateRewriter Rewriter(L, SE, NewPreds, Pred);
    return Rewriter.visit(S);
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    if (Pred) {
      auto ExprPreds = Pred->getPredicatesForExpr(Expr);
      for (auto *Pred : ExprPreds)
        if (const auto *IPred = dyn_cast<SCEVEqualPredicate>(Pred))
          if (IPred->getLHS() == Expr)
            return IPred->getRHS();
    }
    return convertToAddRecWithPreds(Expr);
  }

  const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) {
    const SCEV *Operand = visit(Expr->getOperand());
    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand);
    if (AR && AR->getLoop() == L && AR->isAffine()) {
      // This couldn't be folded because the operand didn't have the nuw
      // flag. Add the nusw flag as an assumption that we could make.
      const SCEV *Step = AR->getStepRecurrence(SE);
      Type *Ty = Expr->getType();
      if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNUSW))
        return SE.getAddRecExpr(SE.getZeroExtendExpr(AR->getStart(), Ty),
                                SE.getSignExtendExpr(Step, Ty), L,
                                AR->getNoWrapFlags());
    }
    return SE.getZeroExtendExpr(Operand, Expr->getType());
  }

  const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) {
    const SCEV *Operand = visit(Expr->getOperand());
    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand);
    if (AR && AR->getLoop() == L && AR->isAffine()) {
      // This couldn't be folded because the operand didn't have the nsw
      // flag. Add the nssw flag as an assumption that we could make.
      const SCEV *Step = AR->getStepRecurrence(SE);
      Type *Ty = Expr->getType();
      if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNSSW))
        return SE.getAddRecExpr(SE.getSignExtendExpr(AR->getStart(), Ty),
                                SE.getSignExtendExpr(Step, Ty), L,
                                AR->getNoWrapFlags());
    }
    return SE.getSignExtendExpr(Operand, Expr->getType());
  }

private:
  explicit SCEVPredicateRewriter(const Loop *L, ScalarEvolution &SE,
                                 SmallPtrSetImpl<const SCEVPredicate *> *NewPreds,
                                 SCEVUnionPredicate *Pred)
      : SCEVRewriteVisitor(SE), NewPreds(NewPreds), Pred(Pred), L(L) {}

  bool addOverflowAssumption(const SCEVPredicate *P) {
    if (!NewPreds) {
      // Check if we've already made this assumption.
      return Pred && Pred->implies(P);
    }
    NewPreds->insert(P);
    return true;
  }

  bool addOverflowAssumption(const SCEVAddRecExpr *AR,
                             SCEVWrapPredicate::IncrementWrapFlags AddedFlags) {
    auto *A = SE.getWrapPredicate(AR, AddedFlags);
    return addOverflowAssumption(A);
  }

  // If \p Expr represents a PHINode, we try to see if it can be represented
  // as an AddRec, possibly under a predicate (PHISCEVPred). If it is possible
  // to add this predicate as a runtime overflow check, we return the AddRec.
  // If \p Expr does not meet these conditions (is not a PHI node, or we
  // couldn't create an AddRec for it, or couldn't add the predicate), we just
  // return \p Expr.
  const SCEV *convertToAddRecWithPreds(const SCEVUnknown *Expr) {
    if (!isa<PHINode>(Expr->getValue()))
      return Expr;
    Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
        PredicatedRewrite = SE.createAddRecFromPHIWithCasts(Expr);
    if (!PredicatedRewrite)
      return Expr;
    for (auto *P : PredicatedRewrite->second) {
      // Wrap predicates from outer loops are not supported.
      if (auto *WP = dyn_cast<const SCEVWrapPredicate>(P)) {
        auto *AR = cast<const SCEVAddRecExpr>(WP->getExpr());
        if (L != AR->getLoop())
          return Expr;
      }
      if (!addOverflowAssumption(P))
        return Expr;
    }
    return PredicatedRewrite->first;
  }

  SmallPtrSetImpl<const SCEVPredicate *> *NewPreds;
  SCEVUnionPredicate *Pred;
  const Loop *L;
};

} // end anonymous namespace
const SCEV *ScalarEvolution::rewriteUsingPredicate(const SCEV *S, const Loop *L,
                                                   SCEVUnionPredicate &Preds) {
  return SCEVPredicateRewriter::rewrite(S, L, *this, nullptr, &Preds);
}

const SCEVAddRecExpr *ScalarEvolution::convertSCEVToAddRecWithPredicates(
    const SCEV *S, const Loop *L,
    SmallPtrSetImpl<const SCEVPredicate *> &Preds) {
  SmallPtrSet<const SCEVPredicate *, 4> TransformPreds;
  S = SCEVPredicateRewriter::rewrite(S, L, *this, &TransformPreds, nullptr);
  auto *AddRec = dyn_cast<SCEVAddRecExpr>(S);

  if (!AddRec)
    return nullptr;

  // Since the transformation was successful, we can now transfer the SCEV
  // predicates.
  for (auto *P : TransformPreds)
    Preds.insert(P);

  return AddRec;
}
/// SCEV predicates
SCEVPredicate::SCEVPredicate(const FoldingSetNodeIDRef ID,
                             SCEVPredicateKind Kind)
    : FastID(ID), Kind(Kind) {}

SCEVEqualPredicate::SCEVEqualPredicate(const FoldingSetNodeIDRef ID,
                                       const SCEV *LHS, const SCEV *RHS)
    : SCEVPredicate(ID, P_Equal), LHS(LHS), RHS(RHS) {
  assert(LHS->getType() == RHS->getType() && "LHS and RHS types don't match");
  assert(LHS != RHS && "LHS and RHS are the same SCEV");
}

bool SCEVEqualPredicate::implies(const SCEVPredicate *N) const {
  const auto *Op = dyn_cast<SCEVEqualPredicate>(N);

  if (!Op)
    return false;

  return Op->LHS == LHS && Op->RHS == RHS;
}

bool SCEVEqualPredicate::isAlwaysTrue() const { return false; }

const SCEV *SCEVEqualPredicate::getExpr() const { return LHS; }

void SCEVEqualPredicate::print(raw_ostream &OS, unsigned Depth) const {
  OS.indent(Depth) << "Equal predicate: " << *LHS << " == " << *RHS << "\n";
}

SCEVWrapPredicate::SCEVWrapPredicate(const FoldingSetNodeIDRef ID,
                                     const SCEVAddRecExpr *AR,
                                     IncrementWrapFlags Flags)
    : SCEVPredicate(ID, P_Wrap), AR(AR), Flags(Flags) {}
const SCEV *SCEVWrapPredicate::getExpr() const { return AR; }

bool SCEVWrapPredicate::implies(const SCEVPredicate *N) const {
  const auto *Op = dyn_cast<SCEVWrapPredicate>(N);

  return Op && Op->AR == AR && setFlags(Flags, Op->Flags) == Flags;
}

bool SCEVWrapPredicate::isAlwaysTrue() const {
  SCEV::NoWrapFlags ScevFlags = AR->getNoWrapFlags();
  IncrementWrapFlags IFlags = Flags;

  if (ScalarEvolution::setFlags(ScevFlags, SCEV::FlagNSW) == ScevFlags)
    IFlags = clearFlags(IFlags, IncrementNSSW);

  return IFlags == IncrementAnyWrap;
}

void SCEVWrapPredicate::print(raw_ostream &OS, unsigned Depth) const {
  OS.indent(Depth) << *getExpr() << " Added Flags: ";
  if (SCEVWrapPredicate::IncrementNUSW & getFlags())
    OS << "<nusw>";
  if (SCEVWrapPredicate::IncrementNSSW & getFlags())
    OS << "<nssw>";
  OS << "\n";
}
SCEVWrapPredicate::IncrementWrapFlags
SCEVWrapPredicate::getImpliedFlags(const SCEVAddRecExpr *AR,
                                   ScalarEvolution &SE) {
  IncrementWrapFlags ImpliedFlags = IncrementAnyWrap;
  SCEV::NoWrapFlags StaticFlags = AR->getNoWrapFlags();

  // We can safely transfer the NSW flag as NSSW.
  if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNSW) == StaticFlags)
    ImpliedFlags = IncrementNSSW;

  if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNUW) == StaticFlags) {
    // If the increment is positive, the SCEV NUW flag will also imply the
    // WrapPredicate NUSW flag.
    if (const auto *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(SE)))
      if (Step->getValue()->getValue().isNonNegative())
        ImpliedFlags = setFlags(ImpliedFlags, IncrementNUSW);
  }

  return ImpliedFlags;
}
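
// For example, for the recurrence {0,+,1}<nuw><nsw><%L>, the static NSW flag
// is transferred directly as IncrementNSSW, and because the constant step 1
// is non-negative the NUW flag additionally implies IncrementNUSW. A wrap
// predicate requesting either flag on such an AddRec is therefore statically
// known to hold (see isAlwaysTrue above) and needs no runtime check.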
/// Union predicates don't get cached so create a dummy set ID for it.
SCEVUnionPredicate::SCEVUnionPredicate()
    : SCEVPredicate(FoldingSetNodeIDRef(nullptr, 0), P_Union) {}

bool SCEVUnionPredicate::isAlwaysTrue() const {
  return all_of(Preds,
                [](const SCEVPredicate *I) { return I->isAlwaysTrue(); });
}

ArrayRef<const SCEVPredicate *>
SCEVUnionPredicate::getPredicatesForExpr(const SCEV *Expr) {
  auto I = SCEVToPreds.find(Expr);
  if (I == SCEVToPreds.end())
    return ArrayRef<const SCEVPredicate *>();
  return I->second;
}

bool SCEVUnionPredicate::implies(const SCEVPredicate *N) const {
  if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N))
    return all_of(Set->Preds,
                  [this](const SCEVPredicate *I) { return this->implies(I); });

  auto ScevPredsIt = SCEVToPreds.find(N->getExpr());
  if (ScevPredsIt == SCEVToPreds.end())
    return false;
  auto &SCEVPreds = ScevPredsIt->second;

  return any_of(SCEVPreds,
                [N](const SCEVPredicate *I) { return I->implies(N); });
}

const SCEV *SCEVUnionPredicate::getExpr() const { return nullptr; }

void SCEVUnionPredicate::print(raw_ostream &OS, unsigned Depth) const {
  for (auto Pred : Preds)
    Pred->print(OS, Depth);
}

void SCEVUnionPredicate::add(const SCEVPredicate *N) {
  if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N)) {
    for (auto Pred : Set->Preds)
      add(Pred);
    return;
  }

  if (implies(N))
    return;

  const SCEV *Key = N->getExpr();
  assert(Key && "Only SCEVUnionPredicate doesn't have an "
                " associated expression!");

  SCEVToPreds[Key].push_back(N);
  Preds.push_back(N);
}
PredicatedScalarEvolution::PredicatedScalarEvolution(ScalarEvolution &SE,
                                                     Loop &L)
    : SE(SE), L(L) {}

const SCEV *PredicatedScalarEvolution::getSCEV(Value *V) {
  const SCEV *Expr = SE.getSCEV(V);
  RewriteEntry &Entry = RewriteMap[Expr];

  // If we already have an entry and the version matches, return it.
  if (Entry.second && Generation == Entry.first)
    return Entry.second;

  // We found an entry but it's stale. Rewrite the stale entry
  // according to the current predicate.
  if (Entry.second)
    Expr = Entry.second;

  const SCEV *NewSCEV = SE.rewriteUsingPredicate(Expr, &L, Preds);
  Entry = {Generation, NewSCEV};

  return NewSCEV;
}
*PredicatedScalarEvolution::getBackedgeTakenCount() {
12327 if (!BackedgeCount
) {
12328 SCEVUnionPredicate BackedgePred
;
12329 BackedgeCount
= SE
.getPredicatedBackedgeTakenCount(&L
, BackedgePred
);
12330 addPredicate(BackedgePred
);
12332 return BackedgeCount
;
12335 void PredicatedScalarEvolution::addPredicate(const SCEVPredicate
&Pred
) {
12336 if (Preds
.implies(&Pred
))
12339 updateGeneration();
12342 const SCEVUnionPredicate
&PredicatedScalarEvolution::getUnionPredicate() const {
void PredicatedScalarEvolution::updateGeneration() {
  // If the generation number wrapped, recompute everything.
  if (++Generation == 0) {
    for (auto &II : RewriteMap) {
      const SCEV *Rewritten = II.second.second;
      II.second = {Generation, SE.rewriteUsingPredicate(Rewritten, &L, Preds)};
    }
  }
}
void PredicatedScalarEvolution::setNoOverflow(
    Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) {
  const SCEV *Expr = getSCEV(V);
  const auto *AR = cast<SCEVAddRecExpr>(Expr);

  auto ImpliedFlags = SCEVWrapPredicate::getImpliedFlags(AR, SE);

  // Clear the statically implied flags.
  Flags = SCEVWrapPredicate::clearFlags(Flags, ImpliedFlags);
  addPredicate(*SE.getWrapPredicate(AR, Flags));

  auto II = FlagsMap.insert({V, Flags});
  if (!II.second)
    II.first->second = SCEVWrapPredicate::setFlags(Flags, II.first->second);
}
bool PredicatedScalarEvolution::hasNoOverflow(
    Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) {
  const SCEV *Expr = getSCEV(V);
  const auto *AR = cast<SCEVAddRecExpr>(Expr);

  Flags = SCEVWrapPredicate::clearFlags(
      Flags, SCEVWrapPredicate::getImpliedFlags(AR, SE));

  auto II = FlagsMap.find(V);

  if (II != FlagsMap.end())
    Flags = SCEVWrapPredicate::clearFlags(Flags, II->second);

  return Flags == SCEVWrapPredicate::IncrementAnyWrap;
}
const SCEVAddRecExpr *PredicatedScalarEvolution::getAsAddRec(Value *V) {
  const SCEV *Expr = this->getSCEV(V);
  SmallPtrSet<const SCEVPredicate *, 4> NewPreds;
  auto *New = SE.convertSCEVToAddRecWithPredicates(Expr, &L, NewPreds);

  if (!New)
    return nullptr;

  for (auto *P : NewPreds)
    Preds.add(P);

  updateGeneration();
  RewriteMap[SE.getSCEV(V)] = {Generation, New};
  return New;
}
PredicatedScalarEvolution::PredicatedScalarEvolution(
    const PredicatedScalarEvolution &Init)
    : RewriteMap(Init.RewriteMap), SE(Init.SE), L(Init.L), Preds(Init.Preds),
      Generation(Init.Generation), BackedgeCount(Init.BackedgeCount) {
  for (const auto &I : Init.FlagsMap)
    FlagsMap.insert(I);
}
void PredicatedScalarEvolution::print(raw_ostream &OS, unsigned Depth) const {
  // For each block.
  for (auto *BB : L.getBlocks())
    for (auto &I : *BB) {
      if (!SE.isSCEVable(I.getType()))
        continue;

      auto *Expr = SE.getSCEV(&I);
      auto II = RewriteMap.find(Expr);

      if (II == RewriteMap.end())
        continue;

      // Don't print things that are not interesting.
      if (II->second.second == Expr)
        continue;

      OS.indent(Depth) << "[PSE]" << I << ":\n";
      OS.indent(Depth + 2) << *Expr << "\n";
      OS.indent(Depth + 2) << "--> " << *II->second.second << "\n";
    }
}
// Match the mathematical pattern A - (A / B) * B, where A and B can be
// arbitrary expressions.
//
// It's not always easy, as A and B can be folded (imagine A is X / 2, and B
// is 4, A / B becomes X / 8).
bool ScalarEvolution::matchURem(const SCEV *Expr, const SCEV *&LHS,
                                const SCEV *&RHS) {
  const auto *Add = dyn_cast<SCEVAddExpr>(Expr);
  if (Add == nullptr || Add->getNumOperands() != 2)
    return false;

  const SCEV *A = Add->getOperand(1);
  const auto *Mul = dyn_cast<SCEVMulExpr>(Add->getOperand(0));

  if (Mul == nullptr)
    return false;

  const auto MatchURemWithDivisor = [&](const SCEV *B) {
    // (SomeExpr + (-(SomeExpr / B) * B)).
    if (Expr == getURemExpr(A, B)) {
      LHS = A;
      RHS = B;
      return true;
    }
    return false;
  };

  // (SomeExpr + (-1 * (SomeExpr / B) * B)).
  if (Mul->getNumOperands() == 3 && isa<SCEVConstant>(Mul->getOperand(0)))
    return MatchURemWithDivisor(Mul->getOperand(1)) ||
           MatchURemWithDivisor(Mul->getOperand(2));

  // (SomeExpr + ((-SomeExpr / B) * B)) or (SomeExpr + ((SomeExpr / B) * -B)).
  if (Mul->getNumOperands() == 2)
    return MatchURemWithDivisor(Mul->getOperand(1)) ||
           MatchURemWithDivisor(Mul->getOperand(0)) ||
           MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(1))) ||
           MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(0)));

  return false;
}
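
// A concrete instance of the pattern, assuming a source expression of the
// shape %a urem 4 that has been lowered to %a - ((%a /u 4) * 4): its SCEV is
// the two-operand add ((-4 * (%a /u 4)) + %a). Matching takes A = %a, walks
// the candidate divisors of the multiply (here 4, reached through the
// negation of the -4 operand), and checks Expr == getURemExpr(A, B); on
// success LHS and RHS are set to %a and 4.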