//===- ScalarEvolution.cpp - Scalar Evolution Analysis --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library.  First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle.  We only create one SCEV of a particular shape, so
// pointer-comparisons for equality are legal.
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (i.e., a PHI node).  If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression.  These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <map>
#include <memory>
#include <tuple>
#include <utility>
#include <vector>
using namespace llvm;

#define DEBUG_TYPE "scalar-evolution"

STATISTIC(NumArrayLenItCounts,
          "Number of trip counts computed with array length");
STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");

static cl::opt<unsigned>
    MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                            cl::desc("Maximum number of iterations SCEV will "
                                     "symbolically execute a constant "
                                     "derived loop"),
                            cl::init(100));

// FIXME: Enable this with EXPENSIVE_CHECKS when the test suite is clean.
static cl::opt<bool> VerifySCEV(
    "verify-scev", cl::Hidden,
    cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"));
static cl::opt<bool> VerifySCEVMap(
    "verify-scev-maps", cl::Hidden,
    cl::desc("Verify no dangling value in ScalarEvolution's "
             "ExprValueMap (slow)"));

static cl::opt<bool> VerifyIR(
    "scev-verify-ir", cl::Hidden,
    cl::desc("Verify IR correctness when making sensitive SCEV queries (slow)"),
    cl::init(false));

static cl::opt<unsigned> MulOpsInlineThreshold(
    "scev-mulops-inline-threshold", cl::Hidden,
    cl::desc("Threshold for inlining multiplication operands into a SCEV"),
    cl::init(32));

static cl::opt<unsigned> AddOpsInlineThreshold(
    "scev-addops-inline-threshold", cl::Hidden,
    cl::desc("Threshold for inlining addition operands into a SCEV"),
    cl::init(500));

static cl::opt<unsigned> MaxSCEVCompareDepth(
    "scalar-evolution-max-scev-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV complexity comparisons"),
    cl::init(32));

static cl::opt<unsigned> MaxSCEVOperationsImplicationDepth(
    "scalar-evolution-max-scev-operations-implication-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV operations implication analysis"),
    cl::init(2));

static cl::opt<unsigned> MaxValueCompareDepth(
    "scalar-evolution-max-value-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive value complexity comparisons"),
    cl::init(2));

static cl::opt<unsigned>
    MaxArithDepth("scalar-evolution-max-arith-depth", cl::Hidden,
                  cl::desc("Maximum depth of recursive arithmetics"),
                  cl::init(32));

static cl::opt<unsigned> MaxConstantEvolvingDepth(
    "scalar-evolution-max-constant-evolving-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive constant evolving"), cl::init(32));

static cl::opt<unsigned>
    MaxCastDepth("scalar-evolution-max-cast-depth", cl::Hidden,
                 cl::desc("Maximum depth of recursive SExt/ZExt/Trunc"),
                 cl::init(8));

static cl::opt<unsigned>
    MaxAddRecSize("scalar-evolution-max-add-rec-size", cl::Hidden,
                  cl::desc("Max coefficients in AddRec during evolving"),
                  cl::init(16));

static cl::opt<unsigned>
    HugeExprThreshold("scalar-evolution-huge-expr-threshold", cl::Hidden,
                      cl::desc("Size of the expression which is considered huge"),
                      cl::init(4096));
//===----------------------------------------------------------------------===//
//                           SCEV class definitions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Implementation of the SCEV class.
//
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void SCEV::dump() const {
  print(dbgs());
  dbgs() << '\n';
}
#endif

void SCEV::print(raw_ostream &OS) const {
  switch (static_cast<SCEVTypes>(getSCEVType())) {
  case scConstant:
    cast<SCEVConstant>(this)->getValue()->printAsOperand(OS, false);
    return;
  case scTruncate: {
    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this);
    const SCEV *Op = Trunc->getOperand();
    OS << "(trunc " << *Op->getType() << " " << *Op << " to "
       << *Trunc->getType() << ")";
    return;
  }
  case scZeroExtend: {
    const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(this);
    const SCEV *Op = ZExt->getOperand();
    OS << "(zext " << *Op->getType() << " " << *Op << " to "
       << *ZExt->getType() << ")";
    return;
  }
  case scSignExtend: {
    const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(this);
    const SCEV *Op = SExt->getOperand();
    OS << "(sext " << *Op->getType() << " " << *Op << " to "
       << *SExt->getType() << ")";
    return;
  }
  case scAddRecExpr: {
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(this);
    OS << "{" << *AR->getOperand(0);
    for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i)
      OS << ",+," << *AR->getOperand(i);
    OS << "}<";
    if (AR->hasNoUnsignedWrap())
      OS << "nuw><";
    if (AR->hasNoSignedWrap())
      OS << "nsw><";
    if (AR->hasNoSelfWrap() &&
        !AR->getNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW)))
      OS << "nw><";
    AR->getLoop()->getHeader()->printAsOperand(OS, /*PrintType=*/false);
    OS << ">";
    return;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr: {
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this);
    const char *OpStr = nullptr;
    switch (NAry->getSCEVType()) {
    case scAddExpr: OpStr = " + "; break;
    case scMulExpr: OpStr = " * "; break;
    case scUMaxExpr: OpStr = " umax "; break;
    case scSMaxExpr: OpStr = " smax "; break;
    }
    OS << "(";
    for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
         I != E; ++I) {
      OS << **I;
      if (std::next(I) != E)
        OS << OpStr;
    }
    OS << ")";
    switch (NAry->getSCEVType()) {
    case scAddExpr:
    case scMulExpr:
      if (NAry->hasNoUnsignedWrap())
        OS << "<nuw>";
      if (NAry->hasNoSignedWrap())
        OS << "<nsw>";
    }
    return;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(this);
    OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")";
    return;
  }
  case scUnknown: {
    const SCEVUnknown *U = cast<SCEVUnknown>(this);
    Type *AllocTy;
    if (U->isSizeOf(AllocTy)) {
      OS << "sizeof(" << *AllocTy << ")";
      return;
    }
    if (U->isAlignOf(AllocTy)) {
      OS << "alignof(" << *AllocTy << ")";
      return;
    }

    Type *CTy;
    Constant *FieldNo;
    if (U->isOffsetOf(CTy, FieldNo)) {
      OS << "offsetof(" << *CTy << ", ";
      FieldNo->printAsOperand(OS, false);
      OS << ")";
      return;
    }

    // Otherwise just print it normally.
    U->getValue()->printAsOperand(OS, false);
    return;
  }
  case scCouldNotCompute:
    OS << "***COULDNOTCOMPUTE***";
    return;
  }
  llvm_unreachable("Unknown SCEV kind!");
}
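// For example (illustrative), the affine recurrence starting at 0 and
// stepping by 1 in loop %loop prints as:
//
//   {0,+,1}<nuw><nsw><%loop>
//
// and a zero-extend of a value %x from i8 to i32 prints as:
//
//   (zext i8 %x to i32)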
Type *SCEV::getType() const {
  switch (static_cast<SCEVTypes>(getSCEVType())) {
  case scConstant:
    return cast<SCEVConstant>(this)->getType();
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return cast<SCEVCastExpr>(this)->getType();
  case scAddRecExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
    return cast<SCEVNAryExpr>(this)->getType();
  case scAddExpr:
    return cast<SCEVAddExpr>(this)->getType();
  case scUDivExpr:
    return cast<SCEVUDivExpr>(this)->getType();
  case scUnknown:
    return cast<SCEVUnknown>(this)->getType();
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}
bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isMinusOne();
  return false;
}

bool SCEV::isNonConstantNegative() const {
  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(this);
  if (!Mul) return false;

  // If there is a constant factor, it will be first.
  const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
  if (!SC) return false;

  // Return true if the value is negative; this matches things like (-42 * V).
  return SC->getAPInt().isNegative();
}
SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(FoldingSetNodeIDRef(), scCouldNotCompute, 0) {}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}

const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
  FoldingSetNodeID ID;
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getConstant(const APInt &Val) {
  return getConstant(ConstantInt::get(getContext(), Val));
}

const SCEV *
ScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) {
  IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
  return getConstant(ConstantInt::get(ITy, V, isSigned));
}

SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID,
                           unsigned SCEVTy, const SCEV *op, Type *ty)
  : SCEV(ID, SCEVTy, computeExpressionSize(op)), Op(op), Ty(ty) {}
SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
                                   const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scTruncate, op, ty) {
  assert(Op->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate non-integer value!");
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scZeroExtend, op, ty) {
  assert(Op->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot zero extend non-integer value!");
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
  : SCEVCastExpr(ID, scSignExtend, op, ty) {
  assert(Op->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot sign extend non-integer value!");
}
void SCEVUnknown::deleted() {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Release the value.
  setValPtr(nullptr);
}

void SCEVUnknown::allUsesReplacedWith(Value *New) {
  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Update this SCEVUnknown to point to the new value. This is needed
  // because there may still be outstanding SCEVs which still point to
  // this SCEVUnknown.
  setValPtr(New);
}
bool SCEVUnknown::isSizeOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue() &&
            CE->getNumOperands() == 2)
          if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
            if (CI->isOne()) {
              AllocTy = cast<PointerType>(CE->getOperand(0)->getType())
                                 ->getElementType();
              return true;
            }

  return false;
}
bool SCEVUnknown::isAlignOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue()) {
          Type *Ty =
            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          if (StructType *STy = dyn_cast<StructType>(Ty))
            if (!STy->isPacked() &&
                CE->getNumOperands() == 3 &&
                CE->getOperand(1)->isNullValue()) {
              if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
                if (CI->isOne() &&
                    STy->getNumElements() == 2 &&
                    STy->getElementType(0)->isIntegerTy(1)) {
                  AllocTy = STy->getElementType(1);
                  return true;
                }
            }
        }

  return false;
}
bool SCEVUnknown::isOffsetOf(Type *&CTy, Constant *&FieldNo) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getNumOperands() == 3 &&
            CE->getOperand(0)->isNullValue() &&
            CE->getOperand(1)->isNullValue()) {
          Type *Ty =
            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          // Ignore vector types here so that ScalarEvolutionExpander doesn't
          // emit getelementptrs that index into vectors.
          if (Ty->isStructTy() || Ty->isArrayTy()) {
            CTy = Ty;
            FieldNo = CE->getOperand(2);
            return true;
          }
        }

  return false;
}
//===----------------------------------------------------------------------===//
//                               SCEV Utilities
//===----------------------------------------------------------------------===//

/// Compare the two values \p LV and \p RV in terms of their "complexity" where
/// "complexity" is a partial (and somewhat ad-hoc) relation used to order
/// operands in SCEV expressions.  \p EqCache is a set of pairs of values that
/// have been previously deemed to be "equally complex" by this routine.  It is
/// intended to avoid exponential time complexity in cases like:
///
///   %a = f(%x, %y)
///   %b = f(%x, %y)
///
///   %c = f(%a, %b)
///   %d = f(%a, %b)
///
///   %e = f(%c, %d)
///   %f = f(%c, %d)
///
///   CompareValueComplexity(%f, %c)
///
/// Since we do not continue running this routine on expression trees once we
/// have seen unequal values, there is no need to track them in the cache.
static int
CompareValueComplexity(EquivalenceClasses<const Value *> &EqCacheValue,
                       const LoopInfo *const LI, Value *LV, Value *RV,
                       unsigned Depth) {
  if (Depth > MaxValueCompareDepth || EqCacheValue.isEquivalent(LV, RV))
    return 0;

  // Order pointer values after integer values. This helps SCEVExpander form
  // GEPs.
  bool LIsPointer = LV->getType()->isPointerTy(),
       RIsPointer = RV->getType()->isPointerTy();
  if (LIsPointer != RIsPointer)
    return (int)LIsPointer - (int)RIsPointer;

  // Compare getValueID values.
  unsigned LID = LV->getValueID(), RID = RV->getValueID();
  if (LID != RID)
    return (int)LID - (int)RID;

  // Sort arguments by their position.
  if (const auto *LA = dyn_cast<Argument>(LV)) {
    const auto *RA = cast<Argument>(RV);
    unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
    return (int)LArgNo - (int)RArgNo;
  }

  if (const auto *LGV = dyn_cast<GlobalValue>(LV)) {
    const auto *RGV = cast<GlobalValue>(RV);

    const auto IsGVNameSemantic = [&](const GlobalValue *GV) {
      auto LT = GV->getLinkage();
      return !(GlobalValue::isPrivateLinkage(LT) ||
               GlobalValue::isInternalLinkage(LT));
    };

    // Use the names to distinguish the two values, but only if the
    // names are semantically important.
    if (IsGVNameSemantic(LGV) && IsGVNameSemantic(RGV))
      return LGV->getName().compare(RGV->getName());
  }

  // For instructions, compare their loop depth, and their operand count.  This
  // is pretty loose.
  if (const auto *LInst = dyn_cast<Instruction>(LV)) {
    const auto *RInst = cast<Instruction>(RV);

    // Compare loop depths.
    const BasicBlock *LParent = LInst->getParent(),
                     *RParent = RInst->getParent();
    if (LParent != RParent) {
      unsigned LDepth = LI->getLoopDepth(LParent),
               RDepth = LI->getLoopDepth(RParent);
      if (LDepth != RDepth)
        return (int)LDepth - (int)RDepth;
    }

    // Compare the number of operands.
    unsigned LNumOps = LInst->getNumOperands(),
             RNumOps = RInst->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned Idx : seq(0u, LNumOps)) {
      int Result =
          CompareValueComplexity(EqCacheValue, LI, LInst->getOperand(Idx),
                                 RInst->getOperand(Idx), Depth + 1);
      if (Result != 0)
        return Result;
    }
  }

  EqCacheValue.unionSets(LV, RV);
  return 0;
}
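// For example (illustrative): given a function argument %a of type i64 and an
// argument %p of type i64*, this routine orders %a before %p because integer
// values sort before pointer values; two arguments of the same type are
// instead ordered by their argument position.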
// Return negative, zero, or positive, if LHS is less than, equal to, or greater
// than RHS, respectively. A three-way result allows recursive comparisons to be
// packed efficiently.
static int CompareSCEVComplexity(
    EquivalenceClasses<const SCEV *> &EqCacheSCEV,
    EquivalenceClasses<const Value *> &EqCacheValue,
    const LoopInfo *const LI, const SCEV *LHS, const SCEV *RHS,
    DominatorTree &DT, unsigned Depth = 0) {
  // Fast-path: SCEVs are uniqued so we can do a quick equality check.
  if (LHS == RHS)
    return 0;

  // Primarily, sort the SCEVs by their getSCEVType().
  unsigned LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
  if (LType != RType)
    return (int)LType - (int)RType;

  if (Depth > MaxSCEVCompareDepth || EqCacheSCEV.isEquivalent(LHS, RHS))
    return 0;
  // Aside from the getSCEVType() ordering, the particular ordering
  // isn't very important except that it's beneficial to be consistent,
  // so that (a + b) and (b + a) don't end up as different expressions.
  switch (static_cast<SCEVTypes>(LType)) {
  case scUnknown: {
    const SCEVUnknown *LU = cast<SCEVUnknown>(LHS);
    const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

    int X = CompareValueComplexity(EqCacheValue, LI, LU->getValue(),
                                   RU->getValue(), Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scConstant: {
    const SCEVConstant *LC = cast<SCEVConstant>(LHS);
    const SCEVConstant *RC = cast<SCEVConstant>(RHS);

    // Compare constant values.
    const APInt &LA = LC->getAPInt();
    const APInt &RA = RC->getAPInt();
    unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
    if (LBitWidth != RBitWidth)
      return (int)LBitWidth - (int)RBitWidth;
    return LA.ult(RA) ? -1 : 1;
  }

  case scAddRecExpr: {
    const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS);
    const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);

    // There is always a dominance between two recs that are used by one SCEV,
    // so we can safely sort recs by loop header dominance. We require such
    // order in getAddExpr.
    const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
    if (LLoop != RLoop) {
      const BasicBlock *LHead = LLoop->getHeader(), *RHead = RLoop->getHeader();
      assert(LHead != RHead && "Two loops share the same header?");
      if (DT.dominates(LHead, RHead))
        return 1;
      else
        assert(DT.dominates(RHead, LHead) &&
               "No dominance between recurrences used by one SCEV?");
      return -1;
    }

    // Addrec complexity grows with operand count.
    unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    // Lexicographically compare.
    for (unsigned i = 0; i != LNumOps; ++i) {
      int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                    LA->getOperand(i), RA->getOperand(i), DT,
                                    Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.unionSets(LHS, RHS);
    return 0;
  }

  case scAddExpr:
  case scMulExpr:
  case scSMaxExpr:
  case scUMaxExpr: {
    const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS);
    const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);

    // Lexicographically compare n-ary expressions.
    unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned i = 0; i != LNumOps; ++i) {
      int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                    LC->getOperand(i), RC->getOperand(i), DT,
                                    Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.unionSets(LHS, RHS);
    return 0;
  }

  case scUDivExpr: {
    const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS);
    const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);

    // Lexicographically compare udiv expressions.
    int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getLHS(),
                                  RC->getLHS(), DT, Depth + 1);
    if (X != 0)
      return X;
    X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getRHS(),
                              RC->getRHS(), DT, Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scTruncate:
  case scZeroExtend:
  case scSignExtend: {
    const SCEVCastExpr *LC = cast<SCEVCastExpr>(LHS);
    const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);

    // Compare cast expressions by operand.
    int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                  LC->getOperand(), RC->getOperand(), DT,
                                  Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}
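// For example (illustrative): because the primary sort key is getSCEVType(),
// a SCEVConstant always compares less than a SCEVUnknown, so sorting the
// operands of an add expression places a constant like 42 ahead of a plain
// value like %x, yielding the canonical form (42 + %x).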
/// Given a list of SCEV objects, order them by their complexity, and group
/// objects of the same complexity together by value.  When this routine is
/// finished, we know that any duplicates in the vector are consecutive and that
/// complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine.  In other words, we don't want the results of
/// this to depend on where the addresses of various SCEV objects happened to
/// land in memory.
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
                              LoopInfo *LI, DominatorTree &DT) {
  if (Ops.size() < 2) return;  // Noop

  EquivalenceClasses<const SCEV *> EqCacheSCEV;
  EquivalenceClasses<const Value *> EqCacheValue;
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
    if (CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, RHS, LHS, DT) < 0)
      std::swap(LHS, RHS);
    return;
  }

  // Do the rough sort by complexity.
  std::stable_sort(Ops.begin(), Ops.end(),
                   [&](const SCEV *LHS, const SCEV *RHS) {
                     return CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                                  LHS, RHS, DT) < 0;
                   });

  // Now that we are sorted by complexity, group elements of the same
  // complexity.  Note that this is, at worst, N^2, but the vector is likely to
  // be extremely short in practice.  Note that we take this approach because we
  // do not want to depend on the addresses of the objects we are grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i;   // no need to rescan it.
        if (i == e-2) return;  // Done!
      }
    }
  }
}
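// For example (illustrative): given Ops == [%x, 42, %x, {0,+,1}], the rough
// sort by complexity yields [42, {0,+,1}, %x, %x], and the grouping pass then
// guarantees the two occurrences of %x sit adjacent, which lets callers fold
// duplicates with a single linear scan.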
// Returns the size of the SCEV S.
static inline int sizeOfSCEV(const SCEV *S) {
  struct FindSCEVSize {
    int Size = 0;

    FindSCEVSize() = default;

    bool follow(const SCEV *S) {
      ++Size;
      // Keep looking at all operands of S.
      return true;
    }

    bool isDone() const {
      return false;
    }
  };

  FindSCEVSize F;
  SCEVTraversal<FindSCEVSize> ST(F);
  ST.visitAll(S);
  return F.Size;
}
/// Returns true if the subtree of \p S contains at least HugeExprThreshold
/// nodes.
static bool isHugeExpression(const SCEV *S) {
  return S->getExpressionSize() >= HugeExprThreshold;
}

/// Returns true if \p Ops contains a huge SCEV (see definition above).
static bool hasHugeExpression(ArrayRef<const SCEV *> Ops) {
  return any_of(Ops, isHugeExpression);
}
namespace {

struct SCEVDivision : public SCEVVisitor<SCEVDivision, void> {
public:
  // Computes the Quotient and Remainder of the division of Numerator by
  // Denominator.
  static void divide(ScalarEvolution &SE, const SCEV *Numerator,
                     const SCEV *Denominator, const SCEV **Quotient,
                     const SCEV **Remainder) {
    assert(Numerator && Denominator && "Uninitialized SCEV");

    SCEVDivision D(SE, Numerator, Denominator);

    // Check for the trivial case here to avoid having to check for it in the
    // rest of the code.
    if (Numerator == Denominator) {
      *Quotient = D.One;
      *Remainder = D.Zero;
      return;
    }

    if (Numerator->isZero()) {
      *Quotient = D.Zero;
      *Remainder = D.Zero;
      return;
    }

    // A simple case when N/1. The quotient is N.
    if (Denominator->isOne()) {
      *Quotient = Numerator;
      *Remainder = D.Zero;
      return;
    }

    // Split the Denominator when it is a product.
    if (const SCEVMulExpr *T = dyn_cast<SCEVMulExpr>(Denominator)) {
      const SCEV *Q, *R;
      *Quotient = Numerator;
      for (const SCEV *Op : T->operands()) {
        divide(SE, *Quotient, Op, &Q, &R);
        *Quotient = Q;

        // Bail out when the Numerator is not divisible by one of the terms of
        // the Denominator.
        if (!R->isZero()) {
          *Quotient = D.Zero;
          *Remainder = Numerator;
          return;
        }
      }
      *Remainder = D.Zero;
      return;
    }

    D.visit(Numerator);
    *Quotient = D.Quotient;
    *Remainder = D.Remainder;
  }

  // Except in the trivial case described above, we do not know how to divide
  // Expr by Denominator for the following functions with empty implementation.
  void visitTruncateExpr(const SCEVTruncateExpr *Numerator) {}
  void visitZeroExtendExpr(const SCEVZeroExtendExpr *Numerator) {}
  void visitSignExtendExpr(const SCEVSignExtendExpr *Numerator) {}
  void visitUDivExpr(const SCEVUDivExpr *Numerator) {}
  void visitSMaxExpr(const SCEVSMaxExpr *Numerator) {}
  void visitUMaxExpr(const SCEVUMaxExpr *Numerator) {}
  void visitUnknown(const SCEVUnknown *Numerator) {}
  void visitCouldNotCompute(const SCEVCouldNotCompute *Numerator) {}

  void visitConstant(const SCEVConstant *Numerator) {
    if (const SCEVConstant *D = dyn_cast<SCEVConstant>(Denominator)) {
      APInt NumeratorVal = Numerator->getAPInt();
      APInt DenominatorVal = D->getAPInt();
      uint32_t NumeratorBW = NumeratorVal.getBitWidth();
      uint32_t DenominatorBW = DenominatorVal.getBitWidth();

      if (NumeratorBW > DenominatorBW)
        DenominatorVal = DenominatorVal.sext(NumeratorBW);
      else if (NumeratorBW < DenominatorBW)
        NumeratorVal = NumeratorVal.sext(DenominatorBW);

      APInt QuotientVal(NumeratorVal.getBitWidth(), 0);
      APInt RemainderVal(NumeratorVal.getBitWidth(), 0);
      APInt::sdivrem(NumeratorVal, DenominatorVal, QuotientVal, RemainderVal);
      Quotient = SE.getConstant(QuotientVal);
      Remainder = SE.getConstant(RemainderVal);
      return;
    }
  }

  void visitAddRecExpr(const SCEVAddRecExpr *Numerator) {
    const SCEV *StartQ, *StartR, *StepQ, *StepR;
    if (!Numerator->isAffine())
      return cannotDivide(Numerator);
    divide(SE, Numerator->getStart(), Denominator, &StartQ, &StartR);
    divide(SE, Numerator->getStepRecurrence(SE), Denominator, &StepQ, &StepR);
    // Bail out if the types do not match.
    Type *Ty = Denominator->getType();
    if (Ty != StartQ->getType() || Ty != StartR->getType() ||
        Ty != StepQ->getType() || Ty != StepR->getType())
      return cannotDivide(Numerator);
    Quotient = SE.getAddRecExpr(StartQ, StepQ, Numerator->getLoop(),
                                Numerator->getNoWrapFlags());
    Remainder = SE.getAddRecExpr(StartR, StepR, Numerator->getLoop(),
                                 Numerator->getNoWrapFlags());
  }

  void visitAddExpr(const SCEVAddExpr *Numerator) {
    SmallVector<const SCEV *, 2> Qs, Rs;
    Type *Ty = Denominator->getType();

    for (const SCEV *Op : Numerator->operands()) {
      const SCEV *Q, *R;
      divide(SE, Op, Denominator, &Q, &R);

      // Bail out if types do not match.
      if (Ty != Q->getType() || Ty != R->getType())
        return cannotDivide(Numerator);

      Qs.push_back(Q);
      Rs.push_back(R);
    }

    if (Qs.size() == 1) {
      Quotient = Qs[0];
      Remainder = Rs[0];
      return;
    }

    Quotient = SE.getAddExpr(Qs);
    Remainder = SE.getAddExpr(Rs);
  }

  void visitMulExpr(const SCEVMulExpr *Numerator) {
    SmallVector<const SCEV *, 2> Qs;
    Type *Ty = Denominator->getType();

    bool FoundDenominatorTerm = false;
    for (const SCEV *Op : Numerator->operands()) {
      // Bail out if types do not match.
      if (Ty != Op->getType())
        return cannotDivide(Numerator);

      if (FoundDenominatorTerm) {
        Qs.push_back(Op);
        continue;
      }

      // Check whether Denominator divides one of the product operands.
      const SCEV *Q, *R;
      divide(SE, Op, Denominator, &Q, &R);
      if (!R->isZero()) {
        Qs.push_back(Op);
        continue;
      }

      // Bail out if types do not match.
      if (Ty != Q->getType())
        return cannotDivide(Numerator);

      FoundDenominatorTerm = true;
      Qs.push_back(Q);
    }

    if (FoundDenominatorTerm) {
      Remainder = Zero;
      if (Qs.size() == 1)
        Quotient = Qs[0];
      else
        Quotient = SE.getMulExpr(Qs);
      return;
    }

    if (!isa<SCEVUnknown>(Denominator))
      return cannotDivide(Numerator);

    // The Remainder is obtained by replacing Denominator by 0 in Numerator.
    ValueToValueMap RewriteMap;
    RewriteMap[cast<SCEVUnknown>(Denominator)->getValue()] =
        cast<SCEVConstant>(Zero)->getValue();
    Remainder = SCEVParameterRewriter::rewrite(Numerator, SE, RewriteMap, true);

    if (Remainder->isZero()) {
      // The Quotient is obtained by replacing Denominator by 1 in Numerator.
      RewriteMap[cast<SCEVUnknown>(Denominator)->getValue()] =
          cast<SCEVConstant>(One)->getValue();
      Quotient =
          SCEVParameterRewriter::rewrite(Numerator, SE, RewriteMap, true);
      return;
    }

    // Quotient is (Numerator - Remainder) divided by Denominator.
    const SCEV *Q, *R;
    const SCEV *Diff = SE.getMinusSCEV(Numerator, Remainder);
    // This SCEV does not seem to simplify: fail the division here.
    if (sizeOfSCEV(Diff) > sizeOfSCEV(Numerator))
      return cannotDivide(Numerator);
    divide(SE, Diff, Denominator, &Q, &R);
    if (R != Zero)
      return cannotDivide(Numerator);
    Quotient = Q;
  }

private:
  SCEVDivision(ScalarEvolution &S, const SCEV *Numerator,
               const SCEV *Denominator)
      : SE(S), Denominator(Denominator) {
    Zero = SE.getZero(Denominator->getType());
    One = SE.getOne(Denominator->getType());

    // We generally do not know how to divide Expr by Denominator. We
    // initialize the division to a "cannot divide" state to simplify the rest
    // of the code.
    cannotDivide(Numerator);
  }

  // Convenience function for giving up on the division. We set the quotient to
  // be equal to zero and the remainder to be equal to the numerator.
  void cannotDivide(const SCEV *Numerator) {
    Quotient = Zero;
    Remainder = Numerator;
  }

  ScalarEvolution &SE;
  const SCEV *Denominator, *Quotient, *Remainder, *Zero, *One;
};

} // end anonymous namespace
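// A sketch of how the divider is used (illustrative; callers elsewhere in
// this file follow the same pattern):
//
//   const SCEV *Q, *R;
//   SCEVDivision::divide(SE, Numerator, Denominator, &Q, &R);
//   // On failure Q is zero and R == Numerator, so checking R->isZero() also
//   // tells the caller whether Denominator evenly divides Numerator.
//
// For example, dividing (4 * %x * %y) by (2 * %y) yields the quotient
// (2 * %x) with a zero remainder.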
//===----------------------------------------------------------------------===//
//                      Simple SCEV method implementations
//===----------------------------------------------------------------------===//

/// Compute BC(It, K).  The result has width W.  Assume K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                       ScalarEvolution &SE,
                                       Type *ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose, W is the bitwidth of the return value.  We must be prepared for
  // overflow.  Hence, we must assure that the result of our computation is
  // equal to the accurate one modulo 2^W.  Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula.  However,
  // this formula can be implemented much more efficiently.  The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic.  To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse.  Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T.  The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits.  This way, the bottom W+T bits of the product are accurate. Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate.  Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula needs less than W + K bits. Also, the first formula
  // requires a division step, whereas this formula only requires multiplies
  // and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway.  We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // machine width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)

  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying for calculating K! / 2^T to avoid overflow.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult.lshrInPlace(TwoFactors);
    OddFactorial *= Mult;
  }

  // We need at least W + T bits for the multiplication step
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt::getOneBitSet(CalculationBits, T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W
  IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
                                                CalculationBits);
  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T
  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.

  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}
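// A worked instance of the computation above (illustrative): for K = 3 and
// W = 32, K! = 6 = 2^1 * 3, so T = 1 and OddFactorial = 3.  The product
// It * (It - 1) * (It - 2) is evaluated at W + T = 33 bits, divided by
// 2^T = 2 via an unsigned divide, truncated back to 32 bits, and finally
// multiplied by the 32-bit multiplicative inverse of 3 to perform the exact
// division by the odd factor.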
/// Return the value of this chain of recurrences at the specified iteration
/// number.  We can evaluate this recurrence by multiplying each element in the
/// chain by the binomial coefficient corresponding to it.  In other words, we
/// can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
                                                ScalarEvolution &SE) const {
  const SCEV *Result = getStart();
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff));
  }
  return Result;
}
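// For example (illustrative): evaluating the affine recurrence {A,+,B} at
// iteration It reduces to A + B*BC(It, 1) == A + B*It, and the quadratic
// {0,+,1,+,1} evaluates to BC(It, 1) + BC(It, 2) == It + It*(It-1)/2.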
//===----------------------------------------------------------------------===//
//                    SCEV Expression folder implementations
//===----------------------------------------------------------------------===//

const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op, Type *Ty,
                                             unsigned Depth) {
  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
         "This is not a truncating conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  FoldingSetNodeID ID;
  ID.AddInteger(scTruncate);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));

  // trunc(trunc(x)) --> trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    return getTruncateExpr(ST->getOperand(), Ty, Depth + 1);

  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getTruncateOrSignExtend(SS->getOperand(), Ty, Depth + 1);

  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getTruncateOrZeroExtend(SZ->getOperand(), Ty, Depth + 1);

  if (Depth > MaxCastDepth) {
    SCEV *S =
        new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator), Op, Ty);
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
    return S;
  }

  // trunc(x1 + ... + xN) --> trunc(x1) + ... + trunc(xN) and
  // trunc(x1 * ... * xN) --> trunc(x1) * ... * trunc(xN),
  // if after transforming we have at most one truncate, not counting truncates
  // that replace other casts.
  if (isa<SCEVAddExpr>(Op) || isa<SCEVMulExpr>(Op)) {
    auto *CommOp = cast<SCEVCommutativeExpr>(Op);
    SmallVector<const SCEV *, 4> Operands;
    unsigned numTruncs = 0;
    for (unsigned i = 0, e = CommOp->getNumOperands(); i != e && numTruncs < 2;
         ++i) {
      const SCEV *S = getTruncateExpr(CommOp->getOperand(i), Ty, Depth + 1);
      if (!isa<SCEVCastExpr>(CommOp->getOperand(i)) && isa<SCEVTruncateExpr>(S))
        numTruncs++;
      Operands.push_back(S);
    }
    if (numTruncs < 2) {
      if (isa<SCEVAddExpr>(Op))
        return getAddExpr(Operands);
      else if (isa<SCEVMulExpr>(Op))
        return getMulExpr(Operands);
      else
        llvm_unreachable("Unexpected SCEV type for Op.");
    }
    // Although we checked in the beginning that ID is not in the cache, it is
    // possible that during recursion and different modification ID was inserted
    // into the cache. So if we find it, just return it.
    if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
      return S;
  }

  // If the input value is a chrec scev, truncate the chrec's operands.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    for (const SCEV *Op : AddRec->operands())
      Operands.push_back(getTruncateExpr(Op, Ty, Depth + 1));
    return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap);
  }

  // The cast wasn't folded; create an explicit cast node. We can reuse
  // the existing insert position since if we get here, we won't have
  // made any changes which would invalidate it.
  SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
                                                 Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}
// Get the limit of a recurrence such that incrementing by Step cannot cause
// signed overflow as long as the value of the recurrence within the
// loop does not exceed this limit before incrementing.
static const SCEV *getSignedOverflowLimitForStep(const SCEV *Step,
                                                 ICmpInst::Predicate *Pred,
                                                 ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  if (SE->isKnownPositive(Step)) {
    *Pred = ICmpInst::ICMP_SLT;
    return SE->getConstant(APInt::getSignedMinValue(BitWidth) -
                           SE->getSignedRangeMax(Step));
  }
  if (SE->isKnownNegative(Step)) {
    *Pred = ICmpInst::ICMP_SGT;
    return SE->getConstant(APInt::getSignedMaxValue(BitWidth) -
                           SE->getSignedRangeMin(Step));
  }
  return nullptr;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// unsigned overflow as long as the value of the recurrence within the loop does
// not exceed this limit before incrementing.
static const SCEV *getUnsignedOverflowLimitForStep(const SCEV *Step,
                                                   ICmpInst::Predicate *Pred,
                                                   ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  *Pred = ICmpInst::ICMP_ULT;

  return SE->getConstant(APInt::getMinValue(BitWidth) -
                         SE->getUnsignedRangeMax(Step));
}
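// For example (illustrative): at a bit width of 8 with a known-positive step
// whose signed-range maximum is 1, getSignedOverflowLimitForStep computes
// -128 - 1, which wraps to 127, and returns it with ICMP_SLT: as long as the
// recurrence stays strictly below 127 before the increment, adding the step
// cannot sign-overflow.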
namespace {

struct ExtendOpTraitsBase {
  typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(const SCEV *, Type *,
                                                          unsigned);
};

// Used to make code generic over signed and unsigned overflow.
template <typename ExtendOp> struct ExtendOpTraits {
  // Members present:
  //
  // static const SCEV::NoWrapFlags WrapType;
  //
  // static const ExtendOpTraitsBase::GetExtendExprTy GetExtendExpr;
  //
  // static const SCEV *getOverflowLimitForStep(const SCEV *Step,
  //                                            ICmpInst::Predicate *Pred,
  //                                            ScalarEvolution *SE);
};

template <>
struct ExtendOpTraits<SCEVSignExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNSW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getSignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVSignExtendExpr>::GetExtendExpr = &ScalarEvolution::getSignExtendExpr;

template <>
struct ExtendOpTraits<SCEVZeroExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNUW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getUnsignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVZeroExtendExpr>::GetExtendExpr = &ScalarEvolution::getZeroExtendExpr;

} // end anonymous namespace
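// For example (illustrative): a helper instantiated with
// ExtendOpTraits<SCEVZeroExtendExpr> picks up SCEV::FlagNUW as the wrap flag
// to prove, &ScalarEvolution::getZeroExtendExpr as the extension builder, and
// the unsigned overflow limit above; the SCEVSignExtendExpr instantiation
// selects the signed counterparts, so one template body serves both cases.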
// The recurrence AR has been shown to have no signed/unsigned wrap or something
// close to it. Typically, if we can prove NSW/NUW for AR, then we can just as
// easily prove NSW/NUW for its preincrement or postincrement sibling. This
// allows normalizing a sign/zero extended AddRec as such: {sext/zext(Step +
// Start),+,Step} => {(Step + sext/zext(Start),+,Step} As a result, the
// expression "Step + sext/zext(PreIncAR)" is congruent with
// "sext/zext(PostIncAR)"
template <typename ExtendOpTy>
static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE, unsigned Depth) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const Loop *L = AR->getLoop();
  const SCEV *Start = AR->getStart();
  const SCEV *Step = AR->getStepRecurrence(*SE);

  // Check for a simple looking step prior to loop entry.
  const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start);
  if (!SA)
    return nullptr;

  // Create an AddExpr for "PreStart" after subtracting Step. Full SCEV
  // subtraction is expensive. For this purpose, perform a quick and dirty
  // difference, by checking for Step in the operand list.
  SmallVector<const SCEV *, 4> DiffOps;
  for (const SCEV *Op : SA->operands())
    if (Op != Step)
      DiffOps.push_back(Op);

  if (DiffOps.size() == SA->getNumOperands())
    return nullptr;

  // Try to prove `WrapType` (SCEV::FlagNSW or SCEV::FlagNUW) on `PreStart` +
  // `Step`:

  // 1. NSW/NUW flags on the step increment.
  auto PreStartFlags =
    ScalarEvolution::maskFlags(SA->getNoWrapFlags(), SCEV::FlagNUW);
  const SCEV *PreStart = SE->getAddExpr(DiffOps, PreStartFlags);
  const SCEVAddRecExpr *PreAR = dyn_cast<SCEVAddRecExpr>(
      SE->getAddRecExpr(PreStart, Step, L, SCEV::FlagAnyWrap));

  // "{S,+,X} is <nsw>/<nuw>" and "the backedge is taken at least once" implies
  // "S+X does not sign/unsign-overflow".
  //
  const SCEV *BECount = SE->getBackedgeTakenCount(L);
  if (PreAR && PreAR->getNoWrapFlags(WrapType) &&
      !isa<SCEVCouldNotCompute>(BECount) && SE->isKnownPositive(BECount))
    return PreStart;

  // 2. Direct overflow check on the step operation's expression.
  unsigned BitWidth = SE->getTypeSizeInBits(AR->getType());
  Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2);
  const SCEV *OperandExtendedStart =
      SE->getAddExpr((SE->*GetExtendExpr)(PreStart, WideTy, Depth),
                     (SE->*GetExtendExpr)(Step, WideTy, Depth));
  if ((SE->*GetExtendExpr)(Start, WideTy, Depth) == OperandExtendedStart) {
    if (PreAR && AR->getNoWrapFlags(WrapType)) {
      // If we know `AR` == {`PreStart`+`Step`,+,`Step`} is `WrapType` (FlagNSW
      // or FlagNUW) and that `PreStart` + `Step` is `WrapType` too, then
      // `PreAR` == {`PreStart`,+,`Step`} is also `WrapType`.  Cache this fact.
      const_cast<SCEVAddRecExpr *>(PreAR)->setNoWrapFlags(WrapType);
    }
    return PreStart;
  }

  // 3. Loop precondition.
  ICmpInst::Predicate Pred;
  const SCEV *OverflowLimit =
      ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE);

  if (OverflowLimit &&
      SE->isLoopEntryGuardedByCond(L, Pred, PreStart, OverflowLimit))
    return PreStart;

  return nullptr;
}
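// For example (illustrative): for AR == {%x + %step,+,%step}, the quick
// difference above produces PreStart == %x, and if any one of the three
// proofs succeeds the extended start ext(%x + %step) may be normalized to
// ext(%step) + ext(%x), which is exactly what getExtendAddRecStart below
// builds from the returned PreStart.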
// Get the normalized zero or sign extended expression for this AddRec's Start.
template <typename ExtendOpTy>
static const SCEV *getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE,
                                        unsigned Depth) {
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE, Depth);
  if (!PreStart)
    return (SE->*GetExtendExpr)(AR->getStart(), Ty, Depth);

  return SE->getAddExpr((SE->*GetExtendExpr)(AR->getStepRecurrence(*SE), Ty,
                                             Depth),
                        (SE->*GetExtendExpr)(PreStart, Ty, Depth));
}
// Try to prove away overflow by looking at "nearby" add recurrences.  A
// motivating example for this rule: if we know `{0,+,4}` is `ult` `-1` and it
// does not itself wrap then we can conclude that `{1,+,4}` is `nuw`.
//
// Formally:
//
//     {S,+,X} == {S-T,+,X} + T
//  => Ext({S,+,X}) == Ext({S-T,+,X} + T)
//
//  If ({S-T,+,X} + T) does not overflow ... (1)
//
//  RHS == Ext({S-T,+,X} + T) == Ext({S-T,+,X}) + Ext(T)
//
//  If {S-T,+,X} does not overflow ... (2)
//
//  RHS == Ext({S-T,+,X}) + Ext(T) == {Ext(S-T),+,Ext(X)} + Ext(T)
//      == {Ext(S-T)+Ext(T),+,Ext(X)}
//
//  If (S-T)+T does not overflow ... (3)
//
//  RHS == {Ext(S-T)+Ext(T),+,Ext(X)} == {Ext(S-T+T),+,Ext(X)}
//      == {Ext(S),+,Ext(X)} == LHS
//
// Thus, if (1), (2) and (3) are true for some T, then
//   Ext({S,+,X}) == {Ext(S),+,Ext(X)}
//
// (3) is implied by (1) -- "(S-T)+T does not overflow" is simply "({S-T,+,X}+T)
// does not overflow" restricted to the 0th iteration.  Therefore we only need
// to check for (1) and (2).
//
// In the current context, S is `Start`, X is `Step`, Ext is `ExtendOpTy` and T
// is `Delta` (defined below).
template <typename ExtendOpTy>
bool ScalarEvolution::proveNoWrapByVaryingStart(const SCEV *Start,
                                                const SCEV *Step,
                                                const Loop *L) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;

  // We restrict `Start` to a constant to prevent SCEV from spending too much
  // time here.  It is correct (but more expensive) to continue with a
  // non-constant `Start` and do a general SCEV subtraction to compute
  // `PreStart` below.
  const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start);
  if (!StartC)
    return false;

  APInt StartAI = StartC->getAPInt();

  for (unsigned Delta : {-2, -1, 1, 2}) {
    const SCEV *PreStart = getConstant(StartAI - Delta);

    FoldingSetNodeID ID;
    ID.AddInteger(scAddRecExpr);
    ID.AddPointer(PreStart);
    ID.AddPointer(Step);
    ID.AddPointer(L);
    void *IP = nullptr;
    const auto *PreAR =
      static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));

    // Give up if we don't already have the add recurrence we need because
    // actually constructing an add recurrence is relatively expensive.
    if (PreAR && PreAR->getNoWrapFlags(WrapType)) {  // proves (2)
      const SCEV *DeltaS = getConstant(StartC->getType(), Delta);
      ICmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
      const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(
          DeltaS, &Pred, this);
      if (Limit && isKnownPredicate(Pred, PreAR, Limit))  // proves (1)
        return true;
    }
  }

  return false;
}
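// For example (illustrative): to prove {1,+,4} is <nuw>, the loop above tries
// Delta == 1 and looks up the sibling recurrence {0,+,4}.  If that recurrence
// is already known <nuw> (proving (2)) and the predicate "{0,+,4} u< -1"
// holds (proving (1)), then {1,+,4} == {0,+,4} + 1 cannot unsigned-wrap
// either, matching the motivating example in the comment above.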
// Finds an integer D for an expression (C + x + y + ...) such that the top
// level addition in (D + (C - D + x + y + ...)) would not wrap (signed or
// unsigned) and the number of trailing zeros of (C - D + x + y + ...) is
// maximized, where C is the \p ConstantTerm, x, y, ... are arbitrary SCEVs, and
// the (C + x + y + ...) expression is \p WholeAddExpr.
static APInt extractConstantWithoutWrapping(ScalarEvolution &SE,
                                            const SCEVConstant *ConstantTerm,
                                            const SCEVAddExpr *WholeAddExpr) {
  const APInt C = ConstantTerm->getAPInt();
  const unsigned BitWidth = C.getBitWidth();
  // Find number of trailing zeros of (x + y + ...) w/o the C first:
  uint32_t TZ = BitWidth;
  for (unsigned I = 1, E = WholeAddExpr->getNumOperands(); I < E && TZ; ++I)
    TZ = std::min(TZ, SE.GetMinTrailingZeros(WholeAddExpr->getOperand(I)));
  if (TZ) {
    // Set D to be as many least significant bits of C as possible while still
    // guaranteeing that adding D to (C - D + x + y + ...) won't cause a wrap:
    return TZ < BitWidth ? C.trunc(TZ).zext(BitWidth) : C;
  }
  return APInt(BitWidth, 0);
}

// Finds an integer D for an affine AddRec expression {C,+,x} such that the top
// level addition in (D + {C-D,+,x}) would not wrap (signed or unsigned) and the
// number of trailing zeros of (C - D + x * n) is maximized, where C is the \p
// ConstantStart, x is an arbitrary \p Step, and n is the loop trip count.
static APInt extractConstantWithoutWrapping(ScalarEvolution &SE,
                                            const APInt &ConstantStart,
                                            const SCEV *Step) {
  const unsigned BitWidth = ConstantStart.getBitWidth();
  const uint32_t TZ = SE.GetMinTrailingZeros(Step);
  if (TZ)
    return TZ < BitWidth ? ConstantStart.trunc(TZ).zext(BitWidth)
                         : ConstantStart;
  return APInt(BitWidth, 0);
}
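// For example (illustrative): with C == 71 (0b1000111) and remaining operands
// x, y whose sum is known to have at least 4 trailing zero bits, TZ == 4 and
// D == 0b0111 == 7.  Adding 7 back to (64 + x + y) only sets low bits that
// are known to be zero, so the top-level addition cannot wrap in either the
// signed or the unsigned sense.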
const SCEV *
ScalarEvolution::getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(), Ty)));

  // zext(zext(x)) --> zext(x)
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scZeroExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  if (Depth > MaxCastDepth) {
    SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
                                                     Op, Ty);
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
    return S;
  }

  // zext(trunc(x)) --> zext(x) or x or trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
    // It's possible the bits taken off by the truncate were all zero bits. If
    // so, we should be able to simplify this further.
    const SCEV *X = ST->getOperand();
    ConstantRange CR = getUnsignedRange(X);
    unsigned TruncBits = getTypeSizeInBits(ST->getType());
    unsigned NewBits = getTypeSizeInBits(Ty);
    if (CR.truncate(TruncBits).zeroExtend(NewBits).contains(
            CR.zextOrTrunc(NewBits)))
      return getTruncateOrZeroExtend(X, Ty, Depth);
  }

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can zero extend all of the
  // operands (often constants).  This allows analysis of something like
  // this:  for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      if (!AR->hasNoUnsignedWrap()) {
        auto NewFlags = proveNoWrapViaConstantRanges(AR);
        const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(NewFlags);
      }

      // If we have special knowledge that this addrec won't overflow,
      // we don't need to do any further analysis.
      if (AR->hasNoUnsignedWrap())
        return getAddRecExpr(
            getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1),
            getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.

        // Check whether the backedge-taken count can be losslessly cast to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
            getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth);
        const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend(
            CastedMaxBECount, MaxBECount->getType(), Depth);
        if (MaxBECount == RecastedMaxBECount) {
          Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no unsigned overflow.
          const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step,
                                        SCEV::FlagAnyWrap, Depth + 1);
          const SCEV *ZAdd = getZeroExtendExpr(getAddExpr(Start, ZMul,
                                                          SCEV::FlagAnyWrap,
                                                          Depth + 1),
                                               WideTy, Depth + 1);
          const SCEV *WideStart = getZeroExtendExpr(Start, WideTy, Depth + 1);
          const SCEV *WideMaxBECount =
            getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
          const SCEV *OperandExtendedAdd =
            getAddExpr(WideStart,
                       getMulExpr(WideMaxBECount,
                                  getZeroExtendExpr(Step, WideTy, Depth + 1),
                                  SCEV::FlagAnyWrap, Depth + 1),
                       SCEV::FlagAnyWrap, Depth + 1);
          if (ZAdd == OperandExtendedAdd) {
            // Cache knowledge of AR NUW, which is propagated to this AddRec.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getZeroExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
          // Similar to above, only this time treat the step value as signed.
          // This covers loops that count down.
          OperandExtendedAdd =
            getAddExpr(WideStart,
                       getMulExpr(WideMaxBECount,
                                  getSignExtendExpr(Step, WideTy, Depth + 1),
                                  SCEV::FlagAnyWrap, Depth + 1),
                       SCEV::FlagAnyWrap, Depth + 1);
          if (ZAdd == OperandExtendedAdd) {
            // Cache knowledge of AR NW, which is propagated to this AddRec.
            // Negative step causes unsigned wrap, but it still can't self-wrap.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getSignExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
        }
      }

      // Normally, in the cases we can prove no-overflow via a
      // backedge guarding condition, we can also compute a backedge
      // taken count for the loop.  The exceptions are assumptions and
      // guards present in the loop -- SCEV is not great at exploiting
      // these to compute max backedge taken counts, but can still use
      // these to prove lack of overflow.  Use this fact to avoid
      // doing extra work that may not pay off.
      if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards ||
          !AC.assumptions().empty()) {
        // If the backedge is guarded by a comparison with the pre-inc
        // value the addrec is safe. Also, if the entry is guarded by
        // a comparison with the start value and the backedge is
        // guarded by a comparison with the post-inc value, the addrec
        // is safe.
        if (isKnownPositive(Step)) {
          const SCEV *N = getConstant(APInt::getMinValue(BitWidth) -
                                      getUnsignedRangeMax(Step));
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) ||
              isKnownOnEveryIteration(ICmpInst::ICMP_ULT, AR, N)) {
            // Cache knowledge of AR NUW, which is propagated to this
            // AddRec.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getZeroExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
        } else if (isKnownNegative(Step)) {
          const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) -
                                      getSignedRangeMin(Step));
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) ||
              isKnownOnEveryIteration(ICmpInst::ICMP_UGT, AR, N)) {
            // Cache knowledge of AR NW, which is propagated to this
            // AddRec.  Negative step causes unsigned wrap, but it
            // still can't self-wrap.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getSignExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
        }
      }

      // zext({C,+,Step}) --> (zext(D) + zext({C-D,+,Step}))<nuw><nsw>
      // if D + (C - D + Step * n) could be proven to not unsigned wrap
      // where D maximizes the number of trailing zeros of (C - D + Step * n)
      if (const auto *SC = dyn_cast<SCEVConstant>(Start)) {
        const APInt &C = SC->getAPInt();
        const APInt &D = extractConstantWithoutWrapping(*this, C, Step);
        if (D != 0) {
          const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth);
          const SCEV *SResidual =
              getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags());
          const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1);
          return getAddExpr(SZExtD, SZExtR,
                            (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
                            Depth + 1);
        }
      }
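      // Illustrative example: for zext({5,+,4}) with GetMinTrailingZeros(4)
      // == 2, D = 1 and the split yields zext(1) + zext({4,+,4}), keeping the
      // 4-aligned residual recurrence together.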
      if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) {
        const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
        return getAddRecExpr(
            getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1),
            getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
      }
    }

  // zext(A % B) --> zext(A) % zext(B)
  {
    const SCEV *LHS;
    const SCEV *RHS;
    if (matchURem(Op, LHS, RHS))
      return getURemExpr(getZeroExtendExpr(LHS, Ty, Depth + 1),
                         getZeroExtendExpr(RHS, Ty, Depth + 1));
  }

  // zext(A / B) --> zext(A) / zext(B).
  if (auto *Div = dyn_cast<SCEVUDivExpr>(Op))
    return getUDivExpr(getZeroExtendExpr(Div->getLHS(), Ty, Depth + 1),
                       getZeroExtendExpr(Div->getRHS(), Ty, Depth + 1));

  if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) {
    // zext((A + B + ...)<nuw>) --> (zext(A) + zext(B) + ...)<nuw>
    if (SA->hasNoUnsignedWrap()) {
      // If the addition does not unsign overflow then we can, by definition,
      // commute the zero extension with the addition operation.
      SmallVector<const SCEV *, 4> Ops;
      for (const auto *Op : SA->operands())
        Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1));
      return getAddExpr(Ops, SCEV::FlagNUW, Depth + 1);
    }

    // zext(C + x + y + ...) --> (zext(D) + zext((C - D) + x + y + ...))
    // if D + (C - D + x + y + ...) could be proven to not unsigned wrap
    // where D maximizes the number of trailing zeros of (C - D + x + y + ...)
    //
    // Often address arithmetics contain expressions like
    // (zext (add (shl X, C1), C2)), for instance, (zext (5 + (4 * X))).
    // This transformation is useful while proving that such expressions are
    // equal or differ by a small constant amount, see LoadStoreVectorizer pass.
    if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) {
      const APInt &D = extractConstantWithoutWrapping(*this, SC, SA);
      if (D != 0) {
        const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth);
        const SCEV *SResidual =
            getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth);
        const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1);
        return getAddExpr(SZExtD, SZExtR,
                          (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
                          Depth + 1);
      }
    }
  }

  if (auto *SM = dyn_cast<SCEVMulExpr>(Op)) {
    // zext((A * B * ...)<nuw>) --> (zext(A) * zext(B) * ...)<nuw>
    if (SM->hasNoUnsignedWrap()) {
      // If the multiply does not unsign overflow then we can, by definition,
      // commute the zero extension with the multiply operation.
      SmallVector<const SCEV *, 4> Ops;
      for (const auto *Op : SM->operands())
        Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1));
      return getMulExpr(Ops, SCEV::FlagNUW, Depth + 1);
    }

    // zext(2^K * (trunc X to iN)) to iM ->
    //   2^K * (zext(trunc X to i{N-K}) to iM)<nuw>
    //
    //  Proof:
    //
    //     zext(2^K * (trunc X to iN)) to iM
    //   = zext((trunc X to iN) << K) to iM
    //   = zext((trunc X to i{N-K}) << K)<nuw> to iM
    //     (because shl removes the top K bits)
    //   = zext((2^K * (trunc X to i{N-K}))<nuw>) to iM
    //   = (2^K * (zext(trunc X to i{N-K}) to iM))<nuw>.
    //
    if (SM->getNumOperands() == 2)
      if (auto *MulLHS = dyn_cast<SCEVConstant>(SM->getOperand(0)))
        if (MulLHS->getAPInt().isPowerOf2())
          if (auto *TruncRHS = dyn_cast<SCEVTruncateExpr>(SM->getOperand(1))) {
            int NewTruncBits = getTypeSizeInBits(TruncRHS->getType()) -
                               MulLHS->getAPInt().logBase2();
            Type *NewTruncTy = IntegerType::get(getContext(), NewTruncBits);
            return getMulExpr(
                getZeroExtendExpr(MulLHS, Ty),
                getZeroExtendExpr(
                    getTruncateExpr(TruncRHS->getOperand(), NewTruncTy), Ty),
                SCEV::FlagNUW, Depth + 1);
          }
  }

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
                                                   Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}
const SCEV *
ScalarEvolution::getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(), Ty)));

  // sext(sext(x)) --> sext(x)
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getSignExtendExpr(SS->getOperand(), Ty, Depth + 1);

  // sext(zext(x)) --> zext(x)
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scSignExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  // Limit recursion depth.
  if (Depth > MaxCastDepth) {
    SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
                                                     Op, Ty);
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
    return S;
  }

  // sext(trunc(x)) --> sext(x) or x or trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
    // It's possible the bits taken off by the truncate were all sign bits. If
    // so, we should be able to simplify this further.
    const SCEV *X = ST->getOperand();
    ConstantRange CR = getSignedRange(X);
    unsigned TruncBits = getTypeSizeInBits(ST->getType());
    unsigned NewBits = getTypeSizeInBits(Ty);
    if (CR.truncate(TruncBits).signExtend(NewBits).contains(
            CR.sextOrTrunc(NewBits)))
      return getTruncateOrSignExtend(X, Ty, Depth);
  }

  if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) {
    // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw>
    if (SA->hasNoSignedWrap()) {
      // If the addition does not sign overflow then we can, by definition,
      // commute the sign extension with the addition operation.
      SmallVector<const SCEV *, 4> Ops;
      for (const auto *Op : SA->operands())
        Ops.push_back(getSignExtendExpr(Op, Ty, Depth + 1));
      return getAddExpr(Ops, SCEV::FlagNSW, Depth + 1);
    }

    // sext(C + x + y + ...) --> (sext(D) + sext((C - D) + x + y + ...))
    // if D + (C - D + x + y + ...) could be proven to not signed wrap
    // where D maximizes the number of trailing zeros of (C - D + x + y + ...)
    //
    // For instance, this will bring two seemingly different expressions:
    //     1 + sext(5 + 20 * %x + 24 * %y)  and
    //         sext(6 + 20 * %x + 24 * %y)
    // to the same form:
    //     2 + sext(4 + 20 * %x + 24 * %y)
    if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) {
      const APInt &D = extractConstantWithoutWrapping(*this, SC, SA);
      if (D != 0) {
        const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth);
        const SCEV *SResidual =
            getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth);
        const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1);
        return getAddExpr(SSExtD, SSExtR,
                          (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
                          Depth + 1);
      }
    }
  }
  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can sign extend all of the
  // operands (often constants).  This allows analysis of something like
  // this:  for (signed char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      const SCEV *Start = AR->getStart();
      const SCEV *Step = AR->getStepRecurrence(*this);
      unsigned BitWidth = getTypeSizeInBits(AR->getType());
      const Loop *L = AR->getLoop();

      if (!AR->hasNoSignedWrap()) {
        auto NewFlags = proveNoWrapViaConstantRanges(AR);
        const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(NewFlags);
      }

      // If we have special knowledge that this addrec won't overflow,
      // we don't need to do any further analysis.
      if (AR->hasNoSignedWrap())
        return getAddRecExpr(
            getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
            getSignExtendExpr(Step, Ty, Depth + 1), L, SCEV::FlagNSW);

      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.

        // Check whether the backedge-taken count can be losslessly cast to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
            getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth);
        const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend(
            CastedMaxBECount, MaxBECount->getType(), Depth);
        if (MaxBECount == RecastedMaxBECount) {
          Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no signed overflow.
          const SCEV *SMul = getMulExpr(CastedMaxBECount, Step,
                                        SCEV::FlagAnyWrap, Depth + 1);
          const SCEV *SAdd = getSignExtendExpr(getAddExpr(Start, SMul,
                                                          SCEV::FlagAnyWrap,
                                                          Depth + 1),
                                               WideTy, Depth + 1);
          const SCEV *WideStart = getSignExtendExpr(Start, WideTy, Depth + 1);
          const SCEV *WideMaxBECount =
            getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
          const SCEV *OperandExtendedAdd =
            getAddExpr(WideStart,
                       getMulExpr(WideMaxBECount,
                                  getSignExtendExpr(Step, WideTy, Depth + 1),
                                  SCEV::FlagAnyWrap, Depth + 1),
                       SCEV::FlagAnyWrap, Depth + 1);
          if (SAdd == OperandExtendedAdd) {
            // Cache knowledge of AR NSW, which is propagated to this AddRec.
            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getSignExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
          // Similar to above, only this time treat the step value as unsigned.
          // This covers loops that count up with an unsigned step.
          OperandExtendedAdd =
            getAddExpr(WideStart,
                       getMulExpr(WideMaxBECount,
                                  getZeroExtendExpr(Step, WideTy, Depth + 1),
                                  SCEV::FlagAnyWrap, Depth + 1),
                       SCEV::FlagAnyWrap, Depth + 1);
          if (SAdd == OperandExtendedAdd) {
            // If AR wraps around then
            //
            //    abs(Step) * MaxBECount > unsigned-max(AR->getType())
            // => SAdd != OperandExtendedAdd
            //
            // Thus (AR is not NW => SAdd != OperandExtendedAdd) <=>
            // (SAdd == OperandExtendedAdd => AR is NW)

            const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);

            // Return the expression with the addrec on the outside.
            return getAddRecExpr(
                getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this,
                                                         Depth + 1),
                getZeroExtendExpr(Step, Ty, Depth + 1), L,
                AR->getNoWrapFlags());
          }
        }
      }

      // Normally, in the cases we can prove no-overflow via a
      // backedge guarding condition, we can also compute a backedge
      // taken count for the loop.  The exceptions are assumptions and
      // guards present in the loop -- SCEV is not great at exploiting
      // these to compute max backedge taken counts, but can still use
      // these to prove lack of overflow.  Use this fact to avoid
      // doing extra work that may not pay off.

      if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards ||
          !AC.assumptions().empty()) {
        // If the backedge is guarded by a comparison with the pre-inc
        // value the addrec is safe. Also, if the entry is guarded by
        // a comparison with the start value and the backedge is
        // guarded by a comparison with the post-inc value, the addrec
        // is safe.
        ICmpInst::Predicate Pred;
        const SCEV *OverflowLimit =
            getSignedOverflowLimitForStep(Step, &Pred, this);
        if (OverflowLimit &&
            (isLoopBackedgeGuardedByCond(L, Pred, AR, OverflowLimit) ||
             isKnownOnEveryIteration(Pred, AR, OverflowLimit))) {
          // Cache knowledge of AR NSW, then propagate NSW to the wide AddRec.
          const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
          return getAddRecExpr(
              getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
              getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
        }
      }

      // sext({C,+,Step}) --> (sext(D) + sext({C-D,+,Step}))<nuw><nsw>
      // if D + (C - D + Step * n) could be proven to not signed wrap
      // where D maximizes the number of trailing zeros of (C - D + Step * n)
      if (const auto *SC = dyn_cast<SCEVConstant>(Start)) {
        const APInt &C = SC->getAPInt();
        const APInt &D = extractConstantWithoutWrapping(*this, C, Step);
        if (D != 0) {
          const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth);
          const SCEV *SResidual =
              getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags());
          const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1);
          return getAddExpr(SSExtD, SSExtR,
                            (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
                            Depth + 1);
        }
      }

      if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) {
        const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
        return getAddRecExpr(
            getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
            getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
      }
    }

  // If the input value is provably positive and we could not simplify
  // away the sext build a zext instead.
  if (isKnownNonNegative(Op))
    return getZeroExtendExpr(Op, Ty, Depth + 1);

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
                                                   Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}
/// getAnyExtendExpr - Return a SCEV for the given operand extended with
/// unspecified bits out to the given type.
const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
                                              Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Sign-extend negative constants.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    if (SC->getAPInt().isNegative())
      return getSignExtendExpr(Op, Ty);

  // Peel off a truncate cast.
  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
    const SCEV *NewOp = T->getOperand();
    if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
      return getAnyExtendExpr(NewOp, Ty);
    return getTruncateOrNoop(NewOp, Ty);
  }

  // Next try a zext cast. If the cast is folded, use it.
  const SCEV *ZExt = getZeroExtendExpr(Op, Ty);
  if (!isa<SCEVZeroExtendExpr>(ZExt))
    return ZExt;

  // Next try a sext cast. If the cast is folded, use it.
  const SCEV *SExt = getSignExtendExpr(Op, Ty);
  if (!isa<SCEVSignExtendExpr>(SExt))
    return SExt;

  // Force the cast to be folded into the operands of an addrec.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Ops;
    for (const SCEV *Op : AR->operands())
      Ops.push_back(getAnyExtendExpr(Op, Ty));
    return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW);
  }

  // If the expression is obviously signed, use the sext cast value.
  if (isa<SCEVSMaxExpr>(Op))
    return SExt;

  // Absent any other information, use the zext cast value.
  return ZExt;
}
/// Process the given Ops list, which is a list of operands to be added under
/// the given scale, update the given map. This is a helper function for
/// getAddRecExpr. As an example of what it does, given a sequence of operands
/// that would form an add expression like this:
///
///    m + n + 13 + (A * (o + p + (B * (q + m + 29)))) + r + (-1 * r)
///
/// where A and B are constants, update the map with these values:
///
///    (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
///
/// and add 13 + A*B*29 to AccumulatedConstant.
/// This will allow getAddRecExpr to produce this:
///
///    13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
///
/// This form often exposes folding opportunities that are hidden in
/// the original operand list.
///
/// Return true iff it appears that any interesting folding opportunities
/// may be exposed. This helps getAddRecExpr short-circuit extra work in
/// the common case where no interesting opportunities are present, and
/// is also used as a check to avoid infinite recursion.
static bool
CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
                             SmallVectorImpl<const SCEV *> &NewOps,
                             APInt &AccumulatedConstant,
                             const SCEV *const *Ops, size_t NumOperands,
                             const APInt &Scale,
                             ScalarEvolution &SE) {
  bool Interesting = false;

  // Iterate over the add operands. They are sorted, with constants first.
  unsigned i = 0;
  while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
    ++i;
    // Pull a buried constant out to the outside.
    if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero())
      Interesting = true;
    AccumulatedConstant += Scale * C->getAPInt();
  }

  // Next comes everything else. We're especially interested in multiplies
  // here, but they're in the middle, so just visit the rest with one loop.
  for (; i != NumOperands; ++i) {
    const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
    if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
      APInt NewScale =
          Scale * cast<SCEVConstant>(Mul->getOperand(0))->getAPInt();
      if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
        // A multiplication of a constant with another add; recurse.
        const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1));
        Interesting |=
            CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                         Add->op_begin(),
                                         Add->getNumOperands(),
                                         NewScale, SE);
      } else {
        // A multiplication of a constant with some other value. Update
        // the map.
        SmallVector<const SCEV *, 4> MulOps(Mul->op_begin()+1, Mul->op_end());
        const SCEV *Key = SE.getMulExpr(MulOps);
        auto Pair = M.insert({Key, NewScale});
        if (Pair.second) {
          NewOps.push_back(Pair.first->first);
        } else {
          Pair.first->second += NewScale;
          // The map already had an entry for this value, which may indicate
          // a folding opportunity.
          Interesting = true;
        }
      }
    } else {
      // An ordinary operand. Update the map.
      std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
          M.insert({Ops[i], Scale});
      if (Pair.second) {
        NewOps.push_back(Pair.first->first);
      } else {
        Pair.first->second += Scale;
        // The map already had an entry for this value, which may indicate
        // a folding opportunity.
        Interesting = true;
      }
    }
  }

  return Interesting;
}
// We're trying to construct a SCEV of type `Type' with `Ops' as operands and
// `OldFlags' as can't-wrap behavior.  Infer a more aggressive set of
// can't-overflow flags for the operation if possible.
static SCEV::NoWrapFlags
StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type,
                      const ArrayRef<const SCEV *> Ops,
                      SCEV::NoWrapFlags Flags) {
  using namespace std::placeholders;

  using OBO = OverflowingBinaryOperator;

  bool CanAnalyze =
      Type == scAddExpr || Type == scAddRecExpr || Type == scMulExpr;
  (void)CanAnalyze;
  assert(CanAnalyze && "don't call from other places!");

  int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW;
  SCEV::NoWrapFlags SignOrUnsignWrap =
      ScalarEvolution::maskFlags(Flags, SignOrUnsignMask);

  // If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
  auto IsKnownNonNegative = [&](const SCEV *S) {
    return SE->isKnownNonNegative(S);
  };

  if (SignOrUnsignWrap == SCEV::FlagNSW && all_of(Ops, IsKnownNonNegative))
    Flags =
        ScalarEvolution::setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask);

  SignOrUnsignWrap = ScalarEvolution::maskFlags(Flags, SignOrUnsignMask);

  if (SignOrUnsignWrap != SignOrUnsignMask &&
      (Type == scAddExpr || Type == scMulExpr) && Ops.size() == 2 &&
      isa<SCEVConstant>(Ops[0])) {

    auto Opcode = [&] {
      switch (Type) {
      case scAddExpr:
        return Instruction::Add;
      case scMulExpr:
        return Instruction::Mul;
      default:
        llvm_unreachable("Unexpected SCEV op.");
      }
    }();

    const APInt &C = cast<SCEVConstant>(Ops[0])->getAPInt();

    // (A <opcode> C) --> (A <opcode> C)<nsw> if the op doesn't sign overflow.
    if (!(SignOrUnsignWrap & SCEV::FlagNSW)) {
      auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
          Opcode, C, OBO::NoSignedWrap);
      if (NSWRegion.contains(SE->getSignedRange(Ops[1])))
        Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW);
    }

    // (A <opcode> C) --> (A <opcode> C)<nuw> if the op doesn't unsign overflow.
    if (!(SignOrUnsignWrap & SCEV::FlagNUW)) {
      auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
          Opcode, C, OBO::NoUnsignedWrap);
      if (NUWRegion.contains(SE->getUnsignedRange(Ops[1])))
        Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
    }
  }

  return Flags;
}
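// Example of the constant-range reasoning above (illustrative): for an i8 add
// with C = 1, the guaranteed no-signed-wrap region is [-128, 127), since
// adding 1 sign-wraps only when the other operand is 127; the add therefore
// gets <nsw> whenever the signed range of Ops[1] excludes 127.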
bool ScalarEvolution::isAvailableAtLoopEntry(const SCEV *S, const Loop *L) {
  return isLoopInvariant(S, L) && properlyDominates(S, L->getHeader());
}
/// Get a canonical add expression, or something simpler if possible.
const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
                                        SCEV::NoWrapFlags Flags,
                                        unsigned Depth) {
  assert(!(Flags & ~(SCEV::FlagNUW | SCEV::FlagNSW)) &&
         "only nuw or nsw allowed");
  assert(!Ops.empty() && "Cannot get empty add!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
           "SCEVAddExpr operand types don't match!");
#endif

  // Sort by complexity, this groups all similar expression types together.
  GroupByComplexity(Ops, &LI, DT);

  Flags = StrengthenNoWrapFlags(this, scAddExpr, Ops, Flags);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      Ops[0] = getConstant(LHSC->getAPInt() + RHSC->getAPInt());
      if (Ops.size() == 2) return Ops[0];
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant zero being added, strip it off.
    if (LHSC->getValue()->isZero()) {
      Ops.erase(Ops.begin());
      --Idx;
    }

    if (Ops.size() == 1) return Ops[0];
  }

  // Limit recursion calls depth.
  if (Depth > MaxArithDepth || hasHugeExpression(Ops))
    return getOrCreateAddExpr(Ops, Flags);

  // Okay, check to see if the same value occurs in the operand list more than
  // once.  If so, merge them together into a multiply expression.  Since we
  // sorted the list, these values are required to be adjacent.
  Type *Ty = Ops[0]->getType();
  bool FoundMatch = false;
  for (unsigned i = 0, e = Ops.size(); i != e-1; ++i)
    if (Ops[i] == Ops[i+1]) {  //  X + Y + Y  -->  X + Y*2
      // Scan ahead to count how many equal operands there are.
      unsigned Count = 2;
      while (i+Count != e && Ops[i+Count] == Ops[i])
        ++Count;
      // Merge the values into a multiply.
      const SCEV *Scale = getConstant(Ty, Count);
      const SCEV *Mul = getMulExpr(Scale, Ops[i], SCEV::FlagAnyWrap, Depth + 1);
      if (Ops.size() == Count)
        return Mul;
      Ops[i] = Mul;
      Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count);
      --i; e -= Count - 1;
      FoundMatch = true;
    }
  if (FoundMatch)
    return getAddExpr(Ops, Flags, Depth + 1);

  // Check for truncates. If all the operands are truncated from the same
  // type, see if factoring out the truncate would permit the result to be
  // folded. eg., n*trunc(x) + m*trunc(y) --> trunc(trunc(m)*x + trunc(n)*y)
  // if the contents of the resulting outer trunc fold to something simple.
  auto FindTruncSrcType = [&]() -> Type * {
    // We're ultimately looking to fold an addrec of truncs and muls of only
    // constants and truncs, so if we find any other types of SCEV
    // as operands of the addrec then we bail and return nullptr here.
    // Otherwise, we return the type of the operand of a trunc that we find.
    if (auto *T = dyn_cast<SCEVTruncateExpr>(Ops[Idx]))
      return T->getOperand()->getType();
    if (const auto *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
      const auto *LastOp = Mul->getOperand(Mul->getNumOperands() - 1);
      if (const auto *T = dyn_cast<SCEVTruncateExpr>(LastOp))
        return T->getOperand()->getType();
    }
    return nullptr;
  };
  if (auto *SrcType = FindTruncSrcType()) {
    SmallVector<const SCEV *, 8> LargeOps;
    bool Ok = true;
    // Check all the operands to see if they can be represented in the
    // source type of the truncate.
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) {
        if (T->getOperand()->getType() != SrcType) {
          Ok = false;
          break;
        }
        LargeOps.push_back(T->getOperand());
      } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
        LargeOps.push_back(getAnyExtendExpr(C, SrcType));
      } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
        SmallVector<const SCEV *, 8> LargeMulOps;
        for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
          if (const SCEVTruncateExpr *T =
                dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
            if (T->getOperand()->getType() != SrcType) {
              Ok = false;
              break;
            }
            LargeMulOps.push_back(T->getOperand());
          } else if (const auto *C = dyn_cast<SCEVConstant>(M->getOperand(j))) {
            LargeMulOps.push_back(getAnyExtendExpr(C, SrcType));
          } else {
            Ok = false;
            break;
          }
        }
        if (Ok)
          LargeOps.push_back(getMulExpr(LargeMulOps, SCEV::FlagAnyWrap, Depth + 1));
      } else {
        Ok = false;
        break;
      }
    }
    if (Ok) {
      // Evaluate the expression in the larger type.
      const SCEV *Fold = getAddExpr(LargeOps, SCEV::FlagAnyWrap, Depth + 1);
      // If it folds to something simple, use it. Otherwise, don't.
      if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
        return getTruncateExpr(Fold, Ty);
    }
  }

  // Skip past any other cast SCEVs.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
    ++Idx;

  // If there are add operands they would be next.
  if (Idx < Ops.size()) {
    bool DeletedAdd = false;
    while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
      if (Ops.size() > AddOpsInlineThreshold ||
          Add->getNumOperands() > AddOpsInlineThreshold)
        break;
      // If we have an add, expand the add operands onto the end of the operands
      // list.
      Ops.erase(Ops.begin()+Idx);
      Ops.append(Add->op_begin(), Add->op_end());
      DeletedAdd = true;
    }

    // If we deleted at least one add, we added operands to the end of the list,
    // and they are not necessarily sorted.  Recurse to resort and resimplify
    // any operands we just acquired.
    if (DeletedAdd)
      return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
  }

  // Skip over the add expression until we get to a multiply.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
    ++Idx;

  // Check to see if there are any folding opportunities present with
  // operands multiplied by constant values.
  if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
    uint64_t BitWidth = getTypeSizeInBits(Ty);
    DenseMap<const SCEV *, APInt> M;
    SmallVector<const SCEV *, 8> NewOps;
    APInt AccumulatedConstant(BitWidth, 0);
    if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                     Ops.data(), Ops.size(),
                                     APInt(BitWidth, 1), *this)) {
      struct APIntCompare {
        bool operator()(const APInt &LHS, const APInt &RHS) const {
          return LHS.ult(RHS);
        }
      };

      // Some interesting folding opportunity is present, so it's worthwhile to
      // re-generate the operands list. Group the operands by constant scale,
      // to avoid multiplying by the same constant scale multiple times.
      std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
      for (const SCEV *NewOp : NewOps)
        MulOpLists[M.find(NewOp)->second].push_back(NewOp);
      // Re-generate the operands list.
      Ops.clear();
      if (AccumulatedConstant != 0)
        Ops.push_back(getConstant(AccumulatedConstant));
      for (auto &MulOp : MulOpLists)
        if (MulOp.first != 0)
          Ops.push_back(getMulExpr(
              getConstant(MulOp.first),
              getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1),
              SCEV::FlagAnyWrap, Depth + 1));
      if (Ops.empty())
        return getZero(Ty);
      if (Ops.size() == 1)
        return Ops[0];
      return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
    }
  }

  // If we are adding something to a multiply expression, make sure the
  // something is not already an operand of the multiply.  If so, merge it into
  // the multiply.
  for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
    const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
    for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
      const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
      if (isa<SCEVConstant>(MulOpSCEV))
        continue;
      for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
        if (MulOpSCEV == Ops[AddOp]) {
          // Fold W + X + (X * Y * Z)  -->  W + (X * ((Y*Z)+1))
          const SCEV *InnerMul = Mul->getOperand(MulOp == 0);
          if (Mul->getNumOperands() != 2) {
            // If the multiply has more than two operands, we must get the
            // Y*Z term.
            SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
                                                Mul->op_begin()+MulOp);
            MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
            InnerMul = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
          }
          SmallVector<const SCEV *, 2> TwoOps = {getOne(Ty), InnerMul};
          const SCEV *AddOne = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
          const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV,
                                            SCEV::FlagAnyWrap, Depth + 1);
          if (Ops.size() == 2) return OuterMul;
          if (AddOp < Idx) {
            Ops.erase(Ops.begin()+AddOp);
            Ops.erase(Ops.begin()+Idx-1);
          } else {
            Ops.erase(Ops.begin()+Idx);
            Ops.erase(Ops.begin()+AddOp-1);
          }
          Ops.push_back(OuterMul);
          return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
        }

      // Check this multiply against other multiplies being added together.
      for (unsigned OtherMulIdx = Idx+1;
           OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]);
           ++OtherMulIdx) {
        const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]);
        // If MulOp occurs in OtherMul, we can fold the two multiplies
        // together.
        for (unsigned OMulOp = 0, e = OtherMul->getNumOperands();
             OMulOp != e; ++OMulOp)
          if (OtherMul->getOperand(OMulOp) == MulOpSCEV) {
            // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E))
            const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0);
            if (Mul->getNumOperands() != 2) {
              SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
                                                  Mul->op_begin()+MulOp);
              MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
              InnerMul1 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
            }
            const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0);
            if (OtherMul->getNumOperands() != 2) {
              SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(),
                                                  OtherMul->op_begin()+OMulOp);
              MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end());
              InnerMul2 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
            }
            SmallVector<const SCEV *, 2> TwoOps = {InnerMul1, InnerMul2};
            const SCEV *InnerMulSum =
                getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
            const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum,
                                              SCEV::FlagAnyWrap, Depth + 1);
            if (Ops.size() == 2) return OuterMul;
            Ops.erase(Ops.begin()+Idx);
            Ops.erase(Ops.begin()+OtherMulIdx-1);
            Ops.push_back(OuterMul);
            return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
          }
      }
    }
  }

  // If there are any add recurrences in the operands list, see if any other
  // added values are loop invariant.  If so, we can fold them into the
  // recurrence.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
    ++Idx;

  // Scan over all recurrences, trying to fold loop invariants into them.
  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
    // Scan all of the other operands to this add and add them to the vector if
    // they are loop invariant w.r.t. the recurrence.
    SmallVector<const SCEV *, 8> LIOps;
    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
    const Loop *AddRecLoop = AddRec->getLoop();
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) {
        LIOps.push_back(Ops[i]);
        Ops.erase(Ops.begin()+i);
        --i; --e;
      }

    // If we found some loop invariants, fold them into the recurrence.
    if (!LIOps.empty()) {
      //  NLI + LI + {Start,+,Step}  -->  NLI + {LI+Start,+,Step}
      LIOps.push_back(AddRec->getStart());

      SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
                                             AddRec->op_end());
      // This follows from the fact that the no-wrap flags on the outer add
      // expression are applicable on the 0th iteration, when the add recurrence
      // will be equal to its start value.
      AddRecOps[0] = getAddExpr(LIOps, Flags, Depth + 1);

      // Build the new addrec. Propagate the NUW and NSW flags if both the
      // outer add and the inner addrec are guaranteed to have no overflow.
      // Always propagate NW.
      Flags = AddRec->getNoWrapFlags(setFlags(Flags, SCEV::FlagNW));
      const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, Flags);

      // If all of the other operands were loop invariant, we are done.
      if (Ops.size() == 1) return NewRec;

      // Otherwise, add the folded AddRec by the non-invariant parts.
      for (unsigned i = 0;; ++i)
        if (Ops[i] == AddRec) {
          Ops[i] = NewRec;
          break;
        }
      return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
    }

    // Okay, if there weren't any loop invariants to be folded, check to see if
    // there are multiple AddRec's with the same loop induction variable being
    // added together.  If so, we can fold them.
    for (unsigned OtherIdx = Idx+1;
         OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
         ++OtherIdx) {
      // We expect the AddRecExpr's to be sorted in reverse dominance order,
      // so that the 1st found AddRecExpr is dominated by all others.
      assert(DT.dominates(
           cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()->getHeader(),
           AddRec->getLoop()->getHeader()) &&
        "AddRecExprs are not sorted in reverse dominance order?");
      if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
        // Other + {A,+,B}<L> + {C,+,D}<L>  -->  Other + {A+C,+,B+D}<L>
        SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
                                               AddRec->op_end());
        for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
             ++OtherIdx) {
          const auto *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
          if (OtherAddRec->getLoop() == AddRecLoop) {
            for (unsigned i = 0, e = OtherAddRec->getNumOperands();
                 i != e; ++i) {
              if (i >= AddRecOps.size()) {
                AddRecOps.append(OtherAddRec->op_begin()+i,
                                 OtherAddRec->op_end());
                break;
              }
              SmallVector<const SCEV *, 2> TwoOps = {
                  AddRecOps[i], OtherAddRec->getOperand(i)};
              AddRecOps[i] = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
            }
            Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
          }
        }
        // Step size has changed, so we cannot guarantee no self-wraparound.
        Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop, SCEV::FlagAnyWrap);
        return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
      }
    }

    // Otherwise couldn't fold anything into this recurrence.  Move onto the
    // next one.
  }

  // Okay, it looks like we really DO need an add expr.  Check to see if we
  // already have one, otherwise create a new one.
  return getOrCreateAddExpr(Ops, Flags);
}
const SCEV *
ScalarEvolution::getOrCreateAddExpr(ArrayRef<const SCEV *> Ops,
                                    SCEV::NoWrapFlags Flags) {
  FoldingSetNodeID ID;
  ID.AddInteger(scAddExpr);
  for (const SCEV *Op : Ops)
    ID.AddPointer(Op);
  void *IP = nullptr;
  SCEVAddExpr *S =
      static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  if (!S) {
    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
    S = new (SCEVAllocator)
        SCEVAddExpr(ID.Intern(SCEVAllocator), O, Ops.size());
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
  }
  S->setNoWrapFlags(Flags);
  return S;
}
const SCEV *
ScalarEvolution::getOrCreateAddRecExpr(ArrayRef<const SCEV *> Ops,
                                       const Loop *L, SCEV::NoWrapFlags Flags) {
  FoldingSetNodeID ID;
  ID.AddInteger(scAddRecExpr);
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  ID.AddPointer(L);
  void *IP = nullptr;
  SCEVAddRecExpr *S =
      static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  if (!S) {
    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
    S = new (SCEVAllocator)
        SCEVAddRecExpr(ID.Intern(SCEVAllocator), O, Ops.size(), L);
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
  }
  S->setNoWrapFlags(Flags);
  return S;
}
const SCEV *
ScalarEvolution::getOrCreateMulExpr(ArrayRef<const SCEV *> Ops,
                                    SCEV::NoWrapFlags Flags) {
  FoldingSetNodeID ID;
  ID.AddInteger(scMulExpr);
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  void *IP = nullptr;
  SCEVMulExpr *S =
    static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  if (!S) {
    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
    S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
                                        O, Ops.size());
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
  }
  S->setNoWrapFlags(Flags);
  return S;
}
static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow) {
  uint64_t k = i*j;
  if (j > 1 && k / j != i) Overflow = true;
  return k;
}

/// Compute the result of "n choose k", the binomial coefficient.  If an
/// intermediate computation overflows, Overflow will be set and the return will
/// be garbage. Overflow is not cleared on absence of overflow.
static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow) {
  // We use the multiplicative formula:
  //     n(n-1)(n-2)...(n-(k-1)) / k(k-1)(k-2)...1 .
  // At each iteration, we take the n-th term of the numerator and divide by the
  // (k-n)th term of the denominator.  This division will always produce an
  // integral result, and helps reduce the chance of overflow in the
  // intermediate computations. However, we can still overflow even when the
  // final result would fit.

  if (n == 0 || n == k) return 1;
  if (k > n) return 0;

  if (k > n/2)
    k = n-k;

  uint64_t r = 1;
  for (uint64_t i = 1; i <= k; ++i) {
    r = umul_ov(r, n-(i-1), Overflow);
    r /= i;
  }
  return r;
}
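// For example, Choose(4, 2, Overflow) computes r = (1*4)/1 = 4, then
// r = (4*3)/2 = 6, without touching Overflow since no intermediate product
// exceeds 64 bits.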
/// Determine if any of the operands in this SCEV are a constant or if
/// any of the add or multiply expressions in this SCEV contain a constant.
static bool containsConstantInAddMulChain(const SCEV *StartExpr) {
  struct FindConstantInAddMulChain {
    bool FoundConstant = false;

    bool follow(const SCEV *S) {
      FoundConstant |= isa<SCEVConstant>(S);
      return isa<SCEVAddExpr>(S) || isa<SCEVMulExpr>(S);
    }

    bool isDone() const {
      return FoundConstant;
    }
  };

  FindConstantInAddMulChain F;
  SCEVTraversal<FindConstantInAddMulChain> ST(F);
  ST.visitAll(StartExpr);
  return F.FoundConstant;
}
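// For example, this returns true for (3 + %x) * %y (the traversal descends
// through the mul into the add and finds the constant 3), but false for
// %x * %y.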
/// Get a canonical multiply expression, or something simpler if possible.
const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
                                        SCEV::NoWrapFlags Flags,
                                        unsigned Depth) {
  assert(Flags == maskFlags(Flags, SCEV::FlagNUW | SCEV::FlagNSW) &&
         "only nuw or nsw allowed");
  assert(!Ops.empty() && "Cannot get empty mul!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
           "SCEVMulExpr operand types don't match!");
#endif

  // Sort by complexity, this groups all similar expression types together.
  GroupByComplexity(Ops, &LI, DT);

  Flags = StrengthenNoWrapFlags(this, scMulExpr, Ops, Flags);

  // Limit recursion calls depth.
  if (Depth > MaxArithDepth || hasHugeExpression(Ops))
    return getOrCreateMulExpr(Ops, Flags);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {

    if (Ops.size() == 2)
      // C1*(C2+V) -> C1*C2 + C1*V
      if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1]))
        // If any of Add's ops are Adds or Muls with a constant, apply this
        // transformation as well.
        //
        // TODO: There are some cases where this transformation is not
        // profitable; for example, Add = (C0 + X) * Y + Z.  Maybe the scope of
        // this transformation should be narrowed down.
        if (Add->getNumOperands() == 2 && containsConstantInAddMulChain(Add))
          return getAddExpr(getMulExpr(LHSC, Add->getOperand(0),
                                       SCEV::FlagAnyWrap, Depth + 1),
                            getMulExpr(LHSC, Add->getOperand(1),
                                       SCEV::FlagAnyWrap, Depth + 1),
                            SCEV::FlagAnyWrap, Depth + 1);

    ++Idx;
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      ConstantInt *Fold =
          ConstantInt::get(getContext(), LHSC->getAPInt() * RHSC->getAPInt());
      Ops[0] = getConstant(Fold);
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      if (Ops.size() == 1) return Ops[0];
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant one being multiplied, strip it off.
    if (cast<SCEVConstant>(Ops[0])->getValue()->isOne()) {
      Ops.erase(Ops.begin());
      --Idx;
    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
      // If we have a multiply of zero, it will always be zero.
      return Ops[0];
    } else if (Ops[0]->isAllOnesValue()) {
      // If we have a mul by -1 of an add, try distributing the -1 among the
      // add operands.
      if (Ops.size() == 2) {
        if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) {
          SmallVector<const SCEV *, 4> NewOps;
          bool AnyFolded = false;
          for (const SCEV *AddOp : Add->operands()) {
            const SCEV *Mul = getMulExpr(Ops[0], AddOp, SCEV::FlagAnyWrap,
                                         Depth + 1);
            if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true;
            NewOps.push_back(Mul);
          }
          if (AnyFolded)
            return getAddExpr(NewOps, SCEV::FlagAnyWrap, Depth + 1);
        } else if (const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) {
          // Negation preserves a recurrence's no self-wrap property.
          SmallVector<const SCEV *, 4> Operands;
          for (const SCEV *AddRecOp : AddRec->operands())
            Operands.push_back(getMulExpr(Ops[0], AddRecOp, SCEV::FlagAnyWrap,
                                          Depth + 1));

          return getAddRecExpr(Operands, AddRec->getLoop(),
                               AddRec->getNoWrapFlags(SCEV::FlagNW));
        }
      }
    }

    if (Ops.size() == 1)
      return Ops[0];
  }

  // Skip over the add expression until we get to a multiply.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
    ++Idx;

  // If there are mul operands inline them all into this expression.
  if (Idx < Ops.size()) {
    bool DeletedMul = false;
    while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
      if (Ops.size() > MulOpsInlineThreshold)
        break;
      // If we have a mul, expand the mul operands onto the end of the
      // operands list.
      Ops.erase(Ops.begin()+Idx);
      Ops.append(Mul->op_begin(), Mul->op_end());
      DeletedMul = true;
    }

    // If we deleted at least one mul, we added operands to the end of the
    // list, and they are not necessarily sorted.  Recurse to resort and
    // resimplify any operands we just acquired.
    if (DeletedMul)
      return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
  }

  // If there are any add recurrences in the operands list, see if any other
  // added values are loop invariant.  If so, we can fold them into the
  // recurrence.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
    ++Idx;

  // Scan over all recurrences, trying to fold loop invariants into them.
  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
    // Scan all of the other operands to this mul and add them to the vector
    // if they are loop invariant w.r.t. the recurrence.
    SmallVector<const SCEV *, 8> LIOps;
    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
    const Loop *AddRecLoop = AddRec->getLoop();
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) {
        LIOps.push_back(Ops[i]);
        Ops.erase(Ops.begin()+i);
        --i; --e;
      }

    // If we found some loop invariants, fold them into the recurrence.
    if (!LIOps.empty()) {
      //  NLI * LI * {Start,+,Step}  -->  NLI * {LI*Start,+,LI*Step}
      SmallVector<const SCEV *, 4> NewOps;
      NewOps.reserve(AddRec->getNumOperands());
      const SCEV *Scale = getMulExpr(LIOps, SCEV::FlagAnyWrap, Depth + 1);
      for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
        NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i),
                                    SCEV::FlagAnyWrap, Depth + 1));

      // Build the new addrec. Propagate the NUW and NSW flags if both the
      // outer mul and the inner addrec are guaranteed to have no overflow.
      //
      // No self-wrap cannot be guaranteed after changing the step size, but
      // will be inferred if either NUW or NSW is true.
      Flags = AddRec->getNoWrapFlags(clearFlags(Flags, SCEV::FlagNW));
      const SCEV *NewRec = getAddRecExpr(NewOps, AddRecLoop, Flags);

      // If all of the other operands were loop invariant, we are done.
      if (Ops.size() == 1) return NewRec;

      // Otherwise, multiply the folded AddRec by the non-invariant parts.
      for (unsigned i = 0;; ++i)
        if (Ops[i] == AddRec) {
          Ops[i] = NewRec;
          break;
        }
      return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
    }

    // Okay, if there weren't any loop invariants to be folded, check to see
    // if there are multiple AddRec's with the same loop induction variable
    // being multiplied together.  If so, we can fold them.

    // {A1,+,A2,+,...,+,An}<L> * {B1,+,B2,+,...,+,Bn}<L>
    // = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [
    //       choose(x, 2x)*choose(2x-y, x-z)*A_{y-z}*B_z
    //   ]]],+,...up to x=2n}.
    // Note that the arguments to choose() are always integers with values
    // known at compile time, never SCEV objects.
    //
    // The implementation avoids pointless extra computations when the two
    // addrec's are of different length (mathematically, it's equivalent to
    // an infinite stream of zeros on the right).
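    // For instance, for two affine recurrences the formula reduces to
    //   {A1,+,A2} * {B1,+,B2} == {A1*B1,+,A1*B2+A2*B1+A2*B2,+,2*A2*B2},
    // i.e. the product of two linear chrecs is a quadratic chrec.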
    bool OpsModified = false;
    for (unsigned OtherIdx = Idx+1;
         OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
         ++OtherIdx) {
      const SCEVAddRecExpr *OtherAddRec =
        dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]);
      if (!OtherAddRec || OtherAddRec->getLoop() != AddRecLoop)
        continue;

      // Limit max number of arguments to avoid creation of unreasonably big
      // SCEVAddRecs with very complex operands.
      if (AddRec->getNumOperands() + OtherAddRec->getNumOperands() - 1 >
          MaxAddRecSize || isHugeExpression(AddRec) ||
          isHugeExpression(OtherAddRec))
        continue;

      bool Overflow = false;
      Type *Ty = AddRec->getType();
      bool LargerThan64Bits = getTypeSizeInBits(Ty) > 64;
      SmallVector<const SCEV *, 7> AddRecOps;
      for (int x = 0, xe = AddRec->getNumOperands() +
             OtherAddRec->getNumOperands() - 1; x != xe && !Overflow; ++x) {
        SmallVector<const SCEV *, 7> SumOps;
        for (int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
          uint64_t Coeff1 = Choose(x, 2*x - y, Overflow);
          for (int z = std::max(y-x, y-(int)AddRec->getNumOperands()+1),
                 ze = std::min(x+1, (int)OtherAddRec->getNumOperands());
               z < ze && !Overflow; ++z) {
            uint64_t Coeff2 = Choose(2*x - y, x-z, Overflow);
            uint64_t Coeff;
            if (LargerThan64Bits)
              Coeff = umul_ov(Coeff1, Coeff2, Overflow);
            else
              Coeff = Coeff1*Coeff2;
            const SCEV *CoeffTerm = getConstant(Ty, Coeff);
            const SCEV *Term1 = AddRec->getOperand(y-z);
            const SCEV *Term2 = OtherAddRec->getOperand(z);
            SumOps.push_back(getMulExpr(CoeffTerm, Term1, Term2,
                                        SCEV::FlagAnyWrap, Depth + 1));
          }
        }
        if (SumOps.empty())
          SumOps.push_back(getZero(Ty));
        AddRecOps.push_back(getAddExpr(SumOps, SCEV::FlagAnyWrap, Depth + 1));
      }
      if (!Overflow) {
        const SCEV *NewAddRec = getAddRecExpr(AddRecOps, AddRecLoop,
                                              SCEV::FlagAnyWrap);
        if (Ops.size() == 2) return NewAddRec;
        Ops[Idx] = NewAddRec;
        Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
        OpsModified = true;
        AddRec = dyn_cast<SCEVAddRecExpr>(NewAddRec);
        if (!AddRec)
          break;
      }
    }
    if (OpsModified)
      return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);

    // Otherwise couldn't fold anything into this recurrence.  Move onto the
    // next one.
  }

  // Okay, it looks like we really DO need a mul expr.  Check to see if we
  // already have one, otherwise create a new one.
  return getOrCreateMulExpr(Ops, Flags);
}
/// Represents an unsigned remainder expression based on unsigned division.
const SCEV *ScalarEvolution::getURemExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  assert(getEffectiveSCEVType(LHS->getType()) ==
         getEffectiveSCEVType(RHS->getType()) &&
         "SCEVURemExpr operand types don't match!");

  // Short-circuit easy cases
  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
    // If constant is one, the result is trivial
    if (RHSC->getValue()->isOne())
      return getZero(LHS->getType()); // X urem 1 --> 0

    // If constant is a power of two, fold into a zext(trunc(LHS)).
    if (RHSC->getAPInt().isPowerOf2()) {
      Type *FullTy = LHS->getType();
      Type *TruncTy =
          IntegerType::get(getContext(), RHSC->getAPInt().logBase2());
      return getZeroExtendExpr(getTruncateExpr(LHS, TruncTy), FullTy);
    }
  }
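  // For example, for i32 %x, (%x urem 8) becomes (zext (trunc %x to i3) to
  // i32): the low three bits are exactly the remainder modulo 8.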
  // Fallback to %a == %x urem %y == %x -<nuw> ((%x udiv %y) *<nuw> %y)
  const SCEV *UDiv = getUDivExpr(LHS, RHS);
  const SCEV *Mult = getMulExpr(UDiv, RHS, SCEV::FlagNUW);
  return getMinusSCEV(LHS, Mult, SCEV::FlagNUW);
}
/// Get a canonical unsigned division expression, or something simpler if
/// possible.
const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  assert(getEffectiveSCEVType(LHS->getType()) ==
             getEffectiveSCEVType(RHS->getType()) &&
         "SCEVUDivExpr operand types don't match!");

  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
    if (RHSC->getValue()->isOne())
      return LHS;                               // X udiv 1 --> x
    // If the denominator is zero, the result of the udiv is undefined. Don't
    // try to analyze it, because the resolution chosen here may differ from
    // the resolution chosen in other parts of the compiler.
    if (!RHSC->getValue()->isZero()) {
      // Determine if the division can be folded into the operands of LHS.
      // TODO: Generalize this to non-constants by using known-bits information.
      Type *Ty = LHS->getType();
      unsigned LZ = RHSC->getAPInt().countLeadingZeros();
      unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1;
      // For non-power-of-two values, effectively round the value up to the
      // nearest power of two.
      if (!RHSC->getAPInt().isPowerOf2())
        ++MaxShiftAmt;
      IntegerType *ExtTy =
          IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
      if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
        if (const SCEVConstant *Step =
                dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) {
          // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
          const APInt &StepInt = Step->getAPInt();
          const APInt &DivInt = RHSC->getAPInt();
          if (!StepInt.urem(DivInt) &&
              getZeroExtendExpr(AR, ExtTy) ==
                  getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
                                getZeroExtendExpr(Step, ExtTy),
                                AR->getLoop(), SCEV::FlagAnyWrap)) {
            SmallVector<const SCEV *, 4> Operands;
            for (const SCEV *Op : AR->operands())
              Operands.push_back(getUDivExpr(Op, RHS));
            return getAddRecExpr(Operands, AR->getLoop(), SCEV::FlagNW);
          }
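          // e.g. {8,+,4}<L> /u 2 folds to {4,+,2}<L> once the zext
          // comparison above proves the extended recurrence cannot wrap.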
          /// Get a canonical UDivExpr for a recurrence.
          /// {X,+,N}/C => {Y,+,N}/C where Y=X-(X%N). Safe when C%N=0.
          // We can currently only fold X%N if X is constant.
          const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart());
          if (StartC && !DivInt.urem(StepInt) &&
              getZeroExtendExpr(AR, ExtTy) ==
                  getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
                                getZeroExtendExpr(Step, ExtTy),
                                AR->getLoop(), SCEV::FlagAnyWrap)) {
            const APInt &StartInt = StartC->getAPInt();
            const APInt &StartRem = StartInt.urem(StepInt);
            if (StartRem != 0)
              LHS = getAddRecExpr(getConstant(StartInt - StartRem), Step,
                                  AR->getLoop(), SCEV::FlagNW);
          }
        }
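      // e.g. {3,+,4}<L> /u 8 is rewritten with start 0, as {0,+,4}<L> /u 8:
      // since 4 divides 8, dropping the low bits of the start value can never
      // change the quotient.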
      // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
      if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
        SmallVector<const SCEV *, 4> Operands;
        for (const SCEV *Op : M->operands())
          Operands.push_back(getZeroExtendExpr(Op, ExtTy));
        if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
          // Find an operand that's safely divisible.
          for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
            const SCEV *Op = M->getOperand(i);
            const SCEV *Div = getUDivExpr(Op, RHSC);
            if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
              Operands = SmallVector<const SCEV *, 4>(M->op_begin(),
                                                      M->op_end());
              Operands[i] = Div;
              return getMulExpr(Operands);
            }
          }
      }
      // (A/B)/C --> A/(B*C) if safe and B*C can be folded.
      if (const SCEVUDivExpr *OtherDiv = dyn_cast<SCEVUDivExpr>(LHS)) {
        if (auto *DivisorConstant =
                dyn_cast<SCEVConstant>(OtherDiv->getRHS())) {
          bool Overflow = false;
          APInt NewRHS =
              DivisorConstant->getAPInt().umul_ov(RHSC->getAPInt(), Overflow);
          if (Overflow) {
            return getConstant(RHSC->getType(), 0, false);
          }
          return getUDivExpr(OtherDiv->getLHS(), getConstant(NewRHS));
        }
      }
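      // e.g. (%x /u 4) /u 2 combines to %x /u 8; if the multiplied divisor
      // overflows the bit width, no value can reach it, so the quotient is 0.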
      // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
      if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) {
        SmallVector<const SCEV *, 4> Operands;
        for (const SCEV *Op : A->operands())
          Operands.push_back(getZeroExtendExpr(Op, ExtTy));
        if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
          Operands.clear();
          for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
            const SCEV *Op = getUDivExpr(A->getOperand(i), RHS);
            if (isa<SCEVUDivExpr>(Op) ||
                getMulExpr(Op, RHS) != A->getOperand(i))
              break;
            Operands.push_back(Op);
          }
          if (Operands.size() == A->getNumOperands())
            return getAddExpr(Operands);
        }
      }
      // Fold if both operands are constant.
      if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
        Constant *LHSCV = LHSC->getValue();
        Constant *RHSCV = RHSC->getValue();
        return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV,
                                                                   RHSCV)));
      }
    }
  }

  FoldingSetNodeID ID;
  ID.AddInteger(scUDivExpr);
  ID.AddPointer(LHS);
  ID.AddPointer(RHS);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator),
                                             LHS, RHS);
  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}
static const APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) {
  APInt A = C1->getAPInt().abs();
  APInt B = C2->getAPInt().abs();
  uint32_t ABW = A.getBitWidth();
  uint32_t BBW = B.getBitWidth();

  if (ABW > BBW)
    B = B.zext(ABW);
  else if (ABW < BBW)
    A = A.zext(BBW);

  return APIntOps::GreatestCommonDivisor(std::move(A), std::move(B));
}
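// (The zero-extensions above bring the two absolute values to a common bit
// width before the gcd is computed.)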
/// Get a canonical unsigned division expression, or something simpler if
/// possible. There is no representation for an exact udiv in SCEV IR, but we
/// can attempt to remove factors from the LHS and RHS. We can't do this when
/// it's not exact because the udiv may be clearing bits.
const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS,
                                              const SCEV *RHS) {
  // TODO: we could try to find factors in all sorts of things, but for now we
  // just deal with u/exact (multiply, constant). See SCEVDivision towards the
  // end of this file for inspiration.

  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS);
  if (!Mul || !Mul->hasNoUnsignedWrap())
    return getUDivExpr(LHS, RHS);

  if (const SCEVConstant *RHSCst = dyn_cast<SCEVConstant>(RHS)) {
    // If the mulexpr multiplies by a constant, then that constant must be the
    // first element of the mulexpr.
    if (const auto *LHSCst = dyn_cast<SCEVConstant>(Mul->getOperand(0))) {
      if (LHSCst == RHSCst) {
        SmallVector<const SCEV *, 2> Operands;
        Operands.append(Mul->op_begin() + 1, Mul->op_end());
        return getMulExpr(Operands);
      }

      // We can't just assume that LHSCst divides RHSCst cleanly, it could be
      // that there's a factor provided by one of the other terms. We need to
      // check.
      APInt Factor = gcd(LHSCst, RHSCst);
      if (!Factor.isIntN(1)) {
        LHSCst =
            cast<SCEVConstant>(getConstant(LHSCst->getAPInt().udiv(Factor)));
        RHSCst =
            cast<SCEVConstant>(getConstant(RHSCst->getAPInt().udiv(Factor)));
        SmallVector<const SCEV *, 2> Operands;
        Operands.push_back(LHSCst);
        Operands.append(Mul->op_begin() + 1, Mul->op_end());
        LHS = getMulExpr(Operands);
        RHS = RHSCst;
        Mul = dyn_cast<SCEVMulExpr>(LHS);
        if (!Mul)
          return getUDivExactExpr(LHS, RHS);
      }
    }
  }
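  // e.g. (6 * %x)<nuw> /u 3: gcd(6, 3) == 3 reduces this to (2 * %x) /u 1,
  // and the getUDivExpr fallback below then simplifies the division by one
  // away, giving 2 * %x.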
  for (int i = 0, e = Mul->getNumOperands(); i != e; ++i) {
    if (Mul->getOperand(i) == RHS) {
      SmallVector<const SCEV *, 2> Operands;
      Operands.append(Mul->op_begin(), Mul->op_begin() + i);
      Operands.append(Mul->op_begin() + i + 1, Mul->op_end());
      return getMulExpr(Operands);
    }
  }

  return getUDivExpr(LHS, RHS);
}
/// Get an add recurrence expression for the specified loop. Simplify the
/// expression as much as possible.
const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step,
                                           const Loop *L,
                                           SCEV::NoWrapFlags Flags) {
  SmallVector<const SCEV *, 4> Operands;
  Operands.push_back(Start);
  if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
    if (StepChrec->getLoop() == L) {
      Operands.append(StepChrec->op_begin(), StepChrec->op_end());
      return getAddRecExpr(Operands, L, maskFlags(Flags, SCEV::FlagNW));
    }

  Operands.push_back(Step);
  return getAddRecExpr(Operands, L, Flags);
}
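// e.g. getAddRecExpr(%a, {%b,+,%c}<L>, L, Flags) flattens to
// {%a,+,%b,+,%c}<L>, with the flags masked down to the no-self-wrap bit.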
/// Get an add recurrence expression for the specified loop. Simplify the
/// expression as much as possible.
const SCEV *
ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
                               const Loop *L, SCEV::NoWrapFlags Flags) {
  if (Operands.size() == 1) return Operands[0];
#ifndef NDEBUG
  Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy &&
           "SCEVAddRecExpr operand types don't match!");
  for (unsigned i = 0, e = Operands.size(); i != e; ++i)
    assert(isLoopInvariant(Operands[i], L) &&
           "SCEVAddRecExpr operand is not loop-invariant!");
#endif

  if (Operands.back()->isZero()) {
    Operands.pop_back();
    return getAddRecExpr(Operands, L, SCEV::FlagAnyWrap); // {X,+,0}  -->  X
  }

  // It's tempting to want to call getMaxBackedgeTakenCount count here and
  // use that information to infer NUW and NSW flags. However, computing a
  // BE count requires calling getAddRecExpr, so we may not yet have a
  // meaningful BE count at this point (and if we don't, we'd be stuck
  // with a SCEVCouldNotCompute as the cached BE count).

  Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags);

  // Canonicalize nested AddRecs by nesting them in order of loop depth.
  if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
    const Loop *NestedLoop = NestedAR->getLoop();
    if (L->contains(NestedLoop)
            ? (L->getLoopDepth() < NestedLoop->getLoopDepth())
            : (!NestedLoop->contains(L) &&
               DT.dominates(L->getHeader(), NestedLoop->getHeader()))) {
      SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(),
                                                  NestedAR->op_end());
      Operands[0] = NestedAR->getStart();
      // AddRecs require their operands be loop-invariant with respect to their
      // loops. Don't perform this transformation if it would break this
      // requirement.
      bool AllInvariant = all_of(
          Operands, [&](const SCEV *Op) { return isLoopInvariant(Op, L); });

      if (AllInvariant) {
        // Create a recurrence for the outer loop with the same step size.
        //
        // The outer recurrence keeps its NW flag but only keeps NUW/NSW if the
        // inner recurrence has the same property.
        SCEV::NoWrapFlags OuterFlags =
            maskFlags(Flags, SCEV::FlagNW | NestedAR->getNoWrapFlags());

        NestedOperands[0] = getAddRecExpr(Operands, L, OuterFlags);
        AllInvariant = all_of(NestedOperands, [&](const SCEV *Op) {
          return isLoopInvariant(Op, NestedLoop);
        });

        if (AllInvariant) {
          // Ok, both add recurrences are valid after the transformation.
          //
          // The inner recurrence keeps its NW flag but only keeps NUW/NSW if
          // the outer recurrence has the same property.
          SCEV::NoWrapFlags InnerFlags =
              maskFlags(NestedAR->getNoWrapFlags(), SCEV::FlagNW | Flags);
          return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags);
        }
      }
      // Reset Operands to its original state.
      Operands[0] = NestedAR;
    }
  }

  // Okay, it looks like we really DO need an addrec expr. Check to see if we
  // already have one, otherwise create a new one.
  return getOrCreateAddRecExpr(Operands, L, Flags);
}
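// Nesting example for the canonicalization above: an AddRec such as
// {{%x,+,%y}<L_inner>,+,%z}<L_outer>, whose start is a recurrence in a deeper
// loop, is rebuilt as {{%x,+,%z}<L_outer>,+,%y}<L_inner>, provided every
// operand stays invariant in the loop it ends up attached to.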
const SCEV *
ScalarEvolution::getGEPExpr(GEPOperator *GEP,
                            const SmallVectorImpl<const SCEV *> &IndexExprs) {
  const SCEV *BaseExpr = getSCEV(GEP->getPointerOperand());
  // getSCEV(Base)->getType() has the same address space as Base->getType()
  // because SCEV::getType() preserves the address space.
  Type *IntPtrTy = getEffectiveSCEVType(BaseExpr->getType());
  // FIXME(PR23527): Don't blindly transfer the inbounds flag from the GEP
  // instruction to its SCEV, because the Instruction may be guarded by control
  // flow and the no-overflow bits may not be valid for the expression in any
  // context. This can be fixed similarly to how these flags are handled for
  // adds.
  SCEV::NoWrapFlags Wrap = GEP->isInBounds() ? SCEV::FlagNSW
                                             : SCEV::FlagAnyWrap;

  const SCEV *TotalOffset = getZero(IntPtrTy);
  // The array size is unimportant. The first thing we do on CurTy is getting
  // its element type.
  Type *CurTy = ArrayType::get(GEP->getSourceElementType(), 0);
  for (const SCEV *IndexExpr : IndexExprs) {
    // Compute the (potentially symbolic) offset in bytes for this index.
    if (StructType *STy = dyn_cast<StructType>(CurTy)) {
      // For a struct, add the member offset.
      ConstantInt *Index = cast<SCEVConstant>(IndexExpr)->getValue();
      unsigned FieldNo = Index->getZExtValue();
      const SCEV *FieldOffset = getOffsetOfExpr(IntPtrTy, STy, FieldNo);

      // Add the field offset to the running total offset.
      TotalOffset = getAddExpr(TotalOffset, FieldOffset);

      // Update CurTy to the type of the field at Index.
      CurTy = STy->getTypeAtIndex(Index);
    } else {
      // Update CurTy to its element type.
      CurTy = cast<SequentialType>(CurTy)->getElementType();
      // For an array, add the element offset, explicitly scaled.
      const SCEV *ElementSize = getSizeOfExpr(IntPtrTy, CurTy);
      // Getelementptr indices are signed.
      IndexExpr = getTruncateOrSignExtend(IndexExpr, IntPtrTy);

      // Multiply the index by the element size to compute the element offset.
      const SCEV *LocalOffset = getMulExpr(IndexExpr, ElementSize, Wrap);

      // Add the element offset to the running total offset.
      TotalOffset = getAddExpr(TotalOffset, LocalOffset);
    }
  }

  // Add the total offset from all the GEP indices to the base.
  return getAddExpr(BaseExpr, TotalOffset, Wrap);
}
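// For example, "getelementptr inbounds %struct.S, %struct.S* %p, i64 %i,
// i32 1" contributes (sizeof(%struct.S) * %i) for the first index plus the
// byte offset of field 1 for the second; because of inbounds, the index
// multiply and the final add are tagged FlagNSW.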
const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
  return getSMaxExpr(Ops);
}

const SCEV *
ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
  assert(!Ops.empty() && "Cannot get empty smax!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
           "SCEVSMaxExpr operand types don't match!");
#endif

  // Sort by complexity, this groups all similar expression types together.
  GroupByComplexity(Ops, &LI, DT);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      ConstantInt *Fold = ConstantInt::get(
          getContext(), APIntOps::smax(LHSC->getAPInt(), RHSC->getAPInt()));
      Ops[0] = getConstant(Fold);
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      if (Ops.size() == 1) return Ops[0];
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant minimum-int, strip it off.
    if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(true)) {
      Ops.erase(Ops.begin());
      --Idx;
    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(true)) {
      // If we have an smax with a constant maximum-int, it will always be
      // maximum-int.
      return Ops[0];
    }

    if (Ops.size() == 1) return Ops[0];
  }
  // Find the first SMax
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scSMaxExpr)
    ++Idx;

  // Check to see if one of the operands is an SMax. If so, expand its operands
  // onto our operand list, and recurse to simplify.
  if (Idx < Ops.size()) {
    bool DeletedSMax = false;
    while (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(Ops[Idx])) {
      Ops.erase(Ops.begin()+Idx);
      Ops.append(SMax->op_begin(), SMax->op_end());
      DeletedSMax = true;
    }

    if (DeletedSMax)
      return getSMaxExpr(Ops);
  }
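  // (Flattening example: smax(%a, smax(%b, %c)) re-enters this function as
  // smax(%a, %b, %c), so the duplicate/ordering cleanup below sees all
  // operands at once.)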
  // Okay, check to see if the same value occurs in the operand list twice. If
  // so, delete one. Since we sorted the list, these values are required to
  // be adjacent.
  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
    //  X smax Y smax Y  -->  X smax Y
    //  X smax Y         -->  X, if X is always greater than Y
    if (Ops[i] == Ops[i+1] ||
        isKnownPredicate(ICmpInst::ICMP_SGE, Ops[i], Ops[i+1])) {
      Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2);
      --i; --e;
    } else if (isKnownPredicate(ICmpInst::ICMP_SLE, Ops[i], Ops[i+1])) {
      Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
      --i; --e;
    }

  if (Ops.size() == 1) return Ops[0];

  assert(!Ops.empty() && "Reduced smax down to nothing!");

  // Okay, it looks like we really DO need an smax expr. Check to see if we
  // already have one, otherwise create a new one.
  FoldingSetNodeID ID;
  ID.AddInteger(scSMaxExpr);
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
  std::uninitialized_copy(Ops.begin(), Ops.end(), O);
  SCEV *S = new (SCEVAllocator) SCEVSMaxExpr(ID.Intern(SCEVAllocator),
                                             O, Ops.size());
  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}
const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
  return getUMaxExpr(Ops);
}

const SCEV *
ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
  assert(!Ops.empty() && "Cannot get empty umax!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
           "SCEVUMaxExpr operand types don't match!");
#endif

  // Sort by complexity, this groups all similar expression types together.
  GroupByComplexity(Ops, &LI, DT);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      ConstantInt *Fold = ConstantInt::get(
          getContext(), APIntOps::umax(LHSC->getAPInt(), RHSC->getAPInt()));
      Ops[0] = getConstant(Fold);
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      if (Ops.size() == 1) return Ops[0];
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant minimum-int, strip it off.
    if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(false)) {
      Ops.erase(Ops.begin());
      --Idx;
    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(false)) {
      // If we have an umax with a constant maximum-int, it will always be
      // maximum-int.
      return Ops[0];
    }

    if (Ops.size() == 1) return Ops[0];
  }

  // Find the first UMax
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scUMaxExpr)
    ++Idx;

  // Check to see if one of the operands is a UMax. If so, expand its operands
  // onto our operand list, and recurse to simplify.
  if (Idx < Ops.size()) {
    bool DeletedUMax = false;
    while (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(Ops[Idx])) {
      Ops.erase(Ops.begin()+Idx);
      Ops.append(UMax->op_begin(), UMax->op_end());
      DeletedUMax = true;
    }

    if (DeletedUMax)
      return getUMaxExpr(Ops);
  }

  // Okay, check to see if the same value occurs in the operand list twice. If
  // so, delete one. Since we sorted the list, these values are required to
  // be adjacent.
  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
    //  X umax Y umax Y  -->  X umax Y
    //  X umax Y         -->  X, if X is always greater than Y
    if (Ops[i] == Ops[i + 1] || isKnownViaNonRecursiveReasoning(
                                    ICmpInst::ICMP_UGE, Ops[i], Ops[i + 1])) {
      Ops.erase(Ops.begin() + i + 1, Ops.begin() + i + 2);
      --i; --e;
    } else if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, Ops[i],
                                               Ops[i + 1])) {
      Ops.erase(Ops.begin() + i, Ops.begin() + i + 1);
      --i; --e;
    }

  if (Ops.size() == 1) return Ops[0];

  assert(!Ops.empty() && "Reduced umax down to nothing!");

  // Okay, it looks like we really DO need a umax expr. Check to see if we
  // already have one, otherwise create a new one.
  FoldingSetNodeID ID;
  ID.AddInteger(scUMaxExpr);
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
  std::uninitialized_copy(Ops.begin(), Ops.end(), O);
  SCEV *S = new (SCEVAllocator) SCEVUMaxExpr(ID.Intern(SCEVAllocator),
                                             O, Ops.size());
  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}
const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
  return getSMinExpr(Ops);
}

const SCEV *ScalarEvolution::getSMinExpr(SmallVectorImpl<const SCEV *> &Ops) {
  // ~smax(~x, ~y, ~z) == smin(x, y, z).
  SmallVector<const SCEV *, 2> NotOps;
  for (auto *S : Ops)
    NotOps.push_back(getNotSCEV(S));
  return getNotSCEV(getSMaxExpr(NotOps));
}

const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
  return getUMinExpr(Ops);
}

const SCEV *ScalarEvolution::getUMinExpr(SmallVectorImpl<const SCEV *> &Ops) {
  assert(!Ops.empty() && "At least one operand must be!");
  // Trivial case.
  if (Ops.size() == 1)
    return Ops[0];

  // ~umax(~x, ~y, ~z) == umin(x, y, z).
  SmallVector<const SCEV *, 2> NotOps;
  for (auto *S : Ops)
    NotOps.push_back(getNotSCEV(S));
  return getNotSCEV(getUMaxExpr(NotOps));
}
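// Both reductions above rely on getNotSCEV(X) == (-1 - X) reversing the
// signed and the unsigned orders; e.g. umin(3, 5) == ~umax(~3, ~5) == 3.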
const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) {
  // We can bypass creating a target-independent
  // constant expression and then folding it back into a ConstantInt.
  // This is just a compile-time optimization.
  return getConstant(IntTy, getDataLayout().getTypeAllocSize(AllocTy));
}

const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy,
                                             StructType *STy,
                                             unsigned FieldNo) {
  // We can bypass creating a target-independent
  // constant expression and then folding it back into a ConstantInt.
  // This is just a compile-time optimization.
  return getConstant(
      IntTy, getDataLayout().getStructLayout(STy)->getElementOffset(FieldNo));
}

const SCEV *ScalarEvolution::getUnknown(Value *V) {
  // Don't attempt to do anything other than create a SCEVUnknown object
  // here. createSCEV only calls getUnknown after checking for all other
  // interesting possibilities, and any other code that calls getUnknown
  // is doing so in order to hide a value from SCEV canonicalization.

  FoldingSetNodeID ID;
  ID.AddInteger(scUnknown);
  ID.AddPointer(V);
  void *IP = nullptr;
  if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) {
    assert(cast<SCEVUnknown>(S)->getValue() == V &&
           "Stale SCEVUnknown in uniquing map!");
    return S;
  }
  SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this,
                                            FirstUnknown);
  FirstUnknown = cast<SCEVUnknown>(S);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}
//===----------------------------------------------------------------------===//
//            Basic SCEV Analysis and PHI Idiom Recognition Code
//

/// Test if values of the given type are analyzable within the SCEV
/// framework. This primarily includes integer types, and it can optionally
/// include pointer types if the ScalarEvolution class has access to
/// target-specific information.
bool ScalarEvolution::isSCEVable(Type *Ty) const {
  // Integers and pointers are always SCEVable.
  return Ty->isIntOrPtrTy();
}

/// Return the size in bits of the specified type, for which isSCEVable must
/// return true.
uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
  assert(isSCEVable(Ty) && "Type is not SCEVable!");
  if (Ty->isPointerTy())
    return getDataLayout().getIndexTypeSizeInBits(Ty);
  return getDataLayout().getTypeSizeInBits(Ty);
}

/// Return a type with the same bitwidth as the given type and which represents
/// how SCEV will treat the given type, for which isSCEVable must return
/// true. For pointer types, this is the pointer-sized integer type.
Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
  assert(isSCEVable(Ty) && "Type is not SCEVable!");

  if (Ty->isIntegerTy())
    return Ty;

  // The only other supported type is pointer.
  assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
  return getDataLayout().getIntPtrType(Ty);
}

Type *ScalarEvolution::getWiderType(Type *T1, Type *T2) const {
  return getTypeSizeInBits(T1) >= getTypeSizeInBits(T2) ? T1 : T2;
}

const SCEV *ScalarEvolution::getCouldNotCompute() {
  return CouldNotCompute.get();
}

bool ScalarEvolution::checkValidity(const SCEV *S) const {
  bool ContainsNulls = SCEVExprContains(S, [](const SCEV *S) {
    auto *SU = dyn_cast<SCEVUnknown>(S);
    return SU && SU->getValue() == nullptr;
  });

  return !ContainsNulls;
}
bool ScalarEvolution::containsAddRecurrence(const SCEV *S) {
  HasRecMapType::iterator I = HasRecMap.find(S);
  if (I != HasRecMap.end())
    return I->second;

  bool FoundAddRec = SCEVExprContains(S, isa<SCEVAddRecExpr, const SCEV *>);
  HasRecMap.insert({S, FoundAddRec});
  return FoundAddRec;
}

/// Try to split a SCEVAddExpr into a pair of {SCEV, ConstantInt}.
/// If \p S is a SCEVAddExpr and is composed of a sub SCEV S' and an
/// offset I, then return {S', I}, else return {\p S, nullptr}.
static std::pair<const SCEV *, ConstantInt *> splitAddExpr(const SCEV *S) {
  const auto *Add = dyn_cast<SCEVAddExpr>(S);
  if (!Add)
    return {S, nullptr};

  if (Add->getNumOperands() != 2)
    return {S, nullptr};

  auto *ConstOp = dyn_cast<SCEVConstant>(Add->getOperand(0));
  if (!ConstOp)
    return {S, nullptr};

  return {Add->getOperand(1), ConstOp->getValue()};
}
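// e.g. splitAddExpr applied to (5 + %x) yields {%x, 5}; any other shape,
// including adds with three or more operands, comes back unsplit with a
// null offset.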
/// Return the ValueOffsetPair set for \p S. \p S can be represented
/// by the value and offset from any ValueOffsetPair in the set.
SetVector<ScalarEvolution::ValueOffsetPair> *
ScalarEvolution::getSCEVValues(const SCEV *S) {
  ExprValueMapType::iterator SI = ExprValueMap.find_as(S);
  if (SI == ExprValueMap.end())
    return nullptr;
#ifndef NDEBUG
  if (VerifySCEVMap) {
    // Check there is no dangling Value in the set returned.
    for (const auto &VE : SI->second)
      assert(ValueExprMap.count(VE.first));
  }
#endif
  return &SI->second;
}

/// Erase Value from ValueExprMap and ExprValueMap. ValueExprMap.erase(V)
/// cannot be used separately. eraseValueFromMap should be used to remove
/// V from ValueExprMap and ExprValueMap at the same time.
void ScalarEvolution::eraseValueFromMap(Value *V) {
  ValueExprMapType::iterator I = ValueExprMap.find_as(V);
  if (I != ValueExprMap.end()) {
    const SCEV *S = I->second;
    // Remove {V, 0} from the set of ExprValueMap[S]
    if (SetVector<ValueOffsetPair> *SV = getSCEVValues(S))
      SV->remove({V, nullptr});

    // Remove {V, Offset} from the set of ExprValueMap[Stripped]
    const SCEV *Stripped;
    ConstantInt *Offset;
    std::tie(Stripped, Offset) = splitAddExpr(S);
    if (Offset != nullptr) {
      if (SetVector<ValueOffsetPair> *SV = getSCEVValues(Stripped))
        SV->remove({V, Offset});
    }
    ValueExprMap.erase(V);
  }
}
/// Check whether value has nuw/nsw/exact set but SCEV does not.
/// TODO: In reality it is better to check the poison recursively
/// but this is better than nothing.
static bool SCEVLostPoisonFlags(const SCEV *S, const Value *V) {
  if (auto *I = dyn_cast<Instruction>(V)) {
    if (isa<OverflowingBinaryOperator>(I)) {
      if (auto *NS = dyn_cast<SCEVNAryExpr>(S)) {
        if (I->hasNoSignedWrap() && !NS->hasNoSignedWrap())
          return true;
        if (I->hasNoUnsignedWrap() && !NS->hasNoUnsignedWrap())
          return true;
      }
    } else if (isa<PossiblyExactOperator>(I) && I->isExact())
      return true;
  }
  return false;
}
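// getSCEV() below uses this to skip the ExprValueMap caching for such values:
// reusing V as the materialization of S elsewhere could otherwise carry
// nuw/nsw/exact flags into a context where they do not hold. (This rationale
// is inferred from the call site; the check itself is purely syntactic.)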
/// Return an existing SCEV if it exists, otherwise analyze the expression and
/// create a new one.
const SCEV *ScalarEvolution::getSCEV(Value *V) {
  assert(isSCEVable(V->getType()) && "Value is not SCEVable!");

  const SCEV *S = getExistingSCEV(V);
  if (S == nullptr) {
    S = createSCEV(V);
    // During PHI resolution, it is possible to create two SCEVs for the same
    // V, so we need to double-check that V->S was inserted into
    // ValueExprMap before inserting S->{V, 0} into ExprValueMap.
    std::pair<ValueExprMapType::iterator, bool> Pair =
        ValueExprMap.insert({SCEVCallbackVH(V, this), S});
    if (Pair.second && !SCEVLostPoisonFlags(S, V)) {
      ExprValueMap[S].insert({V, nullptr});

      // If S == Stripped + Offset, add Stripped -> {V, Offset} into
      // ExprValueMap.
      const SCEV *Stripped = S;
      ConstantInt *Offset = nullptr;
      std::tie(Stripped, Offset) = splitAddExpr(S);
      // If Stripped is SCEVUnknown, don't bother to save
      // Stripped -> {V, offset}. It doesn't simplify and sometimes even
      // increases the complexity of the expansion code.
      // If V is GetElementPtrInst, don't save Stripped -> {V, offset}
      // because it may generate add/sub instead of GEP in SCEV expansion.
      if (Offset != nullptr && !isa<SCEVUnknown>(Stripped) &&
          !isa<GetElementPtrInst>(V))
        ExprValueMap[Stripped].insert({V, Offset});
    }
  }
  return S;
}

const SCEV *ScalarEvolution::getExistingSCEV(Value *V) {
  assert(isSCEVable(V->getType()) && "Value is not SCEVable!");

  ValueExprMapType::iterator I = ValueExprMap.find_as(V);
  if (I != ValueExprMap.end()) {
    const SCEV *S = I->second;
    if (checkValidity(S))
      return S;
    eraseValueFromMap(V);
    forgetMemoizedResults(S);
  }
  return nullptr;
}
/// Return a SCEV corresponding to -V = -1*V
const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V,
                                             SCEV::NoWrapFlags Flags) {
  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue())));

  Type *Ty = V->getType();
  Ty = getEffectiveSCEVType(Ty);
  return getMulExpr(
      V, getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty))), Flags);
}

/// Return a SCEV corresponding to ~V = -1-V
const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getNot(VC->getValue())));

  Type *Ty = V->getType();
  Ty = getEffectiveSCEVType(Ty);
  const SCEV *AllOnes =
      getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty)));
  return getMinusSCEV(AllOnes, V);
}
const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
                                          SCEV::NoWrapFlags Flags,
                                          unsigned Depth) {
  // Fast path: X - X --> 0.
  if (LHS == RHS)
    return getZero(LHS->getType());

  // We represent LHS - RHS as LHS + (-1)*RHS. This transformation
  // makes it so that we cannot make much use of NUW.
  auto AddFlags = SCEV::FlagAnyWrap;
  const bool RHSIsNotMinSigned =
      !getSignedRangeMin(RHS).isMinSignedValue();
  if (maskFlags(Flags, SCEV::FlagNSW) == SCEV::FlagNSW) {
    // Let M be the minimum representable signed value. Then (-1)*RHS
    // signed-wraps if and only if RHS is M. That can happen even for
    // a NSW subtraction because e.g. (-1)*M signed-wraps even though
    // -1 - M does not. So to transfer NSW from LHS - RHS to LHS +
    // (-1)*RHS, we need to prove that RHS != M.
    //
    // If LHS is non-negative and we know that LHS - RHS does not
    // signed-wrap, then RHS cannot be M. So we can rule out signed-wrap
    // either by proving that RHS > M or that LHS >= 0.
    if (RHSIsNotMinSigned || isKnownNonNegative(LHS)) {
      AddFlags = SCEV::FlagNSW;
    }
  }

  // FIXME: Find a correct way to transfer NSW to (-1)*M when LHS -
  // RHS is NSW and LHS >= 0.
  //
  // The difficulty here is that the NSW flag may have been proven
  // relative to a loop that is to be found in a recurrence in LHS and
  // not in RHS. Applying NSW to (-1)*M may then let the NSW have a
  // larger scope than intended.
  auto NegFlags = RHSIsNotMinSigned ? SCEV::FlagNSW : SCEV::FlagAnyWrap;

  return getAddExpr(LHS, getNegativeSCEV(RHS, NegFlags), AddFlags, Depth);
}
const SCEV *ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty,
                                                     unsigned Depth) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate or zero extend with non-integer arguments!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
    return getTruncateExpr(V, Ty, Depth);
  return getZeroExtendExpr(V, Ty, Depth);
}

const SCEV *ScalarEvolution::getTruncateOrSignExtend(const SCEV *V, Type *Ty,
                                                     unsigned Depth) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate or sign extend with non-integer arguments!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
    return getTruncateExpr(V, Ty, Depth);
  return getSignExtendExpr(V, Ty, Depth);
}
const SCEV *
ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot noop or zero extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrZeroExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getZeroExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot noop or sign extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrSignExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getSignExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot noop or any extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrAnyExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getAnyExtendExpr(V, Ty);
}

const SCEV *
ScalarEvolution::getTruncateOrNoop(const SCEV *V, Type *Ty) {
  Type *SrcTy = V->getType();
  assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate or noop with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
         "getTruncateOrNoop cannot extend!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getTruncateExpr(V, Ty);
}
const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS,
                                                        const SCEV *RHS) {
  const SCEV *PromotedLHS = LHS;
  const SCEV *PromotedRHS = RHS;

  if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
    PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
  else
    PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());

  return getUMaxExpr(PromotedLHS, PromotedRHS);
}

const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS,
                                                        const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
  return getUMinFromMismatchedTypes(Ops);
}

const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(
    SmallVectorImpl<const SCEV *> &Ops) {
  assert(!Ops.empty() && "At least one operand must be!");
  // Trivial case.
  if (Ops.size() == 1)
    return Ops[0];

  // Find the max type first.
  Type *MaxType = nullptr;
  for (auto *S : Ops)
    if (MaxType)
      MaxType = getWiderType(MaxType, S->getType());
    else
      MaxType = S->getType();

  // Extend all ops to max type.
  SmallVector<const SCEV *, 2> PromotedOps;
  for (auto *S : Ops)
    PromotedOps.push_back(getNoopOrZeroExtend(S, MaxType));

  // Generate umin.
  return getUMinExpr(PromotedOps);
}
const SCEV *ScalarEvolution::getPointerBase(const SCEV *V) {
  // A pointer operand may evaluate to a nonpointer expression, such as null.
  if (!V->getType()->isPointerTy())
    return V;

  if (const SCEVCastExpr *Cast = dyn_cast<SCEVCastExpr>(V)) {
    return getPointerBase(Cast->getOperand());
  } else if (const SCEVNAryExpr *NAry = dyn_cast<SCEVNAryExpr>(V)) {
    const SCEV *PtrOp = nullptr;
    for (const SCEV *NAryOp : NAry->operands()) {
      if (NAryOp->getType()->isPointerTy()) {
        // Cannot find the base of an expression with multiple pointer
        // operands.
        if (PtrOp)
          return V;
        PtrOp = NAryOp;
      }
    }
    if (!PtrOp)
      return V;
    return getPointerBase(PtrOp);
  }
  return V;
}
/// Push users of the given Instruction onto the given Worklist.
static void
PushDefUseChildren(Instruction *I,
                   SmallVectorImpl<Instruction *> &Worklist) {
  // Push the def-use children onto the Worklist stack.
  for (User *U : I->users())
    Worklist.push_back(cast<Instruction>(U));
}
void ScalarEvolution::forgetSymbolicName(Instruction *PN, const SCEV *SymName) {
  SmallVector<Instruction *, 16> Worklist;
  PushDefUseChildren(PN, Worklist);

  SmallPtrSet<Instruction *, 8> Visited;
  Visited.insert(PN);
  while (!Worklist.empty()) {
    Instruction *I = Worklist.pop_back_val();
    if (!Visited.insert(I).second)
      continue;

    auto It = ValueExprMap.find_as(static_cast<Value *>(I));
    if (It != ValueExprMap.end()) {
      const SCEV *Old = It->second;

      // Short-circuit the def-use traversal if the symbolic name
      // ceases to appear in expressions.
      if (Old != SymName && !hasOperand(Old, SymName))
        continue;

      // SCEVUnknown for a PHI either means that it has an unrecognized
      // structure, it's a PHI that's in the process of being computed
      // by createNodeForPHI, or it's a single-value PHI. In the first case,
      // additional loop trip count information isn't going to change anything.
      // In the second case, createNodeForPHI will perform the necessary
      // updates on its own when it gets to that point. In the third, we do
      // want to forget the SCEVUnknown.
      if (!isa<PHINode>(I) ||
          !isa<SCEVUnknown>(Old) ||
          (I != PN && Old == SymName)) {
        eraseValueFromMap(It->first);
        forgetMemoizedResults(Old);
      }
    }

    PushDefUseChildren(I, Worklist);
  }
}
namespace {

/// Takes SCEV S and Loop L. For each AddRec sub-expression, use its start
/// expression in case its Loop is L. If it is not L then
/// if IgnoreOtherLoops is true then use AddRec itself
/// otherwise rewrite cannot be done.
/// If SCEV contains non-invariant unknown SCEV rewrite cannot be done.
class SCEVInitRewriter : public SCEVRewriteVisitor<SCEVInitRewriter> {
public:
  static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE,
                             bool IgnoreOtherLoops = true) {
    SCEVInitRewriter Rewriter(L, SE);
    const SCEV *Result = Rewriter.visit(S);
    if (Rewriter.hasSeenLoopVariantSCEVUnknown())
      return SE.getCouldNotCompute();
    return Rewriter.hasSeenOtherLoops() && !IgnoreOtherLoops
               ? SE.getCouldNotCompute()
               : Result;
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    if (!SE.isLoopInvariant(Expr, L))
      SeenLoopVariantSCEVUnknown = true;
    return Expr;
  }

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
    // Only re-write AddRecExprs for this loop.
    if (Expr->getLoop() == L)
      return Expr->getStart();
    SeenOtherLoops = true;
    return Expr;
  }

  bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; }

  bool hasSeenOtherLoops() { return SeenOtherLoops; }

private:
  explicit SCEVInitRewriter(const Loop *L, ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), L(L) {}

  const Loop *L;
  bool SeenLoopVariantSCEVUnknown = false;
  bool SeenOtherLoops = false;
};
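// e.g. SCEVInitRewriter::rewrite(({%a,+,%b}<L> + %c), L, SE) produces
// (%a + %c); a SCEVUnknown that varies in L makes the rewrite give up with
// SCEVCouldNotCompute instead.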
/// Takes SCEV S and Loop L. For each AddRec sub-expression, use its post
/// increment expression in case its Loop is L. If it is not L then
/// use AddRec itself.
/// If SCEV contains non-invariant unknown SCEV rewrite cannot be done.
class SCEVPostIncRewriter : public SCEVRewriteVisitor<SCEVPostIncRewriter> {
public:
  static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE) {
    SCEVPostIncRewriter Rewriter(L, SE);
    const SCEV *Result = Rewriter.visit(S);
    return Rewriter.hasSeenLoopVariantSCEVUnknown()
               ? SE.getCouldNotCompute()
               : Result;
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    if (!SE.isLoopInvariant(Expr, L))
      SeenLoopVariantSCEVUnknown = true;
    return Expr;
  }

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
    // Only re-write AddRecExprs for this loop.
    if (Expr->getLoop() == L)
      return Expr->getPostIncExpr(SE);
    SeenOtherLoops = true;
    return Expr;
  }

  bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; }

  bool hasSeenOtherLoops() { return SeenOtherLoops; }

private:
  explicit SCEVPostIncRewriter(const Loop *L, ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), L(L) {}

  const Loop *L;
  bool SeenLoopVariantSCEVUnknown = false;
  bool SeenOtherLoops = false;
};
/// This class evaluates the compare condition by matching it against the
/// condition of loop latch. If there is a match we assume a true value
/// for the condition while building SCEV nodes.
class SCEVBackedgeConditionFolder
    : public SCEVRewriteVisitor<SCEVBackedgeConditionFolder> {
public:
  static const SCEV *rewrite(const SCEV *S, const Loop *L,
                             ScalarEvolution &SE) {
    bool IsPosBECond = false;
    Value *BECond = nullptr;
    if (BasicBlock *Latch = L->getLoopLatch()) {
      BranchInst *BI = dyn_cast<BranchInst>(Latch->getTerminator());
      if (BI && BI->isConditional()) {
        assert(BI->getSuccessor(0) != BI->getSuccessor(1) &&
               "Both outgoing branches should not target same header!");
        BECond = BI->getCondition();
        IsPosBECond = BI->getSuccessor(0) == L->getHeader();
      } else {
        return S;
      }
    }
    SCEVBackedgeConditionFolder Rewriter(L, BECond, IsPosBECond, SE);
    return Rewriter.visit(S);
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    const SCEV *Result = Expr;
    bool InvariantF = SE.isLoopInvariant(Expr, L);

    if (!InvariantF) {
      Instruction *I = cast<Instruction>(Expr->getValue());
      switch (I->getOpcode()) {
      case Instruction::Select: {
        SelectInst *SI = cast<SelectInst>(I);
        Optional<const SCEV *> Res =
            compareWithBackedgeCondition(SI->getCondition());
        if (Res.hasValue()) {
          bool IsOne = cast<SCEVConstant>(Res.getValue())->getValue()->isOne();
          Result = SE.getSCEV(IsOne ? SI->getTrueValue() : SI->getFalseValue());
        }
        break;
      }
      default: {
        Optional<const SCEV *> Res = compareWithBackedgeCondition(I);
        if (Res.hasValue())
          Result = Res.getValue();
        break;
      }
      }
    }
    return Result;
  }

private:
  explicit SCEVBackedgeConditionFolder(const Loop *L, Value *BECond,
                                       bool IsPosBECond, ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), L(L), BackedgeCond(BECond),
        IsPositiveBECond(IsPosBECond) {}

  Optional<const SCEV *> compareWithBackedgeCondition(Value *IC);

  const Loop *L;
  /// Loop back condition.
  Value *BackedgeCond = nullptr;
  /// Set to true if loop back is on positive branch condition.
  bool IsPositiveBECond;
};

Optional<const SCEV *>
SCEVBackedgeConditionFolder::compareWithBackedgeCondition(Value *IC) {

  // If value matches the backedge condition for loop latch,
  // then return a constant evolution node based on loopback
  // branch taken.
  if (BackedgeCond == IC)
    return IsPositiveBECond ? SE.getOne(Type::getInt1Ty(SE.getContext()))
                            : SE.getZero(Type::getInt1Ty(SE.getContext()));
  return None;
}
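
/// Rewrites an expression so that each affine AddRec for loop L is replaced
/// by its value one iteration earlier: {A,+,B}<L> becomes ({A,+,B}<L> - B).
/// Loop-variant unknowns and recurrences it cannot shift mark the rewrite
/// invalid.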
class SCEVShiftRewriter : public SCEVRewriteVisitor<SCEVShiftRewriter> {
public:
  static const SCEV *rewrite(const SCEV *S, const Loop *L,
                             ScalarEvolution &SE) {
    SCEVShiftRewriter Rewriter(L, SE);
    const SCEV *Result = Rewriter.visit(S);
    return Rewriter.isValid() ? Result : SE.getCouldNotCompute();
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    // Only allow AddRecExprs for this loop.
    if (!SE.isLoopInvariant(Expr, L))
      Valid = false;
    return Expr;
  }

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
    if (Expr->getLoop() == L && Expr->isAffine())
      return SE.getMinusSCEV(Expr, Expr->getStepRecurrence(SE));
    Valid = false;
    return Expr;
  }

  bool isValid() { return Valid; }

private:
  explicit SCEVShiftRewriter(const Loop *L, ScalarEvolution &SE)
      : SCEVRewriteVisitor(SE), L(L) {}

  const Loop *L;
  bool Valid = true;
};

} // end anonymous namespace
SCEV::NoWrapFlags
ScalarEvolution::proveNoWrapViaConstantRanges(const SCEVAddRecExpr *AR) {
  if (!AR->isAffine())
    return SCEV::FlagAnyWrap;

  using OBO = OverflowingBinaryOperator;

  SCEV::NoWrapFlags Result = SCEV::FlagAnyWrap;

  if (!AR->hasNoSignedWrap()) {
    ConstantRange AddRecRange = getSignedRange(AR);
    ConstantRange IncRange = getSignedRange(AR->getStepRecurrence(*this));

    auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
        Instruction::Add, IncRange, OBO::NoSignedWrap);
    if (NSWRegion.contains(AddRecRange))
      Result = ScalarEvolution::setFlags(Result, SCEV::FlagNSW);
  }

  if (!AR->hasNoUnsignedWrap()) {
    ConstantRange AddRecRange = getUnsignedRange(AR);
    ConstantRange IncRange = getUnsignedRange(AR->getStepRecurrence(*this));

    auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
        Instruction::Add, IncRange, OBO::NoUnsignedWrap);
    if (NUWRegion.contains(AddRecRange))
      Result = ScalarEvolution::setFlags(Result, SCEV::FlagNUW);
  }

  return Result;
}
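// For instance, if range analysis shows {0,+,1}<L> stays within [0, 100) and
// the step's signed range is [1, 2), the computed no-signed-wrap region for
// "add 1" covers the whole recurrence range, so FlagNSW is added even though
// the IR carried no such flag.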
namespace {

/// Represents an abstract binary operation. This may exist as a
/// normal instruction or constant expression, or may have been
/// derived from an expression tree.
struct BinaryOp {
  unsigned Opcode;
  Value *LHS;
  Value *RHS;
  bool IsNSW = false;
  bool IsNUW = false;

  /// Op is set if this BinaryOp corresponds to a concrete LLVM instruction or
  /// constant expression.
  Operator *Op = nullptr;

  explicit BinaryOp(Operator *Op)
      : Opcode(Op->getOpcode()), LHS(Op->getOperand(0)),
        RHS(Op->getOperand(1)), Op(Op) {
    if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(Op)) {
      IsNSW = OBO->hasNoSignedWrap();
      IsNUW = OBO->hasNoUnsignedWrap();
    }
  }

  explicit BinaryOp(unsigned Opcode, Value *LHS, Value *RHS, bool IsNSW = false,
                    bool IsNUW = false)
      : Opcode(Opcode), LHS(LHS), RHS(RHS), IsNSW(IsNSW), IsNUW(IsNUW) {}
};

} // end anonymous namespace
/// Try to map \p V into a BinaryOp, and return \c None on failure.
static Optional<BinaryOp> MatchBinaryOp(Value *V, DominatorTree &DT) {
  auto *Op = dyn_cast<Operator>(V);
  if (!Op)
    return None;

  // Implementation detail: all the cleverness here should happen without
  // creating new SCEV expressions -- our caller knows tricks to avoid creating
  // SCEV expressions when possible, and we should not break that.

  switch (Op->getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::URem:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::AShr:
  case Instruction::Shl:
    return BinaryOp(Op);

  case Instruction::Xor:
    if (auto *RHSC = dyn_cast<ConstantInt>(Op->getOperand(1)))
      // If the RHS of the xor is a signmask, then this is just an add.
      // Instcombine turns add of signmask into xor as a strength reduction
      // step.
->getValue().isSignMask())
4530 return BinaryOp(Instruction::Add
, Op
->getOperand(0), Op
->getOperand(1));
4531 return BinaryOp(Op
);
4533 case Instruction::LShr
:
4534 // Turn logical shift right of a constant into a unsigned divide.
4535 if (ConstantInt
*SA
= dyn_cast
<ConstantInt
>(Op
->getOperand(1))) {
4536 uint32_t BitWidth
= cast
<IntegerType
>(Op
->getType())->getBitWidth();
4538 // If the shift count is not less than the bitwidth, the result of
4539 // the shift is undefined. Don't try to analyze it, because the
4540 // resolution chosen here may differ from the resolution chosen in
4541 // other parts of the compiler.
4542 if (SA
->getValue().ult(BitWidth
)) {
4544 ConstantInt::get(SA
->getContext(),
4545 APInt::getOneBitSet(BitWidth
, SA
->getZExtValue()));
4546 return BinaryOp(Instruction::UDiv
, Op
->getOperand(0), X
);
4549 return BinaryOp(Op
);
4551 case Instruction::ExtractValue
: {
4552 auto *EVI
= cast
<ExtractValueInst
>(Op
);
4553 if (EVI
->getNumIndices() != 1 || EVI
->getIndices()[0] != 0)
4556 auto *CI
= dyn_cast
<CallInst
>(EVI
->getAggregateOperand());
4560 if (auto *F
= CI
->getCalledFunction())
4561 switch (F
->getIntrinsicID()) {
4562 case Intrinsic::sadd_with_overflow
:
4563 case Intrinsic::uadd_with_overflow
:
4564 if (!isOverflowIntrinsicNoWrap(cast
<IntrinsicInst
>(CI
), DT
))
4565 return BinaryOp(Instruction::Add
, CI
->getArgOperand(0),
4566 CI
->getArgOperand(1));
4568 // Now that we know that all uses of the arithmetic-result component of
4569 // CI are guarded by the overflow check, we can go ahead and pretend
4570 // that the arithmetic is non-overflowing.
4571 if (F
->getIntrinsicID() == Intrinsic::sadd_with_overflow
)
4572 return BinaryOp(Instruction::Add
, CI
->getArgOperand(0),
4573 CI
->getArgOperand(1), /* IsNSW = */ true,
4574 /* IsNUW = */ false);
4576 return BinaryOp(Instruction::Add
, CI
->getArgOperand(0),
4577 CI
->getArgOperand(1), /* IsNSW = */ false,
4579 case Intrinsic::ssub_with_overflow
:
4580 case Intrinsic::usub_with_overflow
:
4581 if (!isOverflowIntrinsicNoWrap(cast
<IntrinsicInst
>(CI
), DT
))
4582 return BinaryOp(Instruction::Sub
, CI
->getArgOperand(0),
4583 CI
->getArgOperand(1));
4585 // The same reasoning as sadd/uadd above.
4586 if (F
->getIntrinsicID() == Intrinsic::ssub_with_overflow
)
4587 return BinaryOp(Instruction::Sub
, CI
->getArgOperand(0),
4588 CI
->getArgOperand(1), /* IsNSW = */ true,
4589 /* IsNUW = */ false);
4591 return BinaryOp(Instruction::Sub
, CI
->getArgOperand(0),
4592 CI
->getArgOperand(1), /* IsNSW = */ false,
4593 /* IsNUW = */ true);
4594 case Intrinsic::smul_with_overflow
:
4595 case Intrinsic::umul_with_overflow
:
4596 return BinaryOp(Instruction::Mul
, CI
->getArgOperand(0),
4597 CI
->getArgOperand(1));
/// Helper function to createAddRecFromPHIWithCasts. We have a phi
/// node whose symbolic (unknown) SCEV is \p SymbolicPHI, which is updated via
/// the loop backedge by a SCEVAddExpr, possibly also with a few casts on the
/// way. This function checks if \p Op, an operand of this SCEVAddExpr,
/// follows one of the following patterns:
/// Op == (SExt ix (Trunc iy (%SymbolicPHI) to ix) to iy)
/// Op == (ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy)
/// If the SCEV expression of \p Op conforms with one of the expected patterns
/// we return the type of the truncation operation, and indicate whether the
/// truncated type should be treated as signed/unsigned by setting
/// \p Signed to true/false, respectively.
static Type *isSimpleCastedPHI(const SCEV *Op, const SCEVUnknown *SymbolicPHI,
                               bool &Signed, ScalarEvolution &SE) {
  // The case where Op == SymbolicPHI (that is, with no type conversions on
  // the way) is handled by the regular add recurrence creating logic and
  // would have already been triggered in createAddRecForPHI. Reaching it here
  // means that createAddRecFromPHI had failed for this PHI before (e.g.,
  // because one of the other operands of the SCEVAddExpr updating this PHI is
  // not loop invariant).
  //
  // Here we look for the case where Op = (ext(trunc(SymbolicPHI))), and in
  // this case predicates that allow us to prove that Op == SymbolicPHI will
  // be added.
  if (Op == SymbolicPHI)
    return nullptr;

  unsigned SourceBits = SE.getTypeSizeInBits(SymbolicPHI->getType());
  unsigned NewBits = SE.getTypeSizeInBits(Op->getType());
  if (SourceBits != NewBits)
    return nullptr;

  const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(Op);
  const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(Op);
  if (!SExt && !ZExt)
    return nullptr;
  const SCEVTruncateExpr *Trunc =
      SExt ? dyn_cast<SCEVTruncateExpr>(SExt->getOperand())
           : dyn_cast<SCEVTruncateExpr>(ZExt->getOperand());
  if (!Trunc)
    return nullptr;
  const SCEV *X = Trunc->getOperand();
  if (X != SymbolicPHI)
    return nullptr;
  Signed = SExt != nullptr;
  return Trunc->getType();
}
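// e.g. with a %phi of type i64 and Op == (sext i32 (trunc i64 %phi to i32)
// to i64), this returns i32 and sets Signed to true.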
static const Loop *isIntegerLoopHeaderPHI(const PHINode *PN, LoopInfo &LI) {
  if (!PN->getType()->isIntegerTy())
    return nullptr;
  const Loop *L = LI.getLoopFor(PN->getParent());
  if (!L || L->getHeader() != PN->getParent())
    return nullptr;
  return L;
}
// Analyze \p SymbolicPHI, a SCEV expression of a phi node, and check if the
// computation that updates the phi follows the following pattern:
//   (SExt/ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) + InvariantAccum
// which correspond to a phi->trunc->sext/zext->add->phi update chain.
// If so, try to see if it can be rewritten as an AddRecExpr under some
// Predicates. If successful, return them as a pair. Also cache the results
// of the analysis.
//
// Example usage scenario:
//    Say the Rewriter is called for the following SCEV:
//         8 * ((sext i32 (trunc i64 %X to i32) to i64) + %Step)
//    where:
//         %X = phi i64 (%Start, %BEValue)
//    It will visitMul->visitAdd->visitSExt->visitTrunc->visitUnknown(%X),
//    and call this function with %SymbolicPHI = %X.
//
//    The analysis will find that the value coming around the backedge has
//    the following SCEV:
//         BEValue = ((sext i32 (trunc i64 %X to i32) to i64) + %Step)
//    Upon concluding that this matches the desired pattern, the function
//    will return the pair {NewAddRec, SmallPredsVec} where:
//         NewAddRec = {%Start,+,%Step}
//         SmallPredsVec = {P1, P2, P3} as follows:
//           P1(WrapPred): AR: {trunc(%Start),+,(trunc %Step)}<nsw> Flags: <nssw>
//           P2(EqualPred): %Start == (sext i32 (trunc i64 %Start to i32) to i64)
//           P3(EqualPred): %Step == (sext i32 (trunc i64 %Step to i32) to i64)
//    The returned pair means that SymbolicPHI can be rewritten into NewAddRec
//    under the predicates {P1,P2,P3}.
//    This predicated rewrite will be cached in PredicatedSCEVRewrites:
//         PredicatedSCEVRewrites[{%X,L}] = {NewAddRec, {P1,P2,P3)}
//
// TODO's:
//
// 1) Extend the Induction descriptor to also support inductions that involve
//    casts: When needed (namely, when we are called in the context of the
//    vectorizer induction analysis), a Set of cast instructions will be
//    populated by this method, and provided back to isInductionPHI. This is
//    needed to allow the vectorizer to properly record them to be ignored by
//    the cost model and to avoid vectorizing them (otherwise these casts,
//    which are redundant under the runtime overflow checks, will be
//    vectorized, which can be costly).
//
// 2) Support additional induction/PHISCEV patterns: We also want to support
//    inductions where the sext-trunc / zext-trunc operations (partly) occur
//    after the induction update operation (the induction increment):
//
//      (Trunc iy (SExt/ZExt ix (%SymbolicPHI + InvariantAccum) to iy) to ix)
//    which correspond to a phi->add->trunc->sext/zext->phi update chain.
//
//      (Trunc iy ((SExt/ZExt ix (%SymbolicPhi) to iy) + InvariantAccum) to ix)
//    which correspond to a phi->trunc->add->sext/zext->phi update chain.
//
// 3) Outline common code with createAddRecFromPHI to avoid duplication.
Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI) {
  SmallVector<const SCEVPredicate *, 3> Predicates;

  // *** Part1: Analyze if we have a phi-with-cast pattern for which we can
  // return an AddRec expression under some predicate.

  auto *PN = cast<PHINode>(SymbolicPHI->getValue());
  const Loop *L = isIntegerLoopHeaderPHI(PN, LI);
  assert(L && "Expecting an integer loop header phi");

  // The loop may have multiple entrances or multiple exits; we can analyze
  // this phi as an addrec if it has a unique entry value and a unique
  // backedge value.
  Value *BEValueV = nullptr, *StartValueV = nullptr;
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    Value *V = PN->getIncomingValue(i);
    if (L->contains(PN->getIncomingBlock(i))) {
      if (!BEValueV) {
        BEValueV = V;
      } else if (BEValueV != V) {
        BEValueV = nullptr;
        break;
      }
    } else if (!StartValueV) {
      StartValueV = V;
    } else if (StartValueV != V) {
      StartValueV = nullptr;
      break;
    }
  }
  if (!BEValueV || !StartValueV)
    return None;

  const SCEV *BEValue = getSCEV(BEValueV);

  // If the value coming around the backedge is an add with the symbolic
  // value we just inserted, possibly with casts that we can ignore under
  // an appropriate runtime guard, then we found a simple induction variable!
  const auto *Add = dyn_cast<SCEVAddExpr>(BEValue);
  if (!Add)
    return None;

  // If there is a single occurrence of the symbolic value, possibly
  // casted, replace it with a recurrence.
  unsigned FoundIndex = Add->getNumOperands();
  Type *TruncTy = nullptr;
  bool Signed;
  for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
    if ((TruncTy =
             isSimpleCastedPHI(Add->getOperand(i), SymbolicPHI, Signed, *this)))
      if (FoundIndex == e) {
        FoundIndex = i;
        break;
      }

  if (FoundIndex == Add->getNumOperands())
    return None;

  // Create an add with everything but the specified operand.
  SmallVector<const SCEV *, 8> Ops;
  for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
    if (i != FoundIndex)
      Ops.push_back(Add->getOperand(i));
  const SCEV *Accum = getAddExpr(Ops);

  // The runtime checks will not be valid if the step amount is
  // varying inside the loop.
  if (!isLoopInvariant(Accum, L))
    return None;
  // *** Part2: Create the predicates

  // Analysis was successful: we have a phi-with-cast pattern for which we
  // can return an AddRec expression under the following predicates:
  //
  // P1: A Wrap predicate that guarantees that Trunc(Start) + i*Trunc(Accum)
  //     fits within the truncated type (does not overflow) for i = 0 to n-1.
  // P2: An Equal predicate that guarantees that
  //     Start = (Ext ix (Trunc iy (Start) to ix) to iy)
  // P3: An Equal predicate that guarantees that
  //     Accum = (Ext ix (Trunc iy (Accum) to ix) to iy)
  //
  // As we next prove, the above predicates guarantee that:
  //     Start + i*Accum = (Ext ix (Trunc iy ( Start + i*Accum ) to ix) to iy)
  //
  // More formally, we want to prove that:
  //     Expr(i+1) = Start + (i+1) * Accum
  //               = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum
  //
  // Given that:
  // 1) Expr(0) = Start
  // 2) Expr(1) = Start + Accum
  //            = (Ext ix (Trunc iy (Start) to ix) to iy) + Accum :: from P2
  // 3) Induction hypothesis (step i):
  //    Expr(i) = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum
  //
  // Proof:
  //   Expr(i+1) =
  //             = Start + (i+1)*Accum
  //             = (Start + i*Accum) + Accum
  //             = Expr(i) + Accum
  //             = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum + Accum
  //                                                          :: from step i
  //
  //             = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy)
  //               + Accum + Accum
  //
  //             = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy)
  //               + (Ext ix (Trunc iy (Accum) to ix) to iy)
  //               + Accum                                    :: from P3
  //
  //             = (Ext ix (Trunc iy ((Start + (i-1)*Accum) + Accum) to ix) to iy)
  //               + Accum               :: from P1: Ext(x)+Ext(y)=>Ext(x+y)
  //
  //             = (Ext ix (Trunc iy (Start + i*Accum) to ix) to iy) + Accum
  //             = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum
  //
  // By induction, the same applies to all iterations 1<=i<n:
  //
  // Create a truncated addrec for which we will add a no overflow check (P1).
  const SCEV *StartVal = getSCEV(StartValueV);
  const SCEV *PHISCEV =
      getAddRecExpr(getTruncateExpr(StartVal, TruncTy),
                    getTruncateExpr(Accum, TruncTy), L, SCEV::FlagAnyWrap);

  // PHISCEV can be either a SCEVConstant or a SCEVAddRecExpr.
  // ex: If truncated Accum is 0 and StartVal is a constant, then PHISCEV
  // will be constant.
  //
  // If PHISCEV is a constant, then P1 degenerates into P2 or P3, so we don't
  // add P1.
  if (const auto *AR = dyn_cast<SCEVAddRecExpr>(PHISCEV)) {
    SCEVWrapPredicate::IncrementWrapFlags AddedFlags =
        Signed ? SCEVWrapPredicate::IncrementNSSW
               : SCEVWrapPredicate::IncrementNUSW;
    const SCEVPredicate *AddRecPred = getWrapPredicate(AR, AddedFlags);
    Predicates.push_back(AddRecPred);
  }
  // Create the Equal Predicates P2,P3:

  // It is possible that the predicates P2 and/or P3 are computable at
  // compile time due to StartVal and/or Accum being constants.
  // If either one is, then we can check that now and escape if either P2
  // or P3 is false.

  // Construct the extended SCEV: (Ext ix (Trunc iy (Expr) to ix) to iy)
  // for each of StartVal and Accum.
  auto getExtendedExpr = [&](const SCEV *Expr,
                             bool CreateSignExtend) -> const SCEV * {
    assert(isLoopInvariant(Expr, L) && "Expr is expected to be invariant");
    const SCEV *TruncatedExpr = getTruncateExpr(Expr, TruncTy);
    const SCEV *ExtendedExpr =
        CreateSignExtend ? getSignExtendExpr(TruncatedExpr, Expr->getType())
                         : getZeroExtendExpr(TruncatedExpr, Expr->getType());
    return ExtendedExpr;
  };
  // Given:
  //   ExtendedExpr = (Ext ix (Trunc iy (Expr) to ix) to iy)
  //                = getExtendedExpr(Expr)
  // determine whether the predicate P: Expr == ExtendedExpr
  // is known to be false at compile time.
  auto PredIsKnownFalse = [&](const SCEV *Expr,
                              const SCEV *ExtendedExpr) -> bool {
    return Expr != ExtendedExpr &&
           isKnownPredicate(ICmpInst::ICMP_NE, Expr, ExtendedExpr);
  };

  const SCEV *StartExtended = getExtendedExpr(StartVal, Signed);
  if (PredIsKnownFalse(StartVal, StartExtended)) {
    LLVM_DEBUG(dbgs() << "P2 is compile-time false\n";);
    return None;
  }

  // The Step is always Signed (because the overflow checks are either
  // NSSW or NUSW).
  const SCEV *AccumExtended = getExtendedExpr(Accum, /*CreateSignExtend=*/true);
  if (PredIsKnownFalse(Accum, AccumExtended)) {
    LLVM_DEBUG(dbgs() << "P3 is compile-time false\n";);
    return None;
  }
  auto AppendPredicate = [&](const SCEV *Expr,
                             const SCEV *ExtendedExpr) -> void {
    if (Expr != ExtendedExpr &&
        !isKnownPredicate(ICmpInst::ICMP_EQ, Expr, ExtendedExpr)) {
      const SCEVPredicate *Pred = getEqualPredicate(Expr, ExtendedExpr);
      LLVM_DEBUG(dbgs() << "Added Predicate: " << *Pred);
      Predicates.push_back(Pred);
    }
  };

  AppendPredicate(StartVal, StartExtended);
  AppendPredicate(Accum, AccumExtended);
  // *** Part3: Predicates are ready. Now go ahead and create the new addrec in
  // which the casts had been folded away. The caller can rewrite SymbolicPHI
  // into NewAR if it will also add the runtime overflow checks specified in
  // the predicates.
  auto *NewAR = getAddRecExpr(StartVal, Accum, L, SCEV::FlagAnyWrap);

  std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> PredRewrite =
      std::make_pair(NewAR, Predicates);
  // Remember the result of the analysis for this SCEV at this location.
  PredicatedSCEVRewrites[{SymbolicPHI, L}] = PredRewrite;
  return PredRewrite;
}
Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
ScalarEvolution::createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI) {
  auto *PN = cast<PHINode>(SymbolicPHI->getValue());
  const Loop *L = isIntegerLoopHeaderPHI(PN, LI);
  if (!L)
    return None;

  // Check to see if we already analyzed this PHI.
  auto I = PredicatedSCEVRewrites.find({SymbolicPHI, L});
  if (I != PredicatedSCEVRewrites.end()) {
    std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> Rewrite =
        I->second;
    // Analysis was done before and failed to create an AddRec:
    if (Rewrite.first == SymbolicPHI)
      return None;
    // Analysis was done before and succeeded to create an AddRec under
    // a predicate:
    assert(isa<SCEVAddRecExpr>(Rewrite.first) && "Expected an AddRec");
    assert(!(Rewrite.second).empty() && "Expected to find Predicates");
    return Rewrite;
  }

  Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
      Rewrite = createAddRecFromPHIWithCastsImpl(SymbolicPHI);

  // Record in the cache that the analysis failed.
  if (!Rewrite) {
    SmallVector<const SCEVPredicate *, 3> Predicates;
    PredicatedSCEVRewrites[{SymbolicPHI, L}] = {SymbolicPHI, Predicates};
    return None;
  }

  return Rewrite;
}
// FIXME: This utility is currently required because the Rewriter does not
// rewrite this expression:
// {0, +, (sext ix (trunc iy to ix) to iy)}
// into {0, +, %step},
// even when the following Equal predicate exists:
// "%step == (sext ix (trunc iy to ix) to iy)".
bool PredicatedScalarEvolution::areAddRecsEqualWithPreds(
    const SCEVAddRecExpr *AR1, const SCEVAddRecExpr *AR2) const {
  if (AR1 == AR2)
    return true;

  auto areExprsEqual = [&](const SCEV *Expr1, const SCEV *Expr2) -> bool {
    if (Expr1 != Expr2 && !Preds.implies(SE.getEqualPredicate(Expr1, Expr2)) &&
        !Preds.implies(SE.getEqualPredicate(Expr2, Expr1)))
      return false;
    return true;
  };

  if (!areExprsEqual(AR1->getStart(), AR2->getStart()) ||
      !areExprsEqual(AR1->getStepRecurrence(SE), AR2->getStepRecurrence(SE)))
    return false;
  return true;
}
/// A helper function for createAddRecFromPHI to handle simple cases.
///
/// This function tries to find an AddRec expression for the simplest (yet most
/// common) cases: PN = PHI(Start, OP(Self, LoopInvariant)).
/// If it fails, createAddRecFromPHI will use a more general, but slow,
/// technique for finding the AddRec expression.
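///
/// For example (hypothetical IR), given
///   %iv      = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
///   %iv.next = add nuw nsw i32 %iv, 4
/// this helper returns the AddRec {0,+,4}<nuw><nsw> for %iv.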
const SCEV *ScalarEvolution::createSimpleAffineAddRec(PHINode *PN,
                                                      Value *BEValueV,
                                                      Value *StartValueV) {
  const Loop *L = LI.getLoopFor(PN->getParent());
  assert(L && L->getHeader() == PN->getParent());
  assert(BEValueV && StartValueV);

  auto BO = MatchBinaryOp(BEValueV, DT);
  if (!BO)
    return nullptr;

  if (BO->Opcode != Instruction::Add)
    return nullptr;

  const SCEV *Accum = nullptr;
  if (BO->LHS == PN && L->isLoopInvariant(BO->RHS))
    Accum = getSCEV(BO->RHS);
  else if (BO->RHS == PN && L->isLoopInvariant(BO->LHS))
    Accum = getSCEV(BO->LHS);

  if (!Accum)
    return nullptr;

  SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
  if (BO->IsNUW)
    Flags = setFlags(Flags, SCEV::FlagNUW);
  if (BO->IsNSW)
    Flags = setFlags(Flags, SCEV::FlagNSW);

  const SCEV *StartVal = getSCEV(StartValueV);
  const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);

  ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;

  // We can add Flags to the post-inc expression only if we
  // know that it is *undefined behavior* for BEValueV to
  // overflow.
  if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
    if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
      (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);

  return PHISCEV;
}
const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) {
  const Loop *L = LI.getLoopFor(PN->getParent());
  if (!L || L->getHeader() != PN->getParent())
    return nullptr;

  // The loop may have multiple entrances or multiple exits; we can analyze
  // this phi as an addrec if it has a unique entry value and a unique
  // backedge value.
  Value *BEValueV = nullptr, *StartValueV = nullptr;
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    Value *V = PN->getIncomingValue(i);
    if (L->contains(PN->getIncomingBlock(i))) {
      if (!BEValueV) {
        BEValueV = V;
      } else if (BEValueV != V) {
        BEValueV = nullptr;
        break;
      }
    } else if (!StartValueV) {
      StartValueV = V;
    } else if (StartValueV != V) {
      StartValueV = nullptr;
      break;
    }
  }
  if (!BEValueV || !StartValueV)
    return nullptr;

  assert(ValueExprMap.find_as(PN) == ValueExprMap.end() &&
         "PHI node already processed?");

  // First, try to find an AddRec expression without creating a fictitious
  // symbolic value for PN.
  if (auto *S = createSimpleAffineAddRec(PN, BEValueV, StartValueV))
    return S;
  // Handle PHI node value symbolically.
  const SCEV *SymbolicName = getUnknown(PN);
  ValueExprMap.insert({SCEVCallbackVH(PN, this), SymbolicName});

  // Using this symbolic name for the PHI, analyze the value coming around
  // the backedge.
  const SCEV *BEValue = getSCEV(BEValueV);

  // NOTE: If BEValue is loop invariant, we know that the PHI node just
  // has a special value for the first iteration of the loop.

  // If the value coming around the backedge is an add with the symbolic
  // value we just inserted, then we found a simple induction variable!
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
    // If there is a single occurrence of the symbolic value, replace it
    // with a recurrence.
    unsigned FoundIndex = Add->getNumOperands();
    for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
      if (Add->getOperand(i) == SymbolicName)
        if (FoundIndex == e) {
          FoundIndex = i;
          break;
        }

    if (FoundIndex != Add->getNumOperands()) {
      // Create an add with everything but the specified operand.
      SmallVector<const SCEV *, 8> Ops;
      for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
        if (i != FoundIndex)
          Ops.push_back(SCEVBackedgeConditionFolder::rewrite(Add->getOperand(i),
                                                             L, *this));
      const SCEV *Accum = getAddExpr(Ops);
      // This is not a valid addrec if the step amount is varying each
      // loop iteration, but is not itself an addrec in this loop.
      if (isLoopInvariant(Accum, L) ||
          (isa<SCEVAddRecExpr>(Accum) &&
           cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
        SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;

        if (auto BO = MatchBinaryOp(BEValueV, DT)) {
          if (BO->Opcode == Instruction::Add && BO->LHS == PN) {
            if (BO->IsNUW)
              Flags = setFlags(Flags, SCEV::FlagNUW);
            if (BO->IsNSW)
              Flags = setFlags(Flags, SCEV::FlagNSW);
          }
        } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(BEValueV)) {
          // If the increment is an inbounds GEP, then we know the address
          // space cannot be wrapped around. We cannot make any guarantee
          // about signed or unsigned overflow because pointers are
          // unsigned but we may have a negative index from the base
          // pointer. We can guarantee that no unsigned wrap occurs if the
          // indices form a positive value.
          if (GEP->isInBounds() && GEP->getOperand(0) == PN) {
            Flags = setFlags(Flags, SCEV::FlagNW);

            const SCEV *Ptr = getSCEV(GEP->getPointerOperand());
            if (isKnownPositive(getMinusSCEV(getSCEV(GEP), Ptr)))
              Flags = setFlags(Flags, SCEV::FlagNUW);
          }

          // We cannot transfer nuw and nsw flags from subtraction
          // operations -- sub nuw X, Y is not the same as add nuw X, -Y,
          // for instance.
        }
        const SCEV *StartVal = getSCEV(StartValueV);
        const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);

        // Okay, for the entire analysis of this edge we assumed the PHI
        // to be symbolic. We now need to go back and purge all of the
        // entries for the scalars that use the symbolic expression.
        forgetSymbolicName(PN, SymbolicName);
        ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;

        // We can add Flags to the post-inc expression only if we
        // know that it is *undefined behavior* for BEValueV to
        // overflow.
        if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
          if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
            (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);

        return PHISCEV;
      }
    }
  } else {
    // Otherwise, this could be a loop like this:
    //     i = 0;  for (j = 1; ..; ++j) { ....  i = j; }
    // In this case, j = {1,+,1} and BEValue is j.
    // Because the other in-value of i (0) fits the evolution of BEValue,
    // i really is an addrec evolution.
    //
    // We can generalize this saying that i is the shifted value of BEValue
    // by one iteration:
    //   PHI(f(0), f({1,+,1})) --> f({0,+,1})
    const SCEV *Shifted = SCEVShiftRewriter::rewrite(BEValue, L, *this);
    const SCEV *Start = SCEVInitRewriter::rewrite(Shifted, L, *this, false);
    if (Shifted != getCouldNotCompute() &&
        Start != getCouldNotCompute()) {
      const SCEV *StartVal = getSCEV(StartValueV);
      if (Start == StartVal) {
        // Okay, for the entire analysis of this edge we assumed the PHI
        // to be symbolic. We now need to go back and purge all of the
        // entries for the scalars that use the symbolic expression.
        forgetSymbolicName(PN, SymbolicName);
        ValueExprMap[SCEVCallbackVH(PN, this)] = Shifted;
        return Shifted;
      }
    }
  }

  // Remove the temporary PHI node SCEV that has been inserted while intending
  // to create an AddRecExpr for this PHI node. We cannot keep this temporary
  // as it will prevent later (possibly simpler) SCEV expressions to be added
  // to the ValueExprMap.
  eraseValueFromMap(PN);

  return nullptr;
}
// Checks if the SCEV S is available at BB.  S is considered available at BB
// if S can be materialized at BB without introducing a fault.
static bool IsAvailableOnEntry(const Loop *L, DominatorTree &DT, const SCEV *S,
                               BasicBlock *BB) {
  struct CheckAvailable {
    bool TraversalDone = false;
    bool Available = true;

    const Loop *L = nullptr;  // The loop BB is in (can be nullptr)
    BasicBlock *BB = nullptr;
    DominatorTree &DT;

    CheckAvailable(const Loop *L, BasicBlock *BB, DominatorTree &DT)
        : L(L), BB(BB), DT(DT) {}

    bool setUnavailable() {
      TraversalDone = true;
      Available = false;
      return false;
    }

    bool follow(const SCEV *S) {
      switch (S->getSCEVType()) {
      case scConstant: case scTruncate: case scZeroExtend: case scSignExtend:
      case scAddExpr: case scMulExpr: case scUMaxExpr: case scSMaxExpr:
        // These expressions are available if their operand(s) is/are.
        return true;

      case scAddRecExpr: {
        // We allow add recurrences that are on the loop BB is in, or some
        // outer loop.  This guarantees availability because the value of the
        // add recurrence at BB is simply the "current" value of the induction
        // variable.  We can relax this in the future; for instance an add
        // recurrence on a sibling dominating loop is also available at BB.
        const auto *ARLoop = cast<SCEVAddRecExpr>(S)->getLoop();
        if (L && (ARLoop == L || ARLoop->contains(L)))
          return true;

        return setUnavailable();
      }

      case scUnknown: {
        // For SCEVUnknown, we check for simple dominance.
        const auto *SU = cast<SCEVUnknown>(S);
        Value *V = SU->getValue();

        if (isa<Argument>(V))
          return false;

        if (isa<Instruction>(V) && DT.dominates(cast<Instruction>(V), BB))
          return false;

        return setUnavailable();
      }

      case scUDivExpr:
      case scCouldNotCompute:
        // We do not try to be smart about these at all.
        return setUnavailable();
      }
      llvm_unreachable("switch should be fully covered!");
    }

    bool isDone() { return TraversalDone; }
  };

  CheckAvailable CA(L, BB, DT);
  SCEVTraversal<CheckAvailable> ST(CA);

  ST.visitAll(S);
  return CA.Available;
}
// Try to match a control flow sequence that branches out at BI and merges back
// at Merge into a "C ? LHS : RHS" select pattern.  Return true on a successful
// match.
static bool BrPHIToSelect(DominatorTree &DT, BranchInst *BI, PHINode *Merge,
                          Value *&C, Value *&LHS, Value *&RHS) {
  C = BI->getCondition();

  BasicBlockEdge LeftEdge(BI->getParent(), BI->getSuccessor(0));
  BasicBlockEdge RightEdge(BI->getParent(), BI->getSuccessor(1));

  if (!LeftEdge.isSingleEdge())
    return false;

  assert(RightEdge.isSingleEdge() && "Follows from LeftEdge.isSingleEdge()");

  Use &LeftUse = Merge->getOperandUse(0);
  Use &RightUse = Merge->getOperandUse(1);

  if (DT.dominates(LeftEdge, LeftUse) && DT.dominates(RightEdge, RightUse)) {
    LHS = LeftUse;
    RHS = RightUse;
    return true;
  }

  if (DT.dominates(LeftEdge, RightUse) && DT.dominates(RightEdge, LeftUse)) {
    LHS = RightUse;
    RHS = LeftUse;
    return true;
  }

  return false;
}
const SCEV *ScalarEvolution::createNodeFromSelectLikePHI(PHINode *PN) {
  auto IsReachable =
      [&](BasicBlock *BB) { return DT.isReachableFromEntry(BB); };
  if (PN->getNumIncomingValues() == 2 && all_of(PN->blocks(), IsReachable)) {
    const Loop *L = LI.getLoopFor(PN->getParent());

    // We don't want to break LCSSA, even in a SCEV expression tree.
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
      if (LI.getLoopFor(PN->getIncomingBlock(i)) != L)
        return nullptr;

    // Try to match
    //
    //  br %cond, label %left, label %right
    // left:
    //  br label %merge
    // right:
    //  br label %merge
    // merge:
    //  V = phi [ %x, %left ], [ %y, %right ]
    //
    // as "select %cond, %x, %y"

    BasicBlock *IDom = DT[PN->getParent()]->getIDom()->getBlock();
    assert(IDom && "At least the entry block should dominate PN");

    auto *BI = dyn_cast<BranchInst>(IDom->getTerminator());
    Value *Cond = nullptr, *LHS = nullptr, *RHS = nullptr;

    if (BI && BI->isConditional() &&
        BrPHIToSelect(DT, BI, PN, Cond, LHS, RHS) &&
        IsAvailableOnEntry(L, DT, getSCEV(LHS), PN->getParent()) &&
        IsAvailableOnEntry(L, DT, getSCEV(RHS), PN->getParent()))
      return createNodeForSelectOrPHI(PN, Cond, LHS, RHS);
  }

  return nullptr;
}
const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
  if (const SCEV *S = createAddRecFromPHI(PN))
    return S;

  if (const SCEV *S = createNodeFromSelectLikePHI(PN))
    return S;

  // If the PHI has a single incoming value, follow that value, unless the
  // PHI's incoming blocks are in a different loop, in which case doing so
  // risks breaking LCSSA form. Instcombine would normally zap these, but
  // it doesn't have DominatorTree information, so it may miss cases.
  if (Value *V = SimplifyInstruction(PN, {getDataLayout(), &TLI, &DT, &AC}))
    if (LI.replacementPreservesLCSSAForm(PN, V))
      return getSCEV(V);

  // If it's not a loop phi, we can't handle it yet.
  return getUnknown(PN);
}
const SCEV *ScalarEvolution::createNodeForSelectOrPHI(Instruction *I,
                                                      Value *Cond,
                                                      Value *TrueVal,
                                                      Value *FalseVal) {
  // Handle "constant" branch or select. This can occur for instance when a
  // loop pass transforms an inner loop and moves on to process the outer loop.
  if (auto *CI = dyn_cast<ConstantInt>(Cond))
    return getSCEV(CI->isOne() ? TrueVal : FalseVal);

  // Try to match some simple smax or umax patterns.
  auto *ICI = dyn_cast<ICmpInst>(Cond);
  if (!ICI)
    return getUnknown(I);

  Value *LHS = ICI->getOperand(0);
  Value *RHS = ICI->getOperand(1);

  switch (ICI->getPredicate()) {
  case ICmpInst::ICMP_SLT:
  case ICmpInst::ICMP_SLE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_SGT:
  case ICmpInst::ICMP_SGE:
    // a >s b ? a+x : b+x  ->  smax(a, b)+x
    // a >s b ? b+x : a+x  ->  smin(a, b)+x
    if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) {
      const SCEV *LS = getNoopOrSignExtend(getSCEV(LHS), I->getType());
      const SCEV *RS = getNoopOrSignExtend(getSCEV(RHS), I->getType());
      const SCEV *LA = getSCEV(TrueVal);
      const SCEV *RA = getSCEV(FalseVal);
      const SCEV *LDiff = getMinusSCEV(LA, LS);
      const SCEV *RDiff = getMinusSCEV(RA, RS);
      if (LDiff == RDiff)
        return getAddExpr(getSMaxExpr(LS, RS), LDiff);
      LDiff = getMinusSCEV(LA, RS);
      RDiff = getMinusSCEV(RA, LS);
      if (LDiff == RDiff)
        return getAddExpr(getSMinExpr(LS, RS), LDiff);
    }
    break;
  case ICmpInst::ICMP_ULT:
  case ICmpInst::ICMP_ULE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_UGT:
  case ICmpInst::ICMP_UGE:
    // a >u b ? a+x : b+x  ->  umax(a, b)+x
    // a >u b ? b+x : a+x  ->  umin(a, b)+x
    if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) {
      const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType());
      const SCEV *RS = getNoopOrZeroExtend(getSCEV(RHS), I->getType());
      const SCEV *LA = getSCEV(TrueVal);
      const SCEV *RA = getSCEV(FalseVal);
      const SCEV *LDiff = getMinusSCEV(LA, LS);
      const SCEV *RDiff = getMinusSCEV(RA, RS);
      if (LDiff == RDiff)
        return getAddExpr(getUMaxExpr(LS, RS), LDiff);
      LDiff = getMinusSCEV(LA, RS);
      RDiff = getMinusSCEV(RA, LS);
      if (LDiff == RDiff)
        return getAddExpr(getUMinExpr(LS, RS), LDiff);
    }
    break;
  case ICmpInst::ICMP_NE:
    // n != 0 ? n+x : 1+x  ->  umax(n, 1)+x
    if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) &&
        isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) {
      const SCEV *One = getOne(I->getType());
      const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType());
      const SCEV *LA = getSCEV(TrueVal);
      const SCEV *RA = getSCEV(FalseVal);
      const SCEV *LDiff = getMinusSCEV(LA, LS);
      const SCEV *RDiff = getMinusSCEV(RA, One);
      if (LDiff == RDiff)
        return getAddExpr(getUMaxExpr(One, LS), LDiff);
    }
    break;
  case ICmpInst::ICMP_EQ:
    // n == 0 ? 1+x : n+x  ->  umax(n, 1)+x
    if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) &&
        isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) {
      const SCEV *One = getOne(I->getType());
      const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType());
      const SCEV *LA = getSCEV(TrueVal);
      const SCEV *RA = getSCEV(FalseVal);
      const SCEV *LDiff = getMinusSCEV(LA, One);
      const SCEV *RDiff = getMinusSCEV(RA, LS);
      if (LDiff == RDiff)
        return getAddExpr(getUMaxExpr(One, LS), LDiff);
    }
    break;
  default:
    break;
  }

  return getUnknown(I);
}
/// Expand GEP instructions into add and multiply operations. This allows them
/// to be analyzed by regular SCEV code.
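///
/// For example (illustrative), a GEP such as
///   getelementptr i32, i32* %p, i64 %i
/// expands to the SCEV (%p + 4 * %i), where the multiplier comes from the
/// element size (4 bytes for i32).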
const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
  // Don't attempt to analyze GEPs over unsized objects.
  if (!GEP->getSourceElementType()->isSized())
    return getUnknown(GEP);

  SmallVector<const SCEV *, 4> IndexExprs;
  for (auto Index = GEP->idx_begin(); Index != GEP->idx_end(); ++Index)
    IndexExprs.push_back(getSCEV(*Index));
  return getGEPExpr(GEP, IndexExprs);
}
uint32_t ScalarEvolution::GetMinTrailingZerosImpl(const SCEV *S) {
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
    return C->getAPInt().countTrailingZeros();

  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S))
    return std::min(GetMinTrailingZeros(T->getOperand()),
                    (uint32_t)getTypeSizeInBits(T->getType()));

  if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) {
    uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
    return OpRes == getTypeSizeInBits(E->getOperand()->getType())
               ? getTypeSizeInBits(E->getType())
               : OpRes;
  }

  if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) {
    uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
    return OpRes == getTypeSizeInBits(E->getOperand()->getType())
               ? getTypeSizeInBits(E->getType())
               : OpRes;
  }

  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) {
    // The result is the min of all operands results.
    uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
    for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
    return MinOpRes;
  }
  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
    // The result is the sum of all operands results.
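    // E.g. 12 * 10: 12 (0b1100) has two trailing zeros and 10 (0b1010) has
    // one, so the product 120 (0b1111000) has at least 2 + 1 = 3 of them.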
    uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0));
    uint32_t BitWidth = getTypeSizeInBits(M->getType());
    for (unsigned i = 1, e = M->getNumOperands();
         SumOpRes != BitWidth && i != e; ++i)
      SumOpRes =
          std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)), BitWidth);
    return SumOpRes;
  }
  if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
    // The result is the min of all operands results.
    uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
    for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
    return MinOpRes;
  }

  if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) {
    // The result is the min of all operands results.
    uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
    for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
    return MinOpRes;
  }

  if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) {
    // The result is the min of all operands results.
    uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
    for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
    return MinOpRes;
  }

  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    // For a SCEVUnknown, ask ValueTracking.
    KnownBits Known =
        computeKnownBits(U->getValue(), getDataLayout(), 0, &AC, nullptr, &DT);
    return Known.countMinTrailingZeros();
  }

  // SCEVUDivExpr
  return 0;
}
uint32_t ScalarEvolution::GetMinTrailingZeros(const SCEV *S) {
  auto I = MinTrailingZerosCache.find(S);
  if (I != MinTrailingZerosCache.end())
    return I->second;

  uint32_t Result = GetMinTrailingZerosImpl(S);
  auto InsertPair = MinTrailingZerosCache.insert({S, Result});
  assert(InsertPair.second && "Should insert a new key");
  return InsertPair.first->second;
}
/// Helper method to assign a range to V from metadata present in the IR.
static Optional<ConstantRange> GetRangeFromMetadata(Value *V) {
  if (Instruction *I = dyn_cast<Instruction>(V))
    if (MDNode *MD = I->getMetadata(LLVMContext::MD_range))
      return getConstantRangeFromMetadata(*MD);

  return None;
}
/// Determine the range for a particular SCEV.  If SignHint is
/// HINT_RANGE_UNSIGNED (resp. HINT_RANGE_SIGNED) then getRange prefers ranges
/// with a "cleaner" unsigned (resp. signed) representation.
const ConstantRange &
ScalarEvolution::getRangeRef(const SCEV *S,
                             ScalarEvolution::RangeSignHint SignHint) {
  DenseMap<const SCEV *, ConstantRange> &Cache =
      SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges
                                                       : SignedRanges;

  // See if we've computed this range already.
  DenseMap<const SCEV *, ConstantRange>::iterator I = Cache.find(S);
  if (I != Cache.end())
    return I->second;

  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
    return setRange(C, SignHint, ConstantRange(C->getAPInt()));

  unsigned BitWidth = getTypeSizeInBits(S->getType());
  ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true);
  // If the value has known zeros, the maximum value will have those known zeros
  // as well.
  uint32_t TZ = GetMinTrailingZeros(S);
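  // E.g. for an 8-bit S with TZ == 3, every value of S is a multiple of 8,
  // so the unsigned range below becomes [0, 0b11111000 + 1).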
  if (TZ != 0) {
    if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED)
      ConservativeResult =
          ConstantRange(APInt::getMinValue(BitWidth),
                        APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1);
    else
      ConservativeResult = ConstantRange(
          APInt::getSignedMinValue(BitWidth),
          APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1);
  }
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    ConstantRange X = getRangeRef(Add->getOperand(0), SignHint);
    for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
      X = X.add(getRangeRef(Add->getOperand(i), SignHint));
    return setRange(Add, SignHint, ConservativeResult.intersectWith(X));
  }

  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
    ConstantRange X = getRangeRef(Mul->getOperand(0), SignHint);
    for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
      X = X.multiply(getRangeRef(Mul->getOperand(i), SignHint));
    return setRange(Mul, SignHint, ConservativeResult.intersectWith(X));
  }

  if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
    ConstantRange X = getRangeRef(SMax->getOperand(0), SignHint);
    for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
      X = X.smax(getRangeRef(SMax->getOperand(i), SignHint));
    return setRange(SMax, SignHint, ConservativeResult.intersectWith(X));
  }

  if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
    ConstantRange X = getRangeRef(UMax->getOperand(0), SignHint);
    for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
      X = X.umax(getRangeRef(UMax->getOperand(i), SignHint));
    return setRange(UMax, SignHint, ConservativeResult.intersectWith(X));
  }

  if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
    ConstantRange X = getRangeRef(UDiv->getLHS(), SignHint);
    ConstantRange Y = getRangeRef(UDiv->getRHS(), SignHint);
    return setRange(UDiv, SignHint,
                    ConservativeResult.intersectWith(X.udiv(Y)));
  }
*ZExt
= dyn_cast
<SCEVZeroExtendExpr
>(S
)) {
5639 ConstantRange X
= getRangeRef(ZExt
->getOperand(), SignHint
);
5640 return setRange(ZExt
, SignHint
,
5641 ConservativeResult
.intersectWith(X
.zeroExtend(BitWidth
)));
5644 if (const SCEVSignExtendExpr
*SExt
= dyn_cast
<SCEVSignExtendExpr
>(S
)) {
5645 ConstantRange X
= getRangeRef(SExt
->getOperand(), SignHint
);
5646 return setRange(SExt
, SignHint
,
5647 ConservativeResult
.intersectWith(X
.signExtend(BitWidth
)));
5650 if (const SCEVTruncateExpr
*Trunc
= dyn_cast
<SCEVTruncateExpr
>(S
)) {
5651 ConstantRange X
= getRangeRef(Trunc
->getOperand(), SignHint
);
5652 return setRange(Trunc
, SignHint
,
5653 ConservativeResult
.intersectWith(X
.truncate(BitWidth
)));
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
    // If there's no unsigned wrap, the value will never be less than its
    // initial value.
    if (AddRec->hasNoUnsignedWrap())
      if (const SCEVConstant *C = dyn_cast<SCEVConstant>(AddRec->getStart()))
        if (!C->getValue()->isZero())
          ConservativeResult = ConservativeResult.intersectWith(
              ConstantRange(C->getAPInt(), APInt(BitWidth, 0)));

    // If there's no signed wrap, and all the operands have the same sign or
    // zero, the value won't ever change sign.
    if (AddRec->hasNoSignedWrap()) {
      bool AllNonNeg = true;
      bool AllNonPos = true;
      for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
        if (!isKnownNonNegative(AddRec->getOperand(i))) AllNonNeg = false;
        if (!isKnownNonPositive(AddRec->getOperand(i))) AllNonPos = false;
      }
      if (AllNonNeg)
        ConservativeResult = ConservativeResult.intersectWith(
            ConstantRange(APInt(BitWidth, 0),
                          APInt::getSignedMinValue(BitWidth)));
      else if (AllNonPos)
        ConservativeResult = ConservativeResult.intersectWith(
            ConstantRange(APInt::getSignedMinValue(BitWidth),
                          APInt(BitWidth, 1)));
    }

    // TODO: non-affine addrec
    if (AddRec->isAffine()) {
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop());
      if (!isa<SCEVCouldNotCompute>(MaxBECount) &&
          getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) {
        auto RangeFromAffine = getRangeForAffineAR(
            AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount,
            BitWidth);
        if (!RangeFromAffine.isFullSet())
          ConservativeResult =
              ConservativeResult.intersectWith(RangeFromAffine);

        auto RangeFromFactoring = getRangeViaFactoring(
            AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount,
            BitWidth);
        if (!RangeFromFactoring.isFullSet())
          ConservativeResult =
              ConservativeResult.intersectWith(RangeFromFactoring);
      }
    }

    return setRange(AddRec, SignHint, std::move(ConservativeResult));
  }
  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    // Check if the IR explicitly contains !range metadata.
    Optional<ConstantRange> MDRange = GetRangeFromMetadata(U->getValue());
    if (MDRange.hasValue())
      ConservativeResult = ConservativeResult.intersectWith(MDRange.getValue());

    // Split here to avoid paying the compile-time cost of calling both
    // computeKnownBits and ComputeNumSignBits.  This restriction can be lifted
    // later if needed.
    const DataLayout &DL = getDataLayout();
    if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) {
      // For a SCEVUnknown, ask ValueTracking.
      KnownBits Known = computeKnownBits(U->getValue(), DL, 0, &AC, nullptr, &DT);
      if (Known.One != ~Known.Zero + 1)
        ConservativeResult =
            ConservativeResult.intersectWith(ConstantRange(Known.One,
                                                           ~Known.Zero + 1));
    } else {
      assert(SignHint == ScalarEvolution::HINT_RANGE_SIGNED &&
             "generalize as needed!");
      unsigned NS = ComputeNumSignBits(U->getValue(), DL, 0, &AC, nullptr, &DT);
      if (NS > 1)
        ConservativeResult = ConservativeResult.intersectWith(
            ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
                          APInt::getSignedMaxValue(BitWidth).ashr(NS - 1) + 1));
    }

    // A range of Phi is a subset of union of all ranges of its input.
    if (const PHINode *Phi = dyn_cast<PHINode>(U->getValue())) {
      // Make sure that we do not run over cycled Phis.
      if (PendingPhiRanges.insert(Phi).second) {
        ConstantRange RangeFromOps(BitWidth, /*isFullSet=*/false);
        for (auto &Op : Phi->operands()) {
          auto OpRange = getRangeRef(getSCEV(Op), SignHint);
          RangeFromOps = RangeFromOps.unionWith(OpRange);
          // No point to continue if we already have a full set.
          if (RangeFromOps.isFullSet())
            break;
        }
        ConservativeResult = ConservativeResult.intersectWith(RangeFromOps);
        bool Erased = PendingPhiRanges.erase(Phi);
        assert(Erased && "Failed to erase Phi properly?");
        (void) Erased;
      }
    }

    return setRange(U, SignHint, std::move(ConservativeResult));
  }

  return setRange(S, SignHint, std::move(ConservativeResult));
}
// Given a StartRange, Step and MaxBECount for an expression compute a range of
// values that the expression can take. Initially, the expression has a value
// from StartRange and then is changed by Step up to MaxBECount times. The
// Signed argument defines if we treat Step as signed or unsigned.
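//
// For example (ascending, unsigned case): with StartRange = [0, 10), Step = 2
// and MaxBECount = 3, the expression can grow by at most Offset = 6, so the
// moved boundary is 9 + 6 = 15 and the resulting range is [0, 16).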
static ConstantRange getRangeForAffineARHelper(APInt Step,
                                               const ConstantRange &StartRange,
                                               const APInt &MaxBECount,
                                               unsigned BitWidth, bool Signed) {
  // If either Step or MaxBECount is 0, then the expression won't change, and we
  // just need to return the initial range.
  if (Step == 0 || MaxBECount == 0)
    return StartRange;

  // If we don't know anything about the initial value (i.e. StartRange is
  // FullRange), then we don't know anything about the final range either.
  // Return FullRange.
  if (StartRange.isFullSet())
    return ConstantRange::getFull(BitWidth);

  // If Step is signed and negative, then we use its absolute value, but we also
  // note that we're moving in the opposite direction.
  bool Descending = Signed && Step.isNegative();

  if (Signed)
    // This is correct even for INT_SMIN. Let's look at i8 to illustrate this:
    // abs(INT_SMIN) = abs(-128) = abs(0x80) = -0x80 = 0x80 = 128.
    // These equations hold true due to the well-defined wrap-around behavior
    // of APInt.
    Step = Step.abs();

  // Check if Offset is more than full span of BitWidth. If it is, the
  // expression is guaranteed to overflow.
  if (APInt::getMaxValue(StartRange.getBitWidth()).udiv(Step).ult(MaxBECount))
    return ConstantRange::getFull(BitWidth);

  // Offset is by how much the expression can change. Checks above guarantee no
  // overflow here.
  APInt Offset = Step * MaxBECount;

  // Minimum value of the final range will match the minimal value of StartRange
  // if the expression is increasing and will be decreased by Offset otherwise.
  // Maximum value of the final range will match the maximal value of StartRange
  // if the expression is decreasing and will be increased by Offset otherwise.
  APInt StartLower = StartRange.getLower();
  APInt StartUpper = StartRange.getUpper() - 1;
  APInt MovedBoundary = Descending ? (StartLower - std::move(Offset))
                                   : (StartUpper + std::move(Offset));

  // It's possible that the new minimum/maximum value will fall into the initial
  // range (due to wrap around). This means that the expression can take any
  // value in this bitwidth, and we have to return full range.
  if (StartRange.contains(MovedBoundary))
    return ConstantRange::getFull(BitWidth);

  APInt NewLower =
      Descending ? std::move(MovedBoundary) : std::move(StartLower);
  APInt NewUpper =
      Descending ? std::move(StartUpper) : std::move(MovedBoundary);
  NewUpper += 1;

  // If we end up with full range, return a proper full range.
  if (NewLower == NewUpper)
    return ConstantRange::getFull(BitWidth);

  // No overflow detected, return [StartLower, StartUpper + Offset + 1) range.
  return ConstantRange(std::move(NewLower), std::move(NewUpper));
}
ConstantRange ScalarEvolution::getRangeForAffineAR(const SCEV *Start,
                                                   const SCEV *Step,
                                                   const SCEV *MaxBECount,
                                                   unsigned BitWidth) {
  assert(!isa<SCEVCouldNotCompute>(MaxBECount) &&
         getTypeSizeInBits(MaxBECount->getType()) <= BitWidth &&
         "Precondition!");

  MaxBECount = getNoopOrZeroExtend(MaxBECount, Start->getType());
  APInt MaxBECountValue = getUnsignedRangeMax(MaxBECount);

  // First, consider step signed.
  ConstantRange StartSRange = getSignedRange(Start);
  ConstantRange StepSRange = getSignedRange(Step);

  // If Step can be both positive and negative, we need to find ranges for the
  // maximum absolute step values in both directions and union them.
  ConstantRange SR =
      getRangeForAffineARHelper(StepSRange.getSignedMin(), StartSRange,
                                MaxBECountValue, BitWidth, /* Signed = */ true);
  SR = SR.unionWith(getRangeForAffineARHelper(StepSRange.getSignedMax(),
                                              StartSRange, MaxBECountValue,
                                              BitWidth, /* Signed = */ true));

  // Next, consider step unsigned.
  ConstantRange UR = getRangeForAffineARHelper(
      getUnsignedRangeMax(Step), getUnsignedRange(Start),
      MaxBECountValue, BitWidth, /* Signed = */ false);

  // Finally, intersect signed and unsigned ranges.
  return SR.intersectWith(UR);
}
ConstantRange ScalarEvolution::getRangeViaFactoring(const SCEV *Start,
                                                    const SCEV *Step,
                                                    const SCEV *MaxBECount,
                                                    unsigned BitWidth) {
  //    RangeOf({C?A:B,+,C?P:Q}) == RangeOf(C?{A,+,P}:{B,+,Q})
  // == RangeOf({A,+,P}) union RangeOf({B,+,Q})

  struct SelectPattern {
    Value *Condition = nullptr;
    APInt TrueValue;
    APInt FalseValue;

    explicit SelectPattern(ScalarEvolution &SE, unsigned BitWidth,
                           const SCEV *S) {
      Optional<unsigned> CastOp;
      APInt Offset(BitWidth, 0);

      assert(SE.getTypeSizeInBits(S->getType()) == BitWidth &&
             "Should be!");

      // Peel off a constant offset:
      if (auto *SA = dyn_cast<SCEVAddExpr>(S)) {
        // In the future we could consider being smarter here and handle
        // {Start+Step,+,Step} too.
        if (SA->getNumOperands() != 2 || !isa<SCEVConstant>(SA->getOperand(0)))
          return;

        Offset = cast<SCEVConstant>(SA->getOperand(0))->getAPInt();
        S = SA->getOperand(1);
      }

      // Peel off a cast operation
      if (auto *SCast = dyn_cast<SCEVCastExpr>(S)) {
        CastOp = SCast->getSCEVType();
        S = SCast->getOperand();
      }

      using namespace llvm::PatternMatch;

      auto *SU = dyn_cast<SCEVUnknown>(S);
      const APInt *TrueVal, *FalseVal;
      if (!SU ||
          !match(SU->getValue(), m_Select(m_Value(Condition), m_APInt(TrueVal),
                                          m_APInt(FalseVal)))) {
        Condition = nullptr;
        return;
      }

      TrueValue = *TrueVal;
      FalseValue = *FalseVal;

      // Re-apply the cast we peeled off earlier
      if (CastOp.hasValue())
        switch (*CastOp) {
        default:
          llvm_unreachable("Unknown SCEV cast type!");

        case scTruncate:
          TrueValue = TrueValue.trunc(BitWidth);
          FalseValue = FalseValue.trunc(BitWidth);
          break;
        case scZeroExtend:
          TrueValue = TrueValue.zext(BitWidth);
          FalseValue = FalseValue.zext(BitWidth);
          break;
        case scSignExtend:
          TrueValue = TrueValue.sext(BitWidth);
          FalseValue = FalseValue.sext(BitWidth);
          break;
        }

      // Re-apply the constant offset we peeled off earlier
      TrueValue += Offset;
      FalseValue += Offset;
    }

    bool isRecognized() { return Condition != nullptr; }
  };

  SelectPattern StartPattern(*this, BitWidth, Start);
  if (!StartPattern.isRecognized())
    return ConstantRange::getFull(BitWidth);

  SelectPattern StepPattern(*this, BitWidth, Step);
  if (!StepPattern.isRecognized())
    return ConstantRange::getFull(BitWidth);

  if (StartPattern.Condition != StepPattern.Condition) {
    // We don't handle this case today; but we could, by considering four
    // possibilities below instead of two. I'm not sure if there are cases where
    // that will help over what getRange already does, though.
    return ConstantRange::getFull(BitWidth);
  }

  // NB! Calling ScalarEvolution::getConstant is fine, but we should not try to
  // construct arbitrary general SCEV expressions here.  This function is called
  // from deep in the call stack, and calling getSCEV (on a sext instruction,
  // say) can end up caching a suboptimal value.

  // FIXME: without the explicit `this` receiver below, MSVC errors out with
  // C2352 and C2512 (otherwise it isn't needed).

  const SCEV *TrueStart = this->getConstant(StartPattern.TrueValue);
  const SCEV *TrueStep = this->getConstant(StepPattern.TrueValue);
  const SCEV *FalseStart = this->getConstant(StartPattern.FalseValue);
  const SCEV *FalseStep = this->getConstant(StepPattern.FalseValue);

  ConstantRange TrueRange =
      this->getRangeForAffineAR(TrueStart, TrueStep, MaxBECount, BitWidth);
  ConstantRange FalseRange =
      this->getRangeForAffineAR(FalseStart, FalseStep, MaxBECount, BitWidth);

  return TrueRange.unionWith(FalseRange);
}
SCEV::NoWrapFlags ScalarEvolution::getNoWrapFlagsFromUB(const Value *V) {
  if (isa<ConstantExpr>(V)) return SCEV::FlagAnyWrap;
  const BinaryOperator *BinOp = cast<BinaryOperator>(V);

  // Return early if there are no flags to propagate to the SCEV.
  SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
  if (BinOp->hasNoUnsignedWrap())
    Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
  if (BinOp->hasNoSignedWrap())
    Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW);
  if (Flags == SCEV::FlagAnyWrap)
    return SCEV::FlagAnyWrap;

  return isSCEVExprNeverPoison(BinOp) ? Flags : SCEV::FlagAnyWrap;
}
bool ScalarEvolution::isSCEVExprNeverPoison(const Instruction *I) {
  // Here we check that I is in the header of the innermost loop containing I,
  // since we only deal with instructions in the loop header. The actual loop we
  // need to check later will come from an add recurrence, but getting that
  // requires computing the SCEV of the operands, which can be expensive. This
  // check we can do cheaply to rule out some cases early.
  Loop *InnermostContainingLoop = LI.getLoopFor(I->getParent());
  if (InnermostContainingLoop == nullptr ||
      InnermostContainingLoop->getHeader() != I->getParent())
    return false;

  // Only proceed if we can prove that I does not yield poison.
  if (!programUndefinedIfFullPoison(I))
    return false;

  // At this point we know that if I is executed, then it does not wrap
  // according to at least one of NSW or NUW. If I is not executed, then we do
  // not know if the calculation that I represents would wrap. Multiple
  // instructions can map to the same SCEV. If we apply NSW or NUW from I to
  // the SCEV, we must guarantee no wrapping for that SCEV also when it is
  // derived from other instructions that map to the same SCEV. We cannot make
  // that guarantee for cases where I is not executed. So we need to find the
  // loop that I is considered in relation to and prove that I is executed for
  // every iteration of that loop. That implies that the value that I
  // calculates does not wrap anywhere in the loop, so then we can apply the
  // flags to the SCEV.
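  //
  // For example (illustrative): if I is "add nsw i32 %iv, %n" in the header
  // of loop L, where %iv is an addrec on L and %n is loop-invariant, then
  // proving that I executes on every iteration of L is what justifies
  // applying the nsw flag to the SCEV we compute for I.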
  //
  // We check isLoopInvariant to disambiguate in case we are adding recurrences
  // from different loops, so that we know which loop to prove that I is
  // executed in.
  for (unsigned OpIndex = 0; OpIndex < I->getNumOperands(); ++OpIndex) {
    // I could be an extractvalue from a call to an overflow intrinsic.
    // TODO: We can do better here in some cases.
    if (!isSCEVable(I->getOperand(OpIndex)->getType()))
      return false;
    const SCEV *Op = getSCEV(I->getOperand(OpIndex));
    if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
      bool AllOtherOpsLoopInvariant = true;
      for (unsigned OtherOpIndex = 0; OtherOpIndex < I->getNumOperands();
           ++OtherOpIndex) {
        if (OtherOpIndex != OpIndex) {
          const SCEV *OtherOp = getSCEV(I->getOperand(OtherOpIndex));
          if (!isLoopInvariant(OtherOp, AddRec->getLoop())) {
            AllOtherOpsLoopInvariant = false;
            break;
          }
        }
      }
      if (AllOtherOpsLoopInvariant &&
          isGuaranteedToExecuteForEveryIteration(I, AddRec->getLoop()))
        return true;
    }
  }

  return false;
}
bool ScalarEvolution::isAddRecNeverPoison(const Instruction *I, const Loop *L) {
  // If we know that \c I can never be poison period, then that's enough.
  if (isSCEVExprNeverPoison(I))
    return true;

  // For an add recurrence specifically, we assume that infinite loops without
  // side effects are undefined behavior, and then reason as follows:
  //
  // If the add recurrence is poison in any iteration, it is poison on all
  // future iterations (since incrementing poison yields poison). If the result
  // of the add recurrence is fed into the loop latch condition and the loop
  // does not contain any throws or exiting blocks other than the latch, we now
  // have the ability to "choose" whether the backedge is taken or not (by
  // choosing a sufficiently evil value for the poison feeding into the branch)
  // for every iteration including and after the one in which \p I first became
  // poison.  There are two possibilities (let's call the iteration in which \p
  // I first became poison as K):
  //
  //  1. In the set of iterations including and after K, the loop body executes
  //     no side effects.  In this case executing the backedge an infinite
  //     number of times will yield undefined behavior.
  //
  //  2. In the set of iterations including and after K, the loop body executes
  //     at least one side effect.  In this case, that specific instance of
  //     side effect is control dependent on poison, which also yields
  //     undefined behavior.

  auto *ExitingBB = L->getExitingBlock();
  auto *LatchBB = L->getLoopLatch();
  if (!ExitingBB || !LatchBB || ExitingBB != LatchBB)
    return false;

  SmallPtrSet<const Instruction *, 16> Pushed;
  SmallVector<const Instruction *, 8> PoisonStack;

  // We start by assuming \c I, the post-inc add recurrence, is poison.  Only
  // things that are known to be fully poison under that assumption go on the
  // PoisonStack.
  Pushed.insert(I);
  PoisonStack.push_back(I);

  bool LatchControlDependentOnPoison = false;
  while (!PoisonStack.empty() && !LatchControlDependentOnPoison) {
    const Instruction *Poison = PoisonStack.pop_back_val();

    for (auto *PoisonUser : Poison->users()) {
      if (propagatesFullPoison(cast<Instruction>(PoisonUser))) {
        if (Pushed.insert(cast<Instruction>(PoisonUser)).second)
          PoisonStack.push_back(cast<Instruction>(PoisonUser));
      } else if (auto *BI = dyn_cast<BranchInst>(PoisonUser)) {
        assert(BI->isConditional() && "Only possibility!");
        if (BI->getParent() == LatchBB) {
          LatchControlDependentOnPoison = true;
          break;
        }
      }
    }
  }

  return LatchControlDependentOnPoison && loopHasNoAbnormalExits(L);
}
ScalarEvolution::LoopProperties
ScalarEvolution::getLoopProperties(const Loop *L) {
  using LoopProperties = ScalarEvolution::LoopProperties;

  auto Itr = LoopPropertiesCache.find(L);
  if (Itr == LoopPropertiesCache.end()) {
    auto HasSideEffects = [](Instruction *I) {
      if (auto *SI = dyn_cast<StoreInst>(I))
        return !SI->isSimple();

      return I->mayHaveSideEffects();
    };

    LoopProperties LP = {/* HasNoAbnormalExits */ true,
                         /*HasNoSideEffects*/ true};

    for (auto *BB : L->getBlocks())
      for (auto &I : *BB) {
        if (!isGuaranteedToTransferExecutionToSuccessor(&I))
          LP.HasNoAbnormalExits = false;
        if (HasSideEffects(&I))
          LP.HasNoSideEffects = false;
        if (!LP.HasNoAbnormalExits && !LP.HasNoSideEffects)
          break; // We're already as pessimistic as we can get.
      }

    auto InsertPair = LoopPropertiesCache.insert({L, LP});
    assert(InsertPair.second && "We just checked!");
    Itr = InsertPair.first;
  }

  return Itr->second;
}
*ScalarEvolution::createSCEV(Value
*V
) {
6145 if (!isSCEVable(V
->getType()))
6146 return getUnknown(V
);
6148 if (Instruction
*I
= dyn_cast
<Instruction
>(V
)) {
6149 // Don't attempt to analyze instructions in blocks that aren't
6150 // reachable. Such instructions don't matter, and they aren't required
6151 // to obey basic rules for definitions dominating uses which this
6152 // analysis depends on.
6153 if (!DT
.isReachableFromEntry(I
->getParent()))
6154 return getUnknown(UndefValue::get(V
->getType()));
6155 } else if (ConstantInt
*CI
= dyn_cast
<ConstantInt
>(V
))
6156 return getConstant(CI
);
6157 else if (isa
<ConstantPointerNull
>(V
))
6158 return getZero(V
->getType());
6159 else if (GlobalAlias
*GA
= dyn_cast
<GlobalAlias
>(V
))
6160 return GA
->isInterposable() ? getUnknown(V
) : getSCEV(GA
->getAliasee());
6161 else if (!isa
<ConstantExpr
>(V
))
6162 return getUnknown(V
);
6164 Operator
*U
= cast
<Operator
>(V
);
  if (auto BO = MatchBinaryOp(U, DT)) {
    switch (BO->Opcode) {
    case Instruction::Add: {
      // The simple thing to do would be to just call getSCEV on both operands
      // and call getAddExpr with the result. However if we're looking at a
      // bunch of things all added together, this can be quite inefficient,
      // because it leads to N-1 getAddExpr calls for N ultimate operands.
      // Instead, gather up all the operands and make a single getAddExpr call.
      // LLVM IR canonical form means we need only traverse the left operands.
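      // E.g. for (((%a + %b) + %c) + %d), we collect the operands
      // {%d, %c, %b, %a} while walking the left spine and then issue a
      // single getAddExpr call for all of them.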
      SmallVector<const SCEV *, 4> AddOps;
      do {
        if (BO->Op) {
          if (auto *OpSCEV = getExistingSCEV(BO->Op)) {
            AddOps.push_back(OpSCEV);
            break;
          }

          // If a NUW or NSW flag can be applied to the SCEV for this
          // addition, then compute the SCEV for this addition by itself
          // with a separate call to getAddExpr. We need to do that
          // instead of pushing the operands of the addition onto AddOps,
          // since the flags are only known to apply to this particular
          // addition - they may not apply to other additions that can be
          // formed with operands from AddOps.
          const SCEV *RHS = getSCEV(BO->RHS);
          SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op);
          if (Flags != SCEV::FlagAnyWrap) {
            const SCEV *LHS = getSCEV(BO->LHS);
            if (BO->Opcode == Instruction::Sub)
              AddOps.push_back(getMinusSCEV(LHS, RHS, Flags));
            else
              AddOps.push_back(getAddExpr(LHS, RHS, Flags));
            break;
          }
        }

        if (BO->Opcode == Instruction::Sub)
          AddOps.push_back(getNegativeSCEV(getSCEV(BO->RHS)));
        else
          AddOps.push_back(getSCEV(BO->RHS));

        auto NewBO = MatchBinaryOp(BO->LHS, DT);
        if (!NewBO || (NewBO->Opcode != Instruction::Add &&
                       NewBO->Opcode != Instruction::Sub)) {
          AddOps.push_back(getSCEV(BO->LHS));
          break;
        }
        BO = NewBO;
      } while (true);

      return getAddExpr(AddOps);
    }
: {
6219 SmallVector
<const SCEV
*, 4> MulOps
;
6222 if (auto *OpSCEV
= getExistingSCEV(BO
->Op
)) {
6223 MulOps
.push_back(OpSCEV
);
6227 SCEV::NoWrapFlags Flags
= getNoWrapFlagsFromUB(BO
->Op
);
6228 if (Flags
!= SCEV::FlagAnyWrap
) {
6230 getMulExpr(getSCEV(BO
->LHS
), getSCEV(BO
->RHS
), Flags
));
6235 MulOps
.push_back(getSCEV(BO
->RHS
));
6236 auto NewBO
= MatchBinaryOp(BO
->LHS
, DT
);
6237 if (!NewBO
|| NewBO
->Opcode
!= Instruction::Mul
) {
6238 MulOps
.push_back(getSCEV(BO
->LHS
));
6244 return getMulExpr(MulOps
);
    case Instruction::UDiv:
      return getUDivExpr(getSCEV(BO->LHS), getSCEV(BO->RHS));
    case Instruction::URem:
      return getURemExpr(getSCEV(BO->LHS), getSCEV(BO->RHS));
    case Instruction::Sub: {
      SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
      if (BO->Op)
        Flags = getNoWrapFlagsFromUB(BO->Op);
      return getMinusSCEV(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags);
    }
    case Instruction::And:
      // For an expression like x&255 that merely masks off the high bits,
      // use zext(trunc(x)) as the SCEV expression.
      if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) {
        if (CI->isZero())
          return getSCEV(BO->RHS);
        if (CI->isMinusOne())
          return getSCEV(BO->LHS);
        const APInt &A = CI->getValue();

        // Instcombine's ShrinkDemandedConstant may strip bits out of
        // constants, obscuring what would otherwise be a low-bits mask.
        // Use computeKnownBits to compute what ShrinkDemandedConstant
        // knew about to reconstruct a low-bits mask value.
        unsigned LZ = A.countLeadingZeros();
        unsigned TZ = A.countTrailingZeros();
        unsigned BitWidth = A.getBitWidth();
        KnownBits Known(BitWidth);
        computeKnownBits(BO->LHS, Known, getDataLayout(),
                         0, &AC, nullptr, &DT);

        APInt EffectiveMask =
            APInt::getLowBitsSet(BitWidth, BitWidth - LZ - TZ).shl(TZ);
        if ((LZ != 0 || TZ != 0) && !((~A & ~Known.Zero) & EffectiveMask)) {
          const SCEV *MulCount = getConstant(APInt::getOneBitSet(BitWidth, TZ));
          const SCEV *LHS = getSCEV(BO->LHS);
          const SCEV *ShiftedLHS = nullptr;
          if (auto *LHSMul = dyn_cast<SCEVMulExpr>(LHS)) {
            if (auto *OpC = dyn_cast<SCEVConstant>(LHSMul->getOperand(0))) {
              // For an expression like (x * 8) & 8, simplify the multiply.
              unsigned MulZeros = OpC->getAPInt().countTrailingZeros();
              unsigned GCD = std::min(MulZeros, TZ);
              APInt DivAmt = APInt::getOneBitSet(BitWidth, TZ - GCD);
              SmallVector<const SCEV*, 4> MulOps;
              MulOps.push_back(getConstant(OpC->getAPInt().lshr(GCD)));
              MulOps.append(LHSMul->op_begin() + 1, LHSMul->op_end());
              auto *NewMul = getMulExpr(MulOps, LHSMul->getNoWrapFlags());
              ShiftedLHS = getUDivExpr(NewMul, getConstant(DivAmt));
            }
          }
          if (!ShiftedLHS)
            ShiftedLHS = getUDivExpr(LHS, MulCount);
          return getMulExpr(
              getZeroExtendExpr(
                  getTruncateExpr(ShiftedLHS,
                      IntegerType::get(getContext(), BitWidth - LZ - TZ)),
                  BO->LHS->getType()),
              MulCount);
        }
      }
      break;
:
6309 // If the RHS of the Or is a constant, we may have something like:
6310 // X*4+1 which got turned into X*4|1. Handle this as an Add so loop
6311 // optimizations will transparently handle this case.
6313 // In order for this transformation to be safe, the LHS must be of the
6314 // form X*(2^n) and the Or constant must be less than 2^n.
6315 if (ConstantInt
*CI
= dyn_cast
<ConstantInt
>(BO
->RHS
)) {
6316 const SCEV
*LHS
= getSCEV(BO
->LHS
);
6317 const APInt
&CIVal
= CI
->getValue();
6318 if (GetMinTrailingZeros(LHS
) >=
6319 (CIVal
.getBitWidth() - CIVal
.countLeadingZeros())) {
6320 // Build a plain add SCEV.
6321 const SCEV
*S
= getAddExpr(LHS
, getSCEV(CI
));
6322 // If the LHS of the add was an addrec and it has no-wrap flags,
6323 // transfer the no-wrap flags, since an or won't introduce a wrap.
6324 if (const SCEVAddRecExpr
*NewAR
= dyn_cast
<SCEVAddRecExpr
>(S
)) {
6325 const SCEVAddRecExpr
*OldAR
= cast
<SCEVAddRecExpr
>(LHS
);
6326 const_cast<SCEVAddRecExpr
*>(NewAR
)->setNoWrapFlags(
6327 OldAR
->getNoWrapFlags());
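  // For example, given %a = mul i32 %x, 4 and %b = or i32 %a, 1:
  // GetMinTrailingZeros(4 * %x) >= 2 while the constant 1 needs only one
  // bit, so the or is modeled as (4 * %x) + 1 and addrec folding sees
  // through it.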
  case Instruction::Xor:
    if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) {
      // If the RHS of xor is -1, then this is a not operation.
      if (CI->isMinusOne())
        return getNotSCEV(getSCEV(BO->LHS));

      // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask.
      // This is a variant of the check for xor with -1, and it handles
      // the case where instcombine has trimmed non-demanded bits out
      // of an xor with -1.
      if (auto *LBO = dyn_cast<BinaryOperator>(BO->LHS))
        if (ConstantInt *LCI = dyn_cast<ConstantInt>(LBO->getOperand(1)))
          if (LBO->getOpcode() == Instruction::And &&
              LCI->getValue() == CI->getValue())
            if (const SCEVZeroExtendExpr *Z =
                    dyn_cast<SCEVZeroExtendExpr>(getSCEV(BO->LHS))) {
              Type *UTy = BO->LHS->getType();
              const SCEV *Z0 = Z->getOperand();
              Type *Z0Ty = Z0->getType();
              unsigned Z0TySize = getTypeSizeInBits(Z0Ty);

              // If C is a low-bits mask, the zero extend is serving to
              // mask off the high bits. Complement the operand and
              // re-apply the zext.
              if (CI->getValue().isMask(Z0TySize))
                return getZeroExtendExpr(getNotSCEV(Z0), UTy);

              // If C is a single bit, it may be in the sign-bit position
              // before the zero-extend. In this case, represent the xor
              // using an add, which is equivalent, and re-apply the zext.
              APInt Trunc = CI->getValue().trunc(Z0TySize);
              if (Trunc.zext(getTypeSizeInBits(UTy)) == CI->getValue() &&
                  Trunc.isSignMask())
                return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)),
                                         UTy);
            }
    }
    break;
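  // For example, if %m = and i32 %x, 7 is modeled as
  // (zext i3 (trunc %x) to i32), then %y = xor i32 %m, 7 becomes
  // (zext i3 (not (trunc %x)) to i32), because 7 is a low-bits mask of the
  // narrow i3 type.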
  case Instruction::Shl:
    // Turn shift left of a constant amount into a multiply.
    if (ConstantInt *SA = dyn_cast<ConstantInt>(BO->RHS)) {
      uint32_t BitWidth = cast<IntegerType>(SA->getType())->getBitWidth();

      // If the shift count is not less than the bitwidth, the result of
      // the shift is undefined. Don't try to analyze it, because the
      // resolution chosen here may differ from the resolution chosen in
      // other parts of the compiler.
      if (SA->getValue().uge(BitWidth))
        break;

      // It is currently not resolved how to interpret NSW for left
      // shift by BitWidth - 1, so we avoid applying flags in that
      // case. Remove this check (or this comment) once the situation
      // is resolved. See
      // http://lists.llvm.org/pipermail/llvm-dev/2015-April/084195.html
      // and http://reviews.llvm.org/D8890 .
      auto Flags = SCEV::FlagAnyWrap;
      if (BO->Op && SA->getValue().ult(BitWidth - 1))
        Flags = getNoWrapFlagsFromUB(BO->Op);

      Constant *X = ConstantInt::get(
          getContext(), APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
      return getMulExpr(getSCEV(BO->LHS), getSCEV(X), Flags);
    }
    break;
  case Instruction::AShr: {
    // AShr X, C, where C is a constant.
    ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS);
    if (!CI)
      break;

    Type *OuterTy = BO->LHS->getType();
    uint64_t BitWidth = getTypeSizeInBits(OuterTy);
    // If the shift count is not less than the bitwidth, the result of
    // the shift is undefined. Don't try to analyze it, because the
    // resolution chosen here may differ from the resolution chosen in
    // other parts of the compiler.
    if (CI->getValue().uge(BitWidth))
      break;

    if (CI->isZero())
      return getSCEV(BO->LHS); // shift by zero --> noop

    uint64_t AShrAmt = CI->getZExtValue();
    Type *TruncTy = IntegerType::get(getContext(), BitWidth - AShrAmt);

    Operator *L = dyn_cast<Operator>(BO->LHS);
    if (L && L->getOpcode() == Instruction::Shl) {
      // X = Shl A, n
      // Y = AShr X, m
      // Both n and m are constant.

      const SCEV *ShlOp0SCEV = getSCEV(L->getOperand(0));
      if (L->getOperand(1) == BO->RHS)
        // For a two-shift sext-inreg, i.e. n = m,
        // use sext(trunc(x)) as the SCEV expression.
        return getSignExtendExpr(
            getTruncateExpr(ShlOp0SCEV, TruncTy), OuterTy);

      ConstantInt *ShlAmtCI = dyn_cast<ConstantInt>(L->getOperand(1));
      if (ShlAmtCI && ShlAmtCI->getValue().ult(BitWidth)) {
        uint64_t ShlAmt = ShlAmtCI->getZExtValue();
        if (ShlAmt > AShrAmt) {
          // When n > m, use sext(mul(trunc(x), 2^(n-m)))) as the SCEV
          // expression. We already checked that ShlAmt < BitWidth, so
          // the multiplier, 1 << (ShlAmt - AShrAmt), fits into TruncTy as
          // ShlAmt - AShrAmt < Amt.
          APInt Mul = APInt::getOneBitSet(BitWidth - AShrAmt,
                                          ShlAmt - AShrAmt);
          return getSignExtendExpr(
              getMulExpr(getTruncateExpr(ShlOp0SCEV, TruncTy),
              getConstant(Mul)), OuterTy);
        }
      }
    }
    break;
  }
  }
  }
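  // Two illustrations of the rewrites above, on i32:
  //   ashr (shl %x, 24), 24 --> sext i8 (trunc %x to i8) to i32        (n == m)
  //   ashr (shl %x, 26), 24 --> sext i8 (4 * (trunc %x to i8)) to i32  (n > m)
  // where the multiplier 4 is 1 << (26 - 24), computed in the 8-bit type.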
  switch (U->getOpcode()) {
  case Instruction::Trunc:
    return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType());

  case Instruction::ZExt:
    return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType());

  case Instruction::SExt:
    if (auto BO = MatchBinaryOp(U->getOperand(0), DT)) {
      // The NSW flag of a subtract does not always survive the conversion to
      // A + (-1)*B. By pushing sign extension onto its operands we are much
      // more likely to preserve NSW and allow later AddRec optimisations.
      //
      // NOTE: This is effectively duplicating this logic from getSignExtend:
      //   sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw>
      // but by that point the NSW information has potentially been lost.
      if (BO->Opcode == Instruction::Sub && BO->IsNSW) {
        Type *Ty = U->getType();
        auto *V1 = getSignExtendExpr(getSCEV(BO->LHS), Ty);
        auto *V2 = getSignExtendExpr(getSCEV(BO->RHS), Ty);
        return getMinusSCEV(V1, V2, SCEV::FlagNSW);
      }
    }
    return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType());

  case Instruction::BitCast:
    // BitCasts are no-op casts so we just eliminate the cast.
    if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType()))
      return getSCEV(U->getOperand(0));
    break;

  // It's tempting to handle inttoptr and ptrtoint as no-ops, however this can
  // lead to pointer expressions which cannot safely be expanded to GEPs,
  // because ScalarEvolution doesn't respect the GEP aliasing rules when
  // simplifying integer expressions.

  case Instruction::GetElementPtr:
    return createNodeForGEP(cast<GEPOperator>(U));

  case Instruction::PHI:
    return createNodeForPHI(cast<PHINode>(U));

  case Instruction::Select:
    // U can also be a select constant expr, which we let fall through. Since
    // createNodeForSelect only works for a condition that is an `ICmpInst`,
    // and constant expressions cannot have instructions as operands, we'd
    // have returned getUnknown for a select constant expression anyway.
    if (isa<Instruction>(U))
      return createNodeForSelectOrPHI(cast<Instruction>(U), U->getOperand(0),
                                      U->getOperand(1), U->getOperand(2));
    break;

  case Instruction::Call:
  case Instruction::Invoke:
    if (Value *RV = CallSite(U).getReturnedArgOperand())
      return getSCEV(RV);
    break;
  }

  return getUnknown(V);
}
//===----------------------------------------------------------------------===//
//                   Iteration Count Computation Code
//===----------------------------------------------------------------------===//

static unsigned getConstantTripCount(const SCEVConstant *ExitCount) {
  if (!ExitCount)
    return 0;

  ConstantInt *ExitConst = ExitCount->getValue();

  // Guard against huge trip counts.
  if (ExitConst->getValue().getActiveBits() > 32)
    return 0;

  // In case of integer overflow, this returns 0, which is correct.
  return ((unsigned)ExitConst->getZExtValue()) + 1;
}
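// For example, an exit count (backedge-taken count) of 41 yields a trip
// count of 42, while an exit count of UINT32_MAX wraps the addition to 0,
// which callers already interpret as "no constant trip count".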
unsigned ScalarEvolution::getSmallConstantTripCount(const Loop *L) {
  if (BasicBlock *ExitingBB = L->getExitingBlock())
    return getSmallConstantTripCount(L, ExitingBB);

  // No trip count information for multiple exits.
  return 0;
}

unsigned ScalarEvolution::getSmallConstantTripCount(const Loop *L,
                                                    BasicBlock *ExitingBlock) {
  assert(ExitingBlock && "Must pass a non-null exiting block!");
  assert(L->isLoopExiting(ExitingBlock) &&
         "Exiting block must actually branch out of the loop!");
  const SCEVConstant *ExitCount =
      dyn_cast<SCEVConstant>(getExitCount(L, ExitingBlock));
  return getConstantTripCount(ExitCount);
}
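// A sketch of typical usage (the threshold and helper below are hypothetical,
// for illustration only):
//
//   if (unsigned TC = SE.getSmallConstantTripCount(L))
//     if (TC <= SmallTripCountThreshold) // hypothetical tuning knob
//       fullyUnroll(L, TC);              // hypothetical transform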
unsigned ScalarEvolution::getSmallConstantMaxTripCount(const Loop *L) {
  const auto *MaxExitCount =
      dyn_cast<SCEVConstant>(getMaxBackedgeTakenCount(L));
  return getConstantTripCount(MaxExitCount);
}

unsigned ScalarEvolution::getSmallConstantTripMultiple(const Loop *L) {
  if (BasicBlock *ExitingBB = L->getExitingBlock())
    return getSmallConstantTripMultiple(L, ExitingBB);

  // No trip multiple information for multiple exits.
  return 0;
}
/// Returns the largest constant divisor of the trip count of this loop as a
/// normal unsigned value, if possible. This means that the actual trip count
/// is always a multiple of the returned value (don't forget the trip count
/// could very well be zero as well!).
///
/// Returns 1 if the trip count is unknown or not guaranteed to be the
/// multiple of a constant (which is also the case if the trip count is simply
/// constant; use getSmallConstantTripCount for that case). It will also
/// return 1 if the trip count is very large (>= 2^32).
///
/// As explained in the comments for getSmallConstantTripCount, this assumes
/// that control exits the loop via ExitingBlock.
unsigned
ScalarEvolution::getSmallConstantTripMultiple(const Loop *L,
                                              BasicBlock *ExitingBlock) {
  assert(ExitingBlock && "Must pass a non-null exiting block!");
  assert(L->isLoopExiting(ExitingBlock) &&
         "Exiting block must actually branch out of the loop!");
  const SCEV *ExitCount = getExitCount(L, ExitingBlock);
  if (ExitCount == getCouldNotCompute())
    return 1;

  // Get the trip count from the BE count by adding 1.
  const SCEV *TCExpr = getAddExpr(ExitCount, getOne(ExitCount->getType()));

  const SCEVConstant *TC = dyn_cast<SCEVConstant>(TCExpr);
  if (!TC)
    // Attempt to factor more general cases. Returns the greatest power of
    // two divisor. If overflow happens, the trip count expression is still
    // divisible by the greatest power of 2 divisor returned.
    return 1U << std::min((uint32_t)31, GetMinTrailingZeros(TCExpr));

  ConstantInt *Result = TC->getValue();

  // Guard against huge trip counts (this requires checking for zero to
  // handle the case where the trip count == -1 and the addition wraps).
  if (!Result || Result->getValue().getActiveBits() > 32 ||
      Result->getValue().getActiveBits() == 0)
    return 1;

  return (unsigned)Result->getZExtValue();
}
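// For example, if TCExpr folds to the constant 24 the result is 24; if it
// stays symbolic but is known to equal 8 * %n, GetMinTrailingZeros reports 3
// and the result is 8, the largest power of two guaranteed to divide the
// count.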
/// Get the expression for the number of loop iterations for which this loop is
/// guaranteed not to exit via ExitingBlock. Otherwise return
/// SCEVCouldNotCompute.
const SCEV *ScalarEvolution::getExitCount(const Loop *L,
                                          BasicBlock *ExitingBlock) {
  return getBackedgeTakenInfo(L).getExact(ExitingBlock, this);
}

const SCEV *
ScalarEvolution::getPredicatedBackedgeTakenCount(const Loop *L,
                                                 SCEVUnionPredicate &Preds) {
  return getPredicatedBackedgeTakenInfo(L).getExact(L, this, &Preds);
}

const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L) {
  return getBackedgeTakenInfo(L).getExact(L, this);
}

/// Similar to getBackedgeTakenCount, except return the least SCEV value that
/// is known never to be less than the actual backedge taken count.
const SCEV *ScalarEvolution::getMaxBackedgeTakenCount(const Loop *L) {
  return getBackedgeTakenInfo(L).getMax(this);
}

bool ScalarEvolution::isBackedgeTakenCountMaxOrZero(const Loop *L) {
  return getBackedgeTakenInfo(L).isMaxOrZero(this);
}
/// Push PHI nodes in the header of the given loop onto the given Worklist.
static void
PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) {
  BasicBlock *Header = L->getHeader();

  // Push all Loop-header PHIs onto the Worklist stack.
  for (PHINode &PN : Header->phis())
    Worklist.push_back(&PN);
}
const ScalarEvolution::BackedgeTakenInfo &
ScalarEvolution::getPredicatedBackedgeTakenInfo(const Loop *L) {
  auto &BTI = getBackedgeTakenInfo(L);
  if (BTI.hasFullInfo())
    return BTI;

  auto Pair = PredicatedBackedgeTakenCounts.insert({L, BackedgeTakenInfo()});
  if (!Pair.second)
    return Pair.first->second;

  BackedgeTakenInfo Result =
      computeBackedgeTakenCount(L, /*AllowPredicates=*/true);

  return PredicatedBackedgeTakenCounts.find(L)->second = std::move(Result);
}
const ScalarEvolution::BackedgeTakenInfo &
ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
  // Initially insert an invalid entry for this loop. If the insertion
  // succeeds, proceed to actually compute a backedge-taken count and
  // update the value. The temporary CouldNotCompute value tells SCEV
  // code elsewhere that it shouldn't attempt to request a new
  // backedge-taken count, which could result in infinite recursion.
  std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair =
      BackedgeTakenCounts.insert({L, BackedgeTakenInfo()});
  if (!Pair.second)
    return Pair.first->second;

  // computeBackedgeTakenCount may allocate memory for its result. Inserting it
  // into the BackedgeTakenCounts map transfers ownership. Otherwise, the result
  // must be cleared in this scope.
  BackedgeTakenInfo Result = computeBackedgeTakenCount(L);

  // In a release build the statistics are unused.
  (void)NumTripCountsComputed;
  (void)NumTripCountsNotComputed;
#if LLVM_ENABLE_STATS || !defined(NDEBUG)
  const SCEV *BEExact = Result.getExact(L, this);
  if (BEExact != getCouldNotCompute()) {
    assert(isLoopInvariant(BEExact, L) &&
           isLoopInvariant(Result.getMax(this), L) &&
           "Computed backedge-taken count isn't loop invariant for loop!");
    ++NumTripCountsComputed;
  } else if (Result.getMax(this) == getCouldNotCompute() &&
             isa<PHINode>(L->getHeader()->begin())) {
    // Only count loops that have phi nodes as not being computable.
    ++NumTripCountsNotComputed;
  }
#endif // LLVM_ENABLE_STATS || !defined(NDEBUG)

  // Now that we know more about the trip count for this loop, forget any
  // existing SCEV values for PHI nodes in this loop since they are only
  // conservative estimates made without the benefit of trip count
  // information. This is similar to the code in forgetLoop, except that
  // it handles SCEVUnknown PHI nodes specially.
  if (Result.hasAnyInfo()) {
    SmallVector<Instruction *, 16> Worklist;
    PushLoopPHIs(L, Worklist);

    SmallPtrSet<Instruction *, 8> Discovered;
    while (!Worklist.empty()) {
      Instruction *I = Worklist.pop_back_val();

      ValueExprMapType::iterator It =
          ValueExprMap.find_as(static_cast<Value *>(I));
      if (It != ValueExprMap.end()) {
        const SCEV *Old = It->second;

        // SCEVUnknown for a PHI either means that it has an unrecognized
        // structure, or it's a PHI that's in the process of being computed
        // by createNodeForPHI. In the former case, additional loop trip
        // count information isn't going to change anything. In the latter
        // case, createNodeForPHI will perform the necessary updates on its
        // own when it gets to that point.
        if (!isa<PHINode>(I) || !isa<SCEVUnknown>(Old)) {
          eraseValueFromMap(It->first);
          forgetMemoizedResults(Old);
        }
        if (PHINode *PN = dyn_cast<PHINode>(I))
          ConstantEvolutionLoopExitValue.erase(PN);
      }

      // Since we don't need to invalidate anything for correctness and we're
      // only invalidating to make SCEV's results more precise, we get to stop
      // early to avoid invalidating too much. This is especially important in
      // cases like:
      //
      //   %v = f(pn0, pn1) // pn0 and pn1 used through some other phi node
      //
      //   loop0:
      //     %pn0 = phi ...
      //   loop1:
      //     %pn1 = phi ...
      //
      // where both loop0 and loop1's backedge taken count uses the SCEV
      // expression for %v. If we don't have the early stop below then in cases
      // like the above, getBackedgeTakenInfo(loop1) will clear out the trip
      // count for loop0 and getBackedgeTakenInfo(loop0) will clear out the trip
      // count for loop1, effectively nullifying SCEV's trip count cache.
      for (auto *U : I->users())
        if (auto *I = dyn_cast<Instruction>(U)) {
          auto *LoopForUser = LI.getLoopFor(I->getParent());
          if (LoopForUser && L->contains(LoopForUser) &&
              Discovered.insert(I).second)
            Worklist.push_back(I);
        }
    }
  }

  // Re-lookup the insert position, since the call to
  // computeBackedgeTakenCount above could result in a
  // recursive call to getBackedgeTakenInfo (on a different
  // loop), which would invalidate the iterator computed
  // earlier.
  return BackedgeTakenCounts.find(L)->second = std::move(Result);
}
void ScalarEvolution::forgetLoop(const Loop *L) {
  // Drop any stored trip count value.
  auto RemoveLoopFromBackedgeMap =
      [](DenseMap<const Loop *, BackedgeTakenInfo> &Map, const Loop *L) {
        auto BTCPos = Map.find(L);
        if (BTCPos != Map.end()) {
          BTCPos->second.clear();
          Map.erase(BTCPos);
        }
      };

  SmallVector<const Loop *, 16> LoopWorklist(1, L);
  SmallVector<Instruction *, 32> Worklist;
  SmallPtrSet<Instruction *, 16> Visited;

  // Iterate over all the loops and sub-loops to drop SCEV information.
  while (!LoopWorklist.empty()) {
    auto *CurrL = LoopWorklist.pop_back_val();

    RemoveLoopFromBackedgeMap(BackedgeTakenCounts, CurrL);
    RemoveLoopFromBackedgeMap(PredicatedBackedgeTakenCounts, CurrL);

    // Drop information about predicated SCEV rewrites for this loop.
    for (auto I = PredicatedSCEVRewrites.begin();
         I != PredicatedSCEVRewrites.end();) {
      std::pair<const SCEV *, const Loop *> Entry = I->first;
      if (Entry.second == CurrL)
        PredicatedSCEVRewrites.erase(I++);
      else
        ++I;
    }

    auto LoopUsersItr = LoopUsers.find(CurrL);
    if (LoopUsersItr != LoopUsers.end()) {
      for (auto *S : LoopUsersItr->second)
        forgetMemoizedResults(S);
      LoopUsers.erase(LoopUsersItr);
    }

    // Drop information about expressions based on loop-header PHIs.
    PushLoopPHIs(CurrL, Worklist);

    while (!Worklist.empty()) {
      Instruction *I = Worklist.pop_back_val();
      if (!Visited.insert(I).second)
        continue;

      ValueExprMapType::iterator It =
          ValueExprMap.find_as(static_cast<Value *>(I));
      if (It != ValueExprMap.end()) {
        eraseValueFromMap(It->first);
        forgetMemoizedResults(It->second);
        if (PHINode *PN = dyn_cast<PHINode>(I))
          ConstantEvolutionLoopExitValue.erase(PN);
      }

      PushDefUseChildren(I, Worklist);
    }

    LoopPropertiesCache.erase(CurrL);
    // Forget all contained loops too, to avoid dangling entries in the
    // ValuesAtScopes map.
    LoopWorklist.append(CurrL->begin(), CurrL->end());
  }
}
void ScalarEvolution::forgetTopmostLoop(const Loop *L) {
  while (Loop *Parent = L->getParentLoop())
    L = Parent;
  forgetLoop(L);
}

void ScalarEvolution::forgetValue(Value *V) {
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return;

  // Drop information about expressions based on loop-header PHIs.
  SmallVector<Instruction *, 16> Worklist;
  Worklist.push_back(I);

  SmallPtrSet<Instruction *, 8> Visited;
  while (!Worklist.empty()) {
    I = Worklist.pop_back_val();
    if (!Visited.insert(I).second)
      continue;

    ValueExprMapType::iterator It =
        ValueExprMap.find_as(static_cast<Value *>(I));
    if (It != ValueExprMap.end()) {
      eraseValueFromMap(It->first);
      forgetMemoizedResults(It->second);
      if (PHINode *PN = dyn_cast<PHINode>(I))
        ConstantEvolutionLoopExitValue.erase(PN);
    }

    PushDefUseChildren(I, Worklist);
  }
}
/// Get the exact loop backedge taken count considering all loop exits. A
/// computable result can only be returned for loops with all exiting blocks
/// dominating the latch. howFarToZero assumes that the limit of each loop test
/// is never skipped. This is a valid assumption as long as the loop exits via
/// that test. For precise results, it is the caller's responsibility to
/// specify the relevant loop exiting block using getExact(ExitingBlock, SE).
const SCEV *
ScalarEvolution::BackedgeTakenInfo::getExact(const Loop *L, ScalarEvolution *SE,
                                             SCEVUnionPredicate *Preds) const {
  // If any exits were not computable, the loop is not computable.
  if (!isComplete() || ExitNotTaken.empty())
    return SE->getCouldNotCompute();

  const BasicBlock *Latch = L->getLoopLatch();
  // All exiting blocks we have collected must dominate the only backedge.
  if (!Latch)
    return SE->getCouldNotCompute();

  // All exiting blocks we have gathered dominate loop's latch, so exact trip
  // count is simply a minimum out of all these calculated exit counts.
  SmallVector<const SCEV *, 2> Ops;
  for (auto &ENT : ExitNotTaken) {
    const SCEV *BECount = ENT.ExactNotTaken;
    assert(BECount != SE->getCouldNotCompute() && "Bad exit SCEV!");
    assert(SE->DT.dominates(ENT.ExitingBlock, Latch) &&
           "We should only have known counts for exiting blocks that dominate "
           "latch!");

    Ops.push_back(BECount);

    if (Preds && !ENT.hasAlwaysTruePredicate())
      Preds->add(ENT.Predicate.get());

    assert((Preds || ENT.hasAlwaysTruePredicate()) &&
           "Predicate should be always true!");
  }

  return SE->getUMinFromMismatchedTypes(Ops);
}
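// For example, a loop with two computable exits whose not-taken counts are
// %n and 42 gets an exact backedge-taken count of umin(%n, 42): whichever
// exit fires first ends the loop.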
/// Get the exact not taken count for this loop exit.
const SCEV *
ScalarEvolution::BackedgeTakenInfo::getExact(BasicBlock *ExitingBlock,
                                             ScalarEvolution *SE) const {
  for (auto &ENT : ExitNotTaken)
    if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate())
      return ENT.ExactNotTaken;

  return SE->getCouldNotCompute();
}

/// getMax - Get the max backedge taken count for the loop.
const SCEV *
ScalarEvolution::BackedgeTakenInfo::getMax(ScalarEvolution *SE) const {
  auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) {
    return !ENT.hasAlwaysTruePredicate();
  };

  if (any_of(ExitNotTaken, PredicateNotAlwaysTrue) || !getMax())
    return SE->getCouldNotCompute();

  assert((isa<SCEVCouldNotCompute>(getMax()) || isa<SCEVConstant>(getMax())) &&
         "No point in having a non-constant max backedge taken count!");
  return getMax();
}

bool ScalarEvolution::BackedgeTakenInfo::isMaxOrZero(ScalarEvolution *SE) const {
  auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) {
    return !ENT.hasAlwaysTruePredicate();
  };
  return MaxOrZero && !any_of(ExitNotTaken, PredicateNotAlwaysTrue);
}

bool ScalarEvolution::BackedgeTakenInfo::hasOperand(const SCEV *S,
                                                    ScalarEvolution *SE) const {
  if (getMax() && getMax() != SE->getCouldNotCompute() &&
      SE->hasOperand(getMax(), S))
    return true;

  for (auto &ENT : ExitNotTaken)
    if (ENT.ExactNotTaken != SE->getCouldNotCompute() &&
        SE->hasOperand(ENT.ExactNotTaken, S))
      return true;

  return false;
}
ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E)
    : ExactNotTaken(E), MaxNotTaken(E) {
  assert((isa<SCEVCouldNotCompute>(MaxNotTaken) ||
          isa<SCEVConstant>(MaxNotTaken)) &&
         "No point in having a non-constant max backedge taken count!");
}

ScalarEvolution::ExitLimit::ExitLimit(
    const SCEV *E, const SCEV *M, bool MaxOrZero,
    ArrayRef<const SmallPtrSetImpl<const SCEVPredicate *> *> PredSetList)
    : ExactNotTaken(E), MaxNotTaken(M), MaxOrZero(MaxOrZero) {
  assert((isa<SCEVCouldNotCompute>(ExactNotTaken) ||
          !isa<SCEVCouldNotCompute>(MaxNotTaken)) &&
         "Exact is not allowed to be less precise than Max");
  assert((isa<SCEVCouldNotCompute>(MaxNotTaken) ||
          isa<SCEVConstant>(MaxNotTaken)) &&
         "No point in having a non-constant max backedge taken count!");
  for (auto *PredSet : PredSetList)
    for (auto *P : *PredSet)
      addPredicate(P);
}

ScalarEvolution::ExitLimit::ExitLimit(
    const SCEV *E, const SCEV *M, bool MaxOrZero,
    const SmallPtrSetImpl<const SCEVPredicate *> &PredSet)
    : ExitLimit(E, M, MaxOrZero, {&PredSet}) {
  assert((isa<SCEVCouldNotCompute>(MaxNotTaken) ||
          isa<SCEVConstant>(MaxNotTaken)) &&
         "No point in having a non-constant max backedge taken count!");
}

ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E, const SCEV *M,
                                      bool MaxOrZero)
    : ExitLimit(E, M, MaxOrZero, None) {
  assert((isa<SCEVCouldNotCompute>(MaxNotTaken) ||
          isa<SCEVConstant>(MaxNotTaken)) &&
         "No point in having a non-constant max backedge taken count!");
}
/// Allocate memory for BackedgeTakenInfo and copy the not-taken count of each
/// computable exit into a persistent ExitNotTakenInfo array.
ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo(
    ArrayRef<ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo>
        ExitCounts,
    bool Complete, const SCEV *MaxCount, bool MaxOrZero)
    : MaxAndComplete(MaxCount, Complete), MaxOrZero(MaxOrZero) {
  using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;

  ExitNotTaken.reserve(ExitCounts.size());
  std::transform(
      ExitCounts.begin(), ExitCounts.end(), std::back_inserter(ExitNotTaken),
      [&](const EdgeExitInfo &EEI) {
        BasicBlock *ExitBB = EEI.first;
        const ExitLimit &EL = EEI.second;
        if (EL.Predicates.empty())
          return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, nullptr);

        std::unique_ptr<SCEVUnionPredicate> Predicate(new SCEVUnionPredicate);
        for (auto *Pred : EL.Predicates)
          Predicate->add(Pred);

        return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, std::move(Predicate));
      });
  assert((isa<SCEVCouldNotCompute>(MaxCount) || isa<SCEVConstant>(MaxCount)) &&
         "No point in having a non-constant max backedge taken count!");
}

/// Invalidate this result and free the ExitNotTakenInfo array.
void ScalarEvolution::BackedgeTakenInfo::clear() {
  ExitNotTaken.clear();
}
/// Compute the number of times the backedge of the specified loop will
/// execute.
ScalarEvolution::BackedgeTakenInfo
ScalarEvolution::computeBackedgeTakenCount(const Loop *L,
                                           bool AllowPredicates) {
  SmallVector<BasicBlock *, 8> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;

  SmallVector<EdgeExitInfo, 4> ExitCounts;
  bool CouldComputeBECount = true;
  BasicBlock *Latch = L->getLoopLatch(); // may be NULL.
  const SCEV *MustExitMaxBECount = nullptr;
  const SCEV *MayExitMaxBECount = nullptr;
  bool MustExitMaxOrZero = false;

  // Compute the ExitLimit for each loop exit. Use this to populate ExitCounts
  // and compute maxBECount.
  // Do a union of all the predicates here.
  for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
    BasicBlock *ExitBB = ExitingBlocks[i];
    ExitLimit EL = computeExitLimit(L, ExitBB, AllowPredicates);

    assert((AllowPredicates || EL.Predicates.empty()) &&
           "Predicated exit limit when predicates are not allowed!");

    // 1. For each exit that can be computed, add an entry to ExitCounts.
    // CouldComputeBECount is true only if all exits can be computed.
    if (EL.ExactNotTaken == getCouldNotCompute())
      // We couldn't compute an exact value for this exit, so
      // we won't be able to compute an exact value for the loop.
      CouldComputeBECount = false;
    else
      ExitCounts.emplace_back(ExitBB, EL);

    // 2. Derive the loop's MaxBECount from each exit's max number of
    // non-exiting iterations. Partition the loop exits into two kinds:
    // LoopMustExits and LoopMayExits.
    //
    // If the exit dominates the loop latch, it is a LoopMustExit otherwise it
    // is a LoopMayExit. If any computable LoopMustExit is found, then
    // MaxBECount is the minimum EL.MaxNotTaken of computable
    // LoopMustExits. Otherwise, MaxBECount is conservatively the maximum
    // EL.MaxNotTaken, where CouldNotCompute is considered greater than any
    // computable EL.MaxNotTaken.
    if (EL.MaxNotTaken != getCouldNotCompute() && Latch &&
        DT.dominates(ExitBB, Latch)) {
      if (!MustExitMaxBECount) {
        MustExitMaxBECount = EL.MaxNotTaken;
        MustExitMaxOrZero = EL.MaxOrZero;
      } else {
        MustExitMaxBECount =
            getUMinFromMismatchedTypes(MustExitMaxBECount, EL.MaxNotTaken);
      }
    } else if (MayExitMaxBECount != getCouldNotCompute()) {
      if (!MayExitMaxBECount || EL.MaxNotTaken == getCouldNotCompute())
        MayExitMaxBECount = EL.MaxNotTaken;
      else {
        MayExitMaxBECount =
            getUMaxFromMismatchedTypes(MayExitMaxBECount, EL.MaxNotTaken);
      }
    }
  }
  const SCEV *MaxBECount = MustExitMaxBECount ? MustExitMaxBECount :
    (MayExitMaxBECount ? MayExitMaxBECount : getCouldNotCompute());
  // The loop backedge will be taken the maximum or zero times if there's
  // a single exit that must be taken the maximum or zero times.
  bool MaxOrZero = (MustExitMaxOrZero && ExitingBlocks.size() == 1);
  return BackedgeTakenInfo(std::move(ExitCounts), CouldComputeBECount,
                           MaxBECount, MaxOrZero);
}
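// For example, if exit A dominates the latch with MaxNotTaken = 10 while
// exit B is conditional with MaxNotTaken = 100, the must-exit rule above
// yields MaxBECount = 10; with only may-exits the maximum (100) would be
// used instead.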
ScalarEvolution::ExitLimit
ScalarEvolution::computeExitLimit(const Loop *L, BasicBlock *ExitingBlock,
                                  bool AllowPredicates) {
  assert(L->contains(ExitingBlock) && "Exit count for non-loop block?");
  // If our exiting block does not dominate the latch, then its connection
  // with the loop's exit limit may be far from trivial.
  const BasicBlock *Latch = L->getLoopLatch();
  if (!Latch || !DT.dominates(ExitingBlock, Latch))
    return getCouldNotCompute();

  bool IsOnlyExit = (L->getExitingBlock() != nullptr);
  Instruction *Term = ExitingBlock->getTerminator();
  if (BranchInst *BI = dyn_cast<BranchInst>(Term)) {
    assert(BI->isConditional() && "If unconditional, it can't be in loop!");
    bool ExitIfTrue = !L->contains(BI->getSuccessor(0));
    assert(ExitIfTrue == L->contains(BI->getSuccessor(1)) &&
           "It should have one successor in loop and one exit block!");
    // Proceed to the next level to examine the exit condition expression.
    return computeExitLimitFromCond(
        L, BI->getCondition(), ExitIfTrue,
        /*ControlsExit=*/IsOnlyExit, AllowPredicates);
  }

  if (SwitchInst *SI = dyn_cast<SwitchInst>(Term)) {
    // For switch, make sure that there is a single exit from the loop.
    BasicBlock *Exit = nullptr;
    for (auto *SBB : successors(ExitingBlock))
      if (!L->contains(SBB)) {
        if (Exit) // Multiple exit successors.
          return getCouldNotCompute();
        Exit = SBB;
      }
    assert(Exit && "Exiting block must have at least one exit");
    return computeExitLimitFromSingleExitSwitch(L, SI, Exit,
                                                /*ControlsExit=*/IsOnlyExit);
  }

  return getCouldNotCompute();
}
ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCond(
    const Loop *L, Value *ExitCond, bool ExitIfTrue,
    bool ControlsExit, bool AllowPredicates) {
  ScalarEvolution::ExitLimitCacheTy Cache(L, ExitIfTrue, AllowPredicates);
  return computeExitLimitFromCondCached(Cache, L, ExitCond, ExitIfTrue,
                                        ControlsExit, AllowPredicates);
}

Optional<ScalarEvolution::ExitLimit>
ScalarEvolution::ExitLimitCache::find(const Loop *L, Value *ExitCond,
                                      bool ExitIfTrue, bool ControlsExit,
                                      bool AllowPredicates) {
  (void)this->L;
  (void)this->ExitIfTrue;
  (void)this->AllowPredicates;

  assert(this->L == L && this->ExitIfTrue == ExitIfTrue &&
         this->AllowPredicates == AllowPredicates &&
         "Variance in assumed invariant key components!");
  auto Itr = TripCountMap.find({ExitCond, ControlsExit});
  if (Itr == TripCountMap.end())
    return None;
  return Itr->second;
}

void ScalarEvolution::ExitLimitCache::insert(const Loop *L, Value *ExitCond,
                                             bool ExitIfTrue,
                                             bool ControlsExit,
                                             bool AllowPredicates,
                                             const ExitLimit &EL) {
  assert(this->L == L && this->ExitIfTrue == ExitIfTrue &&
         this->AllowPredicates == AllowPredicates &&
         "Variance in assumed invariant key components!");

  auto InsertResult = TripCountMap.insert({{ExitCond, ControlsExit}, EL});
  assert(InsertResult.second && "Expected successful insertion!");
  (void)InsertResult;
}

ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondCached(
    ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue,
    bool ControlsExit, bool AllowPredicates) {

  if (auto MaybeEL =
          Cache.find(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates))
    return *MaybeEL;

  ExitLimit EL = computeExitLimitFromCondImpl(Cache, L, ExitCond, ExitIfTrue,
                                              ControlsExit, AllowPredicates);
  Cache.insert(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates, EL);
  return EL;
}
ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondImpl(
    ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue,
    bool ControlsExit, bool AllowPredicates) {
  // Check if the controlling expression for this loop is an And or Or.
  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(ExitCond)) {
    if (BO->getOpcode() == Instruction::And) {
      // Recurse on the operands of the and.
      bool EitherMayExit = !ExitIfTrue;
      ExitLimit EL0 = computeExitLimitFromCondCached(
          Cache, L, BO->getOperand(0), ExitIfTrue,
          ControlsExit && !EitherMayExit, AllowPredicates);
      ExitLimit EL1 = computeExitLimitFromCondCached(
          Cache, L, BO->getOperand(1), ExitIfTrue,
          ControlsExit && !EitherMayExit, AllowPredicates);
      const SCEV *BECount = getCouldNotCompute();
      const SCEV *MaxBECount = getCouldNotCompute();
      if (EitherMayExit) {
        // Both conditions must be true for the loop to continue executing.
        // Choose the less conservative count.
        if (EL0.ExactNotTaken == getCouldNotCompute() ||
            EL1.ExactNotTaken == getCouldNotCompute())
          BECount = getCouldNotCompute();
        else
          BECount =
              getUMinFromMismatchedTypes(EL0.ExactNotTaken, EL1.ExactNotTaken);
        if (EL0.MaxNotTaken == getCouldNotCompute())
          MaxBECount = EL1.MaxNotTaken;
        else if (EL1.MaxNotTaken == getCouldNotCompute())
          MaxBECount = EL0.MaxNotTaken;
        else
          MaxBECount =
              getUMinFromMismatchedTypes(EL0.MaxNotTaken, EL1.MaxNotTaken);
      } else {
        // Both conditions must be true at the same time for the loop to exit.
        // For now, be conservative.
        if (EL0.MaxNotTaken == EL1.MaxNotTaken)
          MaxBECount = EL0.MaxNotTaken;
        if (EL0.ExactNotTaken == EL1.ExactNotTaken)
          BECount = EL0.ExactNotTaken;
      }

      // There are cases (e.g. PR26207) where computeExitLimitFromCond is able
      // to be more aggressive when computing BECount than when computing
      // MaxBECount. In these cases it is possible for EL0.ExactNotTaken and
      // EL1.ExactNotTaken to match, but for EL0.MaxNotTaken and
      // EL1.MaxNotTaken to not.
      if (isa<SCEVCouldNotCompute>(MaxBECount) &&
          !isa<SCEVCouldNotCompute>(BECount))
        MaxBECount = getConstant(getUnsignedRangeMax(BECount));

      return ExitLimit(BECount, MaxBECount, false,
                       {&EL0.Predicates, &EL1.Predicates});
    }
    if (BO->getOpcode() == Instruction::Or) {
      // Recurse on the operands of the or.
      bool EitherMayExit = ExitIfTrue;
      ExitLimit EL0 = computeExitLimitFromCondCached(
          Cache, L, BO->getOperand(0), ExitIfTrue,
          ControlsExit && !EitherMayExit, AllowPredicates);
      ExitLimit EL1 = computeExitLimitFromCondCached(
          Cache, L, BO->getOperand(1), ExitIfTrue,
          ControlsExit && !EitherMayExit, AllowPredicates);
      const SCEV *BECount = getCouldNotCompute();
      const SCEV *MaxBECount = getCouldNotCompute();
      if (EitherMayExit) {
        // Both conditions must be false for the loop to continue executing.
        // Choose the less conservative count.
        if (EL0.ExactNotTaken == getCouldNotCompute() ||
            EL1.ExactNotTaken == getCouldNotCompute())
          BECount = getCouldNotCompute();
        else
          BECount =
              getUMinFromMismatchedTypes(EL0.ExactNotTaken, EL1.ExactNotTaken);
        if (EL0.MaxNotTaken == getCouldNotCompute())
          MaxBECount = EL1.MaxNotTaken;
        else if (EL1.MaxNotTaken == getCouldNotCompute())
          MaxBECount = EL0.MaxNotTaken;
        else
          MaxBECount =
              getUMinFromMismatchedTypes(EL0.MaxNotTaken, EL1.MaxNotTaken);
      } else {
        // Both conditions must be false at the same time for the loop to exit.
        // For now, be conservative.
        if (EL0.MaxNotTaken == EL1.MaxNotTaken)
          MaxBECount = EL0.MaxNotTaken;
        if (EL0.ExactNotTaken == EL1.ExactNotTaken)
          BECount = EL0.ExactNotTaken;
      }

      // There are cases (e.g. PR26207) where computeExitLimitFromCond is able
      // to be more aggressive when computing BECount than when computing
      // MaxBECount. In these cases it is possible for EL0.ExactNotTaken and
      // EL1.ExactNotTaken to match, but for EL0.MaxNotTaken and
      // EL1.MaxNotTaken to not.
      if (isa<SCEVCouldNotCompute>(MaxBECount) &&
          !isa<SCEVCouldNotCompute>(BECount))
        MaxBECount = getConstant(getUnsignedRangeMax(BECount));

      return ExitLimit(BECount, MaxBECount, false,
                       {&EL0.Predicates, &EL1.Predicates});
    }
  }

  // With an icmp, it may be feasible to compute an exact backedge-taken count.
  // Proceed to the next level to examine the icmp.
  if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond)) {
    ExitLimit EL =
        computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit);
    if (EL.hasFullInfo() || !AllowPredicates)
      return EL;

    // Try again, but use SCEV predicates this time.
    return computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit,
                                    /*AllowPredicates=*/true);
  }

  // Check for a constant condition. These are normally stripped out by
  // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to
  // preserve the CFG and is temporarily leaving constant conditions
  // in place.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) {
    if (ExitIfTrue == !CI->getZExtValue())
      // The backedge is always taken.
      return getCouldNotCompute();
    else
      // The backedge is never taken.
      return getZero(CI->getType());
  }

  // If it's not an integer or pointer comparison then compute it the hard way.
  return computeExitCountExhaustively(L, ExitCond, ExitIfTrue);
}
ScalarEvolution::ExitLimit
ScalarEvolution::computeExitLimitFromICmp(const Loop *L,
                                          ICmpInst *ExitCond,
                                          bool ExitIfTrue,
                                          bool ControlsExit,
                                          bool AllowPredicates) {
  // If the condition was exit on true, convert the condition to exit on false.
  ICmpInst::Predicate Pred;
  if (!ExitIfTrue)
    Pred = ExitCond->getPredicate();
  else
    Pred = ExitCond->getInversePredicate();
  const ICmpInst::Predicate OriginalPred = Pred;

  // Handle common loops like: for (X = "string"; *X; ++X)
  if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0)))
    if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) {
      ExitLimit ItCnt =
          computeLoadConstantCompareExitLimit(LI, RHS, L, Pred);
      if (ItCnt.hasAnyInfo())
        return ItCnt;
    }

  const SCEV *LHS = getSCEV(ExitCond->getOperand(0));
  const SCEV *RHS = getSCEV(ExitCond->getOperand(1));

  // Try to evaluate any dependencies out of the loop.
  LHS = getSCEVAtScope(LHS, L);
  RHS = getSCEVAtScope(RHS, L);

  // At this point, we would like to compute how many iterations of the
  // loop the predicate will return true for these inputs.
  if (isLoopInvariant(LHS, L) && !isLoopInvariant(RHS, L)) {
    // If there is a loop-invariant, force it into the RHS.
    std::swap(LHS, RHS);
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }

  // Simplify the operands before analyzing them.
  (void)SimplifyICmpOperands(Pred, LHS, RHS);

  // If we have a comparison of a chrec against a constant, try to use value
  // ranges to answer this query.
  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS))
    if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS))
      if (AddRec->getLoop() == L) {
        // Form the constant range.
        ConstantRange CompRange =
            ConstantRange::makeExactICmpRegion(Pred, RHSC->getAPInt());

        const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this);
        if (!isa<SCEVCouldNotCompute>(Ret)) return Ret;
      }

  switch (Pred) {
  case ICmpInst::ICMP_NE: { // while (X != Y)
    // Convert to: while (X-Y != 0)
    ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit,
                                AllowPredicates);
    if (EL.hasAnyInfo()) return EL;
    break;
  }
  case ICmpInst::ICMP_EQ: { // while (X == Y)
    // Convert to: while (X-Y == 0)
    ExitLimit EL = howFarToNonZero(getMinusSCEV(LHS, RHS), L);
    if (EL.hasAnyInfo()) return EL;
    break;
  }
  case ICmpInst::ICMP_SLT:
  case ICmpInst::ICMP_ULT: { // while (X < Y)
    bool IsSigned = Pred == ICmpInst::ICMP_SLT;
    ExitLimit EL = howManyLessThans(LHS, RHS, L, IsSigned, ControlsExit,
                                    AllowPredicates);
    if (EL.hasAnyInfo()) return EL;
    break;
  }
  case ICmpInst::ICMP_SGT:
  case ICmpInst::ICMP_UGT: { // while (X > Y)
    bool IsSigned = Pred == ICmpInst::ICMP_SGT;
    ExitLimit EL =
        howManyGreaterThans(LHS, RHS, L, IsSigned, ControlsExit,
                            AllowPredicates);
    if (EL.hasAnyInfo()) return EL;
    break;
  }
  default:
    break;
  }

  auto *ExhaustiveCount =
      computeExitCountExhaustively(L, ExitCond, ExitIfTrue);

  if (!isa<SCEVCouldNotCompute>(ExhaustiveCount))
    return ExhaustiveCount;

  return computeShiftCompareExitLimit(ExitCond->getOperand(0),
                                      ExitCond->getOperand(1), L, OriginalPred);
}
ScalarEvolution::ExitLimit
ScalarEvolution::computeExitLimitFromSingleExitSwitch(const Loop *L,
                                                      SwitchInst *Switch,
                                                      BasicBlock *ExitingBlock,
                                                      bool ControlsExit) {
  assert(!L->contains(ExitingBlock) && "Not an exiting block!");

  // Give up if the exit is the default dest of a switch.
  if (Switch->getDefaultDest() == ExitingBlock)
    return getCouldNotCompute();

  assert(L->contains(Switch->getDefaultDest()) &&
         "Default case must not exit the loop!");
  const SCEV *LHS = getSCEVAtScope(Switch->getCondition(), L);
  const SCEV *RHS = getConstant(Switch->findCaseDest(ExitingBlock));

  // while (X != Y) --> while (X-Y != 0)
  ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit);
  if (EL.hasAnyInfo())
    return EL;

  return getCouldNotCompute();
}
static ConstantInt *
EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C,
                                ScalarEvolution &SE) {
  const SCEV *InVal = SE.getConstant(C);
  const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE);
  assert(isa<SCEVConstant>(Val) &&
         "Evaluation of SCEV at constant didn't fold correctly?");
  return cast<SCEVConstant>(Val)->getValue();
}
/// Given an exit condition of 'icmp op load X, cst', try to see if we can
/// compute the backedge execution count.
ScalarEvolution::ExitLimit
ScalarEvolution::computeLoadConstantCompareExitLimit(
    LoadInst *LI,
    Constant *RHS,
    const Loop *L,
    ICmpInst::Predicate predicate) {
  if (LI->isVolatile()) return getCouldNotCompute();

  // Check to see if the loaded pointer is a getelementptr of a global.
  // TODO: Use SCEV instead of manually grubbing with GEPs.
  GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0));
  if (!GEP) return getCouldNotCompute();

  // Make sure that it is really a constant global we are gepping, with an
  // initializer, and make sure the first IDX is really 0.
  GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0));
  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
      GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) ||
      !cast<Constant>(GEP->getOperand(1))->isNullValue())
    return getCouldNotCompute();

  // Okay, we allow one non-constant index into the GEP instruction.
  Value *VarIdx = nullptr;
  std::vector<Constant *> Indexes;
  unsigned VarIdxNum = 0;
  for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i)
    if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
      Indexes.push_back(CI);
    } else if (!isa<ConstantInt>(GEP->getOperand(i))) {
      if (VarIdx) return getCouldNotCompute(); // Multiple non-constant idx's.
      VarIdx = GEP->getOperand(i);
      VarIdxNum = i - 2;
      Indexes.push_back(nullptr);
    }

  // Loop-invariant loads may be a byproduct of loop optimization. Skip them.
  if (!VarIdx)
    return getCouldNotCompute();

  // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant.
  // Check to see if X is a loop variant variable value now.
  const SCEV *Idx = getSCEV(VarIdx);
  Idx = getSCEVAtScope(Idx, L);

  // We can only recognize very limited forms of loop index expressions, in
  // particular, only affine AddRec's like {C1,+,C2}.
  const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx);
  if (!IdxExpr || !IdxExpr->isAffine() || isLoopInvariant(IdxExpr, L) ||
      !isa<SCEVConstant>(IdxExpr->getOperand(0)) ||
      !isa<SCEVConstant>(IdxExpr->getOperand(1)))
    return getCouldNotCompute();

  unsigned MaxSteps = MaxBruteForceIterations;
  for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) {
    ConstantInt *ItCst = ConstantInt::get(
        cast<IntegerType>(IdxExpr->getType()), IterationNum);
    ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this);

    // Form the GEP offset.
    Indexes[VarIdxNum] = Val;

    Constant *Result = ConstantFoldLoadThroughGEPIndices(GV->getInitializer(),
                                                         Indexes);
    if (!Result) break; // Cannot compute!

    // Evaluate the condition for this iteration.
    Result = ConstantExpr::getICmp(predicate, Result, RHS);
    if (!isa<ConstantInt>(Result)) break; // Couldn't decide for sure
    if (cast<ConstantInt>(Result)->getValue().isMinValue()) {
      ++NumArrayLenItCounts;
      return getConstant(ItCst); // Found terminating iteration!
    }
  }
  return getCouldNotCompute();
}
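// The canonical shape the routine above recognizes looks like (illustrative):
//   @table = internal constant [16 x i8] [...]
//   for (i = 0; table[i] != 0; ++i) { ... }
// Each iteration's load folds to a constant, and the first iteration at
// which the folded comparison is false is returned as the exact exit count.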
ScalarEvolution::ExitLimit ScalarEvolution::computeShiftCompareExitLimit(
    Value *LHS, Value *RHSV, const Loop *L, ICmpInst::Predicate Pred) {
  ConstantInt *RHS = dyn_cast<ConstantInt>(RHSV);
  if (!RHS)
    return getCouldNotCompute();

  const BasicBlock *Latch = L->getLoopLatch();
  if (!Latch)
    return getCouldNotCompute();

  const BasicBlock *Predecessor = L->getLoopPredecessor();
  if (!Predecessor)
    return getCouldNotCompute();

  // Return true if V is of the form "LHS `shift_op` <positive constant>".
  // Return LHS in OutLHS and shift_op in OutOpCode.
  auto MatchPositiveShift =
      [](Value *V, Value *&OutLHS, Instruction::BinaryOps &OutOpCode) {

    using namespace PatternMatch;

    ConstantInt *ShiftAmt;
    if (match(V, m_LShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
      OutOpCode = Instruction::LShr;
    else if (match(V, m_AShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
      OutOpCode = Instruction::AShr;
    else if (match(V, m_Shl(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
      OutOpCode = Instruction::Shl;
    else
      return false;

    return ShiftAmt->getValue().isStrictlyPositive();
  };

  // Recognize a "shift recurrence" either of the form %iv or of %iv.shifted in
  // the loop below.
  //
  //   %iv = phi i32 [ %iv.shifted, %loop ], [ %val, %preheader ]
  //   %iv.shifted = lshr i32 %iv, <positive constant>
  //
  // Return true on a successful match. Return the corresponding PHI node (%iv
  // above) in PNOut and the opcode of the shift operation in OpCodeOut.
  auto MatchShiftRecurrence =
      [&](Value *V, PHINode *&PNOut, Instruction::BinaryOps &OpCodeOut) {
    Optional<Instruction::BinaryOps> PostShiftOpCode;

    {
      Instruction::BinaryOps OpC;
      Value *V;

      // If we encounter a shift instruction, "peel off" the shift operation,
      // and remember that we did so. Later when we inspect %iv's backedge
      // value, we will make sure that the backedge value uses the same
      // operation.
      //
      // Note: the peeled shift operation does not have to be the same
      // instruction as the one feeding into the PHI's backedge value. We only
      // really care about it being the same *kind* of shift instruction --
      // that's all that is required for our later inferences to hold.
      if (MatchPositiveShift(LHS, V, OpC)) {
        PostShiftOpCode = OpC;
        LHS = V;
      }
    }

    PNOut = dyn_cast<PHINode>(LHS);
    if (!PNOut || PNOut->getParent() != L->getHeader())
      return false;

    Value *BEValue = PNOut->getIncomingValueForBlock(Latch);
    Value *OpLHS;

    return
        // The backedge value for the PHI node must be a shift by a positive
        // amount
        MatchPositiveShift(BEValue, OpLHS, OpCodeOut) &&

        // of the PHI node itself
        OpLHS == PNOut &&

        // and the kind of shift should match the kind of shift we peeled
        // off, if we matched one.
        (!PostShiftOpCode.hasValue() || *PostShiftOpCode == OpCodeOut);
  };

  PHINode *PN;
  Instruction::BinaryOps OpCode;
  if (!MatchShiftRecurrence(LHS, PN, OpCode))
    return getCouldNotCompute();

  const DataLayout &DL = getDataLayout();

  // The key rationale for this optimization is that for some kinds of shift
  // recurrences, the value of the recurrence "stabilizes" to either 0 or -1
  // within a finite number of iterations. If the condition guarding the
  // backedge (in the sense that the backedge is taken if the condition is
  // true) is false for the value the shift recurrence stabilizes to, then we
  // know that the backedge is taken only a finite number of times.

  ConstantInt *StableValue = nullptr;
  switch (OpCode) {
  default:
    llvm_unreachable("Impossible case!");

  case Instruction::AShr: {
    // {K,ashr,<positive-constant>} stabilizes to signum(K) in at most
    // bitwidth(K) iterations.
    Value *FirstValue = PN->getIncomingValueForBlock(Predecessor);
    KnownBits Known = computeKnownBits(FirstValue, DL, 0, nullptr,
                                       Predecessor->getTerminator(), &DT);
    auto *Ty = cast<IntegerType>(RHS->getType());
    if (Known.isNonNegative())
      StableValue = ConstantInt::get(Ty, 0);
    else if (Known.isNegative())
      StableValue = ConstantInt::get(Ty, -1, true);
    else
      return getCouldNotCompute();

    break;
  }
  case Instruction::LShr:
  case Instruction::Shl:
    // Both {K,lshr,<positive-constant>} and {K,shl,<positive-constant>}
    // stabilize to 0 in at most bitwidth(K) iterations.
    StableValue = ConstantInt::get(cast<IntegerType>(RHS->getType()), 0);
    break;
  }

  auto *Result =
      ConstantFoldCompareInstOperands(Pred, StableValue, RHS, DL, &TLI);
  assert(Result->getType()->isIntegerTy(1) &&
         "Otherwise cannot be an operand to a branch instruction");

  if (Result->isZeroValue()) {
    unsigned BitWidth = getTypeSizeInBits(RHS->getType());
    const SCEV *UpperBound =
        getConstant(getEffectiveSCEVType(RHS->getType()), BitWidth);
    return ExitLimit(getCouldNotCompute(), UpperBound, false);
  }

  return getCouldNotCompute();
}
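// For example, in
//   do { %x = lshr i32 %x, 1 } while (%x != 0)
// the recurrence stabilizes to 0 and the guard %x != 0 is false there, so
// the backedge executes at most bitwidth(%x) times; that bound is returned
// as the max count even though the exact count remains unknown.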
/// Return true if we can constant fold an instruction of the specified type,
/// assuming that all operands were constants.
static bool CanConstantFold(const Instruction *I) {
  if (isa<BinaryOperator>(I) || isa<CmpInst>(I) ||
      isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) ||
      isa<LoadInst>(I))
    return true;

  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (const Function *F = CI->getCalledFunction())
      return canConstantFoldCallTo(CI, F);
  return false;
}

/// Determine whether this instruction can constant evolve within this loop
/// assuming its operands can all constant evolve.
static bool canConstantEvolve(Instruction *I, const Loop *L) {
  // An instruction outside of the loop can't be derived from a loop PHI.
  if (!L->contains(I)) return false;

  if (isa<PHINode>(I)) {
    // We don't currently keep track of the control flow needed to evaluate
    // PHIs, so we cannot handle PHIs inside of loops.
    return L->getHeader() == I->getParent();
  }

  // If we won't be able to constant fold this expression even if the operands
  // are constants, bail early.
  return CanConstantFold(I);
}
/// getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by
/// recursing through each instruction operand until reaching a loop header phi.
static PHINode *
getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L,
                               DenseMap<Instruction *, PHINode *> &PHIMap,
                               unsigned Depth) {
  if (Depth > MaxConstantEvolvingDepth)
    return nullptr;

  // Otherwise, we can evaluate this instruction if all of its operands are
  // constant or derived from a PHI node themselves.
  PHINode *PHI = nullptr;
  for (Value *Op : UseInst->operands()) {
    if (isa<Constant>(Op)) continue;

    Instruction *OpInst = dyn_cast<Instruction>(Op);
    if (!OpInst || !canConstantEvolve(OpInst, L)) return nullptr;

    PHINode *P = dyn_cast<PHINode>(OpInst);
    if (!P)
      // If this operand is already visited, reuse the prior result.
      // We may have P != PHI if this is the deepest point at which the
      // inconsistent paths meet.
      P = PHIMap.lookup(OpInst);
    if (!P) {
      // Recurse and memoize the results, whether a phi is found or not.
      // This recursive call invalidates pointers into PHIMap.
      P = getConstantEvolvingPHIOperands(OpInst, L, PHIMap, Depth + 1);
      PHIMap[OpInst] = P;
    }
    if (!P)
      return nullptr; // Not evolving from PHI
    if (PHI && PHI != P)
      return nullptr; // Evolving from multiple different PHIs.
    PHI = P;
  }
  // This is an expression evolving from a constant PHI!
  return PHI;
}

/// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node
/// in the loop that V is derived from. We allow arbitrary operations along the
/// way, but the operands of an operation must either be constants or a value
/// derived from a constant PHI. If this expression does not fit with these
/// constraints, return null.
static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I || !canConstantEvolve(I, L)) return nullptr;

  if (PHINode *PN = dyn_cast<PHINode>(I))
    return PN;

  // Record non-constant instructions contained by the loop.
  DenseMap<Instruction *, PHINode *> PHIMap;
  return getConstantEvolvingPHIOperands(I, L, PHIMap, 0);
}
/// EvaluateExpression - Given an expression that passes the
/// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node
/// in the loop has the value PHIVal. If we can't fold this expression for some
/// reason, return null.
static Constant *EvaluateExpression(Value *V, const Loop *L,
                                    DenseMap<Instruction *, Constant *> &Vals,
                                    const DataLayout &DL,
                                    const TargetLibraryInfo *TLI) {
  // Convenient constant check, but redundant for recursive calls.
  if (Constant *C = dyn_cast<Constant>(V)) return C;
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return nullptr;

  if (Constant *C = Vals.lookup(I)) return C;

  // An instruction inside the loop depends on a value outside the loop that we
  // weren't given a mapping for, or a value such as a call inside the loop.
  if (!canConstantEvolve(I, L)) return nullptr;

  // An unmapped PHI can be due to a branch or another loop inside this loop,
  // or due to this not being the initial iteration through a loop where we
  // couldn't compute the evolution of this particular PHI last time.
  if (isa<PHINode>(I)) return nullptr;

  std::vector<Constant *> Operands(I->getNumOperands());

  for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
    Instruction *Operand = dyn_cast<Instruction>(I->getOperand(i));
    if (!Operand) {
      Operands[i] = dyn_cast<Constant>(I->getOperand(i));
      if (!Operands[i]) return nullptr;
      continue;
    }
    Constant *C = EvaluateExpression(Operand, L, Vals, DL, TLI);
    Vals[Operand] = C;
    if (!C) return nullptr;
    Operands[i] = C;
  }

  if (CmpInst *CI = dyn_cast<CmpInst>(I))
    return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
                                           Operands[1], DL, TLI);
  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    if (!LI->isVolatile())
      return ConstantFoldLoadFromConstPtr(Operands[0], LI->getType(), DL);
  }
  return ConstantFoldInstOperands(I, Operands, DL, TLI);
}
// If every incoming value to PN except the one for BB is a specific Constant,
// return that, else return nullptr.
static Constant *getOtherIncomingValue(PHINode *PN, BasicBlock *BB) {
  Constant *IncomingVal = nullptr;

  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    if (PN->getIncomingBlock(i) == BB)
      continue;

    auto *CurrentVal = dyn_cast<Constant>(PN->getIncomingValue(i));
    if (!CurrentVal)
      return nullptr;

    if (IncomingVal != CurrentVal) {
      if (IncomingVal)
        return nullptr;
      IncomingVal = CurrentVal;
    }
  }

  return IncomingVal;
}
/// getConstantEvolutionLoopExitValue - If we know that the specified Phi is
/// in the header of its containing loop, we know the loop executes a
/// constant number of times, and the PHI node is just a recurrence
/// involving constants, fold it.
Constant *
ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
                                                   const APInt &BEs,
                                                   const Loop *L) {
  auto I = ConstantEvolutionLoopExitValue.find(PN);
  if (I != ConstantEvolutionLoopExitValue.end())
    return I->second;

  if (BEs.ugt(MaxBruteForceIterations))
    return ConstantEvolutionLoopExitValue[PN] = nullptr; // Not going to evaluate it.

  Constant *&RetVal = ConstantEvolutionLoopExitValue[PN];

  DenseMap<Instruction *, Constant *> CurrentIterVals;
  BasicBlock *Header = L->getHeader();
  assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");

  BasicBlock *Latch = L->getLoopLatch();
  if (!Latch)
    return nullptr;

  for (PHINode &PHI : Header->phis()) {
    if (auto *StartCST = getOtherIncomingValue(&PHI, Latch))
      CurrentIterVals[&PHI] = StartCST;
  }
  if (!CurrentIterVals.count(PN))
    return RetVal = nullptr;

  Value *BEValue = PN->getIncomingValueForBlock(Latch);

  // Execute the loop symbolically to determine the exit value.
  assert(BEs.getActiveBits() < CHAR_BIT * sizeof(unsigned) &&
         "BEs is <= MaxBruteForceIterations which is an 'unsigned'!");

  unsigned NumIterations = BEs.getZExtValue(); // must be in range
  unsigned IterationNum = 0;
  const DataLayout &DL = getDataLayout();
  for (; ; ++IterationNum) {
    if (IterationNum == NumIterations)
      return RetVal = CurrentIterVals[PN];  // Got exit value!

    // Compute the value of the PHIs for the next iteration.
    // EvaluateExpression adds non-phi values to the CurrentIterVals map.
    DenseMap<Instruction *, Constant *> NextIterVals;
    Constant *NextPHI =
        EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
    if (!NextPHI)
      return nullptr;        // Couldn't evaluate!
    NextIterVals[PN] = NextPHI;

    bool StoppedEvolving = NextPHI == CurrentIterVals[PN];

    // Also evaluate the other PHI nodes.  However, we don't get to stop if we
    // cease to be able to evaluate one of them or if they stop evolving,
    // because that doesn't necessarily prevent us from computing PN.
    SmallVector<std::pair<PHINode *, Constant *>, 8> PHIsToCompute;
    for (const auto &I : CurrentIterVals) {
      PHINode *PHI = dyn_cast<PHINode>(I.first);
      if (!PHI || PHI == PN || PHI->getParent() != Header) continue;
      PHIsToCompute.emplace_back(PHI, I.second);
    }
    // We use two distinct loops because EvaluateExpression may invalidate any
    // iterators into CurrentIterVals.
    for (const auto &I : PHIsToCompute) {
      PHINode *PHI = I.first;
      Constant *&NextPHI = NextIterVals[PHI];
      if (!NextPHI) {   // Not already computed.
        Value *BEValue = PHI->getIncomingValueForBlock(Latch);
        NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
      }
      if (NextPHI != I.second)
        StoppedEvolving = false;
    }

    // If all entries in CurrentIterVals == NextIterVals then we can stop
    // iterating, the loop can't continue to change.
    if (StoppedEvolving)
      return RetVal = CurrentIterVals[PN];

    CurrentIterVals.swap(NextIterVals);
  }
}
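// Worked example (hypothetical IR): for a header PHI
// "%i = phi i32 [ 3, %ph ], [ %i2, %latch ]" with "%i2 = add i32 %i, 2" and a
// backedge-taken count of 4, the symbolic execution above produces
// 3 -> 5 -> 7 -> 9 -> 11 and caches 11 as %i's exit value.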
const SCEV *ScalarEvolution::computeExitCountExhaustively(const Loop *L,
                                                          Value *Cond,
                                                          bool ExitWhen) {
  PHINode *PN = getConstantEvolvingPHI(Cond, L);
  if (!PN) return getCouldNotCompute();

  // If the loop is canonicalized, the PHI will have exactly two entries.
  // That's the only form we support here.
  if (PN->getNumIncomingValues() != 2) return getCouldNotCompute();

  DenseMap<Instruction *, Constant *> CurrentIterVals;
  BasicBlock *Header = L->getHeader();
  assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");

  BasicBlock *Latch = L->getLoopLatch();
  assert(Latch && "Should follow from NumIncomingValues == 2!");

  for (PHINode &PHI : Header->phis()) {
    if (auto *StartCST = getOtherIncomingValue(&PHI, Latch))
      CurrentIterVals[&PHI] = StartCST;
  }
  if (!CurrentIterVals.count(PN))
    return getCouldNotCompute();

  // Okay, we found a PHI node that defines the trip count of this loop.
  // Execute the loop symbolically to determine when the condition gets a
  // value of "ExitWhen".
  unsigned MaxIterations = MaxBruteForceIterations;   // Limit analysis.
  const DataLayout &DL = getDataLayout();
  for (unsigned IterationNum = 0; IterationNum != MaxIterations; ++IterationNum) {
    auto *CondVal = dyn_cast_or_null<ConstantInt>(
        EvaluateExpression(Cond, L, CurrentIterVals, DL, &TLI));

    // Couldn't symbolically evaluate.
    if (!CondVal) return getCouldNotCompute();

    if (CondVal->getValue() == uint64_t(ExitWhen)) {
      ++NumBruteForceTripCountsComputed;
      return getConstant(Type::getInt32Ty(getContext()), IterationNum);
    }

    // Update all the PHI nodes for the next iteration.
    DenseMap<Instruction *, Constant *> NextIterVals;

    // Create a list of which PHIs we need to compute. We want to do this before
    // calling EvaluateExpression on them because that may invalidate iterators
    // into CurrentIterVals.
    SmallVector<PHINode *, 8> PHIsToCompute;
    for (const auto &I : CurrentIterVals) {
      PHINode *PHI = dyn_cast<PHINode>(I.first);
      if (!PHI || PHI->getParent() != Header) continue;
      PHIsToCompute.push_back(PHI);
    }
    for (PHINode *PHI : PHIsToCompute) {
      Constant *&NextPHI = NextIterVals[PHI];
      if (NextPHI) continue;    // Already computed!

      Value *BEValue = PHI->getIncomingValueForBlock(Latch);
      NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
    }
    CurrentIterVals.swap(NextIterVals);
  }

  // Too many iterations were needed to evaluate.
  return getCouldNotCompute();
}
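// Sketch of the brute-force search (hypothetical IR): for
//   %i  = phi i8 [ 0, %ph ], [ %i1, %latch ]
//   %i1 = add i8 %i, 3
//   %c  = icmp eq i8 %i1, 9
// with ExitWhen == true, the condition evaluates to false, false, true on
// successive iterations, so the exit count returned is the i32 constant 2.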
const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
  SmallVector<std::pair<const Loop *, const SCEV *>, 2> &Values =
      ValuesAtScopes[V];
  // Check to see if we've folded this expression at this loop before.
  for (auto &LS : Values)
    if (LS.first == L)
      return LS.second ? LS.second : V;

  Values.emplace_back(L, nullptr);

  // Otherwise compute it.
  const SCEV *C = computeSCEVAtScope(V, L);
  for (auto &LS : reverse(ValuesAtScopes[V]))
    if (LS.first == L) {
      LS.second = C;
      break;
    }
  return C;
}
/// This builds up a Constant using the ConstantExpr interface.  That way, we
/// will return Constants for objects which aren't represented by a
/// SCEVConstant, because SCEVConstant is restricted to ConstantInt.
/// Returns NULL if the SCEV isn't representable as a Constant.
static Constant *BuildConstantFromSCEV(const SCEV *V) {
  switch (static_cast<SCEVTypes>(V->getSCEVType())) {
    case scCouldNotCompute:
    case scAddRecExpr:
      break;
    case scConstant:
      return cast<SCEVConstant>(V)->getValue();
    case scUnknown:
      return dyn_cast<Constant>(cast<SCEVUnknown>(V)->getValue());
    case scSignExtend: {
      const SCEVSignExtendExpr *SS = cast<SCEVSignExtendExpr>(V);
      if (Constant *CastOp = BuildConstantFromSCEV(SS->getOperand()))
        return ConstantExpr::getSExt(CastOp, SS->getType());
      break;
    }
    case scZeroExtend: {
      const SCEVZeroExtendExpr *SZ = cast<SCEVZeroExtendExpr>(V);
      if (Constant *CastOp = BuildConstantFromSCEV(SZ->getOperand()))
        return ConstantExpr::getZExt(CastOp, SZ->getType());
      break;
    }
    case scTruncate: {
      const SCEVTruncateExpr *ST = cast<SCEVTruncateExpr>(V);
      if (Constant *CastOp = BuildConstantFromSCEV(ST->getOperand()))
        return ConstantExpr::getTrunc(CastOp, ST->getType());
      break;
    }
    case scAddExpr: {
      const SCEVAddExpr *SA = cast<SCEVAddExpr>(V);
      if (Constant *C = BuildConstantFromSCEV(SA->getOperand(0))) {
        if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) {
          unsigned AS = PTy->getAddressSpace();
          Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS);
          C = ConstantExpr::getBitCast(C, DestPtrTy);
        }
        for (unsigned i = 1, e = SA->getNumOperands(); i != e; ++i) {
          Constant *C2 = BuildConstantFromSCEV(SA->getOperand(i));
          if (!C2) return nullptr;

          // First pointer!
          if (!C->getType()->isPointerTy() && C2->getType()->isPointerTy()) {
            unsigned AS = C2->getType()->getPointerAddressSpace();
            std::swap(C, C2);
            Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS);
            // The offsets have been converted to bytes.  We can add bytes to an
            // i8* by GEP with the byte count in the first index.
            C = ConstantExpr::getBitCast(C, DestPtrTy);
          }

          // Don't bother trying to sum two pointers. We probably can't
          // statically compute a load that results from it anyway.
          if (C2->getType()->isPointerTy())
            return nullptr;

          if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) {
            if (PTy->getElementType()->isStructTy())
              C2 = ConstantExpr::getIntegerCast(
                  C2, Type::getInt32Ty(C->getContext()), true);
            C = ConstantExpr::getGetElementPtr(PTy->getElementType(), C, C2);
          } else
            C = ConstantExpr::getAdd(C, C2);
        }
        return C;
      }
      break;
    }
    case scMulExpr: {
      const SCEVMulExpr *SM = cast<SCEVMulExpr>(V);
      if (Constant *C = BuildConstantFromSCEV(SM->getOperand(0))) {
        // Don't bother with pointers at all.
        if (C->getType()->isPointerTy()) return nullptr;
        for (unsigned i = 1, e = SM->getNumOperands(); i != e; ++i) {
          Constant *C2 = BuildConstantFromSCEV(SM->getOperand(i));
          if (!C2 || C2->getType()->isPointerTy()) return nullptr;
          C = ConstantExpr::getMul(C, C2);
        }
        return C;
      }
      break;
    }
    case scUDivExpr: {
      const SCEVUDivExpr *SU = cast<SCEVUDivExpr>(V);
      if (Constant *LHS = BuildConstantFromSCEV(SU->getLHS()))
        if (Constant *RHS = BuildConstantFromSCEV(SU->getRHS()))
          if (LHS->getType() == RHS->getType())
            return ConstantExpr::getUDiv(LHS, RHS);
      break;
    }
    case scSMaxExpr:
    case scUMaxExpr:
      break; // TODO: smax, umax.
  }
  return nullptr;
}
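// For example (illustrative): for the SCEV (8 + @gv), where @gv is a global,
// the scAddExpr case above folds to a byte-offset GEP constant, roughly
// "getelementptr i8, i8* bitcast(@gv), 8"; any addrec operand, by contrast,
// makes the whole translation fail with nullptr.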
const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
  if (isa<SCEVConstant>(V)) return V;

  // If this instruction is evolved from a constant-evolving PHI, compute the
  // exit value from the loop without using SCEVs.
  if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) {
    if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) {
      if (PHINode *PN = dyn_cast<PHINode>(I)) {
        const Loop *LI = this->LI[I->getParent()];
        // Looking for loop exit value.
        if (LI && LI->getParentLoop() == L &&
            PN->getParent() == LI->getHeader()) {
          // Okay, there is no closed form solution for the PHI node.  Check
          // to see if the loop that contains it has a known backedge-taken
          // count.  If so, we may be able to force computation of the exit
          // value.
          const SCEV *BackedgeTakenCount = getBackedgeTakenCount(LI);
          if (const SCEVConstant *BTCC =
                  dyn_cast<SCEVConstant>(BackedgeTakenCount)) {

            // This trivial case can show up in some degenerate cases where
            // the incoming IR has not yet been fully simplified.
            if (BTCC->getValue()->isZero()) {
              Value *InitValue = nullptr;
              bool MultipleInitValues = false;
              for (unsigned i = 0; i < PN->getNumIncomingValues(); i++) {
                if (!LI->contains(PN->getIncomingBlock(i))) {
                  if (!InitValue)
                    InitValue = PN->getIncomingValue(i);
                  else if (InitValue != PN->getIncomingValue(i)) {
                    MultipleInitValues = true;
                    break;
                  }
                }
              }
              if (!MultipleInitValues && InitValue)
                return getSCEV(InitValue);
            }
            // Okay, we know how many times the containing loop executes.  If
            // this is a constant evolving PHI node, get the final value at
            // the specified iteration number.
            Constant *RV =
                getConstantEvolutionLoopExitValue(PN, BTCC->getAPInt(), LI);
            if (RV) return getSCEV(RV);
          }
        }
      }

      // Okay, this is an expression that we cannot symbolically evaluate
      // into a SCEV.  Check to see if it's possible to symbolically evaluate
      // the arguments into constants, and if so, try to constant propagate the
      // result.  This is particularly useful for computing loop exit values.
      if (CanConstantFold(I)) {
        SmallVector<Constant *, 4> Operands;
        bool MadeImprovement = false;
        for (Value *Op : I->operands()) {
          if (Constant *C = dyn_cast<Constant>(Op)) {
            Operands.push_back(C);
            continue;
          }

          // If any of the operands is non-constant and if they are
          // non-integer and non-pointer, don't even try to analyze them
          // with scev techniques.
          if (!isSCEVable(Op->getType()))
            return V;

          const SCEV *OrigV = getSCEV(Op);
          const SCEV *OpV = getSCEVAtScope(OrigV, L);
          MadeImprovement |= OrigV != OpV;

          Constant *C = BuildConstantFromSCEV(OpV);
          if (!C) return V;
          if (C->getType() != Op->getType())
            C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
                                                              Op->getType(),
                                                              false),
                                      C, Op->getType());
          Operands.push_back(C);
        }

        // Check to see if getSCEVAtScope actually made an improvement.
        if (MadeImprovement) {
          Constant *C = nullptr;
          const DataLayout &DL = getDataLayout();
          if (const CmpInst *CI = dyn_cast<CmpInst>(I))
            C = ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
                                                Operands[1], DL, &TLI);
          else if (const LoadInst *LI = dyn_cast<LoadInst>(I)) {
            if (!LI->isVolatile())
              C = ConstantFoldLoadFromConstPtr(Operands[0], LI->getType(), DL);
          } else
            C = ConstantFoldInstOperands(I, Operands, DL, &TLI);
          if (!C) return V;
          return getSCEV(C);
        }
      }
    }

    // This is some other type of SCEVUnknown, just return it.
    return V;
  }

  if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) {
    // Avoid performing the look-up in the common case where the specified
    // expression has no loop-variant portions.
    for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) {
      const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
      if (OpAtScope != Comm->getOperand(i)) {
        // Okay, at least one of these operands is loop variant but might be
        // foldable.  Build a new instance of the folded commutative expression.
        SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(),
                                            Comm->op_begin()+i);
        NewOps.push_back(OpAtScope);

        for (++i; i != e; ++i) {
          OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
          NewOps.push_back(OpAtScope);
        }
        if (isa<SCEVAddExpr>(Comm))
          return getAddExpr(NewOps);
        if (isa<SCEVMulExpr>(Comm))
          return getMulExpr(NewOps);
        if (isa<SCEVSMaxExpr>(Comm))
          return getSMaxExpr(NewOps);
        if (isa<SCEVUMaxExpr>(Comm))
          return getUMaxExpr(NewOps);
        llvm_unreachable("Unknown commutative SCEV type!");
      }
    }
    // If we got here, all operands are loop invariant.
    return Comm;
  }

  if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) {
    const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L);
    const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L);
    if (LHS == Div->getLHS() && RHS == Div->getRHS())
      return Div;   // must be loop invariant
    return getUDivExpr(LHS, RHS);
  }

  // If this is a loop recurrence for a loop that does not contain L, then we
  // are dealing with the final value computed by the loop.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
    // First, attempt to evaluate each operand.
    // Avoid performing the look-up in the common case where the specified
    // expression has no loop-variant portions.
    for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
      const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L);
      if (OpAtScope == AddRec->getOperand(i))
        continue;

      // Okay, at least one of these operands is loop variant but might be
      // foldable.  Build a new instance of the folded commutative expression.
      SmallVector<const SCEV *, 8> NewOps(AddRec->op_begin(),
                                          AddRec->op_begin()+i);
      NewOps.push_back(OpAtScope);
      for (++i; i != e; ++i)
        NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L));

      const SCEV *FoldedRec =
        getAddRecExpr(NewOps, AddRec->getLoop(),
                      AddRec->getNoWrapFlags(SCEV::FlagNW));
      AddRec = dyn_cast<SCEVAddRecExpr>(FoldedRec);
      // The addrec may be folded to a nonrecurrence, for example, if the
      // induction variable is multiplied by zero after constant folding. Go
      // ahead and return the folded value.
      if (!AddRec)
        return FoldedRec;
      break;
    }

    // If the scope is outside the addrec's loop, evaluate it by using the
    // loop exit value of the addrec.
    if (!AddRec->getLoop()->contains(L)) {
      // To evaluate this recurrence, we need to know how many times the AddRec
      // loop iterates.  Compute this now.
      const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop());
      if (BackedgeTakenCount == getCouldNotCompute()) return AddRec;

      // Then, evaluate the AddRec.
      return AddRec->evaluateAtIteration(BackedgeTakenCount, *this);
    }

    return AddRec;
  }

  if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) {
    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
    if (Op == Cast->getOperand())
      return Cast;  // must be loop invariant
    return getZeroExtendExpr(Op, Cast->getType());
  }

  if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) {
    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
    if (Op == Cast->getOperand())
      return Cast;  // must be loop invariant
    return getSignExtendExpr(Op, Cast->getType());
  }

  if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) {
    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
    if (Op == Cast->getOperand())
      return Cast;  // must be loop invariant
    return getTruncateExpr(Op, Cast->getType());
  }

  llvm_unreachable("Unknown SCEV type!");
}
const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) {
  return getSCEVAtScope(getSCEV(V), L);
}
const SCEV *ScalarEvolution::stripInjectiveFunctions(const SCEV *S) const {
  if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S))
    return stripInjectiveFunctions(ZExt->getOperand());
  if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S))
    return stripInjectiveFunctions(SExt->getOperand());
  return S;
}
/// Finds the minimum unsigned root of the following equation:
///
///     A * X = B (mod N)
///
/// where N = 2^BW and BW is the common bit width of A and B. The signedness of
/// A and B isn't important.
///
/// If the equation does not have a solution, SCEVCouldNotCompute is returned.
static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const SCEV *B,
                                                ScalarEvolution &SE) {
  uint32_t BW = A.getBitWidth();
  assert(BW == SE.getTypeSizeInBits(B->getType()));
  assert(A != 0 && "A must be non-zero.");

  // 1. D = gcd(A, N)
  //
  // The gcd of A and N may have only one prime factor: 2. The number of
  // trailing zeros in A is its multiplicity
  uint32_t Mult2 = A.countTrailingZeros();
  // D = 2^Mult2

  // 2. Check if B is divisible by D.
  //
  // B is divisible by D if and only if the multiplicity of prime factor 2 for B
  // is not less than multiplicity of this prime factor for D.
  if (SE.GetMinTrailingZeros(B) < Mult2)
    return SE.getCouldNotCompute();

  // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic
  // modulo (N / D).
  //
  // If D == 1, (N / D) == N == 2^BW, so we need one extra bit to represent
  // (N / D) in general. The inverse itself always fits into BW bits, though,
  // so we immediately truncate it.
  APInt AD = A.lshr(Mult2).zext(BW + 1);  // AD = A / D
  APInt Mod(BW + 1, 0);
  Mod.setBit(BW - Mult2);  // Mod = N / D
  APInt I = AD.multiplicativeInverse(Mod).trunc(BW);

  // 4. Compute the minimum unsigned root of the equation:
  // I * (B / D) mod (N / D)
  // To simplify the computation, we factor out the divide by D:
  // (I * B mod N) / D
  const SCEV *D = SE.getConstant(APInt::getOneBitSet(BW, Mult2));
  return SE.getUDivExactExpr(SE.getMulExpr(B, SE.getConstant(I)), D);
}
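// Worked example: solve 4 * X = 8 (mod 256), i.e. A = 4, B = 8, BW = 8.
// D = gcd(4, 256) = 4 (Mult2 = 2), and B has >= 2 trailing zeros, so a
// solution exists.  AD = A/D = 1, Mod = N/D = 64, I = 1^{-1} (mod 64) = 1,
// and the minimum root is (1 * 8 mod 256) / 4 = 2; indeed 4 * 2 == 8.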
/// For a given quadratic addrec, generate coefficients of the corresponding
/// quadratic equation, multiplied by a common value to ensure that they are
/// integers.
/// The returned value is a tuple { A, B, C, M, BitWidth }, where
/// Ax^2 + Bx + C is the quadratic function, M is the value that A, B and C
/// were multiplied by, and BitWidth is the bit width of the original addrec
/// coefficients.
/// This function returns None if the addrec coefficients are not compile-
/// time constants.
static Optional<std::tuple<APInt, APInt, APInt, APInt, unsigned>>
GetQuadraticEquation(const SCEVAddRecExpr *AddRec) {
  assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!");
  const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0));
  const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1));
  const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2));
  LLVM_DEBUG(dbgs() << __func__ << ": analyzing quadratic addrec: "
                    << *AddRec << '\n');

  // We currently can only solve this if the coefficients are constants.
  if (!LC || !MC || !NC) {
    LLVM_DEBUG(dbgs() << __func__ << ": coefficients are not constant\n");
    return None;
  }

  APInt L = LC->getAPInt();
  APInt M = MC->getAPInt();
  APInt N = NC->getAPInt();
  assert(!N.isNullValue() && "This is not a quadratic addrec");

  unsigned BitWidth = LC->getAPInt().getBitWidth();
  unsigned NewWidth = BitWidth + 1;
  LLVM_DEBUG(dbgs() << __func__ << ": addrec coeff bw: "
                    << BitWidth << '\n');
  // The sign-extension (as opposed to a zero-extension) here matches the
  // extension used in SolveQuadraticEquationWrap (with the same motivation).
  N = N.sext(NewWidth);
  M = M.sext(NewWidth);
  L = L.sext(NewWidth);

  // The increments are M, M+N, M+2N, ..., so the accumulated values are
  //   L+M, (L+M)+(M+N), (L+M)+(M+N)+(M+2N), ..., that is,
  //   L+M, L+2M+N, L+3M+3N, ...
  // After n iterations the accumulated value Acc is L + nM + n(n-1)/2 N.
  //
  // The equation Acc = 0 is then
  //   L + nM + n(n-1)/2 N = 0,  or  2L + 2M n + n(n-1) N = 0.
  // In a quadratic form it becomes:
  //   N n^2 + (2M-N) n + 2L = 0.

  APInt A = N;
  APInt B = 2 * M - A;
  APInt C = 2 * L;
  APInt T = APInt(NewWidth, 2);
  LLVM_DEBUG(dbgs() << __func__ << ": equation " << A << "x^2 + " << B
                    << "x + " << C << ", coeff bw: " << NewWidth
                    << ", multiplied by " << T << '\n');
  return std::make_tuple(A, B, C, T, BitWidth);
}
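// Worked example: for the addrec {5,+,3,+,4}, L = 5, M = 3, N = 4, so
// Acc(n) = 5 + 3n + 4*n(n-1)/2, and doubling the equation Acc(n) = 0 gives
//   4n^2 + (2*3 - 4)n + 2*5 = 4n^2 + 2n + 10 = 0,
// returned as the tuple { 4, 2, 10, 2, BW } with the coefficients
// sign-extended by one bit.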
/// Helper function to compare optional APInts:
/// (a) if X and Y both exist, return min(X, Y),
/// (b) if neither X nor Y exist, return None,
/// (c) if exactly one of X and Y exists, return that value.
static Optional<APInt> MinOptional(Optional<APInt> X, Optional<APInt> Y) {
  if (X.hasValue() && Y.hasValue()) {
    unsigned W = std::max(X->getBitWidth(), Y->getBitWidth());
    APInt XW = X->sextOrSelf(W);
    APInt YW = Y->sextOrSelf(W);
    return XW.slt(YW) ? *X : *Y;
  }
  if (!X.hasValue() && !Y.hasValue())
    return None;
  return X.hasValue() ? *X : *Y;
}
/// Helper function to truncate an optional APInt to a given BitWidth.
/// When solving addrec-related equations, it is preferable to return a value
/// that has the same bit width as the original addrec's coefficients. If the
/// solution fits in the original bit width, truncate it (except for i1).
/// Returning a value of a different bit width may inhibit some optimizations.
///
/// In general, a solution to a quadratic equation generated from an addrec
/// may require BW+1 bits, where BW is the bit width of the addrec's
/// coefficients. The reason is that the coefficients of the quadratic
/// equation are BW+1 bits wide (to avoid truncation when converting from
/// the addrec to the equation).
static Optional<APInt> TruncIfPossible(Optional<APInt> X, unsigned BitWidth) {
  if (!X.hasValue())
    return None;
  unsigned W = X->getBitWidth();
  if (BitWidth > 1 && BitWidth < W && X->isIntN(BitWidth))
    return X->trunc(BitWidth);
  return X;
}
/// Let c(n) be the value of the quadratic chrec {L,+,M,+,N} after n
/// iterations. The values L, M, N are assumed to be signed, and they
/// should all have the same bit widths.
/// Find the least n >= 0 such that c(n) = 0 in the arithmetic modulo 2^BW,
/// where BW is the bit width of the addrec's coefficients.
/// If the calculated value is a BW-bit integer (for BW > 1), it will be
/// returned as such, otherwise the bit width of the returned value may
/// be greater than BW.
///
/// This function returns None if
/// (a) the addrec coefficients are not constant, or
/// (b) SolveQuadraticEquationWrap was unable to find a solution. For cases
///     like x^2 = 5, no integer solutions exist, in other cases an integer
///     solution may exist, but SolveQuadraticEquationWrap may fail to find it.
static Optional<APInt>
SolveQuadraticAddRecExact(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
  APInt A, B, C, M;
  unsigned BitWidth;
  auto T = GetQuadraticEquation(AddRec);
  if (!T.hasValue())
    return None;

  std::tie(A, B, C, M, BitWidth) = *T;
  LLVM_DEBUG(dbgs() << __func__ << ": solving for unsigned overflow\n");
  Optional<APInt> X = APIntOps::SolveQuadraticEquationWrap(A, B, C, BitWidth+1);
  if (!X.hasValue())
    return None;

  ConstantInt *CX = ConstantInt::get(SE.getContext(), *X);
  ConstantInt *V = EvaluateConstantChrecAtConstant(AddRec, CX, SE);
  if (!V->isZero())
    return None;

  return TruncIfPossible(X, BitWidth);
}
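// Worked example: for {-6,+,0,+,2} the accumulated value is
// c(n) = n(n-1) - 6, and the (doubled) equation is 2n^2 - 2n - 12 = 0, whose
// least non-negative root is n = 3.  c(3) == 0 exactly, so 3 is returned
// (truncated back to the addrec's bit width when it fits).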
/// Let c(n) be the value of the quadratic chrec {0,+,M,+,N} after n
/// iterations. The values M, N are assumed to be signed, and they
/// should all have the same bit widths.
/// Find the least n such that c(n) does not belong to the given range,
/// while c(n-1) does.
///
/// This function returns None if
/// (a) the addrec coefficients are not constant, or
/// (b) SolveQuadraticEquationWrap was unable to find a solution for the
///     bounds of the range.
static Optional<APInt>
SolveQuadraticAddRecRange(const SCEVAddRecExpr *AddRec,
                          const ConstantRange &Range, ScalarEvolution &SE) {
  assert(AddRec->getOperand(0)->isZero() &&
         "Starting value of addrec should be 0");
  LLVM_DEBUG(dbgs() << __func__ << ": solving boundary crossing for range "
                    << Range << ", addrec " << *AddRec << '\n');
  // This case is handled in getNumIterationsInRange. Here we can assume that
  // we start in the range.
  assert(Range.contains(APInt(SE.getTypeSizeInBits(AddRec->getType()), 0)) &&
         "Addrec's initial value should be in range");

  APInt A, B, C, M;
  unsigned BitWidth;
  auto T = GetQuadraticEquation(AddRec);
  if (!T.hasValue())
    return None;

  // Be careful about the return value: there can be two reasons for not
  // returning an actual number. First, if no solutions to the equations
  // were found, and second, if the solutions don't leave the given range.
  // The first case means that the actual solution is "unknown", the second
  // means that it's known, but not valid. If the solution is unknown, we
  // cannot make any conclusions.
  // Return a pair: the optional solution and a flag indicating if the
  // solution was found.
  auto SolveForBoundary = [&](APInt Bound) -> std::pair<Optional<APInt>,bool> {
    // Solve for signed overflow and unsigned overflow, pick the lower
    // solution.
    LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: checking boundary "
                      << Bound << " (before multiplying by " << M << ")\n");
    Bound *= M; // The quadratic equation multiplier.

    Optional<APInt> SO = None;
    if (BitWidth > 1) {
      LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for "
                           "signed overflow\n");
      SO = APIntOps::SolveQuadraticEquationWrap(A, B, -Bound, BitWidth);
    }
    LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for "
                         "unsigned overflow\n");
    Optional<APInt> UO = APIntOps::SolveQuadraticEquationWrap(A, B, -Bound,
                                                              BitWidth + 1);

    auto LeavesRange = [&] (const APInt &X) {
      ConstantInt *C0 = ConstantInt::get(SE.getContext(), X);
      ConstantInt *V0 = EvaluateConstantChrecAtConstant(AddRec, C0, SE);
      if (Range.contains(V0->getValue()))
        return false;
      // X should be at least 1, so X-1 is non-negative.
      ConstantInt *C1 = ConstantInt::get(SE.getContext(), X-1);
      ConstantInt *V1 = EvaluateConstantChrecAtConstant(AddRec, C1, SE);
      if (Range.contains(V1->getValue()))
        return true;
      return false;
    };

    // If SolveQuadraticEquationWrap returns None, it means that there can
    // be a solution, but the function failed to find it. We cannot treat it
    // as "no solution".
    if (!SO.hasValue() || !UO.hasValue())
      return { None, false };

    // Check the smaller value first to see if it leaves the range.
    // At this point, both SO and UO must have values.
    Optional<APInt> Min = MinOptional(SO, UO);
    if (LeavesRange(*Min))
      return { Min, true };
    Optional<APInt> Max = Min == SO ? UO : SO;
    if (LeavesRange(*Max))
      return { Max, true };

    // Solutions were found, but were eliminated, hence the "true".
    return { None, true };
  };

  std::tie(A, B, C, M, BitWidth) = *T;
  // Lower bound is inclusive, subtract 1 to represent the exiting value.
  APInt Lower = Range.getLower().sextOrSelf(A.getBitWidth()) - 1;
  APInt Upper = Range.getUpper().sextOrSelf(A.getBitWidth());
  auto SL = SolveForBoundary(Lower);
  auto SU = SolveForBoundary(Upper);
  // If any of the solutions was unknown, no meaningful conclusions can
  // be made.
  if (!SL.second || !SU.second)
    return None;

  // Claim: The correct solution is not some value between Min and Max.
  //
  // Justification: Assuming that Min and Max are different values, one of
  // them is when the first signed overflow happens, the other is when the
  // first unsigned overflow happens. Crossing the range boundary is only
  // possible via an overflow (treating 0 as a special case of it, modeling
  // an overflow as crossing k*2^W for some k).
  //
  // The interesting case here is when Min was eliminated as an invalid
  // solution, but Max was not. The argument is that if there was another
  // overflow between Min and Max, it would also have been eliminated if
  // it was considered.
  //
  // For a given boundary, it is possible to have two overflows of the same
  // type (signed/unsigned) without having the other type in between: this
  // can happen when the vertex of the parabola is between the iterations
  // corresponding to the overflows. This is only possible when the two
  // overflows cross k*2^W for the same k. In such case, if the second one
  // left the range (and was the first one to do so), the first overflow
  // would have to enter the range, which would mean that either we had left
  // the range before or that we started outside of it. Both of these cases
  // are contradictions.
  //
  // Claim: In the case where SolveForBoundary returns None, the correct
  // solution is not some value between the Max for this boundary and the
  // Min of the other boundary.
  //
  // Justification: Assume that we had such Max_A and Min_B corresponding
  // to range boundaries A and B and such that Max_A < Min_B. If there was
  // a solution between Max_A and Min_B, it would have to be caused by an
  // overflow corresponding to either A or B. It cannot correspond to B,
  // since Min_B is the first occurrence of such an overflow. If it
  // corresponded to A, it would have to be either a signed or an unsigned
  // overflow that is larger than both eliminated overflows for A. But
  // between the eliminated overflows and this overflow, the values would
  // cover the entire value space, thus crossing the other boundary, which
  // is a contradiction.

  return TruncIfPossible(MinOptional(SL.first, SU.first), BitWidth);
}
ScalarEvolution::ExitLimit
ScalarEvolution::howFarToZero(const SCEV *V, const Loop *L, bool ControlsExit,
                              bool AllowPredicates) {

  // This is only used for loops with a "x != y" exit test. The exit condition
  // is now expressed as a single expression, V = x-y. So the exit test is
  // effectively V != 0.  We know and take advantage of the fact that this
  // expression is only used in a comparison-with-zero context.

  SmallPtrSet<const SCEVPredicate *, 4> Predicates;
  // If the value is a constant
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
    // If the value is already zero, the branch will execute zero times.
    if (C->getValue()->isZero()) return C;
    return getCouldNotCompute();  // Otherwise it will loop infinitely.
  }

  const SCEVAddRecExpr *AddRec =
      dyn_cast<SCEVAddRecExpr>(stripInjectiveFunctions(V));

  if (!AddRec && AllowPredicates)
    // Try to make this an AddRec using runtime tests, in the first X
    // iterations of this loop, where X is the SCEV expression found by the
    // algorithm below.
    AddRec = convertSCEVToAddRecWithPredicates(V, L, Predicates);

  if (!AddRec || AddRec->getLoop() != L)
    return getCouldNotCompute();

  // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
  // the quadratic equation to solve it.
  if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) {
    // We can only use this value if the chrec ends up with an exact zero
    // value at this index.  When solving for "X*X != 5", for example, we
    // should not accept a root of 2.
    if (auto S = SolveQuadraticAddRecExact(AddRec, *this)) {
      const auto *R = cast<SCEVConstant>(getConstant(S.getValue()));
      return ExitLimit(R, R, false, Predicates);
    }
    return getCouldNotCompute();
  }

  // Otherwise we can only handle this if it is affine.
  if (!AddRec->isAffine())
    return getCouldNotCompute();

  // If this is an affine expression, the execution count of this branch is
  // the minimum unsigned root of the following equation:
  //
  //     Start + Step*N = 0 (mod 2^BW)
  //
  // equivalent to:
  //
  //             Step*N = -Start (mod 2^BW)
  //
  // where BW is the common bit width of Start and Step.

  // Get the initial value for the loop.
  const SCEV *Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop());
  const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop());

  // For now we handle only constant steps.
  //
  // TODO: Handle a nonconstant Step given AddRec<NUW>. If the
  // AddRec is NUW, then (in an unsigned sense) it cannot be counting up to wrap
  // to 0, it must be counting down to equal 0. Consequently, N = Start / -Step.
  // We have not yet seen any such cases.
  const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step);
  if (!StepC || StepC->getValue()->isZero())
    return getCouldNotCompute();

  // For positive steps (counting up until unsigned overflow):
  //   N = -Start/Step (as unsigned)
  // For negative steps (counting down to zero):
  //   N = Start/-Step
  // First compute the unsigned distance from zero in the direction of Step.
  bool CountDown = StepC->getAPInt().isNegative();
  const SCEV *Distance = CountDown ? Start : getNegativeSCEV(Start);

  // Handle unitary steps, which cannot wraparound.
  // 1*N = -Start; -1*N = Start (mod 2^BW), so:
  //   N = Distance (as unsigned)
  if (StepC->getValue()->isOne() || StepC->getValue()->isMinusOne()) {
    APInt MaxBECount = getUnsignedRangeMax(Distance);

    // When a loop like "for (int i = 0; i != n; ++i) { /* body */ }" is rotated,
    // we end up with a loop whose backedge-taken count is n - 1.  Detect this
    // case, and see if we can improve the bound.
    //
    // Explicitly handling this here is necessary because getUnsignedRange
    // isn't context-sensitive; it doesn't know that we only care about the
    // range inside the loop.
    const SCEV *Zero = getZero(Distance->getType());
    const SCEV *One = getOne(Distance->getType());
    const SCEV *DistancePlusOne = getAddExpr(Distance, One);
    if (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_NE, DistancePlusOne, Zero)) {
      // If Distance + 1 doesn't overflow, we can compute the maximum distance
      // as "unsigned_max(Distance + 1) - 1".
      ConstantRange CR = getUnsignedRange(DistancePlusOne);
      MaxBECount = APIntOps::umin(MaxBECount, CR.getUnsignedMax() - 1);
    }

    return ExitLimit(Distance, getConstant(MaxBECount), false, Predicates);
  }

  // If the condition controls loop exit (the loop exits only if the expression
  // is true) and the addition is no-wrap we can use unsigned divide to
  // compute the backedge count.  In this case, the step may not divide the
  // distance, but we don't care because if the condition is "missed" the loop
  // will have undefined behavior due to wrapping.
  if (ControlsExit && AddRec->hasNoSelfWrap() &&
      loopHasNoAbnormalExits(AddRec->getLoop())) {
    const SCEV *Exact =
        getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step);
    const SCEV *Max =
        Exact == getCouldNotCompute()
            ? Exact
            : getConstant(getUnsignedRangeMax(Exact));
    return ExitLimit(Exact, Max, false, Predicates);
  }

  // Solve the general equation.
  const SCEV *E = SolveLinEquationWithOverflow(StepC->getAPInt(),
                                               getNegativeSCEV(Start), *this);
  const SCEV *M = E == getCouldNotCompute()
                      ? E
                      : getConstant(getUnsignedRangeMax(E));
  return ExitLimit(E, M, false, Predicates);
}
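// Two illustrative cases: for {n,+,-1} the step is unitary, so the exit count
// is simply Distance = n (with the range-based refinement of the max above);
// for {8,+,4} in i8 the general path solves 4*N = -8 (mod 256) via
// SolveLinEquationWithOverflow, giving N = 62, since 8 + 4*62 == 256 == 0
// (mod 2^8).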
ScalarEvolution::ExitLimit
ScalarEvolution::howFarToNonZero(const SCEV *V, const Loop *L) {
  // Loops that look like: while (X == 0) are very strange indeed.  We don't
  // handle them yet except for the trivial case.  This could be expanded in the
  // future as needed.

  // If the value is a constant, check to see if it is known to be non-zero
  // already.  If so, the backedge will execute zero times.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
    if (!C->getValue()->isZero())
      return getZero(C->getType());
    return getCouldNotCompute();  // Otherwise it will loop infinitely.
  }

  // We could implement others, but I really doubt anyone writes loops like
  // this, and if they did, they would already be constant folded.
  return getCouldNotCompute();
}
std::pair<BasicBlock *, BasicBlock *>
ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) {
  // If the block has a unique predecessor, then there is no path from the
  // predecessor to the block that does not go through the direct edge
  // from the predecessor to the block.
  if (BasicBlock *Pred = BB->getSinglePredecessor())
    return {Pred, BB};

  // A loop's header is defined to be a block that dominates the loop.
  // If the header has a unique predecessor outside the loop, it must be
  // a block that has exactly one successor that can reach the loop.
  if (Loop *L = LI.getLoopFor(BB))
    return {L->getLoopPredecessor(), L->getHeader()};

  return {nullptr, nullptr};
}
/// SCEV structural equivalence is usually sufficient for testing whether two
/// expressions are equal, however for the purposes of looking for a condition
/// guarding a loop, it can be useful to be a little more general, since a
/// front-end may have replicated the controlling expression.
static bool HasSameValue(const SCEV *A, const SCEV *B) {
  // Quick check to see if they are the same SCEV.
  if (A == B) return true;

  auto ComputesEqualValues = [](const Instruction *A, const Instruction *B) {
    // Not all instructions that are "identical" compute the same value.  For
    // instance, two distinct alloca instructions allocating the same type are
    // identical and do not read memory; but compute distinct values.
    return A->isIdenticalTo(B) && (isa<BinaryOperator>(A) ||
                                   isa<GetElementPtrInst>(A));
  };

  // Otherwise, if they're both SCEVUnknown, it's possible that they hold
  // two different instructions with the same value. Check for this case.
  if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A))
    if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B))
      if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue()))
        if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue()))
          if (ComputesEqualValues(AI, BI))
            return true;

  // Otherwise assume they may have a different value.
  return false;
}
bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred,
                                           const SCEV *&LHS, const SCEV *&RHS,
                                           unsigned Depth) {
  bool Changed = false;
  // Simplifies ICMP to trivial true or false by turning it into '0 == 0' or
  // '0 != 0'.
  auto TrivialCase = [&](bool TriviallyTrue) {
    LHS = RHS = getConstant(ConstantInt::getFalse(getContext()));
    Pred = TriviallyTrue ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE;
    return true;
  };
  // If we hit the max recursion limit bail out.
  if (Depth >= 3)
    return false;

  // Canonicalize a constant to the right side.
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
    // Check for both operands constant.
    if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
      if (ConstantExpr::getICmp(Pred,
                                LHSC->getValue(),
                                RHSC->getValue())->isNullValue())
        return TrivialCase(false);
      else
        return TrivialCase(true);
    }
    // Otherwise swap the operands to put the constant on the right.
    std::swap(LHS, RHS);
    Pred = ICmpInst::getSwappedPredicate(Pred);
    Changed = true;
  }

  // If we're comparing an addrec with a value which is loop-invariant in the
  // addrec's loop, put the addrec on the left. Also make a dominance check,
  // as both operands could be addrecs loop-invariant in each other's loop.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) {
    const Loop *L = AR->getLoop();
    if (isLoopInvariant(LHS, L) && properlyDominates(LHS, L->getHeader())) {
      std::swap(LHS, RHS);
      Pred = ICmpInst::getSwappedPredicate(Pred);
      Changed = true;
    }
  }

  // If there's a constant operand, canonicalize comparisons with boundary
  // cases, and canonicalize *-or-equal comparisons to regular comparisons.
  if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) {
    const APInt &RA = RC->getAPInt();

    bool SimplifiedByConstantRange = false;

    if (!ICmpInst::isEquality(Pred)) {
      ConstantRange ExactCR = ConstantRange::makeExactICmpRegion(Pred, RA);
      if (ExactCR.isFullSet())
        return TrivialCase(true);
      else if (ExactCR.isEmptySet())
        return TrivialCase(false);

      APInt NewRHS;
      CmpInst::Predicate NewPred;
      if (ExactCR.getEquivalentICmp(NewPred, NewRHS) &&
          ICmpInst::isEquality(NewPred)) {
        // We were able to convert an inequality to an equality.
        Pred = NewPred;
        RHS = getConstant(NewRHS);
        Changed = SimplifiedByConstantRange = true;
      }
    }

    if (!SimplifiedByConstantRange) {
      switch (Pred) {
      default:
        break;
      case ICmpInst::ICMP_EQ:
      case ICmpInst::ICMP_NE:
        // Fold ((-1) * %a) + %b == 0 (equivalent to %b-%a == 0) into %a == %b.
        if (!RA)
          if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(LHS))
            if (const SCEVMulExpr *ME =
                    dyn_cast<SCEVMulExpr>(AE->getOperand(0)))
              if (AE->getNumOperands() == 2 && ME->getNumOperands() == 2 &&
                  ME->getOperand(0)->isAllOnesValue()) {
                RHS = AE->getOperand(1);
                LHS = ME->getOperand(1);
                Changed = true;
              }
        break;


        // The "Should have been caught earlier!" messages refer to the fact
        // that the ExactCR.isFullSet() or ExactCR.isEmptySet() check above
        // should have fired on the corresponding cases, and canonicalized the
        // check to trivial case.

      case ICmpInst::ICMP_UGE:
        assert(!RA.isMinValue() && "Should have been caught earlier!");
        Pred = ICmpInst::ICMP_UGT;
        RHS = getConstant(RA - 1);
        Changed = true;
        break;
      case ICmpInst::ICMP_ULE:
        assert(!RA.isMaxValue() && "Should have been caught earlier!");
        Pred = ICmpInst::ICMP_ULT;
        RHS = getConstant(RA + 1);
        Changed = true;
        break;
      case ICmpInst::ICMP_SGE:
        assert(!RA.isMinSignedValue() && "Should have been caught earlier!");
        Pred = ICmpInst::ICMP_SGT;
        RHS = getConstant(RA - 1);
        Changed = true;
        break;
      case ICmpInst::ICMP_SLE:
        assert(!RA.isMaxSignedValue() && "Should have been caught earlier!");
        Pred = ICmpInst::ICMP_SLT;
        RHS = getConstant(RA + 1);
        Changed = true;
        break;
      }
    }
  }

  // Check for obvious equality.
  if (HasSameValue(LHS, RHS)) {
    if (ICmpInst::isTrueWhenEqual(Pred))
      return TrivialCase(true);
    if (ICmpInst::isFalseWhenEqual(Pred))
      return TrivialCase(false);
  }

  // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by
  // adding or subtracting 1 from one of the operands.
  switch (Pred) {
  case ICmpInst::ICMP_SLE:
    if (!getSignedRangeMax(RHS).isMaxSignedValue()) {
      RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
                       SCEV::FlagNSW);
      Pred = ICmpInst::ICMP_SLT;
      Changed = true;
    } else if (!getSignedRangeMin(LHS).isMinSignedValue()) {
      LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS,
                       SCEV::FlagNSW);
      Pred = ICmpInst::ICMP_SLT;
      Changed = true;
    }
    break;
  case ICmpInst::ICMP_SGE:
    if (!getSignedRangeMin(RHS).isMinSignedValue()) {
      RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS,
                       SCEV::FlagNSW);
      Pred = ICmpInst::ICMP_SGT;
      Changed = true;
    } else if (!getSignedRangeMax(LHS).isMaxSignedValue()) {
      LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
                       SCEV::FlagNSW);
      Pred = ICmpInst::ICMP_SGT;
      Changed = true;
    }
    break;
  case ICmpInst::ICMP_ULE:
    if (!getUnsignedRangeMax(RHS).isMaxValue()) {
      RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
                       SCEV::FlagNUW);
      Pred = ICmpInst::ICMP_ULT;
      Changed = true;
    } else if (!getUnsignedRangeMin(LHS).isMinValue()) {
      LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS);
      Pred = ICmpInst::ICMP_ULT;
      Changed = true;
    }
    break;
  case ICmpInst::ICMP_UGE:
    if (!getUnsignedRangeMin(RHS).isMinValue()) {
      RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS);
      Pred = ICmpInst::ICMP_UGT;
      Changed = true;
    } else if (!getUnsignedRangeMax(LHS).isMaxValue()) {
      LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
                       SCEV::FlagNUW);
      Pred = ICmpInst::ICMP_UGT;
      Changed = true;
    }
    break;
  default:
    break;
  }

  // TODO: More simplifications are possible here.

  // Recursively simplify until we either hit a recursion limit or nothing
  // changes.
  if (Changed)
    return SimplifyICmpOperands(Pred, LHS, RHS, Depth+1);

  return Changed;
}
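// Example of the canonicalizations above (illustrative): "5 u>= %x" first
// swaps to "%x u<= 5", which the boundary-case logic then rewrites to
// "%x u< 6"; repeated application stops once nothing changes or the depth
// limit is reached.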
bool ScalarEvolution::isKnownNegative(const SCEV *S) {
  return getSignedRangeMax(S).isNegative();
}

bool ScalarEvolution::isKnownPositive(const SCEV *S) {
  return getSignedRangeMin(S).isStrictlyPositive();
}

bool ScalarEvolution::isKnownNonNegative(const SCEV *S) {
  return !getSignedRangeMin(S).isNegative();
}

bool ScalarEvolution::isKnownNonPositive(const SCEV *S) {
  return !getSignedRangeMax(S).isStrictlyPositive();
}

bool ScalarEvolution::isKnownNonZero(const SCEV *S) {
  return isKnownNegative(S) || isKnownPositive(S);
}
std::pair<const SCEV *, const SCEV *>
ScalarEvolution::SplitIntoInitAndPostInc(const Loop *L, const SCEV *S) {
  // Compute SCEV on entry of loop L.
  const SCEV *Start = SCEVInitRewriter::rewrite(S, L, *this);
  if (Start == getCouldNotCompute())
    return { Start, Start };
  // Compute post increment SCEV for loop L.
  const SCEV *PostInc = SCEVPostIncRewriter::rewrite(S, L, *this);
  assert(PostInc != getCouldNotCompute() && "Unexpected could not compute");
  return { Start, PostInc };
}
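// E.g. (illustrative): for S = {0,+,1}<%loop> this yields the pair
// { 0, {1,+,1}<%loop> } -- the value on entry to the loop and the value after
// the first increment.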
bool ScalarEvolution::isKnownViaInduction(ICmpInst::Predicate Pred,
                                          const SCEV *LHS, const SCEV *RHS) {
  // First collect all loops.
  SmallPtrSet<const Loop *, 8> LoopsUsed;
  getUsedLoops(LHS, LoopsUsed);
  getUsedLoops(RHS, LoopsUsed);

  if (LoopsUsed.empty())
    return false;

  // Domination relationship must be a linear order on collected loops.
#ifndef NDEBUG
  for (auto *L1 : LoopsUsed)
    for (auto *L2 : LoopsUsed)
      assert((DT.dominates(L1->getHeader(), L2->getHeader()) ||
              DT.dominates(L2->getHeader(), L1->getHeader())) &&
             "Domination relationship is not a linear order");
#endif

  const Loop *MDL =
      *std::max_element(LoopsUsed.begin(), LoopsUsed.end(),
                        [&](const Loop *L1, const Loop *L2) {
         return DT.properlyDominates(L1->getHeader(), L2->getHeader());
       });

  // Get init and post increment value for LHS.
  auto SplitLHS = SplitIntoInitAndPostInc(MDL, LHS);
  // If LHS contains an unknown non-invariant SCEV then bail out.
  if (SplitLHS.first == getCouldNotCompute())
    return false;
  assert(SplitLHS.second != getCouldNotCompute() && "Unexpected CNC");
  // Get init and post increment value for RHS.
  auto SplitRHS = SplitIntoInitAndPostInc(MDL, RHS);
  // If RHS contains an unknown non-invariant SCEV then bail out.
  if (SplitRHS.first == getCouldNotCompute())
    return false;
  assert(SplitRHS.second != getCouldNotCompute() && "Unexpected CNC");
  // It is possible that init SCEV contains an invariant load but it does
  // not dominate MDL and is not available at MDL loop entry, so we should
  // check it here.
  if (!isAvailableAtLoopEntry(SplitLHS.first, MDL) ||
      !isAvailableAtLoopEntry(SplitRHS.first, MDL))
    return false;

  return isLoopEntryGuardedByCond(MDL, Pred, SplitLHS.first, SplitRHS.first) &&
         isLoopBackedgeGuardedByCond(MDL, Pred, SplitLHS.second,
                                     SplitRHS.second);
}
bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred,
                                       const SCEV *LHS, const SCEV *RHS) {
  // Canonicalize the inputs first.
  (void)SimplifyICmpOperands(Pred, LHS, RHS);

  if (isKnownViaInduction(Pred, LHS, RHS))
    return true;

  if (isKnownPredicateViaSplitting(Pred, LHS, RHS))
    return true;

  // Otherwise see what can be done with some simple reasoning.
  return isKnownViaNonRecursiveReasoning(Pred, LHS, RHS);
}
bool ScalarEvolution::isKnownOnEveryIteration(ICmpInst::Predicate Pred,
                                              const SCEVAddRecExpr *LHS,
                                              const SCEV *RHS) {
  const Loop *L = LHS->getLoop();
  return isLoopEntryGuardedByCond(L, Pred, LHS->getStart(), RHS) &&
         isLoopBackedgeGuardedByCond(L, Pred, LHS->getPostIncExpr(*this), RHS);
}
bool ScalarEvolution::isMonotonicPredicate(const SCEVAddRecExpr *LHS,
                                           ICmpInst::Predicate Pred,
                                           bool &Increasing) {
  bool Result = isMonotonicPredicateImpl(LHS, Pred, Increasing);

#ifndef NDEBUG
  // Verify an invariant: inverting the predicate should turn a monotonically
  // increasing change to a monotonically decreasing one, and vice versa.
  bool IncreasingSwapped;
  bool ResultSwapped = isMonotonicPredicateImpl(
      LHS, ICmpInst::getSwappedPredicate(Pred), IncreasingSwapped);

  assert(Result == ResultSwapped && "should be able to analyze both!");
  if (ResultSwapped)
    assert(Increasing == !IncreasingSwapped &&
           "monotonicity should flip as we flip the predicate");
#endif

  return Result;
}
bool ScalarEvolution::isMonotonicPredicateImpl(const SCEVAddRecExpr *LHS,
                                               ICmpInst::Predicate Pred,
                                               bool &Increasing) {

  // A zero step value for LHS means the induction variable is essentially a
  // loop invariant value. We don't really depend on the predicate actually
  // flipping from false to true (for increasing predicates, and the other way
  // around for decreasing predicates), all we care about is that *if* the
  // predicate changes then it only changes from false to true.
  //
  // A zero step value in itself is not very useful, but there may be places
  // where SCEV can prove X >= 0 but not prove X > 0, so it is helpful to be
  // as general as possible.

  switch (Pred) {
  default:
    return false; // Conservative answer

  case ICmpInst::ICMP_UGT:
  case ICmpInst::ICMP_UGE:
  case ICmpInst::ICMP_ULT:
  case ICmpInst::ICMP_ULE:
    if (!LHS->hasNoUnsignedWrap())
      return false;

    Increasing = Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE;
    return true;

  case ICmpInst::ICMP_SGT:
  case ICmpInst::ICMP_SGE:
  case ICmpInst::ICMP_SLT:
  case ICmpInst::ICMP_SLE: {
    if (!LHS->hasNoSignedWrap())
      return false;

    const SCEV *Step = LHS->getStepRecurrence(*this);

    if (isKnownNonNegative(Step)) {
      Increasing = Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE;
      return true;
    }

    if (isKnownNonPositive(Step)) {
      Increasing = Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE;
      return true;
    }

    return false;
  }
  }

  llvm_unreachable("switch has default clause!");
}
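// For instance, with LHS = {0,+,1}<nuw> the predicate "LHS u< %n" can only
// change from true to false as the loop advances (decreasing), while
// "LHS u> %n" can only change from false to true (increasing).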
bool ScalarEvolution::isLoopInvariantPredicate(
    ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L,
    ICmpInst::Predicate &InvariantPred, const SCEV *&InvariantLHS,
    const SCEV *&InvariantRHS) {

  // If there is a loop-invariant, force it into the RHS, otherwise bail out.
  if (!isLoopInvariant(RHS, L)) {
    if (!isLoopInvariant(LHS, L))
      return false;

    std::swap(LHS, RHS);
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }

  const SCEVAddRecExpr *ArLHS = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!ArLHS || ArLHS->getLoop() != L)
    return false;

  bool Increasing;
  if (!isMonotonicPredicate(ArLHS, Pred, Increasing))
    return false;

  // If the predicate "ArLHS `Pred` RHS" monotonically increases from false to
  // true as the loop iterates, and the backedge is control dependent on
  // "ArLHS `Pred` RHS" == true then we can reason as follows:
  //
  //   * if the predicate was false in the first iteration then the predicate
  //     is never evaluated again, since the loop exits without taking the
  //     backedge.
  //   * if the predicate was true in the first iteration then it will
  //     continue to be true for all future iterations since it is
  //     monotonically increasing.
  //
  // For both the above possibilities, we can replace the loop varying
  // predicate with its value on the first iteration of the loop (which is
  // loop invariant).
  //
  // A similar reasoning applies for a monotonically decreasing predicate, by
  // replacing true with false and false with true in the above two bullets.

  auto P = Increasing ? Pred : ICmpInst::getInversePredicate(Pred);

  if (!isLoopBackedgeGuardedByCond(L, P, LHS, RHS))
    return false;

  InvariantPred = Pred;
  InvariantLHS = ArLHS->getStart();
  InvariantRHS = RHS;
  return true;
}
bool ScalarEvolution::isKnownPredicateViaConstantRanges(
    ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) {
  if (HasSameValue(LHS, RHS))
    return ICmpInst::isTrueWhenEqual(Pred);

  // This code is split out from isKnownPredicate because it is called from
  // within isLoopEntryGuardedByCond.

  auto CheckRanges =
      [&](const ConstantRange &RangeLHS, const ConstantRange &RangeRHS) {
    return ConstantRange::makeSatisfyingICmpRegion(Pred, RangeRHS)
        .contains(RangeLHS);
  };

  // The check at the top of the function catches the case where the values are
  // known to be equal.
  if (Pred == CmpInst::ICMP_EQ)
    return false;

  if (Pred == CmpInst::ICMP_NE)
    return CheckRanges(getSignedRange(LHS), getSignedRange(RHS)) ||
           CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS)) ||
           isKnownNonZero(getMinusSCEV(LHS, RHS));

  if (CmpInst::isSigned(Pred))
    return CheckRanges(getSignedRange(LHS), getSignedRange(RHS));

  return CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS));
}
bool ScalarEvolution::isKnownPredicateViaNoOverflow(ICmpInst::Predicate Pred,
                                                    const SCEV *LHS,
                                                    const SCEV *RHS) {
  // Match Result to (X + Y)<ExpectedFlags> where Y is a constant integer.
  // Return Y via OutY.
  auto MatchBinaryAddToConst =
      [this](const SCEV *Result, const SCEV *X, APInt &OutY,
             SCEV::NoWrapFlags ExpectedFlags) {
    const SCEV *NonConstOp, *ConstOp;
    SCEV::NoWrapFlags FlagsPresent;

    if (!splitBinaryAdd(Result, ConstOp, NonConstOp, FlagsPresent) ||
        !isa<SCEVConstant>(ConstOp) || NonConstOp != X)
      return false;

    OutY = cast<SCEVConstant>(ConstOp)->getAPInt();
    return (FlagsPresent & ExpectedFlags) == ExpectedFlags;
  };

  APInt C;

  switch (Pred) {
  default:
    break;

  case ICmpInst::ICMP_SGE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_SLE:
    // X s<= (X + C)<nsw> if C >= 0
    if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNSW) && C.isNonNegative())
      return true;

    // (X + C)<nsw> s<= X if C <= 0
    if (MatchBinaryAddToConst(LHS, RHS, C, SCEV::FlagNSW) &&
        !C.isStrictlyPositive())
      return true;
    break;

  case ICmpInst::ICMP_SGT:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_SLT:
    // X s< (X + C)<nsw> if C > 0
    if (MatchBinaryAddToConst(RHS, LHS, C, SCEV::FlagNSW) &&
        C.isStrictlyPositive())
      return true;

    // (X + C)<nsw> s< X if C < 0
    if (MatchBinaryAddToConst(LHS, RHS, C, SCEV::FlagNSW) && C.isNegative())
      return true;
    break;
  }

  return false;
}
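// E.g., this proves "%x s< (%x + 2)<nsw>" directly from the nsw flag: the
// RHS matches (X + C)<nsw> with C = 2 > 0, so no range comparison is needed.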
bool ScalarEvolution::isKnownPredicateViaSplitting(ICmpInst::Predicate Pred,
                                                   const SCEV *LHS,
                                                   const SCEV *RHS) {
  if (Pred != ICmpInst::ICMP_ULT || ProvingSplitPredicate)
    return false;

  // Allowing arbitrary number of activations of isKnownPredicateViaSplitting on
  // the stack can result in exponential time complexity.
  SaveAndRestore<bool> Restore(ProvingSplitPredicate, true);

  // If L >= 0 then I `ult` L <=> I >= 0 && I `slt` L
  //
  // To prove L >= 0 we use isKnownNonNegative whereas to prove I >= 0 we use
  // isKnownPredicate.  isKnownPredicate is more powerful, but also more
  // expensive; and using isKnownNonNegative(RHS) is sufficient for most of the
  // interesting cases seen in practice.  We can consider "upgrading" L >= 0 to
  // use isKnownPredicate later if needed.
  return isKnownNonNegative(RHS) &&
         isKnownPredicate(CmpInst::ICMP_SGE, LHS, getZero(LHS->getType())) &&
         isKnownPredicate(CmpInst::ICMP_SLT, LHS, RHS);
}
bool ScalarEvolution::isImpliedViaGuard(BasicBlock *BB,
                                        ICmpInst::Predicate Pred,
                                        const SCEV *LHS, const SCEV *RHS) {
  // No need to even try if we know the module has no guards.
  if (!HasGuards)
    return false;

  return any_of(*BB, [&](Instruction &I) {
    using namespace llvm::PatternMatch;

    Value *Condition;
    return match(&I, m_Intrinsic<Intrinsic::experimental_guard>(
                         m_Value(Condition))) &&
           isImpliedCond(Pred, LHS, RHS, Condition, false);
  });
}
/// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is
/// protected by a conditional between LHS and RHS.  This is used to
/// eliminate casts.
bool
ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
                                             ICmpInst::Predicate Pred,
                                             const SCEV *LHS, const SCEV *RHS) {
  // Interpret a null as meaning no loop, where there is obviously no guard
  // (interprocedural conditions notwithstanding).
  if (!L) return true;

  if (VerifyIR)
    assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()) &&
           "This cannot be done on broken IR!");

  if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
    return true;

  BasicBlock *Latch = L->getLoopLatch();
  if (!Latch)
    return false;

  BranchInst *LoopContinuePredicate =
      dyn_cast<BranchInst>(Latch->getTerminator());
  if (LoopContinuePredicate && LoopContinuePredicate->isConditional() &&
      isImpliedCond(Pred, LHS, RHS,
                    LoopContinuePredicate->getCondition(),
                    LoopContinuePredicate->getSuccessor(0) != L->getHeader()))
    return true;

  // We don't want more than one activation of the following loops on the stack
  // -- that can lead to O(n!) time complexity.
  if (WalkingBEDominatingConds)
    return false;

  SaveAndRestore<bool> ClearOnExit(WalkingBEDominatingConds, true);

  // See if we can exploit a trip count to prove the predicate.
  const auto &BETakenInfo = getBackedgeTakenInfo(L);
  const SCEV *LatchBECount = BETakenInfo.getExact(Latch, this);
  if (LatchBECount != getCouldNotCompute()) {
    // We know that Latch branches back to the loop header exactly
    // LatchBECount times.  This means the backedge condition at Latch is
    // equivalent to "{0,+,1} u< LatchBECount".
    Type *Ty = LatchBECount->getType();
    auto NoWrapFlags = SCEV::NoWrapFlags(SCEV::FlagNUW | SCEV::FlagNW);
    const SCEV *LoopCounter =
        getAddRecExpr(getZero(Ty), getOne(Ty), L, NoWrapFlags);
    if (isImpliedCond(Pred, LHS, RHS, ICmpInst::ICMP_ULT, LoopCounter,
                      LatchBECount))
      return true;
  }

  // Check conditions due to any @llvm.assume intrinsics.
  for (auto &AssumeVH : AC.assumptions()) {
    if (!AssumeVH)
      continue;
    auto *CI = cast<CallInst>(AssumeVH);
    if (!DT.dominates(CI, Latch->getTerminator()))
      continue;

    if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false))
      return true;
  }

  // If the loop is not reachable from the entry block, we risk running into an
  // infinite loop as we walk up into the dom tree.  These loops do not matter
  // anyway, so we just return a conservative answer when we see them.
  if (!DT.isReachableFromEntry(L->getHeader()))
    return false;

  if (isImpliedViaGuard(Latch, Pred, LHS, RHS))
    return true;

  for (DomTreeNode *DTN = DT[Latch], *HeaderDTN = DT[L->getHeader()];
       DTN != HeaderDTN; DTN = DTN->getIDom()) {
    assert(DTN && "should reach the loop header before reaching the root!");

    BasicBlock *BB = DTN->getBlock();
    if (isImpliedViaGuard(BB, Pred, LHS, RHS))
      return true;

    BasicBlock *PBB = BB->getSinglePredecessor();
    if (!PBB)
      continue;

    BranchInst *ContinuePredicate = dyn_cast<BranchInst>(PBB->getTerminator());
    if (!ContinuePredicate || !ContinuePredicate->isConditional())
      continue;

    Value *Condition = ContinuePredicate->getCondition();

    // If we have an edge `E` within the loop body that dominates the only
    // latch, the condition guarding `E` also guards the backedge.  This
    // reasoning works only for loops with a single latch.

    BasicBlockEdge DominatingEdge(PBB, BB);
    if (DominatingEdge.isSingleEdge()) {
      // We're constructively (and conservatively) enumerating edges within the
      // loop body that dominate the latch.  The dominator tree better agree
      // with us on this:
      assert(DT.dominates(DominatingEdge, Latch) && "should be!");

      if (isImpliedCond(Pred, LHS, RHS, Condition,
                        BB != ContinuePredicate->getSuccessor(0)))
        return true;
    }
  }

  return false;
}
bool
ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L,
                                          ICmpInst::Predicate Pred,
                                          const SCEV *LHS, const SCEV *RHS) {
  // Interpret a null as meaning no loop, where there is obviously no guard
  // (interprocedural conditions notwithstanding).
  if (!L) return false;

  if (VerifyIR)
    assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()) &&
           "This cannot be done on broken IR!");

  // Both LHS and RHS must be available at loop entry.
  assert(isAvailableAtLoopEntry(LHS, L) &&
         "LHS is not available at Loop Entry");
  assert(isAvailableAtLoopEntry(RHS, L) &&
         "RHS is not available at Loop Entry");

  if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
    return true;

  // If we cannot prove strict comparison (e.g. a > b), maybe we can prove
  // the facts (a >= b && a != b) separately.  A typical situation is when the
  // non-strict comparison is known from ranges and non-equality is known from
  // dominating predicates.  If we are proving strict comparison, we always try
  // to prove non-equality and non-strict comparison separately.
  auto NonStrictPredicate = ICmpInst::getNonStrictPredicate(Pred);
  const bool ProvingStrictComparison = (Pred != NonStrictPredicate);
  bool ProvedNonStrictComparison = false;
  bool ProvedNonEquality = false;

  if (ProvingStrictComparison) {
    ProvedNonStrictComparison =
        isKnownViaNonRecursiveReasoning(NonStrictPredicate, LHS, RHS);
    ProvedNonEquality =
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_NE, LHS, RHS);
    if (ProvedNonStrictComparison && ProvedNonEquality)
      return true;
  }

  // Try to prove (Pred, LHS, RHS) using isImpliedViaGuard.
  auto ProveViaGuard = [&](BasicBlock *Block) {
    if (isImpliedViaGuard(Block, Pred, LHS, RHS))
      return true;
    if (ProvingStrictComparison) {
      if (!ProvedNonStrictComparison)
        ProvedNonStrictComparison =
            isImpliedViaGuard(Block, NonStrictPredicate, LHS, RHS);
      if (!ProvedNonEquality)
        ProvedNonEquality =
            isImpliedViaGuard(Block, ICmpInst::ICMP_NE, LHS, RHS);
      if (ProvedNonStrictComparison && ProvedNonEquality)
        return true;
    }
    return false;
  };

  // Try to prove (Pred, LHS, RHS) using isImpliedCond.
  auto ProveViaCond = [&](Value *Condition, bool Inverse) {
    if (isImpliedCond(Pred, LHS, RHS, Condition, Inverse))
      return true;
    if (ProvingStrictComparison) {
      if (!ProvedNonStrictComparison)
        ProvedNonStrictComparison =
            isImpliedCond(NonStrictPredicate, LHS, RHS, Condition, Inverse);
      if (!ProvedNonEquality)
        ProvedNonEquality =
            isImpliedCond(ICmpInst::ICMP_NE, LHS, RHS, Condition, Inverse);
      if (ProvedNonStrictComparison && ProvedNonEquality)
        return true;
    }
    return false;
  };

  // Starting at the loop predecessor, climb up the predecessor chain, as long
  // as there are predecessors that can be found that have unique successors
  // leading to the original header.
  for (std::pair<BasicBlock *, BasicBlock *>
         Pair(L->getLoopPredecessor(), L->getHeader());
       Pair.first;
       Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {

    if (ProveViaGuard(Pair.first))
      return true;

    BranchInst *LoopEntryPredicate =
        dyn_cast<BranchInst>(Pair.first->getTerminator());
    if (!LoopEntryPredicate ||
        LoopEntryPredicate->isUnconditional())
      continue;

    if (ProveViaCond(LoopEntryPredicate->getCondition(),
                     LoopEntryPredicate->getSuccessor(0) != Pair.second))
      return true;
  }

  // Check conditions due to any @llvm.assume intrinsics.
  for (auto &AssumeVH : AC.assumptions()) {
    if (!AssumeVH)
      continue;
    auto *CI = cast<CallInst>(AssumeVH);
    if (!DT.dominates(CI, L->getHeader()))
      continue;

    if (ProveViaCond(CI->getArgOperand(0), false))
      return true;
  }

  return false;
}
bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred,
                                    const SCEV *LHS, const SCEV *RHS,
                                    Value *FoundCondValue,
                                    bool Inverse) {
  if (!PendingLoopPredicates.insert(FoundCondValue).second)
    return false;

  auto ClearOnExit =
      make_scope_exit([&]() { PendingLoopPredicates.erase(FoundCondValue); });

  // Recursively handle And and Or conditions.
  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(FoundCondValue)) {
    if (BO->getOpcode() == Instruction::And) {
      if (!Inverse)
        return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
               isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
    } else if (BO->getOpcode() == Instruction::Or) {
      if (Inverse)
        return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
               isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
    }
  }

  ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue);
  if (!ICI) return false;

  // Now that we have found a conditional branch that dominates the loop or
  // controls the loop latch, check to see if it is the comparison we are
  // looking for.
  ICmpInst::Predicate FoundPred;
  if (Inverse)
    FoundPred = ICI->getInversePredicate();
  else
    FoundPred = ICI->getPredicate();

  const SCEV *FoundLHS = getSCEV(ICI->getOperand(0));
  const SCEV *FoundRHS = getSCEV(ICI->getOperand(1));

  return isImpliedCond(Pred, LHS, RHS, FoundPred, FoundLHS, FoundRHS);
}
bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS,
                                    const SCEV *RHS,
                                    ICmpInst::Predicate FoundPred,
                                    const SCEV *FoundLHS,
                                    const SCEV *FoundRHS) {
  // Balance the types.
  if (getTypeSizeInBits(LHS->getType()) <
      getTypeSizeInBits(FoundLHS->getType())) {
    if (CmpInst::isSigned(Pred)) {
      LHS = getSignExtendExpr(LHS, FoundLHS->getType());
      RHS = getSignExtendExpr(RHS, FoundLHS->getType());
    } else {
      LHS = getZeroExtendExpr(LHS, FoundLHS->getType());
      RHS = getZeroExtendExpr(RHS, FoundLHS->getType());
    }
  } else if (getTypeSizeInBits(LHS->getType()) >
             getTypeSizeInBits(FoundLHS->getType())) {
    if (CmpInst::isSigned(FoundPred)) {
      FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType());
      FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType());
    } else {
      FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType());
      FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType());
    }
  }

  // Canonicalize the query to match the way instcombine will have
  // canonicalized the comparison.
  if (SimplifyICmpOperands(Pred, LHS, RHS))
    if (LHS == RHS)
      return CmpInst::isTrueWhenEqual(Pred);
  if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS))
    if (FoundLHS == FoundRHS)
      return CmpInst::isFalseWhenEqual(FoundPred);

  // Check to see if we can make the LHS or RHS match.
  if (LHS == FoundRHS || RHS == FoundLHS) {
    if (isa<SCEVConstant>(RHS)) {
      std::swap(FoundLHS, FoundRHS);
      FoundPred = ICmpInst::getSwappedPredicate(FoundPred);
    } else {
      std::swap(LHS, RHS);
      Pred = ICmpInst::getSwappedPredicate(Pred);
    }
  }

  // Check whether the found predicate is the same as the desired predicate.
  if (FoundPred == Pred)
    return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS);

  // Check whether swapping the found predicate makes it the same as the
  // desired predicate.
  if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) {
    if (isa<SCEVConstant>(RHS))
      return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS);
    else
      return isImpliedCondOperands(ICmpInst::getSwappedPredicate(Pred),
                                   RHS, LHS, FoundLHS, FoundRHS);
  }

  // Unsigned comparison is the same as signed comparison when both the
  // operands are non-negative.
  if (CmpInst::isUnsigned(FoundPred) &&
      CmpInst::getSignedPredicate(FoundPred) == Pred &&
      isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS))
    return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS);

  // Check if we can make progress by sharpening ranges.
  if (FoundPred == ICmpInst::ICMP_NE &&
      (isa<SCEVConstant>(FoundLHS) || isa<SCEVConstant>(FoundRHS))) {

    const SCEVConstant *C = nullptr;
    const SCEV *V = nullptr;

    if (isa<SCEVConstant>(FoundLHS)) {
      C = cast<SCEVConstant>(FoundLHS);
      V = FoundRHS;
    } else {
      C = cast<SCEVConstant>(FoundRHS);
      V = FoundLHS;
    }

    // The guarding predicate tells us that C != V. If the known range
    // of V is [C, t), we can sharpen the range to [C + 1, t).  The
    // range we consider has to correspond to same signedness as the
    // predicate we're interested in folding.

    APInt Min = ICmpInst::isSigned(Pred) ?
        getSignedRangeMin(V) : getUnsignedRangeMin(V);

    if (Min == C->getAPInt()) {
      // Given (V >= Min && V != Min) we conclude V >= (Min + 1).
      // This is true even if (Min + 1) wraps around -- in case of
      // wraparound, (Min + 1) < Min, so (V >= Min => V >= (Min + 1)).

      APInt SharperMin = Min + 1;

      switch (Pred) {
        case ICmpInst::ICMP_SGE:
        case ICmpInst::ICMP_UGE:
          // We know V `Pred` SharperMin.  If this implies LHS `Pred`
          // RHS, we're done.
          if (isImpliedCondOperands(Pred, LHS, RHS, V,
                                    getConstant(SharperMin)))
            return true;
          LLVM_FALLTHROUGH;

        case ICmpInst::ICMP_SGT:
        case ICmpInst::ICMP_UGT:
          // We know from the range information that (V `Pred` Min ||
          // V == Min).  We know from the guarding condition that !(V
          // == Min).  This gives us
          //
          //       V `Pred` Min || V == Min && !(V == Min)
          //   =>  V `Pred` Min
          //
          // If V `Pred` Min implies LHS `Pred` RHS, we're done.

          if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(Min)))
            return true;
          LLVM_FALLTHROUGH;

        default:
          // No change.
          break;
      }
    }
  }

  // Check whether the actual condition is beyond sufficient.
  if (FoundPred == ICmpInst::ICMP_EQ)
    if (ICmpInst::isTrueWhenEqual(Pred))
      if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS))
        return true;
  if (Pred == ICmpInst::ICMP_NE)
    if (!ICmpInst::isTrueWhenEqual(FoundPred))
      if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS))
        return true;

  // Otherwise assume the worst.
  return false;
}
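// A worked instance of the range sharpening above (illustrative): if the
// found condition is "%v != 0" and the unsigned minimum of %v is 0, the range
// of %v sharpens from [0, t) to [1, t), so a desired predicate such as
// "%v u>= 1" becomes provable via the ICMP_UGE case.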
bool ScalarEvolution::splitBinaryAdd(const SCEV *Expr,
                                     const SCEV *&L, const SCEV *&R,
                                     SCEV::NoWrapFlags &Flags) {
  const auto *AE = dyn_cast<SCEVAddExpr>(Expr);
  if (!AE || AE->getNumOperands() != 2)
    return false;

  L = AE->getOperand(0);
  R = AE->getOperand(1);
  Flags = AE->getNoWrapFlags();
  return true;
}
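// Illustrative use: for Expr = (42 + %x)<nuw> this sets L to the constant 42,
// R to %x, and Flags to FlagNUW, and returns true; anything that is not a
// two-operand add expression is rejected.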
Optional<APInt> ScalarEvolution::computeConstantDifference(const SCEV *More,
                                                           const SCEV *Less) {
  // We avoid subtracting expressions here because this function is usually
  // fairly deep in the call stack (i.e. is called many times).

  if (isa<SCEVAddRecExpr>(Less) && isa<SCEVAddRecExpr>(More)) {
    const auto *LAR = cast<SCEVAddRecExpr>(Less);
    const auto *MAR = cast<SCEVAddRecExpr>(More);

    if (LAR->getLoop() != MAR->getLoop())
      return None;

    // We look at affine expressions only; not for correctness but to keep
    // getStepRecurrence cheap.
    if (!LAR->isAffine() || !MAR->isAffine())
      return None;

    if (LAR->getStepRecurrence(*this) != MAR->getStepRecurrence(*this))
      return None;

    Less = LAR->getStart();
    More = MAR->getStart();

    // fall through
  }

  if (isa<SCEVConstant>(Less) && isa<SCEVConstant>(More)) {
    const auto &M = cast<SCEVConstant>(More)->getAPInt();
    const auto &L = cast<SCEVConstant>(Less)->getAPInt();
    return M - L;
  }

  SCEV::NoWrapFlags Flags;
  const SCEV *LLess = nullptr, *RLess = nullptr;
  const SCEV *LMore = nullptr, *RMore = nullptr;
  const SCEVConstant *C1 = nullptr, *C2 = nullptr;
  // Compare (X + C1) vs X.
  if (splitBinaryAdd(Less, LLess, RLess, Flags))
    if ((C1 = dyn_cast<SCEVConstant>(LLess)))
      if (RLess == More)
        return -(C1->getAPInt());

  // Compare X vs (X + C2).
  if (splitBinaryAdd(More, LMore, RMore, Flags))
    if ((C2 = dyn_cast<SCEVConstant>(LMore)))
      if (RMore == Less)
        return C2->getAPInt();

  // Compare (X + C1) vs (X + C2).
  if (C1 && C2 && RLess == RMore)
    return C2->getAPInt() - C1->getAPInt();

  return None;
}
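// Illustrative results (X stands for an arbitrary SCEV; cases above, in
// order):
//   computeConstantDifference(X + 10, X + 4)    == 6
//   computeConstantDifference(X,      X + 4)    == -4
//   computeConstantDifference({6,+,1}, {2,+,1}) == 4  (same loop, same step)
// None is returned whenever the difference is not a provable constant.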
bool ScalarEvolution::isImpliedCondOperandsViaNoOverflow(
    ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS,
    const SCEV *FoundLHS, const SCEV *FoundRHS) {
  if (Pred != CmpInst::ICMP_SLT && Pred != CmpInst::ICMP_ULT)
    return false;

  const auto *AddRecLHS = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!AddRecLHS)
    return false;

  const auto *AddRecFoundLHS = dyn_cast<SCEVAddRecExpr>(FoundLHS);
  if (!AddRecFoundLHS)
    return false;

  // We'd like to let SCEV reason about control dependencies, so we constrain
  // both the inequalities to be about add recurrences on the same loop.  This
  // way we can use isLoopEntryGuardedByCond later.

  const Loop *L = AddRecFoundLHS->getLoop();
  if (L != AddRecLHS->getLoop())
    return false;

  //  FoundLHS u< FoundRHS u< -C =>  (FoundLHS + C) u< (FoundRHS + C) ... (1)
  //
  //  FoundLHS s< FoundRHS s< INT_MIN - C => (FoundLHS + C) s< (FoundRHS + C)
  //                                                                  ... (2)
  //
  // Informal proof for (2), assuming (1) [*]:
  //
  // We'll also assume (A s< B) <=> ((A + INT_MIN) u< (B + INT_MIN)) ... (3)[**]
  //
  // Then
  //
  //       FoundLHS s< FoundRHS s< INT_MIN - C
  //  <=>  (FoundLHS + INT_MIN) u< (FoundRHS + INT_MIN) u< -C   [ using (3) ]
  //  <=>  (FoundLHS + INT_MIN + C) u< (FoundRHS + INT_MIN + C) [ using (1) ]
  //  <=>  (FoundLHS + INT_MIN + C + INT_MIN) s<
  //                    (FoundRHS + INT_MIN + C + INT_MIN)      [ using (3) ]
  //  <=>  FoundLHS + C s< FoundRHS + C
  //
  // [*]: (1) can be proved by ruling out overflow.
  //
  // [**]: This can be proved by analyzing all the four possibilities:
  //    (A s< 0, B s< 0), (A s< 0, B s>= 0), (A s>= 0, B s< 0) and
  //    (A s>= 0, B s>= 0).
  //
  // Note:
  // Despite (2), "FoundRHS s< INT_MIN - C" does not mean that "FoundRHS + C"
  // will not sign underflow.  For instance, say FoundLHS = (i8 -128), FoundRHS
  // = (i8 -127) and C = (i8 -100).  Then INT_MIN - C = (i8 -28), and FoundRHS
  // s< (INT_MIN - C).  Lack of sign overflow / underflow in "FoundRHS + C" is
  // neither necessary nor sufficient to prove "(FoundLHS + C) s< (FoundRHS +
  // C)".

  Optional<APInt> LDiff = computeConstantDifference(LHS, FoundLHS);
  Optional<APInt> RDiff = computeConstantDifference(RHS, FoundRHS);
  if (!LDiff || !RDiff || *LDiff != *RDiff)
    return false;

  if (LDiff->isMinValue())
    return true;

  APInt FoundRHSLimit;

  if (Pred == CmpInst::ICMP_ULT) {
    FoundRHSLimit = -(*RDiff);
  } else {
    assert(Pred == CmpInst::ICMP_SLT && "Checked above!");
    FoundRHSLimit =
        APInt::getSignedMinValue(getTypeSizeInBits(RHS->getType())) - *RDiff;
  }

  // Try to prove (1) or (2), as needed.
  return isAvailableAtLoopEntry(FoundRHS, L) &&
         isLoopEntryGuardedByCond(L, Pred, FoundRHS,
                                  getConstant(FoundRHSLimit));
}
bool ScalarEvolution::isImpliedViaMerge(ICmpInst::Predicate Pred,
                                        const SCEV *LHS, const SCEV *RHS,
                                        const SCEV *FoundLHS,
                                        const SCEV *FoundRHS, unsigned Depth) {
  const PHINode *LPhi = nullptr, *RPhi = nullptr;

  auto ClearOnExit = make_scope_exit([&]() {
    if (LPhi) {
      bool Erased = PendingMerges.erase(LPhi);
      assert(Erased && "Failed to erase LPhi!");
      (void)Erased;
    }
    if (RPhi) {
      bool Erased = PendingMerges.erase(RPhi);
      assert(Erased && "Failed to erase RPhi!");
      (void)Erased;
    }
  });

  // Find the respective Phis and check that they are not pending yet.
  if (const SCEVUnknown *LU = dyn_cast<SCEVUnknown>(LHS))
    if (auto *Phi = dyn_cast<PHINode>(LU->getValue())) {
      if (!PendingMerges.insert(Phi).second)
        return false;
      LPhi = Phi;
    }

  if (const SCEVUnknown *RU = dyn_cast<SCEVUnknown>(RHS))
    if (auto *Phi = dyn_cast<PHINode>(RU->getValue())) {
      // If we detect a loop of Phi nodes being processed by this method, for
      // example:
      //
      //   %a = phi i32 [ %some1, %preheader ], [ %b, %latch ]
      //   %b = phi i32 [ %some2, %preheader ], [ %a, %latch ]
      //
      // we don't want to deal with a case that complex, so return conservative
      // answer false.
      if (!PendingMerges.insert(Phi).second)
        return false;
      RPhi = Phi;
    }

  // If none of LHS, RHS is a Phi, nothing to do here.
  if (!LPhi && !RPhi)
    return false;

  // If there is a SCEVUnknown Phi we are interested in, make it left.
  if (!LPhi) {
    std::swap(LHS, RHS);
    std::swap(FoundLHS, FoundRHS);
    std::swap(LPhi, RPhi);
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }

  assert(LPhi && "LPhi should definitely be a SCEVUnknown Phi!");
  const BasicBlock *LBB = LPhi->getParent();
  const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS);

  auto ProvedEasily = [&](const SCEV *S1, const SCEV *S2) {
    return isKnownViaNonRecursiveReasoning(Pred, S1, S2) ||
           isImpliedCondOperandsViaRanges(Pred, S1, S2, FoundLHS, FoundRHS) ||
           isImpliedViaOperations(Pred, S1, S2, FoundLHS, FoundRHS, Depth);
  };

  if (RPhi && RPhi->getParent() == LBB) {
    // Case one: RHS is also a SCEVUnknown Phi from the same basic block.
    // If we compare two Phis from the same block, and for each entry block
    // the predicate is true for incoming values from this block, then the
    // predicate is also true for the Phis.
    for (const BasicBlock *IncBB : predecessors(LBB)) {
      const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB));
      const SCEV *R = getSCEV(RPhi->getIncomingValueForBlock(IncBB));
      if (!ProvedEasily(L, R))
        return false;
    }
  } else if (RAR && RAR->getLoop()->getHeader() == LBB) {
    // Case two: RHS is also a Phi from the same basic block, and it is an
    // AddRec. It means that there is a loop which has both AddRec and Unknown
    // PHIs, for it we can compare incoming values of AddRec from above the
    // loop and latch with their respective incoming values of LPhi.
    // TODO: Generalize to handle loops with many inputs in a header.
    if (LPhi->getNumIncomingValues() != 2) return false;

    auto *RLoop = RAR->getLoop();
    auto *Predecessor = RLoop->getLoopPredecessor();
    assert(Predecessor && "Loop with AddRec with no predecessor?");
    const SCEV *L1 = getSCEV(LPhi->getIncomingValueForBlock(Predecessor));
    if (!ProvedEasily(L1, RAR->getStart()))
      return false;
    auto *Latch = RLoop->getLoopLatch();
    assert(Latch && "Loop with AddRec with no latch?");
    const SCEV *L2 = getSCEV(LPhi->getIncomingValueForBlock(Latch));
    if (!ProvedEasily(L2, RAR->getPostIncExpr(*this)))
      return false;
  } else {
    // In all other cases go over inputs of LHS and compare each of them to
    // RHS, the predicate is true for (LHS, RHS) if it is true for all such
    // pairs. At this point RHS is either a non-Phi, or it is a Phi from some
    // block different from LBB.
    for (const BasicBlock *IncBB : predecessors(LBB)) {
      // Check that RHS is available in this block.
      if (!dominates(RHS, IncBB))
        return false;
      const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB));
      if (!ProvedEasily(L, RHS))
        return false;
    }
  }
  return true;
}
bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred,
                                            const SCEV *LHS, const SCEV *RHS,
                                            const SCEV *FoundLHS,
                                            const SCEV *FoundRHS) {
  if (isImpliedCondOperandsViaRanges(Pred, LHS, RHS, FoundLHS, FoundRHS))
    return true;

  if (isImpliedCondOperandsViaNoOverflow(Pred, LHS, RHS, FoundLHS, FoundRHS))
    return true;

  return isImpliedCondOperandsHelper(Pred, LHS, RHS,
                                     FoundLHS, FoundRHS) ||
         // ~x < ~y --> x > y
         isImpliedCondOperandsHelper(Pred, LHS, RHS,
                                     getNotSCEV(FoundRHS),
                                     getNotSCEV(FoundLHS));
}
/// If Expr computes ~A, return A else return nullptr
static const SCEV *MatchNotExpr(const SCEV *Expr) {
  const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Expr);
  if (!Add || Add->getNumOperands() != 2 ||
      !Add->getOperand(0)->isAllOnesValue())
    return nullptr;

  const SCEVMulExpr *AddRHS = dyn_cast<SCEVMulExpr>(Add->getOperand(1));
  if (!AddRHS || AddRHS->getNumOperands() != 2 ||
      !AddRHS->getOperand(0)->isAllOnesValue())
    return nullptr;

  return AddRHS->getOperand(1);
}
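// This relies on the identity ~A == -1 - A, i.e. the add expression
// (-1 + (-1 * A)); e.g. (illustrative) for Expr = (-1 + (-1 * %a)) the
// function returns %a.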
/// Is MaybeMaxExpr an SMax or UMax of Candidate and some other values?
template<typename MaxExprType>
static bool IsMaxConsistingOf(const SCEV *MaybeMaxExpr,
                              const SCEV *Candidate) {
  const MaxExprType *MaxExpr = dyn_cast<MaxExprType>(MaybeMaxExpr);
  if (!MaxExpr) return false;

  return find(MaxExpr->operands(), Candidate) != MaxExpr->op_end();
}

/// Is MaybeMinExpr an SMin or UMin of Candidate and some other values?
template<typename MaxExprType>
static bool IsMinConsistingOf(ScalarEvolution &SE,
                              const SCEV *MaybeMinExpr,
                              const SCEV *Candidate) {
  const SCEV *MaybeMaxExpr = MatchNotExpr(MaybeMinExpr);
  if (!MaybeMaxExpr)
    return false;

  return IsMaxConsistingOf<MaxExprType>(MaybeMaxExpr, SE.getNotSCEV(Candidate));
}
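// Note on representation: at this point SCEV has no first-class min
// expression, so smin(A, B) is encoded as ~smax(~A, ~B). That is why
// IsMinConsistingOf matches the NOT of a max against the NOT of the
// candidate instead of looking for a dedicated min node.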
static bool IsKnownPredicateViaAddRecStart(ScalarEvolution &SE,
                                           ICmpInst::Predicate Pred,
                                           const SCEV *LHS, const SCEV *RHS) {
  // If both sides are affine addrecs for the same loop, with equal
  // steps, and we know the recurrences don't wrap, then we only
  // need to check the predicate on the starting values.

  if (!ICmpInst::isRelational(Pred))
    return false;

  const SCEVAddRecExpr *LAR = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!LAR)
    return false;
  const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS);
  if (!RAR)
    return false;
  if (LAR->getLoop() != RAR->getLoop())
    return false;
  if (!LAR->isAffine() || !RAR->isAffine())
    return false;

  if (LAR->getStepRecurrence(SE) != RAR->getStepRecurrence(SE))
    return false;

  SCEV::NoWrapFlags NW = ICmpInst::isSigned(Pred) ?
                             SCEV::FlagNSW : SCEV::FlagNUW;
  if (!LAR->getNoWrapFlags(NW) || !RAR->getNoWrapFlags(NW))
    return false;

  return SE.isKnownPredicate(Pred, LAR->getStart(), RAR->getStart());
}
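// Illustrative sketch: {4,+,2}<nuw> and {10,+,2}<nuw> over the same loop
// advance in lockstep and never wrap, so "{4,+,2} u< {10,+,2}" reduces to the
// start comparison "4 u< 10".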
/// Is LHS `Pred` RHS true by virtue of LHS or RHS being a Min or Max
/// expression?
static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE,
                                        ICmpInst::Predicate Pred,
                                        const SCEV *LHS, const SCEV *RHS) {
  switch (Pred) {
  default:
    return false;

  case ICmpInst::ICMP_SGE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_SLE:
    return
        // min(A, ...) <= A
        IsMinConsistingOf<SCEVSMaxExpr>(SE, LHS, RHS) ||
        // A <= max(A, ...)
        IsMaxConsistingOf<SCEVSMaxExpr>(RHS, LHS);

  case ICmpInst::ICMP_UGE:
    std::swap(LHS, RHS);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_ULE:
    return
        // min(A, ...) <= A
        IsMinConsistingOf<SCEVUMaxExpr>(SE, LHS, RHS) ||
        // A <= max(A, ...)
        IsMaxConsistingOf<SCEVUMaxExpr>(RHS, LHS);
  }

  llvm_unreachable("covered switch fell through?!");
}
bool ScalarEvolution::isImpliedViaOperations(ICmpInst::Predicate Pred,
                                             const SCEV *LHS, const SCEV *RHS,
                                             const SCEV *FoundLHS,
                                             const SCEV *FoundRHS,
                                             unsigned Depth) {
  assert(getTypeSizeInBits(LHS->getType()) ==
             getTypeSizeInBits(RHS->getType()) &&
         "LHS and RHS have different sizes?");
  assert(getTypeSizeInBits(FoundLHS->getType()) ==
             getTypeSizeInBits(FoundRHS->getType()) &&
         "FoundLHS and FoundRHS have different sizes?");
  // We want to avoid hurting the compile time with analysis of too big trees.
  if (Depth > MaxSCEVOperationsImplicationDepth)
    return false;
  // We only want to work with ICMP_SGT comparison so far.
  // TODO: Extend to ICMP_UGT?
  if (Pred == ICmpInst::ICMP_SLT) {
    Pred = ICmpInst::ICMP_SGT;
    std::swap(LHS, RHS);
    std::swap(FoundLHS, FoundRHS);
  }
  if (Pred != ICmpInst::ICMP_SGT)
    return false;

  auto GetOpFromSExt = [&](const SCEV *S) {
    if (auto *Ext = dyn_cast<SCEVSignExtendExpr>(S))
      return Ext->getOperand();
    // TODO: If S is a SCEVConstant then you can cheaply "strip" the sext off
    // the constant in some cases.
    return S;
  };

  // Acquire values from extensions.
  auto *OrigLHS = LHS;
  auto *OrigFoundLHS = FoundLHS;
  LHS = GetOpFromSExt(LHS);
  FoundLHS = GetOpFromSExt(FoundLHS);

  // Checks whether the SGT predicate can be proved trivially or using the
  // found context.
  auto IsSGTViaContext = [&](const SCEV *S1, const SCEV *S2) {
    return isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGT, S1, S2) ||
           isImpliedViaOperations(ICmpInst::ICMP_SGT, S1, S2, OrigFoundLHS,
                                  FoundRHS, Depth + 1);
  };

  if (auto *LHSAddExpr = dyn_cast<SCEVAddExpr>(LHS)) {
    // We want to avoid creation of any new non-constant SCEV. Since we are
    // going to compare the operands to RHS, we should be certain that we don't
    // need any size extensions for this. So let's decline all cases when the
    // sizes of types of LHS and RHS do not match.
    // TODO: Maybe try to get RHS from sext to catch more cases?
    if (getTypeSizeInBits(LHS->getType()) != getTypeSizeInBits(RHS->getType()))
      return false;

    // Should not overflow.
    if (!LHSAddExpr->hasNoSignedWrap())
      return false;

    auto *LL = LHSAddExpr->getOperand(0);
    auto *LR = LHSAddExpr->getOperand(1);
    auto *MinusOne = getNegativeSCEV(getOne(RHS->getType()));

    // Checks that S1 >= 0 && S2 > RHS, trivially or using the found context.
    auto IsSumGreaterThanRHS = [&](const SCEV *S1, const SCEV *S2) {
      return IsSGTViaContext(S1, MinusOne) && IsSGTViaContext(S2, RHS);
    };
    // Try to prove the following rule:
    // (LHS = LL + LR) && (LL >= 0) && (LR > RHS) => (LHS > RHS).
    // (LHS = LL + LR) && (LR >= 0) && (LL > RHS) => (LHS > RHS).
    if (IsSumGreaterThanRHS(LL, LR) || IsSumGreaterThanRHS(LR, LL))
      return true;
  } else if (auto *LHSUnknownExpr = dyn_cast<SCEVUnknown>(LHS)) {
    Value *LL, *LR;
    // FIXME: Once we have SDiv implemented, we can get rid of this matching.

    using namespace llvm::PatternMatch;

    if (match(LHSUnknownExpr->getValue(), m_SDiv(m_Value(LL), m_Value(LR)))) {
      // Rules for division.
      // We are going to perform some comparisons with Denominator and its
      // derivative expressions. In general case, creating a SCEV for it may
      // lead to a complex analysis of the entire graph, and in particular it
      // can request trip count recalculation for the same loop. This would
      // cache as SCEVCouldNotCompute to avoid the infinite recursion. To avoid
      // this, we only want to create SCEVs that are constants in this section.
      // So we bail if Denominator is not a constant.
      if (!isa<ConstantInt>(LR))
        return false;

      auto *Denominator = cast<SCEVConstant>(getSCEV(LR));

      // We want to make sure that LHS = FoundLHS / Denominator. If it is so,
      // then a SCEV for the numerator already exists and matches with FoundLHS.
      auto *Numerator = getExistingSCEV(LL);
      if (!Numerator || Numerator->getType() != FoundLHS->getType())
        return false;

      // Make sure that the numerator matches with FoundLHS and the denominator
      // is positive.
      if (!HasSameValue(Numerator, FoundLHS) || !isKnownPositive(Denominator))
        return false;

      auto *DTy = Denominator->getType();
      auto *FRHSTy = FoundRHS->getType();
      if (DTy->isPointerTy() != FRHSTy->isPointerTy())
        // One of types is a pointer and another one is not. We cannot extend
        // them properly to a wider type, so let us just reject this case.
        // TODO: Usage of getEffectiveSCEVType for DTy, FRHSTy etc should help
        // to avoid this check.
        return false;

      // Given that:
      // FoundLHS > FoundRHS, LHS = FoundLHS / Denominator, Denominator > 0.
      auto *WTy = getWiderType(DTy, FRHSTy);
      auto *DenominatorExt = getNoopOrSignExtend(Denominator, WTy);
      auto *FoundRHSExt = getNoopOrSignExtend(FoundRHS, WTy);

      // Try to prove the following rule:
      // (FoundRHS > Denominator - 2) && (RHS <= 0) => (LHS > RHS).
      // For example, given that FoundLHS > 2. It means that FoundLHS is at
      // least 3. If we divide it by Denominator < 4, we will have at least 1.
      auto *DenomMinusTwo = getMinusSCEV(DenominatorExt, getConstant(WTy, 2));
      if (isKnownNonPositive(RHS) &&
          IsSGTViaContext(FoundRHSExt, DenomMinusTwo))
        return true;

      // Try to prove the following rule:
      // (FoundRHS > -1 - Denominator) && (RHS < 0) => (LHS > RHS).
      // For example, given that FoundLHS > -3. Then FoundLHS is at least -2.
      // If we divide it by Denominator > 2, then:
      // 1. If FoundLHS is negative, then the result is 0.
      // 2. If FoundLHS is non-negative, then the result is non-negative.
      // Either way, the result is non-negative.
      auto *MinusOne = getNegativeSCEV(getOne(WTy));
      auto *NegDenomMinusOne = getMinusSCEV(MinusOne, DenominatorExt);
      if (isKnownNegative(RHS) &&
          IsSGTViaContext(FoundRHSExt, NegDenomMinusOne))
        return true;
    }
  }

  // If our expression contained SCEVUnknown Phis, and we split it down and now
  // need to prove something for them, try to prove the predicate for every
  // possible incoming value of those Phis.
  if (isImpliedViaMerge(Pred, OrigLHS, RHS, OrigFoundLHS, FoundRHS, Depth + 1))
    return true;

  return false;
}
bool
ScalarEvolution::isKnownViaNonRecursiveReasoning(ICmpInst::Predicate Pred,
                                                 const SCEV *LHS,
                                                 const SCEV *RHS) {
  return isKnownPredicateViaConstantRanges(Pred, LHS, RHS) ||
         IsKnownPredicateViaMinOrMax(*this, Pred, LHS, RHS) ||
         IsKnownPredicateViaAddRecStart(*this, Pred, LHS, RHS) ||
         isKnownPredicateViaNoOverflow(Pred, LHS, RHS);
}
bool
ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
                                             const SCEV *LHS, const SCEV *RHS,
                                             const SCEV *FoundLHS,
                                             const SCEV *FoundRHS) {
  switch (Pred) {
  default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
  case ICmpInst::ICMP_EQ:
  case ICmpInst::ICMP_NE:
    if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_SLT:
  case ICmpInst::ICMP_SLE:
    if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_SGT:
  case ICmpInst::ICMP_SGE:
    if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_ULT:
  case ICmpInst::ICMP_ULE:
    if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_UGT:
  case ICmpInst::ICMP_UGE:
    if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
        isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, RHS, FoundRHS))
      return true;
    break;
  }

  // Maybe it can be proved via operations?
  if (isImpliedViaOperations(Pred, LHS, RHS, FoundLHS, FoundRHS))
    return true;

  return false;
}
bool ScalarEvolution::isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred,
                                                     const SCEV *LHS,
                                                     const SCEV *RHS,
                                                     const SCEV *FoundLHS,
                                                     const SCEV *FoundRHS) {
  if (!isa<SCEVConstant>(RHS) || !isa<SCEVConstant>(FoundRHS))
    // The restriction on `FoundRHS` can be lifted easily -- it exists only to
    // reduce the compile time impact of this optimization.
    return false;

  Optional<APInt> Addend = computeConstantDifference(LHS, FoundLHS);
  if (!Addend)
    return false;

  const APInt &ConstFoundRHS = cast<SCEVConstant>(FoundRHS)->getAPInt();

  // `FoundLHSRange` is the range we know `FoundLHS` to be in by virtue of the
  // antecedent "`FoundLHS` `Pred` `FoundRHS`".
  ConstantRange FoundLHSRange =
      ConstantRange::makeAllowedICmpRegion(Pred, ConstFoundRHS);

  // Since `LHS` is `FoundLHS` + `Addend`, we can compute a range for `LHS`:
  ConstantRange LHSRange = FoundLHSRange.add(ConstantRange(*Addend));

  // We can also compute the range of values for `LHS` that satisfy the
  // consequent, "`LHS` `Pred` `RHS`":
  const APInt &ConstRHS = cast<SCEVConstant>(RHS)->getAPInt();
  ConstantRange SatisfyingLHSRange =
      ConstantRange::makeSatisfyingICmpRegion(Pred, ConstRHS);

  // The antecedent implies the consequent if every value of `LHS` that
  // satisfies the antecedent also satisfies the consequent.
  return SatisfyingLHSRange.contains(LHSRange);
}
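// Worked example (illustrative): from the antecedent "%x u< 10" with
// LHS = %x + 5, FoundLHSRange is [0, 10), so LHSRange is [5, 15); a
// consequent such as "LHS u< 20" yields SatisfyingLHSRange = [0, 20), which
// contains [5, 15), so the implication holds.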
bool ScalarEvolution::doesIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride,
                                         bool IsSigned, bool NoWrap) {
  assert(isKnownPositive(Stride) && "Positive stride expected!");

  if (NoWrap) return false;

  unsigned BitWidth = getTypeSizeInBits(RHS->getType());
  const SCEV *One = getOne(Stride->getType());

  if (IsSigned) {
    APInt MaxRHS = getSignedRangeMax(RHS);
    APInt MaxValue = APInt::getSignedMaxValue(BitWidth);
    APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One));

    // SMaxRHS + SMaxStrideMinusOne > SMaxValue => overflow!
    return (std::move(MaxValue) - MaxStrideMinusOne).slt(MaxRHS);
  }

  APInt MaxRHS = getUnsignedRangeMax(RHS);
  APInt MaxValue = APInt::getMaxValue(BitWidth);
  APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One));

  // UMaxRHS + UMaxStrideMinusOne > UMaxValue => overflow!
  return (std::move(MaxValue) - MaxStrideMinusOne).ult(MaxRHS);
}
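// Illustrative check: for an i8 IV compared against an RHS whose signed
// maximum is 127 with Stride == 4, SMaxValue - SMaxStrideMinusOne is
// 127 - 3 = 124 s< 127, so the IV may step past the signed maximum and
// possible overflow is reported.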
bool ScalarEvolution::doesIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride,
                                         bool IsSigned, bool NoWrap) {
  if (NoWrap) return false;

  unsigned BitWidth = getTypeSizeInBits(RHS->getType());
  const SCEV *One = getOne(Stride->getType());

  if (IsSigned) {
    APInt MinRHS = getSignedRangeMin(RHS);
    APInt MinValue = APInt::getSignedMinValue(BitWidth);
    APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One));

    // SMinRHS - SMaxStrideMinusOne < SMinValue => overflow!
    return (std::move(MinValue) + MaxStrideMinusOne).sgt(MinRHS);
  }

  APInt MinRHS = getUnsignedRangeMin(RHS);
  APInt MinValue = APInt::getMinValue(BitWidth);
  APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One));

  // UMinRHS - UMaxStrideMinusOne < UMinValue => overflow!
  return (std::move(MinValue) + MaxStrideMinusOne).ugt(MinRHS);
}
const SCEV *ScalarEvolution::computeBECount(const SCEV *Delta, const SCEV *Step,
                                            bool Equality) {
  const SCEV *One = getOne(Step->getType());
  Delta = Equality ? getAddExpr(Delta, Step)
                   : getAddExpr(Delta, getMinusSCEV(Step, One));
  return getUDivExpr(Delta, Step);
}
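// For example (illustrative): Delta = 7, Step = 2, Equality == false gives
// (7 + (2 - 1)) /u 2 == 4, i.e. the division rounds up so a partial final
// stride still counts as a taken backedge.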
const SCEV *ScalarEvolution::computeMaxBECountForLT(const SCEV *Start,
                                                    const SCEV *Stride,
                                                    const SCEV *End,
                                                    unsigned BitWidth,
                                                    bool IsSigned) {

  assert(!isKnownNonPositive(Stride) &&
         "Stride is expected strictly positive!");
  // Calculate the maximum backedge count based on the range of values
  // permitted by Start, End, and Stride.
  const SCEV *MaxBECount;
  APInt MinStart =
      IsSigned ? getSignedRangeMin(Start) : getUnsignedRangeMin(Start);

  APInt StrideForMaxBECount =
      IsSigned ? getSignedRangeMin(Stride) : getUnsignedRangeMin(Stride);

  // We already know that the stride is positive, so we paper over conservatism
  // in our range computation by forcing StrideForMaxBECount to be at least one.
  // In theory this is unnecessary, but we expect MaxBECount to be a
  // SCEVConstant, and (udiv <constant> 0) is not constant folded by SCEV (there
  // is nothing to constant fold it to).
  APInt One(BitWidth, 1, IsSigned);
  StrideForMaxBECount = APIntOps::smax(One, StrideForMaxBECount);

  APInt MaxValue = IsSigned ? APInt::getSignedMaxValue(BitWidth)
                            : APInt::getMaxValue(BitWidth);
  APInt Limit = MaxValue - (StrideForMaxBECount - 1);

  // Although End can be a MAX expression we estimate MaxEnd considering only
  // the case End = RHS of the loop termination condition. This is safe because
  // in the other case (End - Start) is zero, leading to a zero maximum backedge
  // taken count.
  APInt MaxEnd = IsSigned ? APIntOps::smin(getSignedRangeMax(End), Limit)
                          : APIntOps::umin(getUnsignedRangeMax(End), Limit);

  MaxBECount = computeBECount(getConstant(MaxEnd - MinStart) /* Delta */,
                              getConstant(StrideForMaxBECount) /* Step */,
                              false /* Equality */);

  return MaxBECount;
}
ScalarEvolution::ExitLimit
ScalarEvolution::howManyLessThans(const SCEV *LHS, const SCEV *RHS,
                                  const Loop *L, bool IsSigned,
                                  bool ControlsExit, bool AllowPredicates) {
  SmallPtrSet<const SCEVPredicate *, 4> Predicates;

  const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
  bool PredicatedIV = false;

  if (!IV && AllowPredicates) {
    // Try to make this an AddRec using runtime tests, in the first X
    // iterations of this loop, where X is the SCEV expression found by the
    // algorithm below.
    IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates);
    PredicatedIV = true;
  }

  // Avoid weird loops
  if (!IV || IV->getLoop() != L || !IV->isAffine())
    return getCouldNotCompute();

  bool NoWrap = ControlsExit &&
                IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW);

  const SCEV *Stride = IV->getStepRecurrence(*this);

  bool PositiveStride = isKnownPositive(Stride);

  // Avoid negative or zero stride values.
  if (!PositiveStride) {
    // We can compute the correct backedge taken count for loops with unknown
    // strides if we can prove that the loop is not an infinite loop with side
    // effects. Here's the loop structure we are trying to handle -
    //
    // i = start
    // do {
    //   A[i] = i;
    //   i += s;
    // } while (i < end);
    //
    // The backedge taken count for such loops is evaluated as -
    // (max(end, start + stride) - start - 1) /u stride
    //
    // The additional preconditions that we need to check to prove correctness
    // of the above formula are as follows -
    //
    // a) IV is either nuw or nsw depending upon signedness (indicated by the
    //    NoWrap flag).
    // b) the loop is single exit with no side effects.
    //
    // Precondition a) implies that if the stride is negative, this is a single
    // trip loop. The backedge taken count formula reduces to zero in this case.
    //
    // Precondition b) implies that the unknown stride cannot be zero, otherwise
    // we have UB.
    //
    // The positive stride case is the same as isKnownPositive(Stride) returning
    // true (original behavior of the function).
    //
    // We want to make sure that the stride is truly unknown as there are edge
    // cases where ScalarEvolution propagates no wrap flags to the
    // post-increment/decrement IV even though the increment/decrement operation
    // itself is wrapping. The computed backedge taken count may be wrong in
    // such cases. This is prevented by checking that the stride is not known to
    // be either positive or non-positive. For example, no wrap flags are
    // propagated to the post-increment IV of this loop with a trip count of 2 -
    //
    // unsigned char i;
    // for (i = 127; i < 128; i += 129)
    //   A[i] = i;
    //
    if (PredicatedIV || !NoWrap || isKnownNonPositive(Stride) ||
        !loopHasNoSideEffects(L))
      return getCouldNotCompute();
  } else if (!Stride->isOne() &&
             doesIVOverflowOnLT(RHS, Stride, IsSigned, NoWrap))
    // Avoid proven overflow cases: this will ensure that the backedge taken
    // count will not generate any unsigned overflow. Relaxed no-overflow
    // conditions exploit NoWrapFlags, allowing to optimize in presence of
    // undefined behaviors like the case of C language.
    return getCouldNotCompute();

  ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SLT
                                      : ICmpInst::ICMP_ULT;
  const SCEV *Start = IV->getStart();
  const SCEV *End = RHS;
  // When the RHS is not invariant, we do not know the end bound of the loop and
  // cannot calculate the ExactBECount needed by ExitLimit. However, we can
  // calculate the MaxBECount, given the start, stride and max value for the end
  // bound of the loop (RHS), and the fact that IV does not overflow (which is
  // checked above).
  if (!isLoopInvariant(RHS, L)) {
    const SCEV *MaxBECount = computeMaxBECountForLT(
        Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned);
    return ExitLimit(getCouldNotCompute() /* ExactNotTaken */, MaxBECount,
                     false /*MaxOrZero*/, Predicates);
  }
  // If the backedge is taken at least once, then it will be taken
  // (End-Start)/Stride times (rounded up to a multiple of Stride), where Start
  // is the LHS value of the less-than comparison the first time it is evaluated
  // and End is the RHS.
  const SCEV *BECountIfBackedgeTaken =
      computeBECount(getMinusSCEV(End, Start), Stride, false);
  // If the loop entry is guarded by the result of the backedge test of the
  // first loop iteration, then we know the backedge will be taken at least
  // once and so the backedge taken count is as above. If not then we use the
  // expression (max(End,Start)-Start)/Stride to describe the backedge count,
  // as if the backedge is taken at least once max(End,Start) is End and so the
  // result is as above, and if not max(End,Start) is Start so we get a backedge
  // count of zero.
  const SCEV *BECount;
  if (isLoopEntryGuardedByCond(L, Cond, getMinusSCEV(Start, Stride), RHS))
    BECount = BECountIfBackedgeTaken;
  else {
    End = IsSigned ? getSMaxExpr(RHS, Start) : getUMaxExpr(RHS, Start);
    BECount = computeBECount(getMinusSCEV(End, Start), Stride, false);
  }

  const SCEV *MaxBECount;
  bool MaxOrZero = false;
  if (isa<SCEVConstant>(BECount))
    MaxBECount = BECount;
  else if (isa<SCEVConstant>(BECountIfBackedgeTaken)) {
    // If we know exactly how many times the backedge will be taken if it's
    // taken at least once, then the backedge count will either be that or
    // zero.
    MaxBECount = BECountIfBackedgeTaken;
    MaxOrZero = true;
  } else {
    MaxBECount = computeMaxBECountForLT(
        Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned);
  }

  if (isa<SCEVCouldNotCompute>(MaxBECount) &&
      !isa<SCEVCouldNotCompute>(BECount))
    MaxBECount = getConstant(getUnsignedRangeMax(BECount));

  return ExitLimit(BECount, MaxBECount, MaxOrZero, Predicates);
}
ScalarEvolution::ExitLimit
ScalarEvolution::howManyGreaterThans(const SCEV *LHS, const SCEV *RHS,
                                     const Loop *L, bool IsSigned,
                                     bool ControlsExit, bool AllowPredicates) {
  SmallPtrSet<const SCEVPredicate *, 4> Predicates;
  // We handle only IV > Invariant
  if (!isLoopInvariant(RHS, L))
    return getCouldNotCompute();

  const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!IV && AllowPredicates)
    // Try to make this an AddRec using runtime tests, in the first X
    // iterations of this loop, where X is the SCEV expression found by the
    // algorithm below.
    IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates);

  // Avoid weird loops
  if (!IV || IV->getLoop() != L || !IV->isAffine())
    return getCouldNotCompute();

  bool NoWrap = ControlsExit &&
                IV->getNoWrapFlags(IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW);

  const SCEV *Stride = getNegativeSCEV(IV->getStepRecurrence(*this));

  // Avoid negative or zero stride values
  if (!isKnownPositive(Stride))
    return getCouldNotCompute();

  // Avoid proven overflow cases: this will ensure that the backedge taken count
  // will not generate any unsigned overflow. Relaxed no-overflow conditions
  // exploit NoWrapFlags, allowing to optimize in presence of undefined
  // behaviors like the case of C language.
  if (!Stride->isOne() && doesIVOverflowOnGT(RHS, Stride, IsSigned, NoWrap))
    return getCouldNotCompute();

  ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SGT
                                      : ICmpInst::ICMP_UGT;

  const SCEV *Start = IV->getStart();
  const SCEV *End = RHS;
  if (!isLoopEntryGuardedByCond(L, Cond, getAddExpr(Start, Stride), RHS))
    End = IsSigned ? getSMinExpr(RHS, Start) : getUMinExpr(RHS, Start);

  const SCEV *BECount = computeBECount(getMinusSCEV(Start, End), Stride, false);

  APInt MaxStart = IsSigned ? getSignedRangeMax(Start)
                            : getUnsignedRangeMax(Start);

  APInt MinStride = IsSigned ? getSignedRangeMin(Stride)
                             : getUnsignedRangeMin(Stride);

  unsigned BitWidth = getTypeSizeInBits(LHS->getType());
  APInt Limit = IsSigned ? APInt::getSignedMinValue(BitWidth) + (MinStride - 1)
                         : APInt::getMinValue(BitWidth) + (MinStride - 1);

  // Although End can be a MIN expression we estimate MinEnd considering only
  // the case End = RHS. This is safe because in the other case (Start - End)
  // is zero, leading to a zero maximum backedge taken count.
  APInt MinEnd =
      IsSigned ? APIntOps::smax(getSignedRangeMin(RHS), Limit)
               : APIntOps::umax(getUnsignedRangeMin(RHS), Limit);

  const SCEV *MaxBECount = getCouldNotCompute();
  if (isa<SCEVConstant>(BECount))
    MaxBECount = BECount;
  else
    MaxBECount = computeBECount(getConstant(MaxStart - MinEnd),
                                getConstant(MinStride), false);

  if (isa<SCEVCouldNotCompute>(MaxBECount))
    MaxBECount = BECount;

  return ExitLimit(BECount, MaxBECount, false, Predicates);
}
const SCEV *SCEVAddRecExpr::getNumIterationsInRange(const ConstantRange &Range,
                                                    ScalarEvolution &SE) const {
  if (Range.isFullSet())  // Infinite loop.
    return SE.getCouldNotCompute();

  // If the start is a non-zero constant, shift the range to simplify things.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
    if (!SC->getValue()->isZero()) {
      SmallVector<const SCEV *, 4> Operands(op_begin(), op_end());
      Operands[0] = SE.getZero(SC->getType());
      const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop(),
                                             getNoWrapFlags(FlagNW));
      if (const auto *ShiftedAddRec = dyn_cast<SCEVAddRecExpr>(Shifted))
        return ShiftedAddRec->getNumIterationsInRange(
            Range.subtract(SC->getAPInt()), SE);
      // This is strange and shouldn't happen.
      return SE.getCouldNotCompute();
    }

  // The only time we can solve this is when we have all constant indices.
  // Otherwise, we cannot determine the overflow conditions.
  if (any_of(operands(), [](const SCEV *Op) { return !isa<SCEVConstant>(Op); }))
    return SE.getCouldNotCompute();

  // Okay at this point we know that all elements of the chrec are constants and
  // that the start element is zero.

  // First check to see if the range contains zero.  If not, the first
  // iteration exits.
  unsigned BitWidth = SE.getTypeSizeInBits(getType());
  if (!Range.contains(APInt(BitWidth, 0)))
    return SE.getZero(getType());

  if (isAffine()) {
    // If this is an affine expression then we have this situation:
    //   Solve {0,+,A} in Range  ===  Ax in Range
    //
    // We know that zero is in the range.  If A is positive then we know that
    // the upper value of the range must be the first possible exit value.
    // If A is negative then the lower of the range is the last possible loop
    // value.  Also note that we already checked for a full range.
    APInt A = cast<SCEVConstant>(getOperand(1))->getAPInt();
    APInt End = A.sge(1) ? (Range.getUpper() - 1) : Range.getLower();

    // The exit value should be (End+A)/A.
    APInt ExitVal = (End + A).udiv(A);
    ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal);

    // Evaluate at the exit value.  If we really did fall out of the valid
    // range, then we computed our trip count, otherwise wrap around or other
    // things must have happened.
    ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE);
    if (Range.contains(Val->getValue()))
      return SE.getCouldNotCompute();  // Something strange happened

    // Ensure that the previous value is in the range.  This is a sanity check.
    assert(Range.contains(
           EvaluateConstantChrecAtConstant(this,
           ConstantInt::get(SE.getContext(), ExitVal - 1), SE)->getValue()) &&
           "Linear scev computation is off in a bad way!");
    return SE.getConstant(ExitValue);
  }

  if (isQuadratic()) {
    if (auto S = SolveQuadraticAddRecRange(this, Range, SE))
      return SE.getConstant(S.getValue());
  }

  return SE.getCouldNotCompute();
}
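// Illustrative affine case: solving {0,+,3} against the range [0, 10) gives
// A = 3 and End = 9, so ExitVal = (9 + 3) /u 3 = 4; evaluating the chrec at 4
// yields 12, which lies outside the range, confirming a trip count of 4.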
const SCEVAddRecExpr *
SCEVAddRecExpr::getPostIncExpr(ScalarEvolution &SE) const {
  assert(getNumOperands() > 1 && "AddRec with zero step?");
  // There is a temptation to just call getAddExpr(this, getStepRecurrence(SE)),
  // but in this case we cannot guarantee that the value returned will be an
  // AddRec because SCEV does not have a fixed point where it stops
  // simplification: it is legal to return ({rec1} + {rec2}). For example, it
  // may happen if we reach arithmetic depth limit while simplifying. So we
  // construct the returned value explicitly.
  SmallVector<const SCEV *, 3> Ops;
  // If this is {A,+,B,+,C,...,+,N}, then its step is {B,+,C,+,...,+,N}, and
  // (this + Step) is {A+B,+,B+C,+...,+,N}.
  for (unsigned i = 0, e = getNumOperands() - 1; i < e; ++i)
    Ops.push_back(SE.getAddExpr(getOperand(i), getOperand(i + 1)));
  // We know that the last operand is not a constant zero (otherwise it would
  // have been popped out earlier). This guarantees us that if the result has
  // the same last operand, then it will also not be popped out, meaning that
  // the returned value will be an AddRec.
  const SCEV *Last = getOperand(getNumOperands() - 1);
  assert(!Last->isZero() && "Recurrence with zero step?");
  Ops.push_back(Last);
  return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, getLoop(),
                                               SCEV::FlagAnyWrap));
}
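// For instance (illustrative): the post-increment form of {A,+,B,+,C} is
// {A+B,+,B+C,+,C} -- adjacent operands are summed and the last operand is
// carried over unchanged, which keeps the result an AddRec by construction.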
// Return true when S contains at least an undef value.
static inline bool containsUndefs(const SCEV *S) {
  return SCEVExprContains(S, [](const SCEV *S) {
    if (const auto *SU = dyn_cast<SCEVUnknown>(S))
      return isa<UndefValue>(SU->getValue());
    return false;
  });
}

namespace {

// Collect all steps of SCEV expressions.
struct SCEVCollectStrides {
  ScalarEvolution &SE;
  SmallVectorImpl<const SCEV *> &Strides;

  SCEVCollectStrides(ScalarEvolution &SE, SmallVectorImpl<const SCEV *> &S)
      : SE(SE), Strides(S) {}

  bool follow(const SCEV *S) {
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
      Strides.push_back(AR->getStepRecurrence(SE));
    return true;
  }

  bool isDone() const { return false; }
};

// Collect all SCEVUnknown and SCEVMulExpr expressions.
struct SCEVCollectTerms {
  SmallVectorImpl<const SCEV *> &Terms;

  SCEVCollectTerms(SmallVectorImpl<const SCEV *> &T) : Terms(T) {}

  bool follow(const SCEV *S) {
    if (isa<SCEVUnknown>(S) || isa<SCEVMulExpr>(S) ||
        isa<SCEVSignExtendExpr>(S)) {
      if (!containsUndefs(S))
        Terms.push_back(S);

      // Stop recursion: once we collected a term, do not walk its operands.
      return false;
    }

    // Keep looking.
    return true;
  }

  bool isDone() const { return false; }
};

// Check if a SCEV contains an AddRecExpr.
struct SCEVHasAddRec {
  bool &ContainsAddRec;

  SCEVHasAddRec(bool &ContainsAddRec) : ContainsAddRec(ContainsAddRec) {
    ContainsAddRec = false;
  }

  bool follow(const SCEV *S) {
    if (isa<SCEVAddRecExpr>(S)) {
      ContainsAddRec = true;

      // Stop recursion: once we found an AddRec, do not walk its operands.
      return false;
    }

    // Keep looking.
    return true;
  }

  bool isDone() const { return false; }
};

// Find factors that are multiplied with an expression that (possibly as a
// subexpression) contains an AddRecExpr. In the expression:
//
//   8 * (100 + %p * %q * (%a + {0, +, 1}_loop))
//
// "%p * %q" are factors multiplied by the expression "(%a + {0, +, 1}_loop)"
// that contains the AddRec {0, +, 1}_loop. %p * %q are likely to be array size
// parameters as they form a product with an induction variable.
//
// This collector expects all array size parameters to be in the same MulExpr.
// It might be necessary to later add support for collecting parameters that
// are spread over different nested MulExpr.
struct SCEVCollectAddRecMultiplies {
  SmallVectorImpl<const SCEV *> &Terms;
  ScalarEvolution &SE;

  SCEVCollectAddRecMultiplies(SmallVectorImpl<const SCEV *> &T,
                              ScalarEvolution &SE)
      : Terms(T), SE(SE) {}

  bool follow(const SCEV *S) {
    if (auto *Mul = dyn_cast<SCEVMulExpr>(S)) {
      bool HasAddRec = false;
      SmallVector<const SCEV *, 0> Operands;
      for (auto Op : Mul->operands()) {
        const SCEVUnknown *Unknown = dyn_cast<SCEVUnknown>(Op);
        if (Unknown && !isa<CallInst>(Unknown->getValue())) {
          Operands.push_back(Op);
        } else if (Unknown) {
          HasAddRec = true;
        } else {
          bool ContainsAddRec;
          SCEVHasAddRec ContainsAddRecVisitor(ContainsAddRec);
          visitAll(Op, ContainsAddRecVisitor);
          HasAddRec |= ContainsAddRec;
        }
      }
      if (Operands.size() == 0)
        return true;

      if (!HasAddRec)
        return false;

      Terms.push_back(SE.getMulExpr(Operands));
      // Stop recursion: once we collected a term, do not walk its operands.
      return false;
    }

    // Keep looking.
    return true;
  }

  bool isDone() const { return false; }
};

} // end anonymous namespace
/// Find parametric terms in this SCEVAddRecExpr. We first look for parameters
/// in two places:
///   1) The strides of AddRec expressions.
///   2) Unknowns that are multiplied with AddRec expressions.
void ScalarEvolution::collectParametricTerms(const SCEV *Expr,
                                             SmallVectorImpl<const SCEV *> &Terms) {
  SmallVector<const SCEV *, 4> Strides;
  SCEVCollectStrides StrideCollector(*this, Strides);
  visitAll(Expr, StrideCollector);

  LLVM_DEBUG({
    dbgs() << "Strides:\n";
    for (const SCEV *S : Strides)
      dbgs() << *S << "\n";
  });

  for (const SCEV *S : Strides) {
    SCEVCollectTerms TermCollector(Terms);
    visitAll(S, TermCollector);
  }

  LLVM_DEBUG({
    dbgs() << "Terms:\n";
    for (const SCEV *T : Terms)
      dbgs() << *T << "\n";
  });

  SCEVCollectAddRecMultiplies MulCollector(Terms, *this);
  visitAll(Expr, MulCollector);
}
static bool findArrayDimensionsRec(ScalarEvolution &SE,
                                   SmallVectorImpl<const SCEV *> &Terms,
                                   SmallVectorImpl<const SCEV *> &Sizes) {
  int Last = Terms.size() - 1;
  const SCEV *Step = Terms[Last];

  // End of recursion.
  if (Last == 0) {
    if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Step)) {
      SmallVector<const SCEV *, 2> Qs;
      for (const SCEV *Op : M->operands())
        if (!isa<SCEVConstant>(Op))
          Qs.push_back(Op);

      Step = SE.getMulExpr(Qs);
    }

    Sizes.push_back(Step);
    return true;
  }

  for (const SCEV *&Term : Terms) {
    // Normalize the terms before the next call to findArrayDimensionsRec.
    const SCEV *Q, *R;
    SCEVDivision::divide(SE, Term, Step, &Q, &R);

    // Bail out when GCD does not evenly divide one of the terms.
    if (!R->isZero())
      return false;

    Term = Q;
  }

  // Remove all SCEVConstants.
  Terms.erase(
      remove_if(Terms, [](const SCEV *E) { return isa<SCEVConstant>(E); }),
      Terms.end());

  if (Terms.size() > 0)
    if (!findArrayDimensionsRec(SE, Terms, Sizes))
      return false;

  Sizes.push_back(Step);
  return true;
}
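
// Worked example for findArrayDimensionsRec: with Terms = {(%m * %o), %o}
// (already sorted with larger products first), Step is the last term %o.
// Dividing both terms by %o leaves {%m, 1}; the constant 1 is dropped, and
// the recursive call on {%m} hits the end-of-recursion case and records %m.
// Unwinding then records %o, so Sizes ends up as {%m, %o}.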
// Returns true when one of the SCEVs of Terms contains a SCEVUnknown parameter.
static inline bool containsParameters(SmallVectorImpl<const SCEV *> &Terms) {
  for (const SCEV *T : Terms)
    if (SCEVExprContains(T, isa<SCEVUnknown, const SCEV *>))
      return true;

  return false;
}
// Return the number of product terms in S.
static inline int numberOfTerms(const SCEV *S) {
  if (const SCEVMulExpr *Expr = dyn_cast<SCEVMulExpr>(S))
    return Expr->getNumOperands();
  return 1;
}
static const SCEV *removeConstantFactors(ScalarEvolution &SE, const SCEV *T) {
  if (isa<SCEVConstant>(T))
    return nullptr;

  if (isa<SCEVUnknown>(T))
    return T;

  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(T)) {
    SmallVector<const SCEV *, 2> Factors;
    for (const SCEV *Op : M->operands())
      if (!isa<SCEVConstant>(Op))
        Factors.push_back(Op);

    return SE.getMulExpr(Factors);
  }

  return T;
}
/// Return the size of an element read or written by Inst.
const SCEV *ScalarEvolution::getElementSize(Instruction *Inst) {
  Type *Ty;
  if (StoreInst *Store = dyn_cast<StoreInst>(Inst))
    Ty = Store->getValueOperand()->getType();
  else if (LoadInst *Load = dyn_cast<LoadInst>(Inst))
    Ty = Load->getType();
  else
    return nullptr;

  Type *ETy = getEffectiveSCEVType(PointerType::getUnqual(Ty));
  return getSizeOfExpr(ETy, Ty);
}
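
// For example, for "store double %v, double* %p" the stored type is double,
// so (with the usual data layout) this returns a constant sizeof(double) == 8
// expression in the effective pointer-width integer type.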
void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms,
                                          SmallVectorImpl<const SCEV *> &Sizes,
                                          const SCEV *ElementSize) {
  if (Terms.size() < 1 || !ElementSize)
    return;

  // Early return when Terms do not contain parameters: we do not delinearize
  // non parametric SCEVs.
  if (!containsParameters(Terms))
    return;

  LLVM_DEBUG({
    dbgs() << "Terms:\n";
    for (const SCEV *T : Terms)
      dbgs() << *T << "\n";
  });

  // Remove duplicates.
  array_pod_sort(Terms.begin(), Terms.end());
  Terms.erase(std::unique(Terms.begin(), Terms.end()), Terms.end());

  // Put larger terms first.
  llvm::sort(Terms, [](const SCEV *LHS, const SCEV *RHS) {
    return numberOfTerms(LHS) > numberOfTerms(RHS);
  });

  // Try to divide all terms by the element size. If term is not divisible by
  // element size, proceed with the original term.
  for (const SCEV *&Term : Terms) {
    const SCEV *Q, *R;
    SCEVDivision::divide(*this, Term, ElementSize, &Q, &R);
    if (!Q->isZero())
      Term = Q;
  }

  SmallVector<const SCEV *, 4> NewTerms;

  // Remove constant factors.
  for (const SCEV *T : Terms)
    if (const SCEV *NewT = removeConstantFactors(*this, T))
      NewTerms.push_back(NewT);

  LLVM_DEBUG({
    dbgs() << "Terms after sorting:\n";
    for (const SCEV *T : NewTerms)
      dbgs() << *T << "\n";
  });

  if (NewTerms.empty() || !findArrayDimensionsRec(*this, NewTerms, Sizes)) {
    Sizes.clear();
    return;
  }

  // The last element to be pushed into Sizes is the size of an element.
  Sizes.push_back(ElementSize);

  LLVM_DEBUG({
    dbgs() << "Sizes:\n";
    for (const SCEV *S : Sizes)
      dbgs() << *S << "\n";
  });
}
void ScalarEvolution::computeAccessFunctions(
    const SCEV *Expr, SmallVectorImpl<const SCEV *> &Subscripts,
    SmallVectorImpl<const SCEV *> &Sizes) {
  // Early exit in case this SCEV is not an affine multivariate function.
  if (Sizes.empty())
    return;

  if (auto *AR = dyn_cast<SCEVAddRecExpr>(Expr))
    if (!AR->isAffine())
      return;

  const SCEV *Res = Expr;
  int Last = Sizes.size() - 1;
  for (int i = Last; i >= 0; i--) {
    const SCEV *Q, *R;
    SCEVDivision::divide(*this, Res, Sizes[i], &Q, &R);

    LLVM_DEBUG({
      dbgs() << "Res: " << *Res << "\n";
      dbgs() << "Sizes[i]: " << *Sizes[i] << "\n";
      dbgs() << "Res divided by Sizes[i]:\n";
      dbgs() << "Quotient: " << *Q << "\n";
      dbgs() << "Remainder: " << *R << "\n";
    });

    Res = Q;

    // Do not record the last subscript corresponding to the size of elements
    // in the array.
    if (i == Last) {

      // Bail out if the remainder is too complex.
      if (isa<SCEVAddRecExpr>(R)) {
        Subscripts.clear();
        Sizes.clear();
        return;
      }

      continue;
    }

    // Record the access function for the current subscript.
    Subscripts.push_back(R);
  }

  // Also push in last position the remainder of the last division: it will be
  // the access function of the innermost dimension.
  Subscripts.push_back(Res);

  std::reverse(Subscripts.begin(), Subscripts.end());

  LLVM_DEBUG({
    dbgs() << "Subscripts:\n";
    for (const SCEV *S : Subscripts)
      dbgs() << *S << "\n";
  });
}
/// Splits the SCEV into two vectors of SCEVs representing the subscripts and
/// sizes of an array access. Returns the remainder of the delinearization that
/// is the offset start of the array. The SCEV->delinearize algorithm computes
/// the multiples of SCEV coefficients: that is a pattern matching of sub
/// expressions in the stride and base of a SCEV corresponding to the
/// computation of a GCD (greatest common divisor) of base and stride. When
/// SCEV->delinearize fails, it returns the SCEV unchanged.
///
/// For example: when analyzing the memory access A[i][j][k] in this loop nest
///
///  void foo(long n, long m, long o, double A[n][m][o]) {
///
///    for (long i = 0; i < n; i++)
///      for (long j = 0; j < m; j++)
///        for (long k = 0; k < o; k++)
///          A[i][j][k] = 1.0;
///  }
///
/// the delinearization input is the following AddRec SCEV:
///
///  AddRec: {{{%A,+,(8 * %m * %o)}<%for.i>,+,(8 * %o)}<%for.j>,+,8}<%for.k>
///
/// From this SCEV, we are able to say that the base offset of the access is %A
/// because it appears as an offset that does not divide any of the strides in
/// the loops:
///
///  CHECK: Base offset: %A
///
/// and then SCEV->delinearize determines the size of some of the dimensions of
/// the array as these are the multiples by which the strides are happening:
///
///  CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of sizeof(double) bytes.
///
/// Note that the outermost dimension remains of UnknownSize because there are
/// no strides that would help identifying the size of the last dimension: when
/// the array has been statically allocated, one could compute the size of that
/// dimension by dividing the overall size of the array by the size of the
/// known dimensions: %m * %o * 8.
///
/// Finally delinearize provides the access functions for the array reference
/// that does correspond to A[i][j][k] of the above C testcase:
///
///  CHECK: ArrayRef[{0,+,1}<%for.i>][{0,+,1}<%for.j>][{0,+,1}<%for.k>]
///
/// The testcases are checking the output of a function pass:
/// DelinearizationPass that walks through all loads and stores of a function
/// asking for the SCEV of the memory access with respect to all enclosing
/// loops, calling SCEV->delinearize on that and printing the results.
void ScalarEvolution::delinearize(const SCEV *Expr,
                                  SmallVectorImpl<const SCEV *> &Subscripts,
                                  SmallVectorImpl<const SCEV *> &Sizes,
                                  const SCEV *ElementSize) {
  // First step: collect parametric terms.
  SmallVector<const SCEV *, 4> Terms;
  collectParametricTerms(Expr, Terms);

  if (Terms.empty())
    return;

  // Second step: find subscript sizes.
  findArrayDimensions(Terms, Sizes, ElementSize);

  if (Sizes.empty())
    return;

  // Third step: compute the access functions for each subscript.
  computeAccessFunctions(Expr, Subscripts, Sizes);

  if (Subscripts.empty())
    return;

  LLVM_DEBUG({
    dbgs() << "succeeded to delinearize " << *Expr << "\n";
    dbgs() << "ArrayDecl[UnknownSize]";
    for (const SCEV *S : Sizes)
      dbgs() << "[" << *S << "]";

    dbgs() << "\nArrayRef";
    for (const SCEV *S : Subscripts)
      dbgs() << "[" << *S << "]";
    dbgs() << "\n";
  });
}
//===----------------------------------------------------------------------===//
//                   SCEVCallbackVH Class Implementation
//===----------------------------------------------------------------------===//

void ScalarEvolution::SCEVCallbackVH::deleted() {
  assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
  if (PHINode *PN = dyn_cast<PHINode>(getValPtr()))
    SE->ConstantEvolutionLoopExitValue.erase(PN);
  SE->eraseValueFromMap(getValPtr());
  // this now dangles!
}
void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) {
  assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");

  // Forget all the expressions associated with users of the old value,
  // so that future queries will recompute the expressions using the new
  // value.
  Value *Old = getValPtr();
  SmallVector<User *, 16> Worklist(Old->user_begin(), Old->user_end());
  SmallPtrSet<User *, 8> Visited;
  while (!Worklist.empty()) {
    User *U = Worklist.pop_back_val();
    // Deleting the Old value will cause this to dangle. Postpone
    // that until everything else is done.
    if (U == Old)
      continue;
    if (!Visited.insert(U).second)
      continue;
    if (PHINode *PN = dyn_cast<PHINode>(U))
      SE->ConstantEvolutionLoopExitValue.erase(PN);
    SE->eraseValueFromMap(U);
    Worklist.insert(Worklist.end(), U->user_begin(), U->user_end());
  }
  // Delete the Old value.
  if (PHINode *PN = dyn_cast<PHINode>(Old))
    SE->ConstantEvolutionLoopExitValue.erase(PN);
  SE->eraseValueFromMap(Old);
  // this now dangles!
}

ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se)
    : CallbackVH(V), SE(se) {}
//===----------------------------------------------------------------------===//
//                   ScalarEvolution Class Implementation
//===----------------------------------------------------------------------===//

ScalarEvolution::ScalarEvolution(Function &F, TargetLibraryInfo &TLI,
                                 AssumptionCache &AC, DominatorTree &DT,
                                 LoopInfo &LI)
    : F(F), TLI(TLI), AC(AC), DT(DT), LI(LI),
      CouldNotCompute(new SCEVCouldNotCompute()), ValuesAtScopes(64),
      LoopDispositions(64), BlockDispositions(64) {
  // To use guards for proving predicates, we need to scan every instruction in
  // relevant basic blocks, and not just terminators. Doing this is a waste of
  // time if the IR does not actually contain any calls to
  // @llvm.experimental.guard, so do a quick check and remember this beforehand.
  //
  // This pessimizes the case where a pass that preserves ScalarEvolution wants
  // to _add_ guards to the module when there weren't any before, and wants
  // ScalarEvolution to optimize based on those guards. For now we prefer to be
  // efficient in lieu of being smart in that rather obscure case.

  auto *GuardDecl = F.getParent()->getFunction(
      Intrinsic::getName(Intrinsic::experimental_guard));
  HasGuards = GuardDecl && !GuardDecl->use_empty();
}
ScalarEvolution::ScalarEvolution(ScalarEvolution &&Arg)
    : F(Arg.F), HasGuards(Arg.HasGuards), TLI(Arg.TLI), AC(Arg.AC), DT(Arg.DT),
      LI(Arg.LI), CouldNotCompute(std::move(Arg.CouldNotCompute)),
      ValueExprMap(std::move(Arg.ValueExprMap)),
      PendingLoopPredicates(std::move(Arg.PendingLoopPredicates)),
      PendingPhiRanges(std::move(Arg.PendingPhiRanges)),
      PendingMerges(std::move(Arg.PendingMerges)),
      MinTrailingZerosCache(std::move(Arg.MinTrailingZerosCache)),
      BackedgeTakenCounts(std::move(Arg.BackedgeTakenCounts)),
      PredicatedBackedgeTakenCounts(
          std::move(Arg.PredicatedBackedgeTakenCounts)),
      ConstantEvolutionLoopExitValue(
          std::move(Arg.ConstantEvolutionLoopExitValue)),
      ValuesAtScopes(std::move(Arg.ValuesAtScopes)),
      LoopDispositions(std::move(Arg.LoopDispositions)),
      LoopPropertiesCache(std::move(Arg.LoopPropertiesCache)),
      BlockDispositions(std::move(Arg.BlockDispositions)),
      UnsignedRanges(std::move(Arg.UnsignedRanges)),
      SignedRanges(std::move(Arg.SignedRanges)),
      UniqueSCEVs(std::move(Arg.UniqueSCEVs)),
      UniquePreds(std::move(Arg.UniquePreds)),
      SCEVAllocator(std::move(Arg.SCEVAllocator)),
      LoopUsers(std::move(Arg.LoopUsers)),
      PredicatedSCEVRewrites(std::move(Arg.PredicatedSCEVRewrites)),
      FirstUnknown(Arg.FirstUnknown) {
  Arg.FirstUnknown = nullptr;
}
ScalarEvolution::~ScalarEvolution() {
  // Iterate through all the SCEVUnknown instances and call their
  // destructors, so that they release their references to their values.
  for (SCEVUnknown *U = FirstUnknown; U;) {
    SCEVUnknown *Tmp = U;
    U = U->Next;
    Tmp->~SCEVUnknown();
  }
  FirstUnknown = nullptr;

  ExprValueMap.clear();
  ValueExprMap.clear();
  HasRecMap.clear();

  // Free any extra memory created for ExitNotTakenInfo in the unlikely event
  // that a loop had multiple computable exits.
  for (auto &BTCI : BackedgeTakenCounts)
    BTCI.second.clear();
  for (auto &BTCI : PredicatedBackedgeTakenCounts)
    BTCI.second.clear();

  assert(PendingLoopPredicates.empty() && "isImpliedCond garbage");
  assert(PendingPhiRanges.empty() && "getRangeRef garbage");
  assert(PendingMerges.empty() && "isImpliedViaMerge garbage");
  assert(!WalkingBEDominatingConds && "isLoopBackedgeGuardedByCond garbage!");
  assert(!ProvingSplitPredicate && "ProvingSplitPredicate garbage!");
}
bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) {
  return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L));
}
static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE,
                          const Loop *L) {
  // Print all inner loops first
  for (Loop *I : *L)
    PrintLoopInfo(OS, SE, I);

  OS << "Loop ";
  L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
  OS << ": ";

  SmallVector<BasicBlock *, 8> ExitBlocks;
  L->getExitBlocks(ExitBlocks);
  if (ExitBlocks.size() != 1)
    OS << "<multiple exits> ";

  if (SE->hasLoopInvariantBackedgeTakenCount(L))
    OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L);
  else
    OS << "Unpredictable backedge-taken count. ";
  OS << "\n";

  OS << "Loop ";
  L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
  OS << ": ";

  if (!isa<SCEVCouldNotCompute>(SE->getMaxBackedgeTakenCount(L))) {
    OS << "max backedge-taken count is " << *SE->getMaxBackedgeTakenCount(L);
    if (SE->isBackedgeTakenCountMaxOrZero(L))
      OS << ", actual taken count either this or zero.";
  } else {
    OS << "Unpredictable max backedge-taken count. ";
  }
  OS << "\n";

  OS << "Loop ";
  L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
  OS << ": ";

  SCEVUnionPredicate Pred;
  auto PBT = SE->getPredicatedBackedgeTakenCount(L, Pred);
  if (!isa<SCEVCouldNotCompute>(PBT)) {
    OS << "Predicated backedge-taken count is " << *PBT << "\n";
    OS << " Predicates:\n";
    Pred.print(OS, 4);
  } else {
    OS << "Unpredictable predicated backedge-taken count. ";
    OS << "\n";
  }

  if (SE->hasLoopInvariantBackedgeTakenCount(L)) {
    OS << "Loop ";
    L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
    OS << ": ";
    OS << "Trip multiple is " << SE->getSmallConstantTripMultiple(L) << "\n";
  }
}
static StringRef loopDispositionToStr(ScalarEvolution::LoopDisposition LD) {
  switch (LD) {
  case ScalarEvolution::LoopVariant:
    return "Variant";
  case ScalarEvolution::LoopInvariant:
    return "Invariant";
  case ScalarEvolution::LoopComputable:
    return "Computable";
  }
  llvm_unreachable("Unknown ScalarEvolution::LoopDisposition kind!");
}
void ScalarEvolution::print(raw_ostream &OS) const {
  // ScalarEvolution's implementation of the print method is to print
  // out SCEV values of all instructions that are interesting. Doing
  // this potentially causes it to create new SCEV objects though,
  // which technically conflicts with the const qualifier. This isn't
  // observable from outside the class though, so casting away the
  // const isn't dangerous.
  ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);

  OS << "Classifying expressions for: ";
  F.printAsOperand(OS, /*PrintType=*/false);
  OS << "\n";
  for (Instruction &I : instructions(F))
    if (isSCEVable(I.getType()) && !isa<CmpInst>(I)) {
      OS << I << '\n';
      OS << "  -->  ";
      const SCEV *SV = SE.getSCEV(&I);
      SV->print(OS);
      if (!isa<SCEVCouldNotCompute>(SV)) {
        OS << " U: ";
        SE.getUnsignedRange(SV).print(OS);
        OS << " S: ";
        SE.getSignedRange(SV).print(OS);
      }

      const Loop *L = LI.getLoopFor(I.getParent());

      const SCEV *AtUse = SE.getSCEVAtScope(SV, L);
      if (AtUse != SV) {
        OS << "  -->  ";
        AtUse->print(OS);
        if (!isa<SCEVCouldNotCompute>(AtUse)) {
          OS << " U: ";
          SE.getUnsignedRange(AtUse).print(OS);
          OS << " S: ";
          SE.getSignedRange(AtUse).print(OS);
        }
      }

      if (L) {
        OS << "\t\t" "Exits: ";
        const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop());
        if (!SE.isLoopInvariant(ExitValue, L)) {
          OS << "<<Unknown>>";
        } else {
          OS << *ExitValue;
        }

        bool First = true;
        for (auto *Iter = L; Iter; Iter = Iter->getParentLoop()) {
          if (First) {
            OS << "\t\t" "LoopDispositions: { ";
            First = false;
          } else {
            OS << ", ";
          }

          Iter->getHeader()->printAsOperand(OS, /*PrintType=*/false);
          OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, Iter));
        }

        for (auto *InnerL : depth_first(L)) {
          if (InnerL == L)
            continue;
          if (First) {
            OS << "\t\t" "LoopDispositions: { ";
            First = false;
          } else {
            OS << ", ";
          }

          InnerL->getHeader()->printAsOperand(OS, /*PrintType=*/false);
          OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, InnerL));
        }

        OS << " }";
      }

      OS << "\n";
    }

  OS << "Determining loop execution counts for: ";
  F.printAsOperand(OS, /*PrintType=*/false);
  OS << "\n";
  for (Loop *I : LI)
    PrintLoopInfo(OS, &SE, I);
}
ScalarEvolution::LoopDisposition
ScalarEvolution::getLoopDisposition(const SCEV *S, const Loop *L) {
  auto &Values = LoopDispositions[S];
  for (auto &V : Values) {
    if (V.getPointer() == L)
      return V.getInt();
  }
  Values.emplace_back(L, LoopVariant);
  LoopDisposition D = computeLoopDisposition(S, L);
  auto &Values2 = LoopDispositions[S];
  for (auto &V : make_range(Values2.rbegin(), Values2.rend())) {
    if (V.getPointer() == L) {
      V.setInt(D);
      break;
    }
  }
  return D;
}
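
// Note on the pattern above: computeLoopDisposition can itself add entries to
// LoopDispositions and thereby invalidate the first reference into the map,
// so the map is deliberately re-queried (Values2) before the placeholder
// entry that was pushed for L is updated with the computed disposition.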
ScalarEvolution::LoopDisposition
ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) {
  switch (static_cast<SCEVTypes>(S->getSCEVType())) {
  case scConstant:
    return LoopInvariant;
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return getLoopDisposition(cast<SCEVCastExpr>(S)->getOperand(), L);
  case scAddRecExpr: {
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);

    // If L is the addrec's loop, it's computable.
    if (AR->getLoop() == L)
      return LoopComputable;

    // Add recurrences are never invariant in the function-body (null loop).
    if (!L)
      return LoopVariant;

    // Everything that is not defined at loop entry is variant.
    if (DT.dominates(L->getHeader(), AR->getLoop()->getHeader()))
      return LoopVariant;
    assert(!L->contains(AR->getLoop()) && "Containing loop's header does not"
           " dominate the contained loop's header?");

    // This recurrence is invariant w.r.t. L if AR's loop contains L.
    if (AR->getLoop()->contains(L))
      return LoopInvariant;

    // This recurrence is variant w.r.t. L if any of its operands
    // are variant.
    for (auto *Op : AR->operands())
      if (!isLoopInvariant(Op, L))
        return LoopVariant;

    // Otherwise it's loop-invariant.
    return LoopInvariant;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr: {
    bool HasVarying = false;
    for (auto *Op : cast<SCEVNAryExpr>(S)->operands()) {
      LoopDisposition D = getLoopDisposition(Op, L);
      if (D == LoopVariant)
        return LoopVariant;
      if (D == LoopComputable)
        HasVarying = true;
    }
    return HasVarying ? LoopComputable : LoopInvariant;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
    LoopDisposition LD = getLoopDisposition(UDiv->getLHS(), L);
    if (LD == LoopVariant)
      return LoopVariant;
    LoopDisposition RD = getLoopDisposition(UDiv->getRHS(), L);
    if (RD == LoopVariant)
      return LoopVariant;
    return (LD == LoopInvariant && RD == LoopInvariant) ?
           LoopInvariant : LoopComputable;
  }
  case scUnknown:
    // All non-instruction values are loop invariant. All instructions are loop
    // invariant if they are not contained in the specified loop.
    // Instructions are never considered invariant in the function body
    // (null loop) because they are defined within the "loop".
    if (auto *I = dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue()))
      return (L && !L->contains(I)) ? LoopInvariant : LoopVariant;
    return LoopInvariant;
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}
bool ScalarEvolution::isLoopInvariant(const SCEV *S, const Loop *L) {
  return getLoopDisposition(S, L) == LoopInvariant;
}

bool ScalarEvolution::hasComputableLoopEvolution(const SCEV *S, const Loop *L) {
  return getLoopDisposition(S, L) == LoopComputable;
}
ScalarEvolution::BlockDisposition
ScalarEvolution::getBlockDisposition(const SCEV *S, const BasicBlock *BB) {
  auto &Values = BlockDispositions[S];
  for (auto &V : Values) {
    if (V.getPointer() == BB)
      return V.getInt();
  }
  Values.emplace_back(BB, DoesNotDominateBlock);
  BlockDisposition D = computeBlockDisposition(S, BB);
  auto &Values2 = BlockDispositions[S];
  for (auto &V : make_range(Values2.rbegin(), Values2.rend())) {
    if (V.getPointer() == BB) {
      V.setInt(D);
      break;
    }
  }
  return D;
}
ScalarEvolution::BlockDisposition
ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) {
  switch (static_cast<SCEVTypes>(S->getSCEVType())) {
  case scConstant:
    return ProperlyDominatesBlock;
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return getBlockDisposition(cast<SCEVCastExpr>(S)->getOperand(), BB);
  case scAddRecExpr: {
    // This uses a "dominates" query instead of "properly dominates" query
    // to test for proper dominance too, because the instruction which
    // produces the addrec's value is a PHI, and a PHI effectively properly
    // dominates its entire containing block.
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);
    if (!DT.dominates(AR->getLoop()->getHeader(), BB))
      return DoesNotDominateBlock;

    // Fall through into SCEVNAryExpr handling.
    LLVM_FALLTHROUGH;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr: {
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S);
    bool Proper = true;
    for (const SCEV *NAryOp : NAry->operands()) {
      BlockDisposition D = getBlockDisposition(NAryOp, BB);
      if (D == DoesNotDominateBlock)
        return DoesNotDominateBlock;
      if (D == DominatesBlock)
        Proper = false;
    }
    return Proper ? ProperlyDominatesBlock : DominatesBlock;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
    const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS();
    BlockDisposition LD = getBlockDisposition(LHS, BB);
    if (LD == DoesNotDominateBlock)
      return DoesNotDominateBlock;
    BlockDisposition RD = getBlockDisposition(RHS, BB);
    if (RD == DoesNotDominateBlock)
      return DoesNotDominateBlock;
    return (LD == ProperlyDominatesBlock && RD == ProperlyDominatesBlock) ?
           ProperlyDominatesBlock : DominatesBlock;
  }
  case scUnknown:
    if (Instruction *I =
            dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) {
      if (I->getParent() == BB)
        return DominatesBlock;
      if (DT.properlyDominates(I->getParent(), BB))
        return ProperlyDominatesBlock;
      return DoesNotDominateBlock;
    }
    return ProperlyDominatesBlock;
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}
bool ScalarEvolution::dominates(const SCEV *S, const BasicBlock *BB) {
  return getBlockDisposition(S, BB) >= DominatesBlock;
}

bool ScalarEvolution::properlyDominates(const SCEV *S, const BasicBlock *BB) {
  return getBlockDisposition(S, BB) == ProperlyDominatesBlock;
}
bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const {
  return SCEVExprContains(S, [&](const SCEV *Expr) { return Expr == Op; });
}

bool ScalarEvolution::ExitLimit::hasOperand(const SCEV *S) const {
  auto IsS = [&](const SCEV *X) { return S == X; };
  auto ContainsS = [&](const SCEV *X) {
    return !isa<SCEVCouldNotCompute>(X) && SCEVExprContains(X, IsS);
  };
  return ContainsS(ExactNotTaken) || ContainsS(MaxNotTaken);
}
void
ScalarEvolution::forgetMemoizedResults(const SCEV *S) {
  ValuesAtScopes.erase(S);
  LoopDispositions.erase(S);
  BlockDispositions.erase(S);
  UnsignedRanges.erase(S);
  SignedRanges.erase(S);
  ExprValueMap.erase(S);
  HasRecMap.erase(S);
  MinTrailingZerosCache.erase(S);

  for (auto I = PredicatedSCEVRewrites.begin();
       I != PredicatedSCEVRewrites.end();) {
    std::pair<const SCEV *, const Loop *> Entry = I->first;
    if (Entry.first == S)
      PredicatedSCEVRewrites.erase(I++);
    else
      ++I;
  }

  auto RemoveSCEVFromBackedgeMap =
      [S, this](DenseMap<const Loop *, BackedgeTakenInfo> &Map) {
        for (auto I = Map.begin(), E = Map.end(); I != E;) {
          BackedgeTakenInfo &BEInfo = I->second;
          if (BEInfo.hasOperand(S, this)) {
            BEInfo.clear();
            Map.erase(I++);
          } else
            ++I;
        }
      };

  RemoveSCEVFromBackedgeMap(BackedgeTakenCounts);
  RemoveSCEVFromBackedgeMap(PredicatedBackedgeTakenCounts);
}
void
ScalarEvolution::getUsedLoops(const SCEV *S,
                              SmallPtrSetImpl<const Loop *> &LoopsUsed) {
  struct FindUsedLoops {
    FindUsedLoops(SmallPtrSetImpl<const Loop *> &LoopsUsed)
        : LoopsUsed(LoopsUsed) {}
    SmallPtrSetImpl<const Loop *> &LoopsUsed;
    bool follow(const SCEV *S) {
      if (auto *AR = dyn_cast<SCEVAddRecExpr>(S))
        LoopsUsed.insert(AR->getLoop());
      return true;
    }

    bool isDone() const { return false; }
  };

  FindUsedLoops F(LoopsUsed);
  SCEVTraversal<FindUsedLoops>(F).visitAll(S);
}
void ScalarEvolution::addToLoopUseLists(const SCEV *S) {
  SmallPtrSet<const Loop *, 8> LoopsUsed;
  getUsedLoops(S, LoopsUsed);
  for (auto *L : LoopsUsed)
    LoopUsers[L].push_back(S);
}
void ScalarEvolution::verify() const {
  ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
  ScalarEvolution SE2(F, TLI, AC, DT, LI);

  SmallVector<Loop *, 8> LoopStack(LI.begin(), LI.end());

  // Maps SCEV expressions from one ScalarEvolution "universe" to another.
  struct SCEVMapper : public SCEVRewriteVisitor<SCEVMapper> {
    SCEVMapper(ScalarEvolution &SE) : SCEVRewriteVisitor<SCEVMapper>(SE) {}

    const SCEV *visitConstant(const SCEVConstant *Constant) {
      return SE.getConstant(Constant->getAPInt());
    }

    const SCEV *visitUnknown(const SCEVUnknown *Expr) {
      return SE.getUnknown(Expr->getValue());
    }

    const SCEV *visitCouldNotCompute(const SCEVCouldNotCompute *Expr) {
      return SE.getCouldNotCompute();
    }
  };

  SCEVMapper SCM(SE2);

  while (!LoopStack.empty()) {
    auto *L = LoopStack.pop_back_val();
    LoopStack.insert(LoopStack.end(), L->begin(), L->end());

    auto *CurBECount = SCM.visit(
        const_cast<ScalarEvolution *>(this)->getBackedgeTakenCount(L));
    auto *NewBECount = SE2.getBackedgeTakenCount(L);

    if (CurBECount == SE2.getCouldNotCompute() ||
        NewBECount == SE2.getCouldNotCompute()) {
      // NB! This situation is legal, but is very suspicious -- whatever pass
      // changed the loop to make a trip count go from could not compute to
      // computable or vice-versa *should have* invalidated SCEV. However, we
      // choose not to assert here (for now) since we don't want false
      // positives.
      continue;
    }

    if (containsUndefs(CurBECount) || containsUndefs(NewBECount)) {
      // SCEV treats "undef" as an unknown but consistent value (i.e. it does
      // not propagate undef aggressively). This means we can (and do) fail
      // verification in cases where a transform makes the trip count of a loop
      // go from "undef" to "undef+1" (say). The transform is fine, since in
      // both cases the loop iterates "undef" times, but SCEV thinks we
      // increased the trip count of the loop by 1 incorrectly.
      continue;
    }

    if (SE.getTypeSizeInBits(CurBECount->getType()) >
        SE.getTypeSizeInBits(NewBECount->getType()))
      NewBECount = SE2.getZeroExtendExpr(NewBECount, CurBECount->getType());
    else if (SE.getTypeSizeInBits(CurBECount->getType()) <
             SE.getTypeSizeInBits(NewBECount->getType()))
      CurBECount = SE2.getZeroExtendExpr(CurBECount, NewBECount->getType());

    auto *ConstantDelta =
        dyn_cast<SCEVConstant>(SE2.getMinusSCEV(CurBECount, NewBECount));

    if (ConstantDelta && ConstantDelta->getAPInt() != 0) {
      dbgs() << "Trip Count Changed!\n";
      dbgs() << "Old: " << *CurBECount << "\n";
      dbgs() << "New: " << *NewBECount << "\n";
      dbgs() << "Delta: " << *ConstantDelta << "\n";
      std::abort();
    }
  }
}
bool ScalarEvolution::invalidate(
    Function &F, const PreservedAnalyses &PA,
    FunctionAnalysisManager::Invalidator &Inv) {
  // Invalidate the ScalarEvolution object whenever it isn't preserved or one
  // of its dependencies is invalidated.
  auto PAC = PA.getChecker<ScalarEvolutionAnalysis>();
  return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) ||
         Inv.invalidate<AssumptionAnalysis>(F, PA) ||
         Inv.invalidate<DominatorTreeAnalysis>(F, PA) ||
         Inv.invalidate<LoopAnalysis>(F, PA);
}
AnalysisKey ScalarEvolutionAnalysis::Key;

ScalarEvolution ScalarEvolutionAnalysis::run(Function &F,
                                             FunctionAnalysisManager &AM) {
  return ScalarEvolution(F, AM.getResult<TargetLibraryAnalysis>(F),
                         AM.getResult<AssumptionAnalysis>(F),
                         AM.getResult<DominatorTreeAnalysis>(F),
                         AM.getResult<LoopAnalysis>(F));
}

PreservedAnalyses
ScalarEvolutionPrinterPass::run(Function &F, FunctionAnalysisManager &AM) {
  AM.getResult<ScalarEvolutionAnalysis>(F).print(OS);
  return PreservedAnalyses::all();
}
INITIALIZE_PASS_BEGIN(ScalarEvolutionWrapperPass, "scalar-evolution",
                      "Scalar Evolution Analysis", false, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(ScalarEvolutionWrapperPass, "scalar-evolution",
                    "Scalar Evolution Analysis", false, true)

char ScalarEvolutionWrapperPass::ID = 0;

ScalarEvolutionWrapperPass::ScalarEvolutionWrapperPass() : FunctionPass(ID) {
  initializeScalarEvolutionWrapperPassPass(*PassRegistry::getPassRegistry());
}
bool ScalarEvolutionWrapperPass::runOnFunction(Function &F) {
  SE.reset(new ScalarEvolution(
      F, getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(),
      getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F),
      getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
      getAnalysis<LoopInfoWrapperPass>().getLoopInfo()));
  return false;
}

void ScalarEvolutionWrapperPass::releaseMemory() { SE.reset(); }

void ScalarEvolutionWrapperPass::print(raw_ostream &OS, const Module *) const {
  SE->print(OS);
}

void ScalarEvolutionWrapperPass::verifyAnalysis() const {
  if (!VerifySCEV)
    return;

  SE->verify();
}

void ScalarEvolutionWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<AssumptionCacheTracker>();
  AU.addRequiredTransitive<LoopInfoWrapperPass>();
  AU.addRequiredTransitive<DominatorTreeWrapperPass>();
  AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
}
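
// The verifyAnalysis hook above is gated on the VerifySCEV flag (the
// -verify-scev command line option declared earlier in this file), so the
// relatively expensive ScalarEvolution::verify() only runs on request.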
const SCEVPredicate *ScalarEvolution::getEqualPredicate(const SCEV *LHS,
                                                        const SCEV *RHS) {
  FoldingSetNodeID ID;
  assert(LHS->getType() == RHS->getType() &&
         "Type mismatch between LHS and RHS");
  // Unique this node based on the arguments
  ID.AddInteger(SCEVPredicate::P_Equal);
  ID.AddPointer(LHS);
  ID.AddPointer(RHS);
  void *IP = nullptr;
  if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP))
    return S;
  SCEVEqualPredicate *Eq = new (SCEVAllocator)
      SCEVEqualPredicate(ID.Intern(SCEVAllocator), LHS, RHS);
  UniquePreds.InsertNode(Eq, IP);
  return Eq;
}
const SCEVPredicate *ScalarEvolution::getWrapPredicate(
    const SCEVAddRecExpr *AR,
    SCEVWrapPredicate::IncrementWrapFlags AddedFlags) {
  FoldingSetNodeID ID;
  // Unique this node based on the arguments
  ID.AddInteger(SCEVPredicate::P_Wrap);
  ID.AddPointer(AR);
  ID.AddInteger(AddedFlags);
  void *IP = nullptr;
  if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP))
    return S;
  auto *OF = new (SCEVAllocator)
      SCEVWrapPredicate(ID.Intern(SCEVAllocator), AR, AddedFlags);
  UniquePreds.InsertNode(OF, IP);
  return OF;
}
namespace {

class SCEVPredicateRewriter : public SCEVRewriteVisitor<SCEVPredicateRewriter> {
public:
  /// Rewrites \p S in the context of a loop L and the SCEV predication
  /// infrastructure.
  ///
  /// If \p Pred is non-null, the SCEV expression is rewritten to respect the
  /// equivalences present in \p Pred.
  ///
  /// If \p NewPreds is non-null, rewrite is free to add further predicates to
  /// \p NewPreds such that the result will be an AddRecExpr.
  static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE,
                             SmallPtrSetImpl<const SCEVPredicate *> *NewPreds,
                             SCEVUnionPredicate *Pred) {
    SCEVPredicateRewriter Rewriter(L, SE, NewPreds, Pred);
    return Rewriter.visit(S);
  }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    if (Pred) {
      auto ExprPreds = Pred->getPredicatesForExpr(Expr);
      for (auto *Pred : ExprPreds)
        if (const auto *IPred = dyn_cast<SCEVEqualPredicate>(Pred))
          if (IPred->getLHS() == Expr)
            return IPred->getRHS();
    }
    return convertToAddRecWithPreds(Expr);
  }

  const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) {
    const SCEV *Operand = visit(Expr->getOperand());
    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand);
    if (AR && AR->getLoop() == L && AR->isAffine()) {
      // This couldn't be folded because the operand didn't have the nuw
      // flag. Add the nusw flag as an assumption that we could make.
      const SCEV *Step = AR->getStepRecurrence(SE);
      Type *Ty = Expr->getType();
      if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNUSW))
        return SE.getAddRecExpr(SE.getZeroExtendExpr(AR->getStart(), Ty),
                                SE.getSignExtendExpr(Step, Ty), L,
                                AR->getNoWrapFlags());
    }
    return SE.getZeroExtendExpr(Operand, Expr->getType());
  }

  const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) {
    const SCEV *Operand = visit(Expr->getOperand());
    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand);
    if (AR && AR->getLoop() == L && AR->isAffine()) {
      // This couldn't be folded because the operand didn't have the nsw
      // flag. Add the nssw flag as an assumption that we could make.
      const SCEV *Step = AR->getStepRecurrence(SE);
      Type *Ty = Expr->getType();
      if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNSSW))
        return SE.getAddRecExpr(SE.getSignExtendExpr(AR->getStart(), Ty),
                                SE.getSignExtendExpr(Step, Ty), L,
                                AR->getNoWrapFlags());
    }
    return SE.getSignExtendExpr(Operand, Expr->getType());
  }

private:
  explicit SCEVPredicateRewriter(const Loop *L, ScalarEvolution &SE,
                                 SmallPtrSetImpl<const SCEVPredicate *> *NewPreds,
                                 SCEVUnionPredicate *Pred)
      : SCEVRewriteVisitor(SE), NewPreds(NewPreds), Pred(Pred), L(L) {}

  bool addOverflowAssumption(const SCEVPredicate *P) {
    if (!NewPreds) {
      // Check if we've already made this assumption.
      return Pred && Pred->implies(P);
    }
    NewPreds->insert(P);
    return true;
  }

  bool addOverflowAssumption(const SCEVAddRecExpr *AR,
                             SCEVWrapPredicate::IncrementWrapFlags AddedFlags) {
    auto *A = SE.getWrapPredicate(AR, AddedFlags);
    return addOverflowAssumption(A);
  }

  // If \p Expr represents a PHINode, we try to see if it can be represented
  // as an AddRec, possibly under a predicate (PHISCEVPred). If it is possible
  // to add this predicate as a runtime overflow check, we return the AddRec.
  // If \p Expr does not meet these conditions (is not a PHI node, or we
  // couldn't create an AddRec for it, or couldn't add the predicate), we just
  // return \p Expr.
  const SCEV *convertToAddRecWithPreds(const SCEVUnknown *Expr) {
    if (!isa<PHINode>(Expr->getValue()))
      return Expr;
    Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
        PredicatedRewrite = SE.createAddRecFromPHIWithCasts(Expr);
    if (!PredicatedRewrite)
      return Expr;
    for (auto *P : PredicatedRewrite->second) {
      // Wrap predicates from outer loops are not supported.
      if (auto *WP = dyn_cast<const SCEVWrapPredicate>(P)) {
        auto *AR = cast<const SCEVAddRecExpr>(WP->getExpr());
        if (L != AR->getLoop())
          return Expr;
      }
      if (!addOverflowAssumption(P))
        return Expr;
    }
    return PredicatedRewrite->first;
  }

  SmallPtrSetImpl<const SCEVPredicate *> *NewPreds;
  SCEVUnionPredicate *Pred;
  const Loop *L;
};

} // end anonymous namespace
const SCEV *ScalarEvolution::rewriteUsingPredicate(const SCEV *S, const Loop *L,
                                                   SCEVUnionPredicate &Preds) {
  return SCEVPredicateRewriter::rewrite(S, L, *this, nullptr, &Preds);
}
const SCEVAddRecExpr *ScalarEvolution::convertSCEVToAddRecWithPredicates(
    const SCEV *S, const Loop *L,
    SmallPtrSetImpl<const SCEVPredicate *> &Preds) {
  SmallPtrSet<const SCEVPredicate *, 4> TransformPreds;
  S = SCEVPredicateRewriter::rewrite(S, L, *this, &TransformPreds, nullptr);
  auto *AddRec = dyn_cast<SCEVAddRecExpr>(S);

  if (!AddRec)
    return nullptr;

  // Since the transformation was successful, we can now transfer the SCEV
  // predicates.
  for (auto *P : TransformPreds)
    Preds.insert(P);

  return AddRec;
}
/// SCEV predicates
SCEVPredicate::SCEVPredicate(const FoldingSetNodeIDRef ID,
                             SCEVPredicateKind Kind)
    : FastID(ID), Kind(Kind) {}

SCEVEqualPredicate::SCEVEqualPredicate(const FoldingSetNodeIDRef ID,
                                       const SCEV *LHS, const SCEV *RHS)
    : SCEVPredicate(ID, P_Equal), LHS(LHS), RHS(RHS) {
  assert(LHS->getType() == RHS->getType() && "LHS and RHS types don't match");
  assert(LHS != RHS && "LHS and RHS are the same SCEV");
}
bool SCEVEqualPredicate::implies(const SCEVPredicate *N) const {
  const auto *Op = dyn_cast<SCEVEqualPredicate>(N);

  if (!Op)
    return false;

  return Op->LHS == LHS && Op->RHS == RHS;
}

bool SCEVEqualPredicate::isAlwaysTrue() const { return false; }

const SCEV *SCEVEqualPredicate::getExpr() const { return LHS; }

void SCEVEqualPredicate::print(raw_ostream &OS, unsigned Depth) const {
  OS.indent(Depth) << "Equal predicate: " << *LHS << " == " << *RHS << "\n";
}
SCEVWrapPredicate::SCEVWrapPredicate(const FoldingSetNodeIDRef ID,
                                     const SCEVAddRecExpr *AR,
                                     IncrementWrapFlags Flags)
    : SCEVPredicate(ID, P_Wrap), AR(AR), Flags(Flags) {}

const SCEV *SCEVWrapPredicate::getExpr() const { return AR; }

bool SCEVWrapPredicate::implies(const SCEVPredicate *N) const {
  const auto *Op = dyn_cast<SCEVWrapPredicate>(N);

  return Op && Op->AR == AR && setFlags(Flags, Op->Flags) == Flags;
}
bool SCEVWrapPredicate::isAlwaysTrue() const {
  SCEV::NoWrapFlags ScevFlags = AR->getNoWrapFlags();
  IncrementWrapFlags IFlags = Flags;

  if (ScalarEvolution::setFlags(ScevFlags, SCEV::FlagNSW) == ScevFlags)
    IFlags = clearFlags(IFlags, IncrementNSSW);

  return IFlags == IncrementAnyWrap;
}

void SCEVWrapPredicate::print(raw_ostream &OS, unsigned Depth) const {
  OS.indent(Depth) << *getExpr() << " Added Flags: ";
  if (SCEVWrapPredicate::IncrementNUSW & getFlags())
    OS << "<nusw>";
  if (SCEVWrapPredicate::IncrementNSSW & getFlags())
    OS << "<nssw>";
  OS << "\n";
}
SCEVWrapPredicate::IncrementWrapFlags
SCEVWrapPredicate::getImpliedFlags(const SCEVAddRecExpr *AR,
                                   ScalarEvolution &SE) {
  IncrementWrapFlags ImpliedFlags = IncrementAnyWrap;
  SCEV::NoWrapFlags StaticFlags = AR->getNoWrapFlags();

  // We can safely transfer the NSW flag as NSSW.
  if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNSW) == StaticFlags)
    ImpliedFlags = IncrementNSSW;

  if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNUW) == StaticFlags) {
    // If the increment is positive, the SCEV NUW flag will also imply the
    // WrapPredicate NUSW flag.
    if (const auto *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(SE)))
      if (Step->getValue()->getValue().isNonNegative())
        ImpliedFlags = setFlags(ImpliedFlags, IncrementNUSW);
  }

  return ImpliedFlags;
}
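
// For example, an AddRec carrying FlagNSW implies IncrementNSSW, and one
// carrying FlagNUW with a constant non-negative step additionally implies
// IncrementNUSW; flags implied this way never need a runtime check.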
/// Union predicates don't get cached so create a dummy set ID for it.
SCEVUnionPredicate::SCEVUnionPredicate()
    : SCEVPredicate(FoldingSetNodeIDRef(nullptr, 0), P_Union) {}

bool SCEVUnionPredicate::isAlwaysTrue() const {
  return all_of(Preds,
                [](const SCEVPredicate *I) { return I->isAlwaysTrue(); });
}
ArrayRef<const SCEVPredicate *>
SCEVUnionPredicate::getPredicatesForExpr(const SCEV *Expr) {
  auto I = SCEVToPreds.find(Expr);
  if (I == SCEVToPreds.end())
    return ArrayRef<const SCEVPredicate *>();
  return I->second;
}
bool SCEVUnionPredicate::implies(const SCEVPredicate *N) const {
  if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N))
    return all_of(Set->Preds,
                  [this](const SCEVPredicate *I) { return this->implies(I); });

  auto ScevPredsIt = SCEVToPreds.find(N->getExpr());
  if (ScevPredsIt == SCEVToPreds.end())
    return false;
  auto &SCEVPreds = ScevPredsIt->second;

  return any_of(SCEVPreds,
                [N](const SCEVPredicate *I) { return I->implies(N); });
}

const SCEV *SCEVUnionPredicate::getExpr() const { return nullptr; }
void SCEVUnionPredicate::print(raw_ostream &OS, unsigned Depth) const {
  for (auto Pred : Preds)
    Pred->print(OS, Depth);
}
void SCEVUnionPredicate::add(const SCEVPredicate *N) {
  if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N)) {
    for (auto Pred : Set->Preds)
      add(Pred);
    return;
  }

  if (implies(N))
    return;

  const SCEV *Key = N->getExpr();
  assert(Key && "Only SCEVUnionPredicate doesn't have an "
                " associated expression!");

  SCEVToPreds[Key].push_back(N);
  Preds.push_back(N);
}
PredicatedScalarEvolution::PredicatedScalarEvolution(ScalarEvolution &SE,
                                                     Loop &L)
    : SE(SE), L(L) {}
const SCEV *PredicatedScalarEvolution::getSCEV(Value *V) {
  const SCEV *Expr = SE.getSCEV(V);
  RewriteEntry &Entry = RewriteMap[Expr];

  // If we already have an entry and the version matches, return it.
  if (Entry.second && Generation == Entry.first)
    return Entry.second;

  // We found an entry but it's stale. Rewrite the stale entry
  // according to the current predicate.
  if (Entry.second)
    Expr = Entry.second;

  const SCEV *NewSCEV = SE.rewriteUsingPredicate(Expr, &L, Preds);
  Entry = {Generation, NewSCEV};

  return NewSCEV;
}
*PredicatedScalarEvolution::getBackedgeTakenCount() {
12338 if (!BackedgeCount
) {
12339 SCEVUnionPredicate BackedgePred
;
12340 BackedgeCount
= SE
.getPredicatedBackedgeTakenCount(&L
, BackedgePred
);
12341 addPredicate(BackedgePred
);
12343 return BackedgeCount
;
void PredicatedScalarEvolution::addPredicate(const SCEVPredicate &Pred) {
  if (Preds.implies(&Pred))
    return;
  Preds.add(&Pred);
  updateGeneration();
}

const SCEVUnionPredicate &PredicatedScalarEvolution::getUnionPredicate() const {
  return Preds;
}
void PredicatedScalarEvolution::updateGeneration() {
  // If the generation number wrapped, recompute everything.
  if (++Generation == 0) {
    for (auto &II : RewriteMap) {
      const SCEV *Rewritten = II.second.second;
      II.second = {Generation, SE.rewriteUsingPredicate(Rewritten, &L, Preds)};
    }
  }
}
void PredicatedScalarEvolution::setNoOverflow(
    Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) {
  const SCEV *Expr = getSCEV(V);
  const auto *AR = cast<SCEVAddRecExpr>(Expr);

  auto ImpliedFlags = SCEVWrapPredicate::getImpliedFlags(AR, SE);

  // Clear the statically implied flags.
  Flags = SCEVWrapPredicate::clearFlags(Flags, ImpliedFlags);
  addPredicate(*SE.getWrapPredicate(AR, Flags));

  auto II = FlagsMap.insert({V, Flags});
  if (!II.second)
    II.first->second = SCEVWrapPredicate::setFlags(Flags, II.first->second);
}
bool PredicatedScalarEvolution::hasNoOverflow(
    Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) {
  const SCEV *Expr = getSCEV(V);
  const auto *AR = cast<SCEVAddRecExpr>(Expr);

  Flags = SCEVWrapPredicate::clearFlags(
      Flags, SCEVWrapPredicate::getImpliedFlags(AR, SE));

  auto II = FlagsMap.find(V);

  if (II != FlagsMap.end())
    Flags = SCEVWrapPredicate::clearFlags(Flags, II->second);

  return Flags == SCEVWrapPredicate::IncrementAnyWrap;
}
const SCEVAddRecExpr *PredicatedScalarEvolution::getAsAddRec(Value *V) {
  const SCEV *Expr = this->getSCEV(V);
  SmallPtrSet<const SCEVPredicate *, 4> NewPreds;
  auto *New = SE.convertSCEVToAddRecWithPredicates(Expr, &L, NewPreds);

  if (!New)
    return nullptr;

  for (auto *P : NewPreds)
    Preds.add(P);

  updateGeneration();
  RewriteMap[SE.getSCEV(V)] = {Generation, New};
  return New;
}
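
// Minimal usage sketch for the predicated interface (loosely following how
// loop transformations such as the vectorizer use it; the names L, Ptr and
// SE are illustrative):
//
//   PredicatedScalarEvolution PSE(SE, *L);
//   if (const SCEVAddRecExpr *AR = PSE.getAsAddRec(Ptr)) {
//     // AR is an affine add recurrence, valid under the runtime checks
//     // accumulated in PSE.getUnionPredicate().
//   }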
PredicatedScalarEvolution::PredicatedScalarEvolution(
    const PredicatedScalarEvolution &Init)
    : RewriteMap(Init.RewriteMap), SE(Init.SE), L(Init.L), Preds(Init.Preds),
      Generation(Init.Generation), BackedgeCount(Init.BackedgeCount) {
  for (const auto &I : Init.FlagsMap)
    FlagsMap.insert(I);
}
void PredicatedScalarEvolution::print(raw_ostream &OS, unsigned Depth) const {
  // For each block.
  for (auto *BB : L.getBlocks())
    for (auto &I : *BB) {
      if (!SE.isSCEVable(I.getType()))
        continue;

      auto *Expr = SE.getSCEV(&I);
      auto II = RewriteMap.find(Expr);

      if (II == RewriteMap.end())
        continue;

      // Don't print things that are not interesting.
      if (II->second.second == Expr)
        continue;

      OS.indent(Depth) << "[PSE]" << I << ":\n";
      OS.indent(Depth + 2) << *Expr << "\n";
      OS.indent(Depth + 2) << "--> " << *II->second.second << "\n";
    }
}
// Match the mathematical pattern A - (A / B) * B, where A and B can be
// arbitrary expressions.
// It's not always easy, as A and B can be folded (imagine A is X / 2, and B
// is 4, so A / B becomes X / 8).
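//
// For example, "%a urem %b" reaches SCEV as (%a + (-1 * (%a /u %b) * %b)):
// the add has two operands, and the flattened mul has three (with the
// constant -1 first), so the three-operand case below tries the remaining
// mul operands as the candidate divisor B.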
bool ScalarEvolution::matchURem(const SCEV *Expr, const SCEV *&LHS,
                                const SCEV *&RHS) {
  const auto *Add = dyn_cast<SCEVAddExpr>(Expr);
  if (Add == nullptr || Add->getNumOperands() != 2)
    return false;

  const SCEV *A = Add->getOperand(1);
  const auto *Mul = dyn_cast<SCEVMulExpr>(Add->getOperand(0));

  if (Mul == nullptr)
    return false;

  const auto MatchURemWithDivisor = [&](const SCEV *B) {
    // (SomeExpr + (-(SomeExpr / B) * B)).
    if (Expr == getURemExpr(A, B)) {
      LHS = A;
      RHS = B;
      return true;
    }
    return false;
  };

  // (SomeExpr + (-1 * (SomeExpr / B) * B)).
  if (Mul->getNumOperands() == 3 && isa<SCEVConstant>(Mul->getOperand(0)))
    return MatchURemWithDivisor(Mul->getOperand(1)) ||
           MatchURemWithDivisor(Mul->getOperand(2));

  // (SomeExpr + ((-SomeExpr / B) * B)) or (SomeExpr + ((SomeExpr / B) * -B)).
  if (Mul->getNumOperands() == 2)
    return MatchURemWithDivisor(Mul->getOperand(1)) ||
           MatchURemWithDivisor(Mul->getOperand(0)) ||
           MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(1))) ||
           MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(0)));