//===- ScalarEvolution.cpp - Scalar Evolution Analysis --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library.  First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle. We only create one SCEV of a particular shape, so
// pointer-comparisons for equality are legal.
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (ie, a PHI node). If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
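//
// (Illustrative example, added for clarity: given the canonical induction
// variable
//
//     %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
//     %iv.next = add i32 %iv, 1
//
// the PHI is represented as the polynomial recurrence {0,+,1}<%loop>: it is
// 0 on the first iteration and grows by 1 each time the backedge is taken.)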
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression.  These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionDivision.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <map>
#include <memory>
#include <tuple>
#include <utility>
#include <vector>
using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "scalar-evolution"

STATISTIC(NumArrayLenItCounts,
          "Number of trip counts computed with array length");
STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");

static cl::opt<unsigned>
    MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                            cl::ZeroOrMore,
                            cl::desc("Maximum number of iterations for which "
                                     "SCEV will symbolically execute a "
                                     "constant-derived loop"),
                            cl::init(100));

// FIXME: Enable this with EXPENSIVE_CHECKS when the test suite is clean.
static cl::opt<bool> VerifySCEV(
    "verify-scev", cl::Hidden,
    cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"));
static cl::opt<bool> VerifySCEVStrict(
    "verify-scev-strict", cl::Hidden,
    cl::desc("Enable stricter verification when -verify-scev is passed"));
static cl::opt<bool>
    VerifySCEVMap("verify-scev-maps", cl::Hidden,
                  cl::desc("Verify no dangling value in ScalarEvolution's "
                           "ExprValueMap (slow)"));

static cl::opt<bool> VerifyIR(
    "scev-verify-ir", cl::Hidden,
    cl::desc("Verify IR correctness when making sensitive SCEV queries (slow)"),
    cl::init(false));

static cl::opt<unsigned> MulOpsInlineThreshold(
    "scev-mulops-inline-threshold", cl::Hidden,
    cl::desc("Threshold for inlining multiplication operands into a SCEV"),
    cl::init(32));

static cl::opt<unsigned> AddOpsInlineThreshold(
    "scev-addops-inline-threshold", cl::Hidden,
    cl::desc("Threshold for inlining addition operands into a SCEV"),
    cl::init(500));

static cl::opt<unsigned> MaxSCEVCompareDepth(
    "scalar-evolution-max-scev-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV complexity comparisons"),
    cl::init(32));

static cl::opt<unsigned> MaxSCEVOperationsImplicationDepth(
    "scalar-evolution-max-scev-operations-implication-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive SCEV operations implication analysis"),
    cl::init(2));

static cl::opt<unsigned> MaxValueCompareDepth(
    "scalar-evolution-max-value-compare-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive value complexity comparisons"),
    cl::init(2));

static cl::opt<unsigned>
    MaxArithDepth("scalar-evolution-max-arith-depth", cl::Hidden,
                  cl::desc("Maximum depth of recursive arithmetics"),
                  cl::init(32));

static cl::opt<unsigned> MaxConstantEvolvingDepth(
    "scalar-evolution-max-constant-evolving-depth", cl::Hidden,
    cl::desc("Maximum depth of recursive constant evolving"), cl::init(32));

static cl::opt<unsigned>
    MaxCastDepth("scalar-evolution-max-cast-depth", cl::Hidden,
                 cl::desc("Maximum depth of recursive SExt/ZExt/Trunc"),
                 cl::init(8));

static cl::opt<unsigned>
    MaxAddRecSize("scalar-evolution-max-add-rec-size", cl::Hidden,
                  cl::desc("Max coefficients in AddRec during evolving"),
                  cl::init(8));

static cl::opt<unsigned>
    HugeExprThreshold("scalar-evolution-huge-expr-threshold", cl::Hidden,
                      cl::desc("Size of the expression which is considered huge"),
                      cl::init(4096));

static cl::opt<bool>
ClassifyExpressions("scalar-evolution-classify-expressions",
    cl::Hidden, cl::init(true),
    cl::desc("When printing analysis, include information on every instruction"));

static cl::opt<bool> UseExpensiveRangeSharpening(
    "scalar-evolution-use-expensive-range-sharpening", cl::Hidden,
    cl::init(false),
    cl::desc("Use more powerful methods of sharpening expression ranges. May "
             "be costly in terms of compile time"));

//===----------------------------------------------------------------------===//
//                           SCEV class definitions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Implementation of the SCEV class.
//

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void SCEV::dump() const {
  print(dbgs());
  dbgs() << '\n';
}
#endif

void SCEV::print(raw_ostream &OS) const {
  switch (getSCEVType()) {
  case scConstant:
    cast<SCEVConstant>(this)->getValue()->printAsOperand(OS, false);
    return;
  case scPtrToInt: {
    const SCEVPtrToIntExpr *PtrToInt = cast<SCEVPtrToIntExpr>(this);
    const SCEV *Op = PtrToInt->getOperand();
    OS << "(ptrtoint " << *Op->getType() << " " << *Op << " to "
       << *PtrToInt->getType() << ")";
    return;
  }
  case scTruncate: {
    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this);
    const SCEV *Op = Trunc->getOperand();
    OS << "(trunc " << *Op->getType() << " " << *Op << " to "
       << *Trunc->getType() << ")";
    return;
  }
  case scZeroExtend: {
    const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(this);
    const SCEV *Op = ZExt->getOperand();
    OS << "(zext " << *Op->getType() << " " << *Op << " to "
       << *ZExt->getType() << ")";
    return;
  }
  case scSignExtend: {
    const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(this);
    const SCEV *Op = SExt->getOperand();
    OS << "(sext " << *Op->getType() << " " << *Op << " to "
       << *SExt->getType() << ")";
    return;
  }
  case scAddRecExpr: {
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(this);
    OS << "{" << *AR->getOperand(0);
    for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i)
      OS << ",+," << *AR->getOperand(i);
    OS << "}<";
    if (AR->hasNoUnsignedWrap())
      OS << "nuw><";
    if (AR->hasNoSignedWrap())
      OS << "nsw><";
    if (AR->hasNoSelfWrap() &&
        !AR->getNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW)))
      OS << "nw><";
    AR->getLoop()->getHeader()->printAsOperand(OS, /*PrintType=*/false);
    OS << ">";
    return;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr: {
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this);
    const char *OpStr = nullptr;
    switch (NAry->getSCEVType()) {
    case scAddExpr: OpStr = " + "; break;
    case scMulExpr: OpStr = " * "; break;
    case scUMaxExpr: OpStr = " umax "; break;
    case scSMaxExpr: OpStr = " smax "; break;
    case scUMinExpr:
      OpStr = " umin ";
      break;
    case scSMinExpr:
      OpStr = " smin ";
      break;
    default:
      llvm_unreachable("There are no other nary expression types.");
    }
    OS << "(";
    ListSeparator LS(OpStr);
    for (const SCEV *Op : NAry->operands())
      OS << LS << *Op;
    OS << ")";
    switch (NAry->getSCEVType()) {
    case scAddExpr:
    case scMulExpr:
      if (NAry->hasNoUnsignedWrap())
        OS << "<nuw>";
      if (NAry->hasNoSignedWrap())
        OS << "<nsw>";
      break;
    default:
      // Nothing to print for other nary expressions.
      break;
    }
    return;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(this);
    OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")";
    return;
  }
  case scUnknown: {
    const SCEVUnknown *U = cast<SCEVUnknown>(this);
    Type *AllocTy;
    if (U->isSizeOf(AllocTy)) {
      OS << "sizeof(" << *AllocTy << ")";
      return;
    }
    if (U->isAlignOf(AllocTy)) {
      OS << "alignof(" << *AllocTy << ")";
      return;
    }

    Type *CTy;
    Constant *FieldNo;
    if (U->isOffsetOf(CTy, FieldNo)) {
      OS << "offsetof(" << *CTy << ", ";
      FieldNo->printAsOperand(OS, false);
      OS << ")";
      return;
    }

    // Otherwise just print it normally.
    U->getValue()->printAsOperand(OS, false);
    return;
  }
  case scCouldNotCompute:
    OS << "***COULDNOTCOMPUTE***";
    return;
  }
  llvm_unreachable("Unknown SCEV kind!");
}

Type *SCEV::getType() const {
  switch (getSCEVType()) {
  case scConstant:
    return cast<SCEVConstant>(this)->getType();
  case scPtrToInt:
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return cast<SCEVCastExpr>(this)->getType();
  case scAddRecExpr:
    return cast<SCEVAddRecExpr>(this)->getType();
  case scMulExpr:
    return cast<SCEVMulExpr>(this)->getType();
  case scUMaxExpr:
  case scSMaxExpr:
  case scUMinExpr:
  case scSMinExpr:
    return cast<SCEVMinMaxExpr>(this)->getType();
  case scAddExpr:
    return cast<SCEVAddExpr>(this)->getType();
  case scUDivExpr:
    return cast<SCEVUDivExpr>(this)->getType();
  case scUnknown:
    return cast<SCEVUnknown>(this)->getType();
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isMinusOne();
  return false;
}

bool SCEV::isNonConstantNegative() const {
  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(this);
  if (!Mul) return false;

  // If there is a constant factor, it will be first.
  const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
  if (!SC) return false;

  // Return true if the value is negative; this matches things like (-42 * V).
  return SC->getAPInt().isNegative();
}

SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(FoldingSetNodeIDRef(), scCouldNotCompute, 0) {}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}

const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
  FoldingSetNodeID ID;
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getConstant(const APInt &Val) {
  return getConstant(ConstantInt::get(getContext(), Val));
}

const SCEV *
ScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) {
  IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
  return getConstant(ConstantInt::get(ITy, V, isSigned));
}

SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy,
                           const SCEV *op, Type *ty)
    : SCEV(ID, SCEVTy, computeExpressionSize(op)), Ty(ty) {
  Operands[0] = op;
}

SCEVPtrToIntExpr::SCEVPtrToIntExpr(const FoldingSetNodeIDRef ID, const SCEV *Op,
                                   Type *ITy)
    : SCEVCastExpr(ID, scPtrToInt, Op, ITy) {
  assert(getOperand()->getType()->isPointerTy() && Ty->isIntegerTy() &&
         "Must be a non-bit-width-changing pointer-to-integer cast!");
}

SCEVIntegralCastExpr::SCEVIntegralCastExpr(const FoldingSetNodeIDRef ID,
                                           SCEVTypes SCEVTy, const SCEV *op,
                                           Type *ty)
    : SCEVCastExpr(ID, SCEVTy, op, ty) {}

SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID, const SCEV *op,
                                   Type *ty)
    : SCEVIntegralCastExpr(ID, scTruncate, op, ty) {
  assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot truncate non-integer value!");
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
    : SCEVIntegralCastExpr(ID, scZeroExtend, op, ty) {
  assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot zero extend non-integer value!");
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, Type *ty)
    : SCEVIntegralCastExpr(ID, scSignExtend, op, ty) {
  assert(getOperand()->getType()->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
         "Cannot sign extend non-integer value!");
}

void SCEVUnknown::deleted() {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Release the value.
  setValPtr(nullptr);
}

void SCEVUnknown::allUsesReplacedWith(Value *New) {
  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Update this SCEVUnknown to point to the new value. This is needed
  // because there may still be outstanding SCEVs which still point to
  // this SCEVUnknown.
  setValPtr(New);
}

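// Note (added commentary, not in the original source): the three recognizers
// below match the canonical constant-expression encodings of type metrics.
// For example, sizeof(Ty) is encoded as
//   ptrtoint(getelementptr Ty, Ty* null, i32 1)
// alignof(Ty) uses a gep to field 1 of an unpacked {i1, Ty} struct based at
// null, and offsetof uses a gep from a null pointer to the requested field.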
bool SCEVUnknown::isSizeOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue() &&
            CE->getNumOperands() == 2)
          if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
            if (CI->isOne()) {
              AllocTy = cast<GEPOperator>(CE)->getSourceElementType();
              return true;
            }

  return false;
}

bool SCEVUnknown::isAlignOf(Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue()) {
          Type *Ty = cast<GEPOperator>(CE)->getSourceElementType();
          if (StructType *STy = dyn_cast<StructType>(Ty))
            if (!STy->isPacked() &&
                CE->getNumOperands() == 3 &&
                CE->getOperand(1)->isNullValue()) {
              if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
                if (CI->isOne() &&
                    STy->getNumElements() == 2 &&
                    STy->getElementType(0)->isIntegerTy(1)) {
                  AllocTy = STy->getElementType(1);
                  return true;
                }
            }
        }

  return false;
}

bool SCEVUnknown::isOffsetOf(Type *&CTy, Constant *&FieldNo) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getNumOperands() == 3 &&
            CE->getOperand(0)->isNullValue() &&
            CE->getOperand(1)->isNullValue()) {
          Type *Ty = cast<GEPOperator>(CE)->getSourceElementType();
          // Ignore vector types here so that ScalarEvolutionExpander doesn't
          // emit getelementptrs that index into vectors.
          if (Ty->isStructTy() || Ty->isArrayTy()) {
            CTy = Ty;
            FieldNo = CE->getOperand(2);
            return true;
          }
        }

  return false;
}

//===----------------------------------------------------------------------===//
//                               SCEV Utilities
//===----------------------------------------------------------------------===//

/// Compare the two values \p LV and \p RV in terms of their "complexity" where
/// "complexity" is a partial (and somewhat ad-hoc) relation used to order
/// operands in SCEV expressions. \p EqCache is a set of pairs of values that
/// have been previously deemed to be "equally complex" by this routine. It is
/// intended to avoid exponential time complexity in cases like:
///
///   %a = f(%x, %y)
///   %b = f(%a,  %a)
///   %c = f(%b,  %b)
///
///   %d = f(%x, %y)
///   %e = f(%d,  %d)
///   %f = f(%e,  %e)
///
///   CompareValueComplexity(%f, %c)
///
/// Since we do not continue running this routine on expression trees once we
/// have seen unequal values, there is no need to track them in the cache.
static int
CompareValueComplexity(EquivalenceClasses<const Value *> &EqCacheValue,
                       const LoopInfo *const LI, Value *LV, Value *RV,
                       unsigned Depth) {
  if (Depth > MaxValueCompareDepth || EqCacheValue.isEquivalent(LV, RV))
    return 0;

  // Order pointer values after integer values. This helps SCEVExpander form
  // GEPs.
  bool LIsPointer = LV->getType()->isPointerTy(),
       RIsPointer = RV->getType()->isPointerTy();
  if (LIsPointer != RIsPointer)
    return (int)LIsPointer - (int)RIsPointer;

  // Compare getValueID values.
  unsigned LID = LV->getValueID(), RID = RV->getValueID();
  if (LID != RID)
    return (int)LID - (int)RID;

  // Sort arguments by their position.
  if (const auto *LA = dyn_cast<Argument>(LV)) {
    const auto *RA = cast<Argument>(RV);
    unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
    return (int)LArgNo - (int)RArgNo;
  }

  if (const auto *LGV = dyn_cast<GlobalValue>(LV)) {
    const auto *RGV = cast<GlobalValue>(RV);

    const auto IsGVNameSemantic = [&](const GlobalValue *GV) {
      auto LT = GV->getLinkage();
      return !(GlobalValue::isPrivateLinkage(LT) ||
               GlobalValue::isInternalLinkage(LT));
    };

    // Use the names to distinguish the two values, but only if the
    // names are semantically important.
    if (IsGVNameSemantic(LGV) && IsGVNameSemantic(RGV))
      return LGV->getName().compare(RGV->getName());
  }

  // For instructions, compare their loop depth, and their operand count. This
  // is pretty loose.
  if (const auto *LInst = dyn_cast<Instruction>(LV)) {
    const auto *RInst = cast<Instruction>(RV);

    // Compare loop depths.
    const BasicBlock *LParent = LInst->getParent(),
                     *RParent = RInst->getParent();
    if (LParent != RParent) {
      unsigned LDepth = LI->getLoopDepth(LParent),
               RDepth = LI->getLoopDepth(RParent);
      if (LDepth != RDepth)
        return (int)LDepth - (int)RDepth;
    }

    // Compare the number of operands.
    unsigned LNumOps = LInst->getNumOperands(),
             RNumOps = RInst->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned Idx : seq(0u, LNumOps)) {
      int Result =
          CompareValueComplexity(EqCacheValue, LI, LInst->getOperand(Idx),
                                 RInst->getOperand(Idx), Depth + 1);
      if (Result != 0)
        return Result;
    }
  }

  EqCacheValue.unionSets(LV, RV);
  return 0;
}

// Return negative, zero, or positive, if LHS is less than, equal to, or
// greater than RHS, respectively. A three-way result allows recursive
// comparisons to be more efficient.
// If the maximum analysis depth was reached, return None, since we do not
// know for sure whether the two are equivalent.
static Optional<int>
CompareSCEVComplexity(EquivalenceClasses<const SCEV *> &EqCacheSCEV,
                      EquivalenceClasses<const Value *> &EqCacheValue,
                      const LoopInfo *const LI, const SCEV *LHS,
                      const SCEV *RHS, DominatorTree &DT, unsigned Depth = 0) {
  // Fast-path: SCEVs are uniqued so we can do a quick equality check.
  if (LHS == RHS)
    return 0;

  // Primarily, sort the SCEVs by their getSCEVType().
  SCEVTypes LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
  if (LType != RType)
    return (int)LType - (int)RType;

  if (EqCacheSCEV.isEquivalent(LHS, RHS))
    return 0;

  if (Depth > MaxSCEVCompareDepth)
    return None;

  // Aside from the getSCEVType() ordering, the particular ordering
  // isn't very important except that it's beneficial to be consistent,
  // so that (a + b) and (b + a) don't end up as different expressions.
  switch (LType) {
  case scUnknown: {
    const SCEVUnknown *LU = cast<SCEVUnknown>(LHS);
    const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

    int X = CompareValueComplexity(EqCacheValue, LI, LU->getValue(),
                                   RU->getValue(), Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scConstant: {
    const SCEVConstant *LC = cast<SCEVConstant>(LHS);
    const SCEVConstant *RC = cast<SCEVConstant>(RHS);

    // Compare constant values.
    const APInt &LA = LC->getAPInt();
    const APInt &RA = RC->getAPInt();
    unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
    if (LBitWidth != RBitWidth)
      return (int)LBitWidth - (int)RBitWidth;
    return LA.ult(RA) ? -1 : 1;
  }

  case scAddRecExpr: {
    const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS);
    const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);

    // There is always a dominance relationship between two recs that are used
    // by one SCEV, so we can safely sort recs by loop header dominance. We
    // require such an order in getAddExpr.
    const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
    if (LLoop != RLoop) {
      const BasicBlock *LHead = LLoop->getHeader(), *RHead = RLoop->getHeader();
      assert(LHead != RHead && "Two loops share the same header?");
      if (DT.dominates(LHead, RHead))
        return 1;
      else
        assert(DT.dominates(RHead, LHead) &&
               "No dominance between recurrences used by one SCEV?");
      return -1;
    }

    // Addrec complexity grows with operand count.
    unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    // Lexicographically compare.
    for (unsigned i = 0; i != LNumOps; ++i) {
      auto X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                     LA->getOperand(i), RA->getOperand(i), DT,
                                     Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.unionSets(LHS, RHS);
    return 0;
  }

  case scAddExpr:
  case scMulExpr:
  case scSMaxExpr:
  case scUMaxExpr:
  case scSMinExpr:
  case scUMinExpr: {
    const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS);
    const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);

    // Lexicographically compare n-ary expressions.
    unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    for (unsigned i = 0; i != LNumOps; ++i) {
      auto X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                     LC->getOperand(i), RC->getOperand(i), DT,
                                     Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.unionSets(LHS, RHS);
    return 0;
  }

  case scUDivExpr: {
    const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS);
    const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);

    // Lexicographically compare udiv expressions.
    auto X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getLHS(),
                                   RC->getLHS(), DT, Depth + 1);
    if (X != 0)
      return X;
    X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getRHS(),
                              RC->getRHS(), DT, Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scPtrToInt:
  case scTruncate:
  case scZeroExtend:
  case scSignExtend: {
    const SCEVCastExpr *LC = cast<SCEVCastExpr>(LHS);
    const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);

    // Compare cast expressions by operand.
    auto X =
        CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getOperand(),
                              RC->getOperand(), DT, Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

/// Given a list of SCEV objects, order them by their complexity, and group
/// objects of the same complexity together by value. When this routine is
/// finished, we know that any duplicates in the vector are consecutive and
/// that complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine. In other words, we don't want the results of
/// this to depend on where the addresses of various SCEV objects happened to
/// land in memory.
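///
/// For illustration (example added here, not part of the original comment):
/// given operands such as [%a, 2, %a, %b], this produces an order like
/// [2, %a, %a, %b] -- the constant sorts first and the duplicate %a's end up
/// adjacent, so a later fold can combine %a + %a into 2 * %a.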
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
                              LoopInfo *LI, DominatorTree &DT) {
  if (Ops.size() < 2) return;  // Noop

  EquivalenceClasses<const SCEV *> EqCacheSCEV;
  EquivalenceClasses<const Value *> EqCacheValue;

  // Whether LHS has provably less complexity than RHS.
  auto IsLessComplex = [&](const SCEV *LHS, const SCEV *RHS) {
    auto Complexity =
        CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LHS, RHS, DT);
    return Complexity && *Complexity < 0;
  };
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
    if (IsLessComplex(RHS, LHS))
      std::swap(LHS, RHS);
    return;
  }

  // Do the rough sort by complexity.
  llvm::stable_sort(Ops, [&](const SCEV *LHS, const SCEV *RHS) {
    return IsLessComplex(LHS, RHS);
  });

  // Now that we are sorted by complexity, group elements of the same
  // complexity. Note that this is, at worst, N^2, but the vector is likely to
  // be extremely short in practice. Note that we take this approach because we
  // do not want to depend on the addresses of the objects we are grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i;   // no need to rescan it.
        if (i == e-2) return;  // Done!
      }
    }
  }
}

/// Returns true if \p Ops contains a huge SCEV (the subtree of S contains at
/// least HugeExprThreshold nodes).
static bool hasHugeExpression(ArrayRef<const SCEV *> Ops) {
  return any_of(Ops, [](const SCEV *S) {
    return S->getExpressionSize() >= HugeExprThreshold;
  });
}

//===----------------------------------------------------------------------===//
//                      Simple SCEV method implementations
//===----------------------------------------------------------------------===//

/// Compute BC(It, K). The result has width W. Assume K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                       ScalarEvolution &SE,
                                       Type *ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose, W is the bitwidth of the return value. We must be prepared for
  // overflow. Hence, we must assure that the result of our computation is
  // equal to the accurate one modulo 2^W. Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula. However,
  // this formula can be implemented much more efficiently. The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic. To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse. Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T. The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits. This way, the bottom W+T bits of the product are accurate. Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate. Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula needs less than W + K bits. Also, the first formula
  // requires a division step, whereas this formula only requires multiplies
  // and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway. We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // register width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)
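  //
  // Worked example (added for illustration): for K = 3 and W = 32 we have
  // K! = 6 = 2^1 * 3, so T = 1 and the odd factor is 3. The product
  // It*(It-1)*(It-2) is computed at width W + T = 33 bits, shifted right by
  // T = 1, truncated to 32 bits, and multiplied by 0xAAAAAAAB, the
  // multiplicative inverse of 3 mod 2^32 (since 3 * 0xAAAAAAAB == 2^33 + 1
  // == 1 (mod 2^32)).
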
  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying for calculating K! / 2^T to avoid overflow.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult.lshrInPlace(TwoFactors);
    OddFactorial *= Mult;
  }

  // We need at least W + T bits for the multiplication step
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt::getOneBitSet(CalculationBits, T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W
  IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
                                                CalculationBits);
  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T
  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.

  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}

/// Return the value of this chain of recurrences at the specified iteration
/// number. We can evaluate this recurrence by multiplying each element in the
/// chain by the binomial coefficient corresponding to it. In other words, we
/// can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
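///
/// For example (added illustration): {5,+,3} evaluates to
/// 5*BC(It,0) + 3*BC(It,1) = 5 + 3*It, and {0,+,1,+,2} evaluates to
/// It + 2*(It*(It-1)/2) = It^2.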
const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
                                                ScalarEvolution &SE) const {
  return evaluateAtIteration(makeArrayRef(op_begin(), op_end()), It, SE);
}

const SCEV *
SCEVAddRecExpr::evaluateAtIteration(ArrayRef<const SCEV *> Operands,
                                    const SCEV *It, ScalarEvolution &SE) {
  assert(Operands.size() > 0);
  const SCEV *Result = Operands[0];
  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    const SCEV *Coeff = BinomialCoefficient(It, i, SE, Result->getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(Operands[i], Coeff));
  }
  return Result;
}

//===----------------------------------------------------------------------===//
//                    SCEV Expression folder implementations
//===----------------------------------------------------------------------===//

const SCEV *ScalarEvolution::getLosslessPtrToIntExpr(const SCEV *Op,
                                                     unsigned Depth) {
  assert(Depth <= 1 &&
         "getLosslessPtrToIntExpr() should self-recurse at most once.");

  // We could be called with an integer-typed operand during SCEV rewrites.
  // Since the operand is an integer already, just perform zext/trunc/self cast.
  if (!Op->getType()->isPointerTy())
    return Op;

  // What would be an ID for such a SCEV cast expression?
  FoldingSetNodeID ID;
  ID.AddInteger(scPtrToInt);
  ID.AddPointer(Op);

  void *IP = nullptr;

  // Is there already an expression for such a cast?
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
    return S;

  // It isn't legal for optimizations to construct new ptrtoint expressions
  // for non-integral pointers.
  if (getDataLayout().isNonIntegralPointerType(Op->getType()))
    return getCouldNotCompute();

  Type *IntPtrTy = getDataLayout().getIntPtrType(Op->getType());

  // We can only trivially model ptrtoint if SCEV's effective (integer) type
  // is sufficiently wide to represent all possible pointer values.
  // We could theoretically teach SCEV to truncate wider pointers, but
  // that isn't implemented for now.
  if (getDataLayout().getTypeSizeInBits(getEffectiveSCEVType(Op->getType())) !=
      getDataLayout().getTypeSizeInBits(IntPtrTy))
    return getCouldNotCompute();

  // If not, is this expression something we can't reduce any further?
  if (auto *U = dyn_cast<SCEVUnknown>(Op)) {
    // Perform some basic constant folding. If the operand of the ptr2int cast
    // is a null pointer, don't create a ptr2int SCEV expression (that will be
    // left as-is), but produce a zero constant.
    // NOTE: We could handle a more general case, but lack motivational cases.
    if (isa<ConstantPointerNull>(U->getValue()))
      return getZero(IntPtrTy);

    // Create an explicit cast node.
    // We can reuse the existing insert position since if we get here,
    // we won't have made any changes which would invalidate it.
    SCEV *S = new (SCEVAllocator)
        SCEVPtrToIntExpr(ID.Intern(SCEVAllocator), Op, IntPtrTy);
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
    return S;
  }

  assert(Depth == 0 && "getLosslessPtrToIntExpr() should not self-recurse for "
                       "non-SCEVUnknown's.");

  // Otherwise, we've got some expression that is more complex than just a
  // single SCEVUnknown. But we don't want to have a SCEVPtrToIntExpr of an
  // arbitrary expression, we want to have SCEVPtrToIntExpr of an SCEVUnknown
  // only, and the expressions must otherwise be integer-typed.
  // So sink the cast down to the SCEVUnknown's.

  /// The SCEVPtrToIntSinkingRewriter takes a scalar evolution expression,
  /// which computes a pointer-typed value, and rewrites the whole expression
  /// tree so that *all* the computations are done on integers, and the only
  /// pointer-typed operands in the expression are SCEVUnknown.
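  ///
  /// For example (added illustration): rewriting ptrtoint of the pointer
  /// expression (4 + %ptr) yields (4 + ptrtoint(%ptr)) -- the addition is
  /// redone on integers, and only the SCEVUnknown %ptr stays behind a
  /// ptrtoint cast.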
  class SCEVPtrToIntSinkingRewriter
      : public SCEVRewriteVisitor<SCEVPtrToIntSinkingRewriter> {
    using Base = SCEVRewriteVisitor<SCEVPtrToIntSinkingRewriter>;

  public:
    SCEVPtrToIntSinkingRewriter(ScalarEvolution &SE) : SCEVRewriteVisitor(SE) {}

    static const SCEV *rewrite(const SCEV *Scev, ScalarEvolution &SE) {
      SCEVPtrToIntSinkingRewriter Rewriter(SE);
      return Rewriter.visit(Scev);
    }

    const SCEV *visit(const SCEV *S) {
      Type *STy = S->getType();
      // If the expression is not pointer-typed, just keep it as-is.
      if (!STy->isPointerTy())
        return S;
      // Else, recursively sink the cast down into it.
      return Base::visit(S);
    }

    const SCEV *visitAddExpr(const SCEVAddExpr *Expr) {
      SmallVector<const SCEV *, 2> Operands;
      bool Changed = false;
      for (auto *Op : Expr->operands()) {
        Operands.push_back(visit(Op));
        Changed |= Op != Operands.back();
      }
      return !Changed ? Expr : SE.getAddExpr(Operands, Expr->getNoWrapFlags());
    }

    const SCEV *visitMulExpr(const SCEVMulExpr *Expr) {
      SmallVector<const SCEV *, 2> Operands;
      bool Changed = false;
      for (auto *Op : Expr->operands()) {
        Operands.push_back(visit(Op));
        Changed |= Op != Operands.back();
      }
      return !Changed ? Expr : SE.getMulExpr(Operands, Expr->getNoWrapFlags());
    }

    const SCEV *visitUnknown(const SCEVUnknown *Expr) {
      assert(Expr->getType()->isPointerTy() &&
             "Should only reach pointer-typed SCEVUnknown's.");
      return SE.getLosslessPtrToIntExpr(Expr, /*Depth=*/1);
    }
  };

  // And actually perform the cast sinking.
  const SCEV *IntOp = SCEVPtrToIntSinkingRewriter::rewrite(Op, *this);
  assert(IntOp->getType()->isIntegerTy() &&
         "We must have succeeded in sinking the cast, "
         "and ending up with an integer-typed expression!");
  return IntOp;
}

const SCEV *ScalarEvolution::getPtrToIntExpr(const SCEV *Op, Type *Ty) {
  assert(Ty->isIntegerTy() && "Target type must be an integer type!");

  const SCEV *IntOp = getLosslessPtrToIntExpr(Op);
  if (isa<SCEVCouldNotCompute>(IntOp))
    return IntOp;

  return getTruncateOrZeroExtend(IntOp, Ty);
}

const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op, Type *Ty,
                                             unsigned Depth) {
  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
         "This is not a truncating conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  assert(!Op->getType()->isPointerTy() && "Can't truncate pointer!");
  Ty = getEffectiveSCEVType(Ty);

  FoldingSetNodeID ID;
  ID.AddInteger(scTruncate);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = nullptr;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
        cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));

  // trunc(trunc(x)) --> trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    return getTruncateExpr(ST->getOperand(), Ty, Depth + 1);

  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getTruncateOrSignExtend(SS->getOperand(), Ty, Depth + 1);

  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getTruncateOrZeroExtend(SZ->getOperand(), Ty, Depth + 1);

  if (Depth > MaxCastDepth) {
    SCEV *S =
        new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator), Op, Ty);
    UniqueSCEVs.InsertNode(S, IP);
    addToLoopUseLists(S);
    return S;
  }

  // trunc(x1 + ... + xN) --> trunc(x1) + ... + trunc(xN) and
  // trunc(x1 * ... * xN) --> trunc(x1) * ... * trunc(xN),
  // if after transforming we have at most one truncate, not counting truncates
  // that replace other casts.
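  // For example (added illustration): with %a : i32 and %b : i64,
  //   trunc i64 ((zext i32 %a to i64) + %b) to i32
  // becomes (%a + trunc(%b)): truncating the zext merely replaces that cast,
  // so only one genuinely new truncate remains and the distribution pays off.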
  if (isa<SCEVAddExpr>(Op) || isa<SCEVMulExpr>(Op)) {
    auto *CommOp = cast<SCEVCommutativeExpr>(Op);
    SmallVector<const SCEV *, 4> Operands;
    unsigned numTruncs = 0;
    for (unsigned i = 0, e = CommOp->getNumOperands(); i != e && numTruncs < 2;
         ++i) {
      const SCEV *S = getTruncateExpr(CommOp->getOperand(i), Ty, Depth + 1);
      if (!isa<SCEVIntegralCastExpr>(CommOp->getOperand(i)) &&
          isa<SCEVTruncateExpr>(S))
        numTruncs++;
      Operands.push_back(S);
    }
    if (numTruncs < 2) {
      if (isa<SCEVAddExpr>(Op))
        return getAddExpr(Operands);
      else if (isa<SCEVMulExpr>(Op))
        return getMulExpr(Operands);
      else
        llvm_unreachable("Unexpected SCEV type for Op.");
    }
    // Although we checked in the beginning that ID is not in the cache, it is
    // possible that during the recursion and modifications above the ID was
    // inserted into the cache. So if we find it, just return it.
    if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
      return S;
  }

  // If the input value is a chrec scev, truncate the chrec's operands.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    for (const SCEV *Op : AddRec->operands())
      Operands.push_back(getTruncateExpr(Op, Ty, Depth + 1));
    return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap);
  }

  // Return zero if truncating to known zeros.
  uint32_t MinTrailingZeros = GetMinTrailingZeros(Op);
  if (MinTrailingZeros >= getTypeSizeInBits(Ty))
    return getZero(Ty);

  // The cast wasn't folded; create an explicit cast node. We can reuse
  // the existing insert position since if we get here, we won't have
  // made any changes which would invalidate it.
  SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
                                                 Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  addToLoopUseLists(S);
  return S;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// signed overflow as long as the value of the recurrence within the
// loop does not exceed this limit before incrementing.
static const SCEV *getSignedOverflowLimitForStep(const SCEV *Step,
                                                 ICmpInst::Predicate *Pred,
                                                 ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  if (SE->isKnownPositive(Step)) {
    *Pred = ICmpInst::ICMP_SLT;
    return SE->getConstant(APInt::getSignedMinValue(BitWidth) -
                           SE->getSignedRangeMax(Step));
  }
  if (SE->isKnownNegative(Step)) {
    *Pred = ICmpInst::ICMP_SGT;
    return SE->getConstant(APInt::getSignedMaxValue(BitWidth) -
                           SE->getSignedRangeMin(Step));
  }
  return nullptr;
}

// Get the limit of a recurrence such that incrementing by Step cannot cause
// unsigned overflow as long as the value of the recurrence within the loop does
// not exceed this limit before incrementing.
static const SCEV *getUnsignedOverflowLimitForStep(const SCEV *Step,
                                                   ICmpInst::Predicate *Pred,
                                                   ScalarEvolution *SE) {
  unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
  *Pred = ICmpInst::ICMP_ULT;

  return SE->getConstant(APInt::getMinValue(BitWidth) -
                         SE->getUnsignedRangeMax(Step));
}

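// Worked example (added for illustration): for an 8-bit recurrence whose step
// lies in [1, 4], the unsigned limit is 0 - 4 == 252 with predicate ULT --
// whenever the current value is ult 252, adding at most 4 cannot wrap past
// 255.
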
namespace {

struct ExtendOpTraitsBase {
  typedef const SCEV *(ScalarEvolution::*GetExtendExprTy)(const SCEV *, Type *,
                                                          unsigned);
};

// Used to make code generic over signed and unsigned overflow.
template <typename ExtendOp> struct ExtendOpTraits {
  // Members present:
  //
  // static const SCEV::NoWrapFlags WrapType;
  //
  // static const ExtendOpTraitsBase::GetExtendExprTy GetExtendExpr;
  //
  // static const SCEV *getOverflowLimitForStep(const SCEV *Step,
  //                                            ICmpInst::Predicate *Pred,
  //                                            ScalarEvolution *SE);
};

template <>
struct ExtendOpTraits<SCEVSignExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNSW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getSignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVSignExtendExpr>::GetExtendExpr = &ScalarEvolution::getSignExtendExpr;

template <>
struct ExtendOpTraits<SCEVZeroExtendExpr> : public ExtendOpTraitsBase {
  static const SCEV::NoWrapFlags WrapType = SCEV::FlagNUW;

  static const GetExtendExprTy GetExtendExpr;

  static const SCEV *getOverflowLimitForStep(const SCEV *Step,
                                             ICmpInst::Predicate *Pred,
                                             ScalarEvolution *SE) {
    return getUnsignedOverflowLimitForStep(Step, Pred, SE);
  }
};

const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
    SCEVZeroExtendExpr>::GetExtendExpr = &ScalarEvolution::getZeroExtendExpr;

} // end anonymous namespace

// The recurrence AR has been shown to have no signed/unsigned wrap or something
// close to it. Typically, if we can prove NSW/NUW for AR, then we can just as
// easily prove NSW/NUW for its preincrement or postincrement sibling. This
// allows normalizing a sign/zero extended AddRec as such:
// {sext/zext(Step + Start),+,Step} => {(Step + sext/zext(Start)),+,Step}
// As a result, the expression "Step + sext/zext(PreIncAR)" is congruent with
// "sext/zext(PostIncAR)".
template <typename ExtendOpTy>
static const SCEV *getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE, unsigned Depth) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const Loop *L = AR->getLoop();
  const SCEV *Start = AR->getStart();
  const SCEV *Step = AR->getStepRecurrence(*SE);

  // Check for a simple looking step prior to loop entry.
  const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start);
  if (!SA)
    return nullptr;

  // Create an AddExpr for "PreStart" after subtracting Step. Full SCEV
  // subtraction is expensive. For this purpose, perform a quick and dirty
  // difference, by checking for Step in the operand list.
  SmallVector<const SCEV *, 4> DiffOps;
  for (const SCEV *Op : SA->operands())
    if (Op != Step)
      DiffOps.push_back(Op);

  if (DiffOps.size() == SA->getNumOperands())
    return nullptr;

  // Try to prove `WrapType` (SCEV::FlagNSW or SCEV::FlagNUW) on `PreStart` +
  // `Step`:

  // 1. NSW/NUW flags on the step increment.
  auto PreStartFlags =
    ScalarEvolution::maskFlags(SA->getNoWrapFlags(), SCEV::FlagNUW);
  const SCEV *PreStart = SE->getAddExpr(DiffOps, PreStartFlags);
  const SCEVAddRecExpr *PreAR = dyn_cast<SCEVAddRecExpr>(
      SE->getAddRecExpr(PreStart, Step, L, SCEV::FlagAnyWrap));

  // "{S,+,X} is <nsw>/<nuw>" and "the backedge is taken at least once" implies
  // "S+X does not sign/unsign-overflow".
  //

  const SCEV *BECount = SE->getBackedgeTakenCount(L);
  if (PreAR && PreAR->getNoWrapFlags(WrapType) &&
      !isa<SCEVCouldNotCompute>(BECount) && SE->isKnownPositive(BECount))
    return PreStart;

  // 2. Direct overflow check on the step operation's expression.
  unsigned BitWidth = SE->getTypeSizeInBits(AR->getType());
  Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2);
  const SCEV *OperandExtendedStart =
      SE->getAddExpr((SE->*GetExtendExpr)(PreStart, WideTy, Depth),
                     (SE->*GetExtendExpr)(Step, WideTy, Depth));
  if ((SE->*GetExtendExpr)(Start, WideTy, Depth) == OperandExtendedStart) {
    if (PreAR && AR->getNoWrapFlags(WrapType)) {
      // If we know `AR` == {`PreStart`+`Step`,+,`Step`} is `WrapType` (FlagNSW
      // or FlagNUW) and that `PreStart` + `Step` is `WrapType` too, then
      // `PreAR` == {`PreStart`,+,`Step`} is also `WrapType`. Cache this fact.
      SE->setNoWrapFlags(const_cast<SCEVAddRecExpr *>(PreAR), WrapType);
    }
    return PreStart;
  }

  // 3. Loop precondition.
  ICmpInst::Predicate Pred;
  const SCEV *OverflowLimit =
      ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE);

  if (OverflowLimit &&
      SE->isLoopEntryGuardedByCond(L, Pred, PreStart, OverflowLimit))
    return PreStart;

  return nullptr;
}

// Get the normalized zero or sign extended expression for this AddRec's Start.
template <typename ExtendOpTy>
static const SCEV *getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty,
                                        ScalarEvolution *SE,
                                        unsigned Depth) {
  auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;

  const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE, Depth);
  if (!PreStart)
    return (SE->*GetExtendExpr)(AR->getStart(), Ty, Depth);

  return SE->getAddExpr((SE->*GetExtendExpr)(AR->getStepRecurrence(*SE), Ty,
                                             Depth),
                        (SE->*GetExtendExpr)(PreStart, Ty, Depth));
}

// Try to prove away overflow by looking at "nearby" add recurrences. A
// motivating example for this rule: if we know `{0,+,4}` is `ult` `-1` and it
// does not itself wrap then we can conclude that `{1,+,4}` is `nuw`.
//
// Formally:
//
//     {S,+,X} == {S-T,+,X} + T
//  => Ext({S,+,X}) == Ext({S-T,+,X} + T)
//
//  If ({S-T,+,X} + T) does not overflow ... (1)
//
//  RHS == Ext({S-T,+,X} + T) == Ext({S-T,+,X}) + Ext(T)
//
//  If {S-T,+,X} does not overflow ... (2)
//
//  RHS == Ext({S-T,+,X}) + Ext(T) == {Ext(S-T),+,Ext(X)} + Ext(T)
//      == {Ext(S-T)+Ext(T),+,Ext(X)}
//
//  If (S-T)+T does not overflow ... (3)
//
//  RHS == {Ext(S-T)+Ext(T),+,Ext(X)} == {Ext(S-T+T),+,Ext(X)}
//      == {Ext(S),+,Ext(X)} == LHS
//
// Thus, if (1), (2) and (3) are true for some T, then
//   Ext({S,+,X}) == {Ext(S),+,Ext(X)}
//
// (3) is implied by (1) -- "(S-T)+T does not overflow" is simply "({S-T,+,X}+T)
// does not overflow" restricted to the 0th iteration. Therefore we only need
// to check for (1) and (2).
//
// In the current context, S is `Start`, X is `Step`, Ext is `ExtendOpTy` and T
// is `Delta` (defined below).
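//
// Concrete instance (added for illustration): with S = 1, X = 4 and T = 1,
// {1,+,4} == {0,+,4} + 1. If {0,+,4} is already known <nuw> (condition (2))
// and {0,+,4} stays ult -1 so that adding 1 cannot wrap (condition (1)),
// then zext({1,+,4}) == {zext(1),+,zext(4)}, matching the motivating example
// above.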
template <typename ExtendOpTy>
bool ScalarEvolution::proveNoWrapByVaryingStart(const SCEV *Start,
                                                const SCEV *Step,
                                                const Loop *L) {
  auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;

  // We restrict `Start` to a constant to prevent SCEV from spending too much
  // time here. It is correct (but more expensive) to continue with a
  // non-constant `Start` and do a general SCEV subtraction to compute
  // `PreStart` below.
  const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start);
  if (!StartC)
    return false;

  APInt StartAI = StartC->getAPInt();

  for (unsigned Delta : {-2, -1, 1, 2}) {
    const SCEV *PreStart = getConstant(StartAI - Delta);

    FoldingSetNodeID ID;
    ID.AddInteger(scAddRecExpr);
    ID.AddPointer(PreStart);
    ID.AddPointer(Step);
    ID.AddPointer(L);
    void *IP = nullptr;
    const auto *PreAR =
      static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));

    // Give up if we don't already have the add recurrence we need because
    // actually constructing an add recurrence is relatively expensive.
    if (PreAR && PreAR->getNoWrapFlags(WrapType)) {  // proves (2)
      const SCEV *DeltaS = getConstant(StartC->getType(), Delta);
      ICmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
      const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(
          DeltaS, &Pred, this);
      if (Limit && isKnownPredicate(Pred, PreAR, Limit))  // proves (1)
        return true;
    }
  }

  return false;
}

// Finds an integer D for an expression (C + x + y + ...) such that the top
// level addition in (D + (C - D + x + y + ...)) would not wrap (signed or
// unsigned) and the number of trailing zeros of (C - D + x + y + ...) is
// maximized, where C is the \p ConstantTerm, x, y, ... are arbitrary SCEVs, and
// the (C + x + y + ...) expression is \p WholeAddExpr.
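//
// Worked example (added for illustration): with BitWidth == 8, C == 0xF5 and
// min trailing zeros of (x + y + ...) == 4, D == C.trunc(4).zext(8) == 0x05.
// Then (C - D + x + y + ...) == (0xF0 + x + y + ...) keeps 4 trailing zeros,
// and adding D < 2^4 back only fills those zero bits, so the top-level
// addition cannot carry and therefore cannot wrap.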
static APInt extractConstantWithoutWrapping(ScalarEvolution &SE,
                                            const SCEVConstant *ConstantTerm,
                                            const SCEVAddExpr *WholeAddExpr) {
  const APInt &C = ConstantTerm->getAPInt();
  const unsigned BitWidth = C.getBitWidth();
  // Find number of trailing zeros of (x + y + ...) w/o the C first:
  uint32_t TZ = BitWidth;
  for (unsigned I = 1, E = WholeAddExpr->getNumOperands(); I < E && TZ; ++I)
    TZ = std::min(TZ, SE.GetMinTrailingZeros(WholeAddExpr->getOperand(I)));
  if (TZ) {
    // Set D to be as many least significant bits of C as possible while still
    // guaranteeing that adding D to (C - D + x + y + ...) won't cause a wrap:
    return TZ < BitWidth ? C.trunc(TZ).zext(BitWidth) : C;
  }
  return APInt(BitWidth, 0);
}
1561 // Finds an integer D for an affine AddRec expression {C,+,x} such that the top
1562 // level addition in (D + {C-D,+,x}) would not wrap (signed or unsigned) and the
1563 // number of trailing zeros of (C - D + x * n) is maximized, where C is the \p
1564 // ConstantStart, x is an arbitrary \p Step, and n is the loop trip count.
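// For instance (hypothetical recurrence): {7,+,4} with a step known to be a
// multiple of 4 gives TZ = 2 and D = 7 mod 4 = 3, splitting the recurrence
// as 3 + {4,+,4}; every value of the residual recurrence keeps two trailing
// zeros, so re-adding 3 cannot carry.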
1565 static APInt extractConstantWithoutWrapping(ScalarEvolution &SE,
1566 const APInt &ConstantStart,
1567 const SCEV *Step) {
1568 const unsigned BitWidth = ConstantStart.getBitWidth();
1569 const uint32_t TZ = SE.GetMinTrailingZeros(Step);
1570 if (TZ)
1571 return TZ < BitWidth ? ConstantStart.trunc(TZ).zext(BitWidth)
1572 : ConstantStart;
1573 return APInt(BitWidth, 0);
1576 const SCEV *
1577 ScalarEvolution::getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) {
1578 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
1579 "This is not an extending conversion!");
1580 assert(isSCEVable(Ty) &&
1581 "This is not a conversion to a SCEVable type!");
1582 assert(!Op->getType()->isPointerTy() && "Can't extend pointer!");
1583 Ty = getEffectiveSCEVType(Ty);
1585 // Fold if the operand is constant.
1586 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
1587 return getConstant(
1588 cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(), Ty)));
1590 // zext(zext(x)) --> zext(x)
1591 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
1592 return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1);
1594 // Before doing any expensive analysis, check to see if we've already
1595 // computed a SCEV for this Op and Ty.
1596 FoldingSetNodeID ID;
1597 ID.AddInteger(scZeroExtend);
1598 ID.AddPointer(Op);
1599 ID.AddPointer(Ty);
1600 void *IP = nullptr;
1601 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1602 if (Depth > MaxCastDepth) {
1603 SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
1604 Op, Ty);
1605 UniqueSCEVs.InsertNode(S, IP);
1606 addToLoopUseLists(S);
1607 return S;
1610 // zext(trunc(x)) --> zext(x) or x or trunc(x)
1611 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
1612 // It's possible the bits taken off by the truncate were all zero bits. If
1613 // so, we should be able to simplify this further.
1614 const SCEV *X = ST->getOperand();
1615 ConstantRange CR = getUnsignedRange(X);
1616 unsigned TruncBits = getTypeSizeInBits(ST->getType());
1617 unsigned NewBits = getTypeSizeInBits(Ty);
1618 if (CR.truncate(TruncBits).zeroExtend(NewBits).contains(
1619 CR.zextOrTrunc(NewBits)))
1620 return getTruncateOrZeroExtend(X, Ty, Depth);
1623 // If the input value is a chrec scev, and we can prove that the value
1624 // did not overflow the old, smaller value, we can zero extend all of the
1625 // operands (often constants). This allows analysis of something like
1626 // this: for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
1627 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
1628 if (AR->isAffine()) {
1629 const SCEV *Start = AR->getStart();
1630 const SCEV *Step = AR->getStepRecurrence(*this);
1631 unsigned BitWidth = getTypeSizeInBits(AR->getType());
1632 const Loop *L = AR->getLoop();
1634 if (!AR->hasNoUnsignedWrap()) {
1635 auto NewFlags = proveNoWrapViaConstantRanges(AR);
1636 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags);
1639 // If we have special knowledge that this addrec won't overflow,
1640 // we don't need to do any further analysis.
1641 if (AR->hasNoUnsignedWrap())
1642 return getAddRecExpr(
1643 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1),
1644 getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
1646 // Check whether the backedge-taken count is SCEVCouldNotCompute.
1647 // Note that this serves two purposes: It filters out loops that are
1648 // simply not analyzable, and it covers the case where this code is
1649 // being called from within backedge-taken count analysis, such that
1650 // attempting to ask for the backedge-taken count would likely result
1651 // in infinite recursion. In the latter case, the analysis code will
1652 // cope with a conservative value, and it will take care to purge
1653 // that value once it has finished.
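// A sketch of the first check below, with assumed widths: for an i8
// {Start,+,Step} and max backedge-taken count m, Start + Step * m is
// computed once by zero-extending the i8 sum to i16 and once with every
// operand widened to i16 up front. Because SCEVs are uniqued, the two
// expressions fold to the same node exactly when the i8 computation could
// never have wrapped.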
1654 const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
1655 if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
1656 // Manually compute the final value for AR, checking for overflow.
1658 // Check whether the backedge-taken count can be losslessly cast to
1659 // the addrec's type. The count is always unsigned.
1660 const SCEV *CastedMaxBECount =
1661 getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth);
1662 const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend(
1663 CastedMaxBECount, MaxBECount->getType(), Depth);
1664 if (MaxBECount == RecastedMaxBECount) {
1665 Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
1666 // Check whether Start+Step*MaxBECount has no unsigned overflow.
1667 const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step,
1668 SCEV::FlagAnyWrap, Depth + 1);
1669 const SCEV *ZAdd = getZeroExtendExpr(getAddExpr(Start, ZMul,
1670 SCEV::FlagAnyWrap,
1671 Depth + 1),
1672 WideTy, Depth + 1);
1673 const SCEV *WideStart = getZeroExtendExpr(Start, WideTy, Depth + 1);
1674 const SCEV *WideMaxBECount =
1675 getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
1676 const SCEV *OperandExtendedAdd =
1677 getAddExpr(WideStart,
1678 getMulExpr(WideMaxBECount,
1679 getZeroExtendExpr(Step, WideTy, Depth + 1),
1680 SCEV::FlagAnyWrap, Depth + 1),
1681 SCEV::FlagAnyWrap, Depth + 1);
1682 if (ZAdd == OperandExtendedAdd) {
1683 // Cache knowledge of AR NUW, which is propagated to this AddRec.
1684 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNUW);
1685 // Return the expression with the addrec on the outside.
1686 return getAddRecExpr(
1687 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
1688 Depth + 1),
1689 getZeroExtendExpr(Step, Ty, Depth + 1), L,
1690 AR->getNoWrapFlags());
1692 // Similar to above, only this time treat the step value as signed.
1693 // This covers loops that count down.
1694 OperandExtendedAdd =
1695 getAddExpr(WideStart,
1696 getMulExpr(WideMaxBECount,
1697 getSignExtendExpr(Step, WideTy, Depth + 1),
1698 SCEV::FlagAnyWrap, Depth + 1),
1699 SCEV::FlagAnyWrap, Depth + 1);
1700 if (ZAdd == OperandExtendedAdd) {
1701 // Cache knowledge of AR NW, which is propagated to this AddRec.
1702 // Negative step causes unsigned wrap, but it still can't self-wrap.
1703 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW);
1704 // Return the expression with the addrec on the outside.
1705 return getAddRecExpr(
1706 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
1707 Depth + 1),
1708 getSignExtendExpr(Step, Ty, Depth + 1), L,
1709 AR->getNoWrapFlags());
1714 // Normally, in the cases we can prove no-overflow via a
1715 // backedge guarding condition, we can also compute a backedge
1716 // taken count for the loop. The exceptions are assumptions and
1717 // guards present in the loop -- SCEV is not great at exploiting
1718 // these to compute max backedge taken counts, but can still use
1719 // these to prove lack of overflow. Use this fact to avoid
1720 // doing extra work that may not pay off.
1721 if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards ||
1722 !AC.assumptions().empty()) {
1724 auto NewFlags = proveNoUnsignedWrapViaInduction(AR);
1725 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags);
1726 if (AR->hasNoUnsignedWrap()) {
1727 // Same as the nuw case above - duplicated here to avoid a compile-time
1728 // issue. It's not clear that the order of checks matters, but it's one
1729 // of two possible causes for a change which was reverted. Be
1730 // conservative for the moment.
1731 return getAddRecExpr(
1732 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
1733 Depth + 1),
1734 getZeroExtendExpr(Step, Ty, Depth + 1), L,
1735 AR->getNoWrapFlags());
1738 // For a negative step, we can extend the operands iff doing so only
1739 // traverses values in the range zext([0,UINT_MAX]).
1740 if (isKnownNegative(Step)) {
1741 const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) -
1742 getSignedRangeMin(Step));
1743 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) ||
1744 isKnownOnEveryIteration(ICmpInst::ICMP_UGT, AR, N)) {
1745 // Cache knowledge of AR NW, which is propagated to this
1746 // AddRec. Negative step causes unsigned wrap, but it
1747 // still can't self-wrap.
1748 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW);
1749 // Return the expression with the addrec on the outside.
1750 return getAddRecExpr(
1751 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this,
1752 Depth + 1),
1753 getSignExtendExpr(Step, Ty, Depth + 1), L,
1754 AR->getNoWrapFlags());
1759 // zext({C,+,Step}) --> (zext(D) + zext({C-D,+,Step}))<nuw><nsw>
1760 // if D + (C - D + Step * n) can be proven not to unsigned-wrap,
1761 // where D maximizes the number of trailing zeros of (C - D + Step * n)
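// For example (constants chosen for illustration): zext({7,+,4}) with a
// 4-aligned step yields D = 3, giving (zext(3) + zext({4,+,4}))<nuw><nsw>;
// the residual recurrence stays 4-aligned, which is what makes the outer
// addition provably wrap-free.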
1762 if (const auto *SC = dyn_cast<SCEVConstant>(Start)) {
1763 const APInt &C = SC->getAPInt();
1764 const APInt &D = extractConstantWithoutWrapping(*this, C, Step);
1765 if (D != 0) {
1766 const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth);
1767 const SCEV *SResidual =
1768 getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags());
1769 const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1);
1770 return getAddExpr(SZExtD, SZExtR,
1771 (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
1772 Depth + 1);
1776 if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) {
1777 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNUW);
1778 return getAddRecExpr(
1779 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty, this, Depth + 1),
1780 getZeroExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
1784 // zext(A % B) --> zext(A) % zext(B)
1786 const SCEV *LHS;
1787 const SCEV *RHS;
1788 if (matchURem(Op, LHS, RHS))
1789 return getURemExpr(getZeroExtendExpr(LHS, Ty, Depth + 1),
1790 getZeroExtendExpr(RHS, Ty, Depth + 1));
1793 // zext(A / B) --> zext(A) / zext(B).
1794 if (auto *Div = dyn_cast<SCEVUDivExpr>(Op))
1795 return getUDivExpr(getZeroExtendExpr(Div->getLHS(), Ty, Depth + 1),
1796 getZeroExtendExpr(Div->getRHS(), Ty, Depth + 1));
1798 if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) {
1799 // zext((A + B + ...)<nuw>) --> (zext(A) + zext(B) + ...)<nuw>
1800 if (SA->hasNoUnsignedWrap()) {
1801 // If the addition cannot overflow in the unsigned sense then we can, by
1802 // definition, commute the zero extension with the addition operation.
1803 SmallVector<const SCEV *, 4> Ops;
1804 for (const auto *Op : SA->operands())
1805 Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1));
1806 return getAddExpr(Ops, SCEV::FlagNUW, Depth + 1);
1809 // zext(C + x + y + ...) --> (zext(D) + zext((C - D) + x + y + ...))
1810 // if D + (C - D + x + y + ...) can be proven not to unsigned-wrap,
1811 // where D maximizes the number of trailing zeros of (C - D + x + y + ...)
1813 // Address arithmetic often contains expressions like
1814 // (zext (add (shl X, C1), C2)), for instance, (zext (5 + (4 * X))).
1815 // This transformation is useful when proving that such expressions are
1816 // equal or differ by a small constant amount; see the LoadStoreVectorizer pass.
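// Working the example above through: in (zext (5 + (4 * X))) the variable
// part is 4-aligned, so D = 5 mod 4 = 1 and the expression becomes
// (zext 1) + (zext (4 + (4 * X))). A neighbouring (zext (6 + (4 * X)))
// likewise becomes (zext 2) + (zext (4 + (4 * X))), so the two values
// visibly differ by exactly 1.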
1817 if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) {
1818 const APInt &D = extractConstantWithoutWrapping(*this, SC, SA);
1819 if (D != 0) {
1820 const SCEV *SZExtD = getZeroExtendExpr(getConstant(D), Ty, Depth);
1821 const SCEV *SResidual =
1822 getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth);
1823 const SCEV *SZExtR = getZeroExtendExpr(SResidual, Ty, Depth + 1);
1824 return getAddExpr(SZExtD, SZExtR,
1825 (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
1826 Depth + 1);
1831 if (auto *SM = dyn_cast<SCEVMulExpr>(Op)) {
1832 // zext((A * B * ...)<nuw>) --> (zext(A) * zext(B) * ...)<nuw>
1833 if (SM->hasNoUnsignedWrap()) {
1834 // If the multiply cannot overflow in the unsigned sense then we can, by
1835 // definition, commute the zero extension with the multiply operation.
1836 SmallVector<const SCEV *, 4> Ops;
1837 for (const auto *Op : SM->operands())
1838 Ops.push_back(getZeroExtendExpr(Op, Ty, Depth + 1));
1839 return getMulExpr(Ops, SCEV::FlagNUW, Depth + 1);
1842 // zext(2^K * (trunc X to iN)) to iM ->
1843 // 2^K * (zext(trunc X to i{N-K}) to iM)<nuw>
1845 // Proof:
1847 // zext(2^K * (trunc X to iN)) to iM
1848 // = zext((trunc X to iN) << K) to iM
1849 // = zext((trunc X to i{N-K}) << K)<nuw> to iM
1850 // (because shl removes the top K bits)
1851 // = zext((2^K * (trunc X to i{N-K}))<nuw>) to iM
1852 // = (2^K * (zext(trunc X to i{N-K}) to iM))<nuw>.
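// A concrete instance with assumed widths: zext(4 * (trunc X to i8)) to i32,
// i.e. K = 2, N = 8, M = 32, becomes 4 * (zext(trunc X to i6) to i32)<nuw>;
// shifting an i6 value left by two bits cannot leave i8, so the narrow
// multiply is <nuw> and commutes with the zero extension.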
1854 if (SM->getNumOperands() == 2)
1855 if (auto *MulLHS = dyn_cast<SCEVConstant>(SM->getOperand(0)))
1856 if (MulLHS->getAPInt().isPowerOf2())
1857 if (auto *TruncRHS = dyn_cast<SCEVTruncateExpr>(SM->getOperand(1))) {
1858 int NewTruncBits = getTypeSizeInBits(TruncRHS->getType()) -
1859 MulLHS->getAPInt().logBase2();
1860 Type *NewTruncTy = IntegerType::get(getContext(), NewTruncBits);
1861 return getMulExpr(
1862 getZeroExtendExpr(MulLHS, Ty),
1863 getZeroExtendExpr(
1864 getTruncateExpr(TruncRHS->getOperand(), NewTruncTy), Ty),
1865 SCEV::FlagNUW, Depth + 1);
1869 // The cast wasn't folded; create an explicit cast node.
1870 // Recompute the insert position, as it may have been invalidated.
1871 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1872 SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
1873 Op, Ty);
1874 UniqueSCEVs.InsertNode(S, IP);
1875 addToLoopUseLists(S);
1876 return S;
1879 const SCEV *
1880 ScalarEvolution::getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth) {
1881 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
1882 "This is not an extending conversion!");
1883 assert(isSCEVable(Ty) &&
1884 "This is not a conversion to a SCEVable type!");
1885 assert(!Op->getType()->isPointerTy() && "Can't extend pointer!");
1886 Ty = getEffectiveSCEVType(Ty);
1888 // Fold if the operand is constant.
1889 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
1890 return getConstant(
1891 cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(), Ty)));
1893 // sext(sext(x)) --> sext(x)
1894 if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
1895 return getSignExtendExpr(SS->getOperand(), Ty, Depth + 1);
1897 // sext(zext(x)) --> zext(x)
1898 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
1899 return getZeroExtendExpr(SZ->getOperand(), Ty, Depth + 1);
1901 // Before doing any expensive analysis, check to see if we've already
1902 // computed a SCEV for this Op and Ty.
1903 FoldingSetNodeID ID;
1904 ID.AddInteger(scSignExtend);
1905 ID.AddPointer(Op);
1906 ID.AddPointer(Ty);
1907 void *IP = nullptr;
1908 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1909 // Limit recursion depth.
1910 if (Depth > MaxCastDepth) {
1911 SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
1912 Op, Ty);
1913 UniqueSCEVs.InsertNode(S, IP);
1914 addToLoopUseLists(S);
1915 return S;
1918 // sext(trunc(x)) --> sext(x) or x or trunc(x)
1919 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
1920 // It's possible the bits taken off by the truncate were all sign bits. If
1921 // so, we should be able to simplify this further.
1922 const SCEV *X = ST->getOperand();
1923 ConstantRange CR = getSignedRange(X);
1924 unsigned TruncBits = getTypeSizeInBits(ST->getType());
1925 unsigned NewBits = getTypeSizeInBits(Ty);
1926 if (CR.truncate(TruncBits).signExtend(NewBits).contains(
1927 CR.sextOrTrunc(NewBits)))
1928 return getTruncateOrSignExtend(X, Ty, Depth);
1931 if (auto *SA = dyn_cast<SCEVAddExpr>(Op)) {
1932 // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw>
1933 if (SA->hasNoSignedWrap()) {
1934 // If the addition cannot overflow in the signed sense then we can, by
1935 // definition, commute the sign extension with the addition operation.
1936 SmallVector<const SCEV *, 4> Ops;
1937 for (const auto *Op : SA->operands())
1938 Ops.push_back(getSignExtendExpr(Op, Ty, Depth + 1));
1939 return getAddExpr(Ops, SCEV::FlagNSW, Depth + 1);
1942 // sext(C + x + y + ...) --> (sext(D) + sext((C - D) + x + y + ...))
1943 // if D + (C - D + x + y + ...) can be proven not to signed-wrap,
1944 // where D maximizes the number of trailing zeros of (C - D + x + y + ...)
1946 // For instance, this will bring two seemingly different expressions:
1947 // 1 + sext(5 + 20 * %x + 24 * %y) and
1948 // sext(6 + 20 * %x + 24 * %y)
1949 // to the same form:
1950 // 2 + sext(4 + 20 * %x + 24 * %y)
1951 if (const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) {
1952 const APInt &D = extractConstantWithoutWrapping(*this, SC, SA);
1953 if (D != 0) {
1954 const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth);
1955 const SCEV *SResidual =
1956 getAddExpr(getConstant(-D), SA, SCEV::FlagAnyWrap, Depth);
1957 const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1);
1958 return getAddExpr(SSExtD, SSExtR,
1959 (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
1960 Depth + 1);
1964 // If the input value is a chrec scev, and we can prove that the value
1965 // did not overflow the old, smaller value, we can sign extend all of the
1966 // operands (often constants). This allows analysis of something like
1967 // this: for (signed char X = 0; X < 100; ++X) { int Y = X; }
1968 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
1969 if (AR->isAffine()) {
1970 const SCEV *Start = AR->getStart();
1971 const SCEV *Step = AR->getStepRecurrence(*this);
1972 unsigned BitWidth = getTypeSizeInBits(AR->getType());
1973 const Loop *L = AR->getLoop();
1975 if (!AR->hasNoSignedWrap()) {
1976 auto NewFlags = proveNoWrapViaConstantRanges(AR);
1977 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags);
1980 // If we have special knowledge that this addrec won't overflow,
1981 // we don't need to do any further analysis.
1982 if (AR->hasNoSignedWrap())
1983 return getAddRecExpr(
1984 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
1985 getSignExtendExpr(Step, Ty, Depth + 1), L, SCEV::FlagNSW);
1987 // Check whether the backedge-taken count is SCEVCouldNotCompute.
1988 // Note that this serves two purposes: It filters out loops that are
1989 // simply not analyzable, and it covers the case where this code is
1990 // being called from within backedge-taken count analysis, such that
1991 // attempting to ask for the backedge-taken count would likely result
1992 // in infinite recursion. In the latter case, the analysis code will
1993 // cope with a conservative value, and it will take care to purge
1994 // that value once it has finished.
1995 const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
1996 if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
1997 // Manually compute the final value for AR, checking for
1998 // overflow.
2000 // Check whether the backedge-taken count can be losslessly cast to
2001 // the addrec's type. The count is always unsigned.
2002 const SCEV *CastedMaxBECount =
2003 getTruncateOrZeroExtend(MaxBECount, Start->getType(), Depth);
2004 const SCEV *RecastedMaxBECount = getTruncateOrZeroExtend(
2005 CastedMaxBECount, MaxBECount->getType(), Depth);
2006 if (MaxBECount == RecastedMaxBECount) {
2007 Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
2008 // Check whether Start+Step*MaxBECount has no signed overflow.
2009 const SCEV *SMul = getMulExpr(CastedMaxBECount, Step,
2010 SCEV::FlagAnyWrap, Depth + 1);
2011 const SCEV *SAdd = getSignExtendExpr(getAddExpr(Start, SMul,
2012 SCEV::FlagAnyWrap,
2013 Depth + 1),
2014 WideTy, Depth + 1);
2015 const SCEV *WideStart = getSignExtendExpr(Start, WideTy, Depth + 1);
2016 const SCEV *WideMaxBECount =
2017 getZeroExtendExpr(CastedMaxBECount, WideTy, Depth + 1);
2018 const SCEV *OperandExtendedAdd =
2019 getAddExpr(WideStart,
2020 getMulExpr(WideMaxBECount,
2021 getSignExtendExpr(Step, WideTy, Depth + 1),
2022 SCEV::FlagAnyWrap, Depth + 1),
2023 SCEV::FlagAnyWrap, Depth + 1);
2024 if (SAdd == OperandExtendedAdd) {
2025 // Cache knowledge of AR NSW, which is propagated to this AddRec.
2026 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNSW);
2027 // Return the expression with the addrec on the outside.
2028 return getAddRecExpr(
2029 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this,
2030 Depth + 1),
2031 getSignExtendExpr(Step, Ty, Depth + 1), L,
2032 AR->getNoWrapFlags());
2034 // Similar to above, only this time treat the step value as unsigned.
2035 // This covers loops that count up with an unsigned step.
2036 OperandExtendedAdd =
2037 getAddExpr(WideStart,
2038 getMulExpr(WideMaxBECount,
2039 getZeroExtendExpr(Step, WideTy, Depth + 1),
2040 SCEV::FlagAnyWrap, Depth + 1),
2041 SCEV::FlagAnyWrap, Depth + 1);
2042 if (SAdd == OperandExtendedAdd) {
2043 // If AR wraps around then
2045 // abs(Step) * MaxBECount > unsigned-max(AR->getType())
2046 // => SAdd != OperandExtendedAdd
2048 // Thus (AR is not NW => SAdd != OperandExtendedAdd) <=>
2049 // (SAdd == OperandExtendedAdd => AR is NW)
2051 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNW);
2053 // Return the expression with the addrec on the outside.
2054 return getAddRecExpr(
2055 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this,
2056 Depth + 1),
2057 getZeroExtendExpr(Step, Ty, Depth + 1), L,
2058 AR->getNoWrapFlags());
2063 auto NewFlags = proveNoSignedWrapViaInduction(AR);
2064 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), NewFlags);
2065 if (AR->hasNoSignedWrap()) {
2066 // Same as the nsw case above - duplicated here to avoid a compile-time
2067 // issue. It's not clear that the order of checks matters, but it's one
2068 // of two possible causes for a change which was reverted. Be
2069 // conservative for the moment.
2070 return getAddRecExpr(
2071 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
2072 getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
2075 // sext({C,+,Step}) --> (sext(D) + sext({C-D,+,Step}))<nuw><nsw>
2076 // if D + (C - D + Step * n) can be proven not to signed-wrap,
2077 // where D maximizes the number of trailing zeros of (C - D + Step * n)
2078 if (const auto *SC = dyn_cast<SCEVConstant>(Start)) {
2079 const APInt &C = SC->getAPInt();
2080 const APInt &D = extractConstantWithoutWrapping(*this, C, Step);
2081 if (D != 0) {
2082 const SCEV *SSExtD = getSignExtendExpr(getConstant(D), Ty, Depth);
2083 const SCEV *SResidual =
2084 getAddRecExpr(getConstant(C - D), Step, L, AR->getNoWrapFlags());
2085 const SCEV *SSExtR = getSignExtendExpr(SResidual, Ty, Depth + 1);
2086 return getAddExpr(SSExtD, SSExtR,
2087 (SCEV::NoWrapFlags)(SCEV::FlagNSW | SCEV::FlagNUW),
2088 Depth + 1);
2092 if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) {
2093 setNoWrapFlags(const_cast<SCEVAddRecExpr *>(AR), SCEV::FlagNSW);
2094 return getAddRecExpr(
2095 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty, this, Depth + 1),
2096 getSignExtendExpr(Step, Ty, Depth + 1), L, AR->getNoWrapFlags());
2100 // If the input value is provably non-negative and we could not simplify
2101 // away the sext, build a zext instead.
2102 if (isKnownNonNegative(Op))
2103 return getZeroExtendExpr(Op, Ty, Depth + 1);
2105 // The cast wasn't folded; create an explicit cast node.
2106 // Recompute the insert position, as it may have been invalidated.
2107 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
2108 SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
2109 Op, Ty);
2110 UniqueSCEVs.InsertNode(S, IP);
2111 addToLoopUseLists(S);
2112 return S;
2115 /// getAnyExtendExpr - Return a SCEV for the given operand extended with
2116 /// unspecified bits out to the given type.
2117 const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
2118 Type *Ty) {
2119 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
2120 "This is not an extending conversion!");
2121 assert(isSCEVable(Ty) &&
2122 "This is not a conversion to a SCEVable type!");
2123 Ty = getEffectiveSCEVType(Ty);
2125 // Sign-extend negative constants.
2126 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
2127 if (SC->getAPInt().isNegative())
2128 return getSignExtendExpr(Op, Ty);
2130 // Peel off a truncate cast.
2131 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
2132 const SCEV *NewOp = T->getOperand();
2133 if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
2134 return getAnyExtendExpr(NewOp, Ty);
2135 return getTruncateOrNoop(NewOp, Ty);
2138 // Next try a zext cast. If the cast is folded, use it.
2139 const SCEV *ZExt = getZeroExtendExpr(Op, Ty);
2140 if (!isa<SCEVZeroExtendExpr>(ZExt))
2141 return ZExt;
2143 // Next try a sext cast. If the cast is folded, use it.
2144 const SCEV *SExt = getSignExtendExpr(Op, Ty);
2145 if (!isa<SCEVSignExtendExpr>(SExt))
2146 return SExt;
2148 // Force the cast to be folded into the operands of an addrec.
2149 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) {
2150 SmallVector<const SCEV *, 4> Ops;
2151 for (const SCEV *Op : AR->operands())
2152 Ops.push_back(getAnyExtendExpr(Op, Ty));
2153 return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW);
2156 // If the expression is obviously signed, use the sext cast value.
2157 if (isa<SCEVSMaxExpr>(Op))
2158 return SExt;
2160 // Absent any other information, use the zext cast value.
2161 return ZExt;
2164 /// Process the given Ops list, which is a list of operands to be added under
2165 /// the given scale, and update the given map. This is a helper function for
2166 /// getAddRecExpr. As an example of what it does, given a sequence of operands
2167 /// that would form an add expression like this:
2169 /// m + n + 13 + (A * (o + p + (B * (q + m + 29)))) + r + (-1 * r)
2171 /// where A and B are constants, update the map with these values:
2173 /// (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
2175 /// and add 13 + A*B*29 to AccumulatedConstant.
2176 /// This will allow getAddRecExpr to produce this:
2178 /// 13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
2180 /// This form often exposes folding opportunities that are hidden in
2181 /// the original operand list.
2183 /// Return true iff it appears that any interesting folding opportunities
2184 /// may be exposed. This helps getAddRecExpr short-circuit extra work in
2185 /// the common case where no interesting opportunities are present, and
2186 /// is also used as a check to avoid infinite recursion.
2187 static bool
2188 CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
2189 SmallVectorImpl<const SCEV *> &NewOps,
2190 APInt &AccumulatedConstant,
2191 const SCEV *const *Ops, size_t NumOperands,
2192 const APInt &Scale,
2193 ScalarEvolution &SE) {
2194 bool Interesting = false;
2196 // Iterate over the add operands. They are sorted, with constants first.
2197 unsigned i = 0;
2198 while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
2199 ++i;
2200 // Pull a buried constant out to the outside.
2201 if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero())
2202 Interesting = true;
2203 AccumulatedConstant += Scale * C->getAPInt();
2206 // Next comes everything else. We're especially interested in multiplies
2207 // here, but they're in the middle, so just visit the rest with one loop.
2208 for (; i != NumOperands; ++i) {
2209 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
2210 if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
2211 APInt NewScale =
2212 Scale * cast<SCEVConstant>(Mul->getOperand(0))->getAPInt();
2213 if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
2214 // A multiplication of a constant with another add; recurse.
2215 const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1));
2216 Interesting |=
2217 CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
2218 Add->op_begin(), Add->getNumOperands(),
2219 NewScale, SE);
2220 } else {
2221 // A multiplication of a constant with some other value. Update
2222 // the map.
2223 SmallVector<const SCEV *, 4> MulOps(drop_begin(Mul->operands()));
2224 const SCEV *Key = SE.getMulExpr(MulOps);
2225 auto Pair = M.insert({Key, NewScale});
2226 if (Pair.second) {
2227 NewOps.push_back(Pair.first->first);
2228 } else {
2229 Pair.first->second += NewScale;
2230 // The map already had an entry for this value, which may indicate
2231 // a folding opportunity.
2232 Interesting = true;
2235 } else {
2236 // An ordinary operand. Update the map.
2237 std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
2238 M.insert({Ops[i], Scale});
2239 if (Pair.second) {
2240 NewOps.push_back(Pair.first->first);
2241 } else {
2242 Pair.first->second += Scale;
2243 // The map already had an entry for this value, which may indicate
2244 // a folding opportunity.
2245 Interesting = true;
2250 return Interesting;
2253 bool ScalarEvolution::willNotOverflow(Instruction::BinaryOps BinOp, bool Signed,
2254 const SCEV *LHS, const SCEV *RHS) {
2255 const SCEV *(ScalarEvolution::*Operation)(const SCEV *, const SCEV *,
2256 SCEV::NoWrapFlags, unsigned);
2257 switch (BinOp) {
2258 default:
2259 llvm_unreachable("Unsupported binary op");
2260 case Instruction::Add:
2261 Operation = &ScalarEvolution::getAddExpr;
2262 break;
2263 case Instruction::Sub:
2264 Operation = &ScalarEvolution::getMinusSCEV;
2265 break;
2266 case Instruction::Mul:
2267 Operation = &ScalarEvolution::getMulExpr;
2268 break;
2271 const SCEV *(ScalarEvolution::*Extension)(const SCEV *, Type *, unsigned) =
2272 Signed ? &ScalarEvolution::getSignExtendExpr
2273 : &ScalarEvolution::getZeroExtendExpr;
2275 // Check ext(LHS op RHS) == ext(LHS) op ext(RHS)
2276 auto *NarrowTy = cast<IntegerType>(LHS->getType());
2277 auto *WideTy =
2278 IntegerType::get(NarrowTy->getContext(), NarrowTy->getBitWidth() * 2);
2280 const SCEV *A = (this->*Extension)(
2281 (this->*Operation)(LHS, RHS, SCEV::FlagAnyWrap, 0), WideTy, 0);
2282 const SCEV *B = (this->*Operation)((this->*Extension)(LHS, WideTy, 0),
2283 (this->*Extension)(RHS, WideTy, 0),
2284 SCEV::FlagAnyWrap, 0);
2285 return A == B;
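// Usage sketch (hypothetical i8 operands A and B): calling
// willNotOverflow(Instruction::Add, /*Signed=*/false, getSCEV(A), getSCEV(B))
// compares zext(A + B) to i16 against zext(A) + zext(B) to i16; since SCEVs
// are uniqued, pointer equality of the two widened forms proves the narrow
// add cannot carry out.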
2288 std::pair<SCEV::NoWrapFlags, bool /*Deduced*/>
2289 ScalarEvolution::getStrengthenedNoWrapFlagsFromBinOp(
2290 const OverflowingBinaryOperator *OBO) {
2291 SCEV::NoWrapFlags Flags = SCEV::NoWrapFlags::FlagAnyWrap;
2293 if (OBO->hasNoUnsignedWrap())
2294 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
2295 if (OBO->hasNoSignedWrap())
2296 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW);
2298 bool Deduced = false;
2300 if (OBO->hasNoUnsignedWrap() && OBO->hasNoSignedWrap())
2301 return {Flags, Deduced};
2303 if (OBO->getOpcode() != Instruction::Add &&
2304 OBO->getOpcode() != Instruction::Sub &&
2305 OBO->getOpcode() != Instruction::Mul)
2306 return {Flags, Deduced};
2308 const SCEV *LHS = getSCEV(OBO->getOperand(0));
2309 const SCEV *RHS = getSCEV(OBO->getOperand(1));
2311 if (!OBO->hasNoUnsignedWrap() &&
2312 willNotOverflow((Instruction::BinaryOps)OBO->getOpcode(),
2313 /* Signed */ false, LHS, RHS)) {
2314 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
2315 Deduced = true;
2318 if (!OBO->hasNoSignedWrap() &&
2319 willNotOverflow((Instruction::BinaryOps)OBO->getOpcode(),
2320 /* Signed */ true, LHS, RHS)) {
2321 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW);
2322 Deduced = true;
2325 return {Flags, Deduced};
2328 // We're trying to construct a SCEV of type `Type' with `Ops' as operands and
2329 // `Flags' as can't-wrap behavior. Infer a more aggressive set of
2330 // can't-overflow flags for the operation if possible.
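// For example (assumed i8 operands): for X + 5 with no IR flags, the
// guaranteed no-signed-wrap region for "+ 5" is [-128, 123), i.e. all X with
// X <= 122; if getSignedRange(X) lies inside it, the code below tags the
// SCEV <nsw> even though the instruction carried no such flag.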
2331 static SCEV::NoWrapFlags
2332 StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type,
2333 const ArrayRef<const SCEV *> Ops,
2334 SCEV::NoWrapFlags Flags) {
2335 using namespace std::placeholders;
2337 using OBO = OverflowingBinaryOperator;
2339 bool CanAnalyze =
2340 Type == scAddExpr || Type == scAddRecExpr || Type == scMulExpr;
2341 (void)CanAnalyze;
2342 assert(CanAnalyze && "don't call from other places!");
2344 int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW;
2345 SCEV::NoWrapFlags SignOrUnsignWrap =
2346 ScalarEvolution::maskFlags(Flags, SignOrUnsignMask);
2348 // If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
2349 auto IsKnownNonNegative = [&](const SCEV *S) {
2350 return SE->isKnownNonNegative(S);
2353 if (SignOrUnsignWrap == SCEV::FlagNSW && all_of(Ops, IsKnownNonNegative))
2354 Flags =
2355 ScalarEvolution::setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask);
2357 SignOrUnsignWrap = ScalarEvolution::maskFlags(Flags, SignOrUnsignMask);
2359 if (SignOrUnsignWrap != SignOrUnsignMask &&
2360 (Type == scAddExpr || Type == scMulExpr) && Ops.size() == 2 &&
2361 isa<SCEVConstant>(Ops[0])) {
2363 auto Opcode = [&] {
2364 switch (Type) {
2365 case scAddExpr:
2366 return Instruction::Add;
2367 case scMulExpr:
2368 return Instruction::Mul;
2369 default:
2370 llvm_unreachable("Unexpected SCEV op.");
2372 }();
2374 const APInt &C = cast<SCEVConstant>(Ops[0])->getAPInt();
2376 // (A <opcode> C) --> (A <opcode> C)<nsw> if the op doesn't sign overflow.
2377 if (!(SignOrUnsignWrap & SCEV::FlagNSW)) {
2378 auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
2379 Opcode, C, OBO::NoSignedWrap);
2380 if (NSWRegion.contains(SE->getSignedRange(Ops[1])))
2381 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW);
2384 // (A <opcode> C) --> (A <opcode> C)<nuw> if the op doesn't unsign overflow.
2385 if (!(SignOrUnsignWrap & SCEV::FlagNUW)) {
2386 auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
2387 Opcode, C, OBO::NoUnsignedWrap);
2388 if (NUWRegion.contains(SE->getUnsignedRange(Ops[1])))
2389 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
2393 return Flags;
2396 bool ScalarEvolution::isAvailableAtLoopEntry(const SCEV *S, const Loop *L) {
2397 return isLoopInvariant(S, L) && properlyDominates(S, L->getHeader());
2400 /// Get a canonical add expression, or something simpler if possible.
2401 const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
2402 SCEV::NoWrapFlags OrigFlags,
2403 unsigned Depth) {
2404 assert(!(OrigFlags & ~(SCEV::FlagNUW | SCEV::FlagNSW)) &&
2405 "only nuw or nsw allowed");
2406 assert(!Ops.empty() && "Cannot get empty add!");
2407 if (Ops.size() == 1) return Ops[0];
2408 #ifndef NDEBUG
2409 Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
2410 for (unsigned i = 1, e = Ops.size(); i != e; ++i)
2411 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
2412 "SCEVAddExpr operand types don't match!");
2413 unsigned NumPtrs = count_if(
2414 Ops, [](const SCEV *Op) { return Op->getType()->isPointerTy(); });
2415 assert(NumPtrs <= 1 && "add has at most one pointer operand");
2416 #endif
2418 // Sort by complexity; this groups all similar expression types together.
2419 GroupByComplexity(Ops, &LI, DT);
2421 // If there are any constants, fold them together.
2422 unsigned Idx = 0;
2423 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
2424 ++Idx;
2425 assert(Idx < Ops.size());
2426 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
2427 // We found two constants, fold them together!
2428 Ops[0] = getConstant(LHSC->getAPInt() + RHSC->getAPInt());
2429 if (Ops.size() == 2) return Ops[0];
2430 Ops.erase(Ops.begin()+1); // Erase the folded element
2431 LHSC = cast<SCEVConstant>(Ops[0]);
2434 // If we are left with a constant zero being added, strip it off.
2435 if (LHSC->getValue()->isZero()) {
2436 Ops.erase(Ops.begin());
2437 --Idx;
2440 if (Ops.size() == 1) return Ops[0];
2443 // Delay expensive flag strengthening until necessary.
2444 auto ComputeFlags = [this, OrigFlags](const ArrayRef<const SCEV *> Ops) {
2445 return StrengthenNoWrapFlags(this, scAddExpr, Ops, OrigFlags);
2448 // Limit the depth of recursive calls.
2449 if (Depth > MaxArithDepth || hasHugeExpression(Ops))
2450 return getOrCreateAddExpr(Ops, ComputeFlags(Ops));
2452 if (SCEV *S = std::get<0>(findExistingSCEVInCache(scAddExpr, Ops))) {
2453 // Don't strengthen flags if we have no new information.
2454 SCEVAddExpr *Add = static_cast<SCEVAddExpr *>(S);
2455 if (Add->getNoWrapFlags(OrigFlags) != OrigFlags)
2456 Add->setNoWrapFlags(ComputeFlags(Ops));
2457 return S;
2460 // Okay, check to see if the same value occurs in the operand list more than
2461 // once. If so, merge them together into a multiply expression. Since we
2462 // sorted the list, these values are required to be adjacent.
2463 Type *Ty = Ops[0]->getType();
2464 bool FoundMatch = false;
2465 for (unsigned i = 0, e = Ops.size(); i != e-1; ++i)
2466 if (Ops[i] == Ops[i+1]) { // X + Y + Y --> X + Y*2
2467 // Scan ahead to count how many equal operands there are.
2468 unsigned Count = 2;
2469 while (i+Count != e && Ops[i+Count] == Ops[i])
2470 ++Count;
2471 // Merge the values into a multiply.
2472 const SCEV *Scale = getConstant(Ty, Count);
2473 const SCEV *Mul = getMulExpr(Scale, Ops[i], SCEV::FlagAnyWrap, Depth + 1);
2474 if (Ops.size() == Count)
2475 return Mul;
2476 Ops[i] = Mul;
2477 Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count);
2478 --i; e -= Count - 1;
2479 FoundMatch = true;
2481 if (FoundMatch)
2482 return getAddExpr(Ops, OrigFlags, Depth + 1);
2484 // Check for truncates. If all the operands are truncated from the same
2485 // type, see if factoring out the truncate would permit the result to be
2486 // folded. e.g., n*trunc(x) + m*trunc(y) --> trunc(trunc(n)*x + trunc(m)*y)
2487 // if the contents of the resulting outer trunc fold to something simple.
2488 auto FindTruncSrcType = [&]() -> Type * {
2489 // We're ultimately looking to fold an addrec of truncs and muls of only
2490 // constants and truncs, so if we find any other types of SCEV
2491 // as operands of the addrec then we bail and return nullptr here.
2492 // Otherwise, we return the type of the operand of a trunc that we find.
2493 if (auto *T = dyn_cast<SCEVTruncateExpr>(Ops[Idx]))
2494 return T->getOperand()->getType();
2495 if (const auto *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
2496 const auto *LastOp = Mul->getOperand(Mul->getNumOperands() - 1);
2497 if (const auto *T = dyn_cast<SCEVTruncateExpr>(LastOp))
2498 return T->getOperand()->getType();
2500 return nullptr;
2502 if (auto *SrcType = FindTruncSrcType()) {
2503 SmallVector<const SCEV *, 8> LargeOps;
2504 bool Ok = true;
2505 // Check all the operands to see if they can be represented in the
2506 // source type of the truncate.
2507 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
2508 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) {
2509 if (T->getOperand()->getType() != SrcType) {
2510 Ok = false;
2511 break;
2513 LargeOps.push_back(T->getOperand());
2514 } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
2515 LargeOps.push_back(getAnyExtendExpr(C, SrcType));
2516 } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
2517 SmallVector<const SCEV *, 8> LargeMulOps;
2518 for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
2519 if (const SCEVTruncateExpr *T =
2520 dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
2521 if (T->getOperand()->getType() != SrcType) {
2522 Ok = false;
2523 break;
2525 LargeMulOps.push_back(T->getOperand());
2526 } else if (const auto *C = dyn_cast<SCEVConstant>(M->getOperand(j))) {
2527 LargeMulOps.push_back(getAnyExtendExpr(C, SrcType));
2528 } else {
2529 Ok = false;
2530 break;
2533 if (Ok)
2534 LargeOps.push_back(getMulExpr(LargeMulOps, SCEV::FlagAnyWrap, Depth + 1));
2535 } else {
2536 Ok = false;
2537 break;
2540 if (Ok) {
2541 // Evaluate the expression in the larger type.
2542 const SCEV *Fold = getAddExpr(LargeOps, SCEV::FlagAnyWrap, Depth + 1);
2543 // If it folds to something simple, use it. Otherwise, don't.
2544 if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
2545 return getTruncateExpr(Fold, Ty);
2549 if (Ops.size() == 2) {
2550 // Check if we have an expression of the form ((X + C1) - C2), where C1 and
2551 // C2 can be folded in a way that allows retaining wrapping flags of (X +
2552 // C1).
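// For instance (hypothetical operands): ((X + 10)<nuw> + (-3)) folds to
// (X + 7)<nuw>, since 7 u<= 10 means the combined constant consumes no more
// of the unsigned range than the original did; the signed case below
// additionally requires matching signs and a non-increasing magnitude.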
2553 const SCEV *A = Ops[0];
2554 const SCEV *B = Ops[1];
2555 auto *AddExpr = dyn_cast<SCEVAddExpr>(B);
2556 auto *C = dyn_cast<SCEVConstant>(A);
2557 if (AddExpr && C && isa<SCEVConstant>(AddExpr->getOperand(0))) {
2558 auto C1 = cast<SCEVConstant>(AddExpr->getOperand(0))->getAPInt();
2559 auto C2 = C->getAPInt();
2560 SCEV::NoWrapFlags PreservedFlags = SCEV::FlagAnyWrap;
2562 APInt ConstAdd = C1 + C2;
2563 auto AddFlags = AddExpr->getNoWrapFlags();
2564 // Adding a smaller constant is NUW if the original AddExpr was NUW.
2565 if (ScalarEvolution::maskFlags(AddFlags, SCEV::FlagNUW) ==
2566 SCEV::FlagNUW &&
2567 ConstAdd.ule(C1)) {
2568 PreservedFlags =
2569 ScalarEvolution::setFlags(PreservedFlags, SCEV::FlagNUW);
2572 // Adding a constant with the same sign and no larger magnitude is NSW if
2573 // the original AddExpr was NSW.
2574 if (ScalarEvolution::maskFlags(AddFlags, SCEV::FlagNSW) ==
2575 SCEV::FlagNSW &&
2576 C1.isSignBitSet() == ConstAdd.isSignBitSet() &&
2577 ConstAdd.abs().ule(C1.abs())) {
2578 PreservedFlags =
2579 ScalarEvolution::setFlags(PreservedFlags, SCEV::FlagNSW);
2582 if (PreservedFlags != SCEV::FlagAnyWrap) {
2583 SmallVector<const SCEV *, 4> NewOps(AddExpr->op_begin(),
2584 AddExpr->op_end());
2585 NewOps[0] = getConstant(ConstAdd);
2586 return getAddExpr(NewOps, PreservedFlags);
2591 // Skip past any other cast SCEVs.
2592 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
2593 ++Idx;
2595 // If there are add operands they would be next.
2596 if (Idx < Ops.size()) {
2597 bool DeletedAdd = false;
2598 // If the original flags and all inlined SCEVAddExprs are NUW, use the
2599 // common NUW flag for the expression after inlining. Other flags cannot be
2600 // preserved, because they may depend on the original order of operations.
2601 SCEV::NoWrapFlags CommonFlags = maskFlags(OrigFlags, SCEV::FlagNUW);
2602 while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
2603 if (Ops.size() > AddOpsInlineThreshold ||
2604 Add->getNumOperands() > AddOpsInlineThreshold)
2605 break;
2606 // If we have an add, expand the add operands onto the end of the operands
2607 // list.
2608 Ops.erase(Ops.begin()+Idx);
2609 Ops.append(Add->op_begin(), Add->op_end());
2610 DeletedAdd = true;
2611 CommonFlags = maskFlags(CommonFlags, Add->getNoWrapFlags());
2614 // If we deleted at least one add, we added operands to the end of the list,
2615 // and they are not necessarily sorted. Recurse to re-sort and re-simplify
2616 // any operands we just acquired.
2617 if (DeletedAdd)
2618 return getAddExpr(Ops, CommonFlags, Depth + 1);
2621 // Skip over the add expression until we get to a multiply.
2622 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
2623 ++Idx;
2625 // Check to see if there are any folding opportunities present with
2626 // operands multiplied by constant values.
2627 if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
2628 uint64_t BitWidth = getTypeSizeInBits(Ty);
2629 DenseMap<const SCEV *, APInt> M;
2630 SmallVector<const SCEV *, 8> NewOps;
2631 APInt AccumulatedConstant(BitWidth, 0);
2632 if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
2633 Ops.data(), Ops.size(),
2634 APInt(BitWidth, 1), *this)) {
2635 struct APIntCompare {
2636 bool operator()(const APInt &LHS, const APInt &RHS) const {
2637 return LHS.ult(RHS);
2641 // Some interesting folding opportunity is present, so it's worthwhile to
2642 // re-generate the operands list. Group the operands by constant scale,
2643 // to avoid multiplying by the same constant scale multiple times.
2644 std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
2645 for (const SCEV *NewOp : NewOps)
2646 MulOpLists[M.find(NewOp)->second].push_back(NewOp);
2647 // Re-generate the operands list.
2648 Ops.clear();
2649 if (AccumulatedConstant != 0)
2650 Ops.push_back(getConstant(AccumulatedConstant));
2651 for (auto &MulOp : MulOpLists) {
2652 if (MulOp.first == 1) {
2653 Ops.push_back(getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1));
2654 } else if (MulOp.first != 0) {
2655 Ops.push_back(getMulExpr(
2656 getConstant(MulOp.first),
2657 getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1),
2658 SCEV::FlagAnyWrap, Depth + 1));
2661 if (Ops.empty())
2662 return getZero(Ty);
2663 if (Ops.size() == 1)
2664 return Ops[0];
2665 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2669 // If we are adding something to a multiply expression, make sure the
2670 // something is not already an operand of the multiply. If so, merge it into
2671 // the multiply.
2672 for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
2673 const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
2674 for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
2675 const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
2676 if (isa<SCEVConstant>(MulOpSCEV))
2677 continue;
2678 for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
2679 if (MulOpSCEV == Ops[AddOp]) {
2680 // Fold W + X + (X * Y * Z) --> W + (X * ((Y*Z)+1))
2681 const SCEV *InnerMul = Mul->getOperand(MulOp == 0);
2682 if (Mul->getNumOperands() != 2) {
2683 // If the multiply has more than two operands, we must get the
2684 // Y*Z term.
2685 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
2686 Mul->op_begin()+MulOp);
2687 MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
2688 InnerMul = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
2690 SmallVector<const SCEV *, 2> TwoOps = {getOne(Ty), InnerMul};
2691 const SCEV *AddOne = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
2692 const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV,
2693 SCEV::FlagAnyWrap, Depth + 1);
2694 if (Ops.size() == 2) return OuterMul;
2695 if (AddOp < Idx) {
2696 Ops.erase(Ops.begin()+AddOp);
2697 Ops.erase(Ops.begin()+Idx-1);
2698 } else {
2699 Ops.erase(Ops.begin()+Idx);
2700 Ops.erase(Ops.begin()+AddOp-1);
2702 Ops.push_back(OuterMul);
2703 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2706 // Check this multiply against other multiplies being added together.
2707 for (unsigned OtherMulIdx = Idx+1;
2708 OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]);
2709 ++OtherMulIdx) {
2710 const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]);
2711 // If MulOp occurs in OtherMul, we can fold the two multiplies
2712 // together.
2713 for (unsigned OMulOp = 0, e = OtherMul->getNumOperands();
2714 OMulOp != e; ++OMulOp)
2715 if (OtherMul->getOperand(OMulOp) == MulOpSCEV) {
2716 // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E))
2717 const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0);
2718 if (Mul->getNumOperands() != 2) {
2719 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
2720 Mul->op_begin()+MulOp);
2721 MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
2722 InnerMul1 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
2724 const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0);
2725 if (OtherMul->getNumOperands() != 2) {
2726 SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(),
2727 OtherMul->op_begin()+OMulOp);
2728 MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end());
2729 InnerMul2 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
2731 SmallVector<const SCEV *, 2> TwoOps = {InnerMul1, InnerMul2};
2732 const SCEV *InnerMulSum =
2733 getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
2734 const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum,
2735 SCEV::FlagAnyWrap, Depth + 1);
2736 if (Ops.size() == 2) return OuterMul;
2737 Ops.erase(Ops.begin()+Idx);
2738 Ops.erase(Ops.begin()+OtherMulIdx-1);
2739 Ops.push_back(OuterMul);
2740 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2746 // If there are any add recurrences in the operands list, see if any other
2747 // added values are loop invariant. If so, we can fold them into the
2748 // recurrence.
2749 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
2750 ++Idx;
2752 // Scan over all recurrences, trying to fold loop invariants into them.
2753 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
2754 // Scan all of the other operands to this add and add them to the vector if
2755 // they are loop invariant w.r.t. the recurrence.
2756 SmallVector<const SCEV *, 8> LIOps;
2757 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
2758 const Loop *AddRecLoop = AddRec->getLoop();
2759 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2760 if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) {
2761 LIOps.push_back(Ops[i]);
2762 Ops.erase(Ops.begin()+i);
2763 --i; --e;
2766 // If we found some loop invariants, fold them into the recurrence.
2767 if (!LIOps.empty()) {
2768 // Compute nowrap flags for the addition of the loop-invariant ops and
2769 // the addrec. Temporarily push it as an operand for that purpose.
2770 LIOps.push_back(AddRec);
2771 SCEV::NoWrapFlags Flags = ComputeFlags(LIOps);
2772 LIOps.pop_back();
2774 // NLI + LI + {Start,+,Step} --> NLI + {LI+Start,+,Step}
2775 LIOps.push_back(AddRec->getStart());
2777 SmallVector<const SCEV *, 4> AddRecOps(AddRec->operands());
2778 // This follows from the fact that the no-wrap flags on the outer add
2779 // expression are applicable on the 0th iteration, when the add recurrence
2780 // will be equal to its start value.
2781 AddRecOps[0] = getAddExpr(LIOps, Flags, Depth + 1);
2783 // Build the new addrec. Propagate the NUW and NSW flags if both the
2784 // outer add and the inner addrec are guaranteed to have no overflow.
2785 // Always propagate NW.
2786 Flags = AddRec->getNoWrapFlags(setFlags(Flags, SCEV::FlagNW));
2787 const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, Flags);
2789 // If all of the other operands were loop invariant, we are done.
2790 if (Ops.size() == 1) return NewRec;
2792 // Otherwise, add the folded AddRec to the non-invariant parts.
2793 for (unsigned i = 0;; ++i)
2794 if (Ops[i] == AddRec) {
2795 Ops[i] = NewRec;
2796 break;
2798 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2801 // Okay, if there weren't any loop invariants to be folded, check to see if
2802 // there are multiple AddRec's with the same loop induction variable being
2803 // added together. If so, we can fold them.
2804 for (unsigned OtherIdx = Idx+1;
2805 OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
2806 ++OtherIdx) {
2807 // We expect the AddRecExprs to be sorted in reverse dominance order,
2808 // so that the first AddRecExpr found is dominated by all the others.
2809 assert(DT.dominates(
2810 cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()->getHeader(),
2811 AddRec->getLoop()->getHeader()) &&
2812 "AddRecExprs are not sorted in reverse dominance order?");
2813 if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
2814 // Other + {A,+,B}<L> + {C,+,D}<L> --> Other + {A+C,+,B+D}<L>
2815 SmallVector<const SCEV *, 4> AddRecOps(AddRec->operands());
2816 for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
2817 ++OtherIdx) {
2818 const auto *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
2819 if (OtherAddRec->getLoop() == AddRecLoop) {
2820 for (unsigned i = 0, e = OtherAddRec->getNumOperands();
2821 i != e; ++i) {
2822 if (i >= AddRecOps.size()) {
2823 AddRecOps.append(OtherAddRec->op_begin()+i,
2824 OtherAddRec->op_end());
2825 break;
2827 SmallVector<const SCEV *, 2> TwoOps = {
2828 AddRecOps[i], OtherAddRec->getOperand(i)};
2829 AddRecOps[i] = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
2831 Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
2834 // Step size has changed, so we cannot guarantee no self-wraparound.
2835 Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop, SCEV::FlagAnyWrap);
2836 return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2840 // Otherwise we couldn't fold anything into this recurrence. Move on to the
2841 // next one.
2844 // Okay, it looks like we really DO need an add expr. Check to see if we
2845 // already have one; otherwise, create a new one.
2846 return getOrCreateAddExpr(Ops, ComputeFlags(Ops));
2849 const SCEV *
2850 ScalarEvolution::getOrCreateAddExpr(ArrayRef<const SCEV *> Ops,
2851 SCEV::NoWrapFlags Flags) {
2852 FoldingSetNodeID ID;
2853 ID.AddInteger(scAddExpr);
2854 for (const SCEV *Op : Ops)
2855 ID.AddPointer(Op);
2856 void *IP = nullptr;
2857 SCEVAddExpr *S =
2858 static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
2859 if (!S) {
2860 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
2861 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2862 S = new (SCEVAllocator)
2863 SCEVAddExpr(ID.Intern(SCEVAllocator), O, Ops.size());
2864 UniqueSCEVs.InsertNode(S, IP);
2865 addToLoopUseLists(S);
2867 S->setNoWrapFlags(Flags);
2868 return S;
2871 const SCEV *
2872 ScalarEvolution::getOrCreateAddRecExpr(ArrayRef<const SCEV *> Ops,
2873 const Loop *L, SCEV::NoWrapFlags Flags) {
2874 FoldingSetNodeID ID;
2875 ID.AddInteger(scAddRecExpr);
2876 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2877 ID.AddPointer(Ops[i]);
2878 ID.AddPointer(L);
2879 void *IP = nullptr;
2880 SCEVAddRecExpr *S =
2881 static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
2882 if (!S) {
2883 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
2884 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2885 S = new (SCEVAllocator)
2886 SCEVAddRecExpr(ID.Intern(SCEVAllocator), O, Ops.size(), L);
2887 UniqueSCEVs.InsertNode(S, IP);
2888 addToLoopUseLists(S);
2890 setNoWrapFlags(S, Flags);
2891 return S;
2894 const SCEV *
2895 ScalarEvolution::getOrCreateMulExpr(ArrayRef<const SCEV *> Ops,
2896 SCEV::NoWrapFlags Flags) {
2897 FoldingSetNodeID ID;
2898 ID.AddInteger(scMulExpr);
2899 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2900 ID.AddPointer(Ops[i]);
2901 void *IP = nullptr;
2902 SCEVMulExpr *S =
2903 static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
2904 if (!S) {
2905 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
2906 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2907 S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
2908 O, Ops.size());
2909 UniqueSCEVs.InsertNode(S, IP);
2910 addToLoopUseLists(S);
2912 S->setNoWrapFlags(Flags);
2913 return S;
2916 static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow) {
2917 uint64_t k = i*j;
2918 if (j > 1 && k / j != i) Overflow = true;
2919 return k;
2922 /// Compute the result of "n choose k", the binomial coefficient. If an
2923 /// intermediate computation overflows, Overflow will be set and the return will
2924 /// be garbage. Overflow is not cleared on absence of overflow.
2925 static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow) {
2926 // We use the multiplicative formula:
2927 // n(n-1)(n-2)...(n-(k-1)) / k(k-1)(k-2)...1.
2928 // At each iteration i, we multiply by the i-th term of the numerator and
2929 // divide by the i-th term of the denominator. This division always produces an
2930 // integral result, and helps reduce the chance of overflow in the
2931 // intermediate computations. However, we can still overflow even when the
2932 // final result would fit.
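// For example, Choose(6, 2): i = 1 gives r = 1 * 6 = 6, r /= 1; i = 2 gives
// r = 6 * 5 = 30, r /= 2 = 15. Each division is exact because after step i
// the running value equals the exact binomial coefficient C(n, i).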
2934 if (n == 0 || n == k) return 1;
2935 if (k > n) return 0;
2937 if (k > n/2)
2938 k = n-k;
2940 uint64_t r = 1;
2941 for (uint64_t i = 1; i <= k; ++i) {
2942 r = umul_ov(r, n-(i-1), Overflow);
2943 r /= i;
2945 return r;
2948 /// Determine if any of the operands in this SCEV are a constant or if
2949 /// any of the add or multiply expressions in this SCEV contain a constant.
2950 static bool containsConstantInAddMulChain(const SCEV *StartExpr) {
2951 struct FindConstantInAddMulChain {
2952 bool FoundConstant = false;
2954 bool follow(const SCEV *S) {
2955 FoundConstant |= isa<SCEVConstant>(S);
2956 return isa<SCEVAddExpr>(S) || isa<SCEVMulExpr>(S);
2959 bool isDone() const {
2960 return FoundConstant;
2964 FindConstantInAddMulChain F;
2965 SCEVTraversal<FindConstantInAddMulChain> ST(F);
2966 ST.visitAll(StartExpr);
2967 return F.FoundConstant;
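// For instance: the predicate holds for (3 + %x) * %y, since the traversal
// descends through the mul and the add and reaches the constant, but not
// for umax(3, %x), because follow() declines to descend into the umax and
// the buried constant is never visited.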
2970 /// Get a canonical multiply expression, or something simpler if possible.
2971 const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
2972 SCEV::NoWrapFlags OrigFlags,
2973 unsigned Depth) {
2974 assert(OrigFlags == maskFlags(OrigFlags, SCEV::FlagNUW | SCEV::FlagNSW) &&
2975 "only nuw or nsw allowed");
2976 assert(!Ops.empty() && "Cannot get empty mul!");
2977 if (Ops.size() == 1) return Ops[0];
2978 #ifndef NDEBUG
2979 Type *ETy = Ops[0]->getType();
2980 assert(!ETy->isPointerTy());
2981 for (unsigned i = 1, e = Ops.size(); i != e; ++i)
2982 assert(Ops[i]->getType() == ETy &&
2983 "SCEVMulExpr operand types don't match!");
2984 #endif
2986 // Sort by complexity; this groups all similar expression types together.
2987 GroupByComplexity(Ops, &LI, DT);
2989 // If there are any constants, fold them together.
2990 unsigned Idx = 0;
2991 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
2992 ++Idx;
2993 assert(Idx < Ops.size());
2994 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
2995 // We found two constants, fold them together!
2996 Ops[0] = getConstant(LHSC->getAPInt() * RHSC->getAPInt());
2997 if (Ops.size() == 2) return Ops[0];
2998 Ops.erase(Ops.begin()+1); // Erase the folded element
2999 LHSC = cast<SCEVConstant>(Ops[0]);
3002 // If we have a multiply of zero, it will always be zero.
3003 if (LHSC->getValue()->isZero())
3004 return LHSC;
3006 // If we are left with a constant one being multiplied, strip it off.
3007 if (LHSC->getValue()->isOne()) {
3008 Ops.erase(Ops.begin());
3009 --Idx;
3012 if (Ops.size() == 1)
3013 return Ops[0];
3016 // Delay expensive flag strengthening until necessary.
3017 auto ComputeFlags = [this, OrigFlags](const ArrayRef<const SCEV *> Ops) {
3018 return StrengthenNoWrapFlags(this, scMulExpr, Ops, OrigFlags);
3021 // Limit the depth of recursive calls.
3022 if (Depth > MaxArithDepth || hasHugeExpression(Ops))
3023 return getOrCreateMulExpr(Ops, ComputeFlags(Ops));
3025 if (SCEV *S = std::get<0>(findExistingSCEVInCache(scMulExpr, Ops))) {
3026 // Don't strengthen flags if we have no new information.
3027 SCEVMulExpr *Mul = static_cast<SCEVMulExpr *>(S);
3028 if (Mul->getNoWrapFlags(OrigFlags) != OrigFlags)
3029 Mul->setNoWrapFlags(ComputeFlags(Ops));
3030 return S;
3033 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
3034 if (Ops.size() == 2) {
3035 // C1*(C2+V) -> C1*C2 + C1*V
3036 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1]))
3037 // If any of Add's ops are Adds or Muls with a constant, apply this
3038 // transformation as well.
3040 // TODO: There are some cases where this transformation is not
3041 // profitable; for example, Add = (C0 + X) * Y + Z. Maybe the scope of
3042 // this transformation should be narrowed down.
3043 if (Add->getNumOperands() == 2 && containsConstantInAddMulChain(Add))
3044 return getAddExpr(getMulExpr(LHSC, Add->getOperand(0),
3045 SCEV::FlagAnyWrap, Depth + 1),
3046 getMulExpr(LHSC, Add->getOperand(1),
3047 SCEV::FlagAnyWrap, Depth + 1),
3048 SCEV::FlagAnyWrap, Depth + 1);
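// Illustration (not from the original source): with LHSC = 2 and
// Add = (3 + %x) this produces getAddExpr(6, 2 * %x), pulling the
// constant to the top level where later folds can combine it with
// other constants.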
3050 if (Ops[0]->isAllOnesValue()) {
3051 // If we have a mul by -1 of an add, try distributing the -1 among the
3052 // add operands.
3053 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) {
3054 SmallVector<const SCEV *, 4> NewOps;
3055 bool AnyFolded = false;
3056 for (const SCEV *AddOp : Add->operands()) {
3057 const SCEV *Mul = getMulExpr(Ops[0], AddOp, SCEV::FlagAnyWrap,
3058 Depth + 1);
3059 if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true;
3060 NewOps.push_back(Mul);
3062 if (AnyFolded)
3063 return getAddExpr(NewOps, SCEV::FlagAnyWrap, Depth + 1);
3064 } else if (const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) {
3065 // Negation preserves a recurrence's no self-wrap property.
3066 SmallVector<const SCEV *, 4> Operands;
3067 for (const SCEV *AddRecOp : AddRec->operands())
3068 Operands.push_back(getMulExpr(Ops[0], AddRecOp, SCEV::FlagAnyWrap,
3069 Depth + 1));
3071 return getAddRecExpr(Operands, AddRec->getLoop(),
3072 AddRec->getNoWrapFlags(SCEV::FlagNW));
3078 // Skip over the add expression until we get to a multiply.
3079 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
3080 ++Idx;
3082 // If there are mul operands, inline them all into this expression.
3083 if (Idx < Ops.size()) {
3084 bool DeletedMul = false;
3085 while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
3086 if (Ops.size() > MulOpsInlineThreshold)
3087 break;
3088 // If we have a mul, expand the mul operands onto the end of the
3089 // operands list.
3090 Ops.erase(Ops.begin()+Idx);
3091 Ops.append(Mul->op_begin(), Mul->op_end());
3092 DeletedMul = true;
3095 // If we deleted at least one mul, we added operands to the end of the
3096 // list, and they are not necessarily sorted. Recurse to resort and
3097 // resimplify any operands we just acquired.
3098 if (DeletedMul)
3099 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
3102 // If there are any add recurrences in the operands list, see if any other
3103 // added values are loop invariant. If so, we can fold them into the
3104 // recurrence.
3105 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
3106 ++Idx;
3108 // Scan over all recurrences, trying to fold loop invariants into them.
3109 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
3110 // Scan all of the other operands to this mul and add them to the vector
3111 // if they are loop invariant w.r.t. the recurrence.
3112 SmallVector<const SCEV *, 8> LIOps;
3113 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
3114 const Loop *AddRecLoop = AddRec->getLoop();
3115 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
3116 if (isAvailableAtLoopEntry(Ops[i], AddRecLoop)) {
3117 LIOps.push_back(Ops[i]);
3118 Ops.erase(Ops.begin()+i);
3119 --i; --e;
3122 // If we found some loop invariants, fold them into the recurrence.
3123 if (!LIOps.empty()) {
3124 // NLI * LI * {Start,+,Step} --> NLI * {LI*Start,+,LI*Step}
3125 SmallVector<const SCEV *, 4> NewOps;
3126 NewOps.reserve(AddRec->getNumOperands());
3127 const SCEV *Scale = getMulExpr(LIOps, SCEV::FlagAnyWrap, Depth + 1);
3128 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
3129 NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i),
3130 SCEV::FlagAnyWrap, Depth + 1));
3132 // Build the new addrec. Propagate the NUW and NSW flags if both the
3133 // outer mul and the inner addrec are guaranteed to have no overflow.
3135 // No-self-wrap (NW) cannot be guaranteed after changing the step size,
3136 // but it will be inferred if either NUW or NSW is true.
3137 SCEV::NoWrapFlags Flags = ComputeFlags({Scale, AddRec});
3138 const SCEV *NewRec = getAddRecExpr(
3139 NewOps, AddRecLoop, AddRec->getNoWrapFlags(Flags));
3141 // If all of the other operands were loop invariant, we are done.
3142 if (Ops.size() == 1) return NewRec;
3144 // Otherwise, multiply the folded AddRec by the non-invariant parts.
3145 for (unsigned i = 0;; ++i)
3146 if (Ops[i] == AddRec) {
3147 Ops[i] = NewRec;
3148 break;
3150 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
3153 // Okay, if there weren't any loop invariants to be folded, check to see
3154 // if there are multiple AddRec's with the same loop induction variable
3155 // being multiplied together. If so, we can fold them.
3157 // {A1,+,A2,+,...,+,An}<L> * {B1,+,B2,+,...,+,Bn}<L>
3158 // = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [
3159 //       choose(x, 2x-y)*choose(2x-y, x-z)*A_{y-z}*B_z
3160 // ]]],+,...up to x=2n}.
3161 // Note that the arguments to choose() are always integers with values
3162 // known at compile time, never SCEV objects.
3164 // The implementation avoids pointless extra computations when the two
3165 // addrec's are of different length (mathematically, it's equivalent to
3166 // an infinite stream of zeros on the right).
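// As an illustration (not from the original source): for two affine
// recurrences the formula above reduces to
//   {a,+,b}<L> * {c,+,d}<L> = {a*c,+, a*d + b*c + b*d,+, 2*b*d}<L>
// e.g. {0,+,1} * {0,+,1} = {0,+,1,+,2}, which enumerates 0, 1, 4, 9, ...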
3167 bool OpsModified = false;
3168 for (unsigned OtherIdx = Idx+1;
3169 OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
3170 ++OtherIdx) {
3171 const SCEVAddRecExpr *OtherAddRec =
3172 dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]);
3173 if (!OtherAddRec || OtherAddRec->getLoop() != AddRecLoop)
3174 continue;
3176 // Limit max number of arguments to avoid creation of unreasonably big
3177 // SCEVAddRecs with very complex operands.
3178 if (AddRec->getNumOperands() + OtherAddRec->getNumOperands() - 1 >
3179 MaxAddRecSize || hasHugeExpression({AddRec, OtherAddRec}))
3180 continue;
3182 bool Overflow = false;
3183 Type *Ty = AddRec->getType();
3184 bool LargerThan64Bits = getTypeSizeInBits(Ty) > 64;
3185 SmallVector<const SCEV*, 7> AddRecOps;
3186 for (int x = 0, xe = AddRec->getNumOperands() +
3187 OtherAddRec->getNumOperands() - 1; x != xe && !Overflow; ++x) {
3188 SmallVector <const SCEV *, 7> SumOps;
3189 for (int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
3190 uint64_t Coeff1 = Choose(x, 2*x - y, Overflow);
3191 for (int z = std::max(y-x, y-(int)AddRec->getNumOperands()+1),
3192 ze = std::min(x+1, (int)OtherAddRec->getNumOperands());
3193 z < ze && !Overflow; ++z) {
3194 uint64_t Coeff2 = Choose(2*x - y, x-z, Overflow);
3195 uint64_t Coeff;
3196 if (LargerThan64Bits)
3197 Coeff = umul_ov(Coeff1, Coeff2, Overflow);
3198 else
3199 Coeff = Coeff1*Coeff2;
3200 const SCEV *CoeffTerm = getConstant(Ty, Coeff);
3201 const SCEV *Term1 = AddRec->getOperand(y-z);
3202 const SCEV *Term2 = OtherAddRec->getOperand(z);
3203 SumOps.push_back(getMulExpr(CoeffTerm, Term1, Term2,
3204 SCEV::FlagAnyWrap, Depth + 1));
3207 if (SumOps.empty())
3208 SumOps.push_back(getZero(Ty));
3209 AddRecOps.push_back(getAddExpr(SumOps, SCEV::FlagAnyWrap, Depth + 1));
3211 if (!Overflow) {
3212 const SCEV *NewAddRec = getAddRecExpr(AddRecOps, AddRecLoop,
3213 SCEV::FlagAnyWrap);
3214 if (Ops.size() == 2) return NewAddRec;
3215 Ops[Idx] = NewAddRec;
3216 Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
3217 OpsModified = true;
3218 AddRec = dyn_cast<SCEVAddRecExpr>(NewAddRec);
3219 if (!AddRec)
3220 break;
3223 if (OpsModified)
3224 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
3226 // Otherwise we couldn't fold anything into this recurrence. Move on to
3227 // the next one.
3230 // Okay, it looks like we really DO need a mul expr. Check to see if we
3231 // already have one, otherwise create a new one.
3232 return getOrCreateMulExpr(Ops, ComputeFlags(Ops));
3235 /// Get an unsigned remainder expression, lowered in terms of unsigned division.
3236 const SCEV *ScalarEvolution::getURemExpr(const SCEV *LHS,
3237 const SCEV *RHS) {
3238 assert(getEffectiveSCEVType(LHS->getType()) ==
3239 getEffectiveSCEVType(RHS->getType()) &&
3240 "SCEVURemExpr operand types don't match!");
3242 // Short-circuit easy cases
3243 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
3244 // If constant is one, the result is trivial
3245 if (RHSC->getValue()->isOne())
3246 return getZero(LHS->getType()); // X urem 1 --> 0
3248 // If constant is a power of two, fold into a zext(trunc(LHS)).
3249 if (RHSC->getAPInt().isPowerOf2()) {
3250 Type *FullTy = LHS->getType();
3251 Type *TruncTy =
3252 IntegerType::get(getContext(), RHSC->getAPInt().logBase2());
3253 return getZeroExtendExpr(getTruncateExpr(LHS, TruncTy), FullTy);
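// Illustration (assuming %x is i32): %x urem 8 becomes
// (zext (trunc %x to i3) to i32), i.e. the low three bits of %x.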
3257 // Fall back to the general form: %x urem %y == %x -<nuw> ((%x udiv %y) *<nuw> %y)
3258 const SCEV *UDiv = getUDivExpr(LHS, RHS);
3259 const SCEV *Mult = getMulExpr(UDiv, RHS, SCEV::FlagNUW);
3260 return getMinusSCEV(LHS, Mult, SCEV::FlagNUW);
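// Usage sketch (illustrative): for a non-power-of-two divisor such as
// %x urem 5 this returns %x - 5 * (%x /u 5); both the multiply and the
// subtract carry <nuw> because (%x /u 5) * 5 never exceeds %x.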
3263 /// Get a canonical unsigned division expression, or something simpler if
3264 /// possible.
3265 const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
3266 const SCEV *RHS) {
3267 assert(!LHS->getType()->isPointerTy() &&
3268 "SCEVUDivExpr operand can't be pointer!");
3269 assert(LHS->getType() == RHS->getType() &&
3270 "SCEVUDivExpr operand types don't match!");
3272 FoldingSetNodeID ID;
3273 ID.AddInteger(scUDivExpr);
3274 ID.AddPointer(LHS);
3275 ID.AddPointer(RHS);
3276 void *IP = nullptr;
3277 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
3278 return S;
3280 // 0 udiv Y == 0
3281 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS))
3282 if (LHSC->getValue()->isZero())
3283 return LHS;
3285 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
3286 if (RHSC->getValue()->isOne())
3287 return LHS; // X udiv 1 --> x
3288 // If the denominator is zero, the result of the udiv is undefined. Don't
3289 // try to analyze it, because the resolution chosen here may differ from
3290 // the resolution chosen in other parts of the compiler.
3291 if (!RHSC->getValue()->isZero()) {
3292 // Determine if the division can be folded into the operands of
3293 // the LHS.
3294 // TODO: Generalize this to non-constants by using known-bits information.
3295 Type *Ty = LHS->getType();
3296 unsigned LZ = RHSC->getAPInt().countLeadingZeros();
3297 unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1;
3298 // For non-power-of-two values, effectively round the value up to the
3299 // nearest power of two.
3300 if (!RHSC->getAPInt().isPowerOf2())
3301 ++MaxShiftAmt;
3302 IntegerType *ExtTy =
3303 IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
3304 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
3305 if (const SCEVConstant *Step =
3306 dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) {
3307 // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
3308 const APInt &StepInt = Step->getAPInt();
3309 const APInt &DivInt = RHSC->getAPInt();
3310 if (!StepInt.urem(DivInt) &&
3311 getZeroExtendExpr(AR, ExtTy) ==
3312 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
3313 getZeroExtendExpr(Step, ExtTy),
3314 AR->getLoop(), SCEV::FlagAnyWrap)) {
3315 SmallVector<const SCEV *, 4> Operands;
3316 for (const SCEV *Op : AR->operands())
3317 Operands.push_back(getUDivExpr(Op, RHS));
3318 return getAddRecExpr(Operands, AR->getLoop(), SCEV::FlagNW);
3320 // Get a canonical UDivExpr for a recurrence.
3321 // {X,+,N}/C => {Y,+,N}/C where Y=X-(X%N). Safe when C%N=0.
3322 // We can currently only fold X%N if X is constant.
3323 const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart());
3324 if (StartC && !DivInt.urem(StepInt) &&
3325 getZeroExtendExpr(AR, ExtTy) ==
3326 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
3327 getZeroExtendExpr(Step, ExtTy),
3328 AR->getLoop(), SCEV::FlagAnyWrap)) {
3329 const APInt &StartInt = StartC->getAPInt();
3330 const APInt &StartRem = StartInt.urem(StepInt);
3331 if (StartRem != 0) {
3332 const SCEV *NewLHS =
3333 getAddRecExpr(getConstant(StartInt - StartRem), Step,
3334 AR->getLoop(), SCEV::FlagNW);
3335 if (LHS != NewLHS) {
3336 LHS = NewLHS;
3338 // Reset the ID to include the new LHS, and check if it is
3339 // already cached.
3340 ID.clear();
3341 ID.AddInteger(scUDivExpr);
3342 ID.AddPointer(LHS);
3343 ID.AddPointer(RHS);
3344 IP = nullptr;
3345 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP))
3346 return S;
3351 // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
3352 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
3353 SmallVector<const SCEV *, 4> Operands;
3354 for (const SCEV *Op : M->operands())
3355 Operands.push_back(getZeroExtendExpr(Op, ExtTy));
3356 if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
3357 // Find an operand that's safely divisible.
3358 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
3359 const SCEV *Op = M->getOperand(i);
3360 const SCEV *Div = getUDivExpr(Op, RHSC);
3361 if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
3362 Operands = SmallVector<const SCEV *, 4>(M->operands());
3363 Operands[i] = Div;
3364 return getMulExpr(Operands);
3369 // (A/B)/C --> A/(B*C) if safe and B*C can be folded.
3370 if (const SCEVUDivExpr *OtherDiv = dyn_cast<SCEVUDivExpr>(LHS)) {
3371 if (auto *DivisorConstant =
3372 dyn_cast<SCEVConstant>(OtherDiv->getRHS())) {
3373 bool Overflow = false;
3374 APInt NewRHS =
3375 DivisorConstant->getAPInt().umul_ov(RHSC->getAPInt(), Overflow);
3376 if (Overflow) {
3377 return getConstant(RHSC->getType(), 0, false);
3379 return getUDivExpr(OtherDiv->getLHS(), getConstant(NewRHS));
3383 // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
3384 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) {
3385 SmallVector<const SCEV *, 4> Operands;
3386 for (const SCEV *Op : A->operands())
3387 Operands.push_back(getZeroExtendExpr(Op, ExtTy));
3388 if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
3389 Operands.clear();
3390 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
3391 const SCEV *Op = getUDivExpr(A->getOperand(i), RHS);
3392 if (isa<SCEVUDivExpr>(Op) ||
3393 getMulExpr(Op, RHS) != A->getOperand(i))
3394 break;
3395 Operands.push_back(Op);
3397 if (Operands.size() == A->getNumOperands())
3398 return getAddExpr(Operands);
3402 // Fold if both operands are constant.
3403 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
3404 Constant *LHSCV = LHSC->getValue();
3405 Constant *RHSCV = RHSC->getValue();
3406 return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV,
3407 RHSCV)));
3412 // The Insertion Point (IP) might be invalid by now (due to UniqueSCEVs
3413 // changes). Make sure we get a new one.
3414 IP = nullptr;
3415 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
3416 SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator),
3417 LHS, RHS);
3418 UniqueSCEVs.InsertNode(S, IP);
3419 addToLoopUseLists(S);
3420 return S;
3423 static const APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) {
3424 APInt A = C1->getAPInt().abs();
3425 APInt B = C2->getAPInt().abs();
3426 uint32_t ABW = A.getBitWidth();
3427 uint32_t BBW = B.getBitWidth();
3429 if (ABW > BBW)
3430 B = B.zext(ABW);
3431 else if (ABW < BBW)
3432 A = A.zext(BBW);
3434 return APIntOps::GreatestCommonDivisor(std::move(A), std::move(B));
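// Illustrative example: for C1 = -4 (i8) and C2 = 6 (i16), A = |-4| = 4 is
// zero-extended to 16 bits and the result is GreatestCommonDivisor(4, 6) = 2.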
3437 /// Get a canonical unsigned division expression, or something simpler if
3438 /// possible. There is no representation for an exact udiv in SCEV IR, but we
3439 /// can attempt to remove factors from the LHS and RHS. We can't do this when
3440 /// it's not exact because the udiv may be clearing bits.
3441 const SCEV *ScalarEvolution::getUDivExactExpr(const SCEV *LHS,
3442 const SCEV *RHS) {
3443 // TODO: we could try to find factors in all sorts of things, but for now we
3444 // just deal with u/exact (multiply, constant). See SCEVDivision towards the
3445 // end of this file for inspiration.
3447 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS);
3448 if (!Mul || !Mul->hasNoUnsignedWrap())
3449 return getUDivExpr(LHS, RHS);
3451 if (const SCEVConstant *RHSCst = dyn_cast<SCEVConstant>(RHS)) {
3452 // If the mulexpr multiplies by a constant, then that constant must be the
3453 // first element of the mulexpr.
3454 if (const auto *LHSCst = dyn_cast<SCEVConstant>(Mul->getOperand(0))) {
3455 if (LHSCst == RHSCst) {
3456 SmallVector<const SCEV *, 2> Operands(drop_begin(Mul->operands()));
3457 return getMulExpr(Operands);
3460 // We can't just assume that LHSCst divides RHSCst cleanly, it could be
3461 // that there's a factor provided by one of the other terms. We need to
3462 // check.
3463 APInt Factor = gcd(LHSCst, RHSCst);
3464 if (!Factor.isIntN(1)) {
3465 LHSCst =
3466 cast<SCEVConstant>(getConstant(LHSCst->getAPInt().udiv(Factor)));
3467 RHSCst =
3468 cast<SCEVConstant>(getConstant(RHSCst->getAPInt().udiv(Factor)));
3469 SmallVector<const SCEV *, 2> Operands;
3470 Operands.push_back(LHSCst);
3471 Operands.append(Mul->op_begin() + 1, Mul->op_end());
3472 LHS = getMulExpr(Operands);
3473 RHS = RHSCst;
3474 Mul = dyn_cast<SCEVMulExpr>(LHS);
3475 if (!Mul)
3476 return getUDivExactExpr(LHS, RHS);
3481 for (int i = 0, e = Mul->getNumOperands(); i != e; ++i) {
3482 if (Mul->getOperand(i) == RHS) {
3483 SmallVector<const SCEV *, 2> Operands;
3484 Operands.append(Mul->op_begin(), Mul->op_begin() + i);
3485 Operands.append(Mul->op_begin() + i + 1, Mul->op_end());
3486 return getMulExpr(Operands);
3490 return getUDivExpr(LHS, RHS);
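// Worked example (illustration only): for LHS = (6 * %x)<nuw> and RHS = 4,
// gcd(6, 4) = 2 rewrites the query to (3 * %x) /u 2; neither remaining
// factor equals the divisor, so this falls back to getUDivExpr(3 * %x, 2).
// For LHS = (4 * %x)<nuw> and RHS = 4 the constants cancel and %x is
// returned directly.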
3493 /// Get an add recurrence expression for the specified loop. Simplify the
3494 /// expression as much as possible.
3495 const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step,
3496 const Loop *L,
3497 SCEV::NoWrapFlags Flags) {
3498 SmallVector<const SCEV *, 4> Operands;
3499 Operands.push_back(Start);
3500 if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
3501 if (StepChrec->getLoop() == L) {
3502 Operands.append(StepChrec->op_begin(), StepChrec->op_end());
3503 return getAddRecExpr(Operands, L, maskFlags(Flags, SCEV::FlagNW));
3506 Operands.push_back(Step);
3507 return getAddRecExpr(Operands, L, Flags);
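// For example (illustrative): getAddRecExpr(%start, {1,+,2}<L>, L) flattens
// the recurrence step into the single chrec {%start,+,1,+,2}<L>, keeping
// only the no-self-wrap (NW) bit of the requested flags.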
3510 /// Get an add recurrence expression for the specified loop. Simplify the
3511 /// expression as much as possible.
3512 const SCEV *
3513 ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
3514 const Loop *L, SCEV::NoWrapFlags Flags) {
3515 if (Operands.size() == 1) return Operands[0];
3516 #ifndef NDEBUG
3517 Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
3518 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
3519 assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy &&
3520 "SCEVAddRecExpr operand types don't match!");
3521 assert(!Operands[i]->getType()->isPointerTy() && "Step must be integer");
3523 for (unsigned i = 0, e = Operands.size(); i != e; ++i)
3524 assert(isLoopInvariant(Operands[i], L) &&
3525 "SCEVAddRecExpr operand is not loop-invariant!");
3526 #endif
3528 if (Operands.back()->isZero()) {
3529 Operands.pop_back();
3530 return getAddRecExpr(Operands, L, SCEV::FlagAnyWrap); // {X,+,0} --> X
3533 // It's tempting to call getConstantMaxBackedgeTakenCount here and use
3534 // that information to infer NUW and NSW flags. However, computing a
3535 // BE count requires calling getAddRecExpr, so we may not yet have a
3536 // meaningful BE count at this point (and if we don't, we'd be stuck
3537 // with a SCEVCouldNotCompute as the cached BE count).
3539 Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags);
3541 // Canonicalize nested AddRecs by nesting them in order of loop depth.
3542 if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
3543 const Loop *NestedLoop = NestedAR->getLoop();
3544 if (L->contains(NestedLoop)
3545 ? (L->getLoopDepth() < NestedLoop->getLoopDepth())
3546 : (!NestedLoop->contains(L) &&
3547 DT.dominates(L->getHeader(), NestedLoop->getHeader()))) {
3548 SmallVector<const SCEV *, 4> NestedOperands(NestedAR->operands());
3549 Operands[0] = NestedAR->getStart();
3550 // AddRecs require their operands be loop-invariant with respect to their
3551 // loops. Don't perform this transformation if it would break this
3552 // requirement.
3553 bool AllInvariant = all_of(
3554 Operands, [&](const SCEV *Op) { return isLoopInvariant(Op, L); });
3556 if (AllInvariant) {
3557 // Create a recurrence for the outer loop with the same step size.
3559 // The outer recurrence keeps its NW flag but only keeps NUW/NSW if the
3560 // inner recurrence has the same property.
3561 SCEV::NoWrapFlags OuterFlags =
3562 maskFlags(Flags, SCEV::FlagNW | NestedAR->getNoWrapFlags());
3564 NestedOperands[0] = getAddRecExpr(Operands, L, OuterFlags);
3565 AllInvariant = all_of(NestedOperands, [&](const SCEV *Op) {
3566 return isLoopInvariant(Op, NestedLoop);
3569 if (AllInvariant) {
3570 // Ok, both add recurrences are valid after the transformation.
3572 // The inner recurrence keeps its NW flag but only keeps NUW/NSW if
3573 // the outer recurrence has the same property.
3574 SCEV::NoWrapFlags InnerFlags =
3575 maskFlags(NestedAR->getNoWrapFlags(), SCEV::FlagNW | Flags);
3576 return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags);
3579 // Reset Operands to its original state.
3580 Operands[0] = NestedAR;
3584 // Okay, it looks like we really DO need an addrec expr. Check to see if we
3585 // already have one, otherwise create a new one.
3586 return getOrCreateAddRecExpr(Operands, L, Flags);
3589 const SCEV *
3590 ScalarEvolution::getGEPExpr(GEPOperator *GEP,
3591 const SmallVectorImpl<const SCEV *> &IndexExprs) {
3592 const SCEV *BaseExpr = getSCEV(GEP->getPointerOperand());
3593 // getSCEV(Base)->getType() has the same address space as Base->getType()
3594 // because SCEV::getType() preserves the address space.
3595 Type *IntIdxTy = getEffectiveSCEVType(BaseExpr->getType());
3596 // FIXME(PR23527): Don't blindly transfer the inbounds flag from the GEP
3597 // instruction to its SCEV, because the Instruction may be guarded by control
3598 // flow and the no-overflow bits may not be valid for the expression in any
3599 // context. This can be fixed similarly to how these flags are handled for
3600 // adds.
3601 SCEV::NoWrapFlags OffsetWrap =
3602 GEP->isInBounds() ? SCEV::FlagNSW : SCEV::FlagAnyWrap;
3604 Type *CurTy = GEP->getType();
3605 bool FirstIter = true;
3606 SmallVector<const SCEV *, 4> Offsets;
3607 for (const SCEV *IndexExpr : IndexExprs) {
3608 // Compute the (potentially symbolic) offset in bytes for this index.
3609 if (StructType *STy = dyn_cast<StructType>(CurTy)) {
3610 // For a struct, add the member offset.
3611 ConstantInt *Index = cast<SCEVConstant>(IndexExpr)->getValue();
3612 unsigned FieldNo = Index->getZExtValue();
3613 const SCEV *FieldOffset = getOffsetOfExpr(IntIdxTy, STy, FieldNo);
3614 Offsets.push_back(FieldOffset);
3616 // Update CurTy to the type of the field at Index.
3617 CurTy = STy->getTypeAtIndex(Index);
3618 } else {
3619 // Update CurTy to its element type.
3620 if (FirstIter) {
3621 assert(isa<PointerType>(CurTy) &&
3622 "The first index of a GEP indexes a pointer");
3623 CurTy = GEP->getSourceElementType();
3624 FirstIter = false;
3625 } else {
3626 CurTy = GetElementPtrInst::getTypeAtIndex(CurTy, (uint64_t)0);
3628 // For an array, add the element offset, explicitly scaled.
3629 const SCEV *ElementSize = getSizeOfExpr(IntIdxTy, CurTy);
3630 // Getelementptr indices are signed.
3631 IndexExpr = getTruncateOrSignExtend(IndexExpr, IntIdxTy);
3633 // Multiply the index by the element size to compute the element offset.
3634 const SCEV *LocalOffset = getMulExpr(IndexExpr, ElementSize, OffsetWrap);
3635 Offsets.push_back(LocalOffset);
3639 // Handle degenerate case of GEP without offsets.
3640 if (Offsets.empty())
3641 return BaseExpr;
3643 // Add the offsets together, assuming nsw if inbounds.
3644 const SCEV *Offset = getAddExpr(Offsets, OffsetWrap);
3645 // Add the base address and the offset. We cannot use the nsw flag, as the
3646 // base address is unsigned. However, if we know that the offset is
3647 // non-negative, we can use nuw.
3648 SCEV::NoWrapFlags BaseWrap = GEP->isInBounds() && isKnownNonNegative(Offset)
3649 ? SCEV::FlagNUW : SCEV::FlagAnyWrap;
3650 auto *GEPExpr = getAddExpr(BaseExpr, Offset, BaseWrap);
3651 assert(BaseExpr->getType() == GEPExpr->getType() &&
3652 "GEP should not change type mid-flight.");
3653 return GEPExpr;
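// Usage sketch (illustrative, assuming a 64-bit pointer index type): for
//   %g = getelementptr inbounds i32, i32* %p, i64 %i
// the per-index offset is (4 * %i)<nsw> and the result is %p + 4 * %i;
// nuw is added on the outer add only when the offset is known non-negative.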
3656 std::tuple<SCEV *, FoldingSetNodeID, void *>
3657 ScalarEvolution::findExistingSCEVInCache(SCEVTypes SCEVType,
3658 ArrayRef<const SCEV *> Ops) {
3659 FoldingSetNodeID ID;
3660 void *IP = nullptr;
3661 ID.AddInteger(SCEVType);
3662 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
3663 ID.AddPointer(Ops[i]);
3664 return std::tuple<SCEV *, FoldingSetNodeID, void *>(
3665 UniqueSCEVs.FindNodeOrInsertPos(ID, IP), std::move(ID), IP);
3668 const SCEV *ScalarEvolution::getAbsExpr(const SCEV *Op, bool IsNSW) {
3669 SCEV::NoWrapFlags Flags = IsNSW ? SCEV::FlagNSW : SCEV::FlagAnyWrap;
3670 return getSMaxExpr(Op, getNegativeSCEV(Op, Flags));
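// That is, abs(%x) is modeled as smax(%x, -%x); with IsNSW the negation
// carries <nsw>, encoding the assumption that %x is not INT_MIN.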
3673 const SCEV *ScalarEvolution::getMinMaxExpr(SCEVTypes Kind,
3674 SmallVectorImpl<const SCEV *> &Ops) {
3675 assert(!Ops.empty() && "Cannot get empty (u|s)(min|max)!");
3676 if (Ops.size() == 1) return Ops[0];
3677 #ifndef NDEBUG
3678 Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
3679 for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
3680 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
3681 "Operand types don't match!");
3682 assert(Ops[0]->getType()->isPointerTy() ==
3683 Ops[i]->getType()->isPointerTy() &&
3684 "min/max should be consistently pointerish");
3686 #endif
3688 bool IsSigned = Kind == scSMaxExpr || Kind == scSMinExpr;
3689 bool IsMax = Kind == scSMaxExpr || Kind == scUMaxExpr;
3691 // Sort by complexity; this groups all similar expression types together.
3692 GroupByComplexity(Ops, &LI, DT);
3694 // Check if we have created the same expression before.
3695 if (const SCEV *S = std::get<0>(findExistingSCEVInCache(Kind, Ops))) {
3696 return S;
3699 // If there are any constants, fold them together.
3700 unsigned Idx = 0;
3701 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
3702 ++Idx;
3703 assert(Idx < Ops.size());
3704 auto FoldOp = [&](const APInt &LHS, const APInt &RHS) {
3705 if (Kind == scSMaxExpr)
3706 return APIntOps::smax(LHS, RHS);
3707 else if (Kind == scSMinExpr)
3708 return APIntOps::smin(LHS, RHS);
3709 else if (Kind == scUMaxExpr)
3710 return APIntOps::umax(LHS, RHS);
3711 else if (Kind == scUMinExpr)
3712 return APIntOps::umin(LHS, RHS);
3713 llvm_unreachable("Unknown SCEV min/max opcode");
3716 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
3717 // We found two constants, fold them together!
3718 ConstantInt *Fold = ConstantInt::get(
3719 getContext(), FoldOp(LHSC->getAPInt(), RHSC->getAPInt()));
3720 Ops[0] = getConstant(Fold);
3721 Ops.erase(Ops.begin()+1); // Erase the folded element
3722 if (Ops.size() == 1) return Ops[0];
3723 LHSC = cast<SCEVConstant>(Ops[0]);
3726 bool IsMinV = LHSC->getValue()->isMinValue(IsSigned);
3727 bool IsMaxV = LHSC->getValue()->isMaxValue(IsSigned);
3729 if (IsMax ? IsMinV : IsMaxV) {
3730 // If we are left with a constant minimum(/maximum)-int, strip it off.
3731 Ops.erase(Ops.begin());
3732 --Idx;
3733 } else if (IsMax ? IsMaxV : IsMinV) {
3734 // If we have a max(/min) with a constant maximum(/minimum)-int,
3735 // it will always be the extremum.
3736 return LHSC;
3739 if (Ops.size() == 1) return Ops[0];
3742 // Find the first operation of the same kind
3743 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < Kind)
3744 ++Idx;
3746 // Check to see if one of the operands is of the same kind. If so, expand its
3747 // operands onto our operand list, and recurse to simplify.
3748 if (Idx < Ops.size()) {
3749 bool DeletedAny = false;
3750 while (Ops[Idx]->getSCEVType() == Kind) {
3751 const SCEVMinMaxExpr *SMME = cast<SCEVMinMaxExpr>(Ops[Idx]);
3752 Ops.erase(Ops.begin()+Idx);
3753 Ops.append(SMME->op_begin(), SMME->op_end());
3754 DeletedAny = true;
3757 if (DeletedAny)
3758 return getMinMaxExpr(Kind, Ops);
3761 // Okay, check to see if the same value occurs in the operand list twice. If
3762 // so, delete one. Since we sorted the list, these values are required to
3763 // be adjacent.
3764 llvm::CmpInst::Predicate GEPred =
3765 IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
3766 llvm::CmpInst::Predicate LEPred =
3767 IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
3768 llvm::CmpInst::Predicate FirstPred = IsMax ? GEPred : LEPred;
3769 llvm::CmpInst::Predicate SecondPred = IsMax ? LEPred : GEPred;
3770 for (unsigned i = 0, e = Ops.size() - 1; i != e; ++i) {
3771 if (Ops[i] == Ops[i + 1] ||
3772 isKnownViaNonRecursiveReasoning(FirstPred, Ops[i], Ops[i + 1])) {
3773 // X op Y op Y --> X op Y
3774 // X op Y --> X, if we know X, Y are ordered appropriately
3775 Ops.erase(Ops.begin() + i + 1, Ops.begin() + i + 2);
3776 --i;
3777 --e;
3778 } else if (isKnownViaNonRecursiveReasoning(SecondPred, Ops[i],
3779 Ops[i + 1])) {
3780 // X op Y --> Y, if we know X, Y are ordered appropriately
3781 Ops.erase(Ops.begin() + i, Ops.begin() + i + 1);
3782 --i;
3783 --e;
3787 if (Ops.size() == 1) return Ops[0];
3789 assert(!Ops.empty() && "Reduced min/max down to nothing!");
3791 // Okay, it looks like we really DO need an expr. Check to see if we
3792 // already have one, otherwise create a new one.
3793 const SCEV *ExistingSCEV;
3794 FoldingSetNodeID ID;
3795 void *IP;
3796 std::tie(ExistingSCEV, ID, IP) = findExistingSCEVInCache(Kind, Ops);
3797 if (ExistingSCEV)
3798 return ExistingSCEV;
3799 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
3800 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
3801 SCEV *S = new (SCEVAllocator)
3802 SCEVMinMaxExpr(ID.Intern(SCEVAllocator), Kind, O, Ops.size());
3804 UniqueSCEVs.InsertNode(S, IP);
3805 addToLoopUseLists(S);
3806 return S;
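// Example of the folds above (illustration only): umax(%x, 7, umax(%x, 5))
// inlines the nested umax, folds the constants 5 and 7 to 7, and removes
// the duplicated %x, yielding umax(7, %x).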
3809 const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS, const SCEV *RHS) {
3810 SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
3811 return getSMaxExpr(Ops);
3814 const SCEV *ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
3815 return getMinMaxExpr(scSMaxExpr, Ops);
3818 const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS, const SCEV *RHS) {
3819 SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
3820 return getUMaxExpr(Ops);
3823 const SCEV *ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
3824 return getMinMaxExpr(scUMaxExpr, Ops);
3827 const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS,
3828 const SCEV *RHS) {
3829 SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
3830 return getSMinExpr(Ops);
3833 const SCEV *ScalarEvolution::getSMinExpr(SmallVectorImpl<const SCEV *> &Ops) {
3834 return getMinMaxExpr(scSMinExpr, Ops);
3837 const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
3838 const SCEV *RHS) {
3839 SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
3840 return getUMinExpr(Ops);
3843 const SCEV *ScalarEvolution::getUMinExpr(SmallVectorImpl<const SCEV *> &Ops) {
3844 return getMinMaxExpr(scUMinExpr, Ops);
3847 const SCEV *
3848 ScalarEvolution::getSizeOfScalableVectorExpr(Type *IntTy,
3849 ScalableVectorType *ScalableTy) {
3850 Constant *NullPtr = Constant::getNullValue(ScalableTy->getPointerTo());
3851 Constant *One = ConstantInt::get(IntTy, 1);
3852 Constant *GEP = ConstantExpr::getGetElementPtr(ScalableTy, NullPtr, One);
3853 // Note that the expression we created is the final expression; we don't
3854 // want to simplify it any further. Also, if we call a normal getSCEV(),
3855 // we'll end up in an endless recursion. So just create an SCEVUnknown.
3856 return getUnknown(ConstantExpr::getPtrToInt(GEP, IntTy));
3859 const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) {
3860 if (auto *ScalableAllocTy = dyn_cast<ScalableVectorType>(AllocTy))
3861 return getSizeOfScalableVectorExpr(IntTy, ScalableAllocTy);
3862 // We can bypass creating a target-independent constant expression and then
3863 // folding it back into a ConstantInt. This is just a compile-time
3864 // optimization.
3865 return getConstant(IntTy, getDataLayout().getTypeAllocSize(AllocTy));
3868 const SCEV *ScalarEvolution::getStoreSizeOfExpr(Type *IntTy, Type *StoreTy) {
3869 if (auto *ScalableStoreTy = dyn_cast<ScalableVectorType>(StoreTy))
3870 return getSizeOfScalableVectorExpr(IntTy, ScalableStoreTy);
3871 // We can bypass creating a target-independent constant expression and then
3872 // folding it back into a ConstantInt. This is just a compile-time
3873 // optimization.
3874 return getConstant(IntTy, getDataLayout().getTypeStoreSize(StoreTy));
3877 const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy,
3878 StructType *STy,
3879 unsigned FieldNo) {
3880 // We can bypass creating a target-independent constant expression and then
3881 // folding it back into a ConstantInt. This is just a compile-time
3882 // optimization.
3883 return getConstant(
3884 IntTy, getDataLayout().getStructLayout(STy)->getElementOffset(FieldNo));
3887 const SCEV *ScalarEvolution::getUnknown(Value *V) {
3888 // Don't attempt to do anything other than create a SCEVUnknown object
3889 // here. createSCEV only calls getUnknown after checking for all other
3890 // interesting possibilities, and any other code that calls getUnknown
3891 // is doing so in order to hide a value from SCEV canonicalization.
3893 FoldingSetNodeID ID;
3894 ID.AddInteger(scUnknown);
3895 ID.AddPointer(V);
3896 void *IP = nullptr;
3897 if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) {
3898 assert(cast<SCEVUnknown>(S)->getValue() == V &&
3899 "Stale SCEVUnknown in uniquing map!");
3900 return S;
3902 SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this,
3903 FirstUnknown);
3904 FirstUnknown = cast<SCEVUnknown>(S);
3905 UniqueSCEVs.InsertNode(S, IP);
3906 return S;
3909 //===----------------------------------------------------------------------===//
3910 // Basic SCEV Analysis and PHI Idiom Recognition Code
3913 /// Test if values of the given type are analyzable within the SCEV
3914 /// framework. This primarily includes integer types, and it can optionally
3915 /// include pointer types if the ScalarEvolution class has access to
3916 /// target-specific information.
3917 bool ScalarEvolution::isSCEVable(Type *Ty) const {
3918 // Integers and pointers are always SCEVable.
3919 return Ty->isIntOrPtrTy();
3922 /// Return the size in bits of the specified type, for which isSCEVable must
3923 /// return true.
3924 uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
3925 assert(isSCEVable(Ty) && "Type is not SCEVable!");
3926 if (Ty->isPointerTy())
3927 return getDataLayout().getIndexTypeSizeInBits(Ty);
3928 return getDataLayout().getTypeSizeInBits(Ty);
3931 /// Return a type with the same bitwidth as the given type and which represents
3932 /// how SCEV will treat the given type, for which isSCEVable must return
3933 /// true. For pointer types, this is the pointer index sized integer type.
3934 Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
3935 assert(isSCEVable(Ty) && "Type is not SCEVable!");
3937 if (Ty->isIntegerTy())
3938 return Ty;
3940 // The only other supported type is pointer.
3941 assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
3942 return getDataLayout().getIndexType(Ty);
3945 Type *ScalarEvolution::getWiderType(Type *T1, Type *T2) const {
3946 return getTypeSizeInBits(T1) >= getTypeSizeInBits(T2) ? T1 : T2;
3949 const SCEV *ScalarEvolution::getCouldNotCompute() {
3950 return CouldNotCompute.get();
3953 bool ScalarEvolution::checkValidity(const SCEV *S) const {
3954 bool ContainsNulls = SCEVExprContains(S, [](const SCEV *S) {
3955 auto *SU = dyn_cast<SCEVUnknown>(S);
3956 return SU && SU->getValue() == nullptr;
3959 return !ContainsNulls;
3962 bool ScalarEvolution::containsAddRecurrence(const SCEV *S) {
3963 HasRecMapType::iterator I = HasRecMap.find(S);
3964 if (I != HasRecMap.end())
3965 return I->second;
3967 bool FoundAddRec =
3968 SCEVExprContains(S, [](const SCEV *S) { return isa<SCEVAddRecExpr>(S); });
3969 HasRecMap.insert({S, FoundAddRec});
3970 return FoundAddRec;
3973 /// Try to split a SCEVAddExpr into a pair of {SCEV, ConstantInt}.
3974 /// If \p S is a SCEVAddExpr and is composed of a sub SCEV S' and an
3975 /// offset I, then return {S', I}, else return {\p S, nullptr}.
3976 static std::pair<const SCEV *, ConstantInt *> splitAddExpr(const SCEV *S) {
3977 const auto *Add = dyn_cast<SCEVAddExpr>(S);
3978 if (!Add)
3979 return {S, nullptr};
3981 if (Add->getNumOperands() != 2)
3982 return {S, nullptr};
3984 auto *ConstOp = dyn_cast<SCEVConstant>(Add->getOperand(0));
3985 if (!ConstOp)
3986 return {S, nullptr};
3988 return {Add->getOperand(1), ConstOp->getValue()};
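// For instance (illustrative): (5 + %x) splits into {%x, i32 5}, while
// (%x + %y) and (5 + %x + %y) both return {S, nullptr} because the shape
// (exactly two operands with a leading constant) does not match.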
3991 /// Return the ValueOffsetPair set for \p S. \p S can be represented
3992 /// by the value and offset from any ValueOffsetPair in the set.
3993 ScalarEvolution::ValueOffsetPairSetVector *
3994 ScalarEvolution::getSCEVValues(const SCEV *S) {
3995 ExprValueMapType::iterator SI = ExprValueMap.find_as(S);
3996 if (SI == ExprValueMap.end())
3997 return nullptr;
3998 #ifndef NDEBUG
3999 if (VerifySCEVMap) {
4000 // Check there is no dangling Value in the set returned.
4001 for (const auto &VE : SI->second)
4002 assert(ValueExprMap.count(VE.first));
4004 #endif
4005 return &SI->second;
4008 /// Erase Value from ValueExprMap and ExprValueMap. ValueExprMap.erase(V)
4009 /// cannot be used separately. eraseValueFromMap should be used to remove
4010 /// V from ValueExprMap and ExprValueMap at the same time.
4011 void ScalarEvolution::eraseValueFromMap(Value *V) {
4012 ValueExprMapType::iterator I = ValueExprMap.find_as(V);
4013 if (I != ValueExprMap.end()) {
4014 const SCEV *S = I->second;
4015 // Remove {V, 0} from the set of ExprValueMap[S]
4016 if (auto *SV = getSCEVValues(S))
4017 SV->remove({V, nullptr});
4019 // Remove {V, Offset} from the set of ExprValueMap[Stripped]
4020 const SCEV *Stripped;
4021 ConstantInt *Offset;
4022 std::tie(Stripped, Offset) = splitAddExpr(S);
4023 if (Offset != nullptr) {
4024 if (auto *SV = getSCEVValues(Stripped))
4025 SV->remove({V, Offset});
4027 ValueExprMap.erase(V);
4031 /// Check whether value has nuw/nsw/exact set but SCEV does not.
4032 /// TODO: Ideally we would check for poison recursively, but this is
4033 /// better than nothing.
4034 static bool SCEVLostPoisonFlags(const SCEV *S, const Value *V) {
4035 if (auto *I = dyn_cast<Instruction>(V)) {
4036 if (isa<OverflowingBinaryOperator>(I)) {
4037 if (auto *NS = dyn_cast<SCEVNAryExpr>(S)) {
4038 if (I->hasNoSignedWrap() && !NS->hasNoSignedWrap())
4039 return true;
4040 if (I->hasNoUnsignedWrap() && !NS->hasNoUnsignedWrap())
4041 return true;
4043 } else if (isa<PossiblyExactOperator>(I) && I->isExact())
4044 return true;
4046 return false;
4049 /// Return an existing SCEV if it exists, otherwise analyze the expression and
4050 /// create a new one.
4051 const SCEV *ScalarEvolution::getSCEV(Value *V) {
4052 assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
4054 const SCEV *S = getExistingSCEV(V);
4055 if (S == nullptr) {
4056 S = createSCEV(V);
4057 // During PHI resolution, it is possible to create two SCEVs for the same
4058 // V, so we need to double-check whether V->S was inserted into
4059 // ValueExprMap before inserting S->{V, 0} into ExprValueMap.
4060 std::pair<ValueExprMapType::iterator, bool> Pair =
4061 ValueExprMap.insert({SCEVCallbackVH(V, this), S});
4062 if (Pair.second && !SCEVLostPoisonFlags(S, V)) {
4063 ExprValueMap[S].insert({V, nullptr});
4065 // If S == Stripped + Offset, add Stripped -> {V, Offset} into
4066 // ExprValueMap.
4067 const SCEV *Stripped = S;
4068 ConstantInt *Offset = nullptr;
4069 std::tie(Stripped, Offset) = splitAddExpr(S);
4070 // If stripped is SCEVUnknown, don't bother to save
4071 // Stripped -> {V, offset}. It doesn't simplify, and sometimes even
4072 // increases the complexity of the expansion code.
4073 // If V is GetElementPtrInst, don't save Stripped -> {V, offset}
4074 // because it may generate add/sub instead of GEP in SCEV expansion.
4075 if (Offset != nullptr && !isa<SCEVUnknown>(Stripped) &&
4076 !isa<GetElementPtrInst>(V))
4077 ExprValueMap[Stripped].insert({V, Offset});
4080 return S;
4083 const SCEV *ScalarEvolution::getExistingSCEV(Value *V) {
4084 assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
4086 ValueExprMapType::iterator I = ValueExprMap.find_as(V);
4087 if (I != ValueExprMap.end()) {
4088 const SCEV *S = I->second;
4089 if (checkValidity(S))
4090 return S;
4091 eraseValueFromMap(V);
4092 forgetMemoizedResults(S);
4094 return nullptr;
4097 /// Return a SCEV corresponding to -V = -1*V
4098 const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V,
4099 SCEV::NoWrapFlags Flags) {
4100 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
4101 return getConstant(
4102 cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue())));
4104 Type *Ty = V->getType();
4105 Ty = getEffectiveSCEVType(Ty);
4106 return getMulExpr(V, getMinusOne(Ty), Flags);
4109 /// If Expr computes ~A, return A else return nullptr
4110 static const SCEV *MatchNotExpr(const SCEV *Expr) {
4111 const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Expr);
4112 if (!Add || Add->getNumOperands() != 2 ||
4113 !Add->getOperand(0)->isAllOnesValue())
4114 return nullptr;
4116 const SCEVMulExpr *AddRHS = dyn_cast<SCEVMulExpr>(Add->getOperand(1));
4117 if (!AddRHS || AddRHS->getNumOperands() != 2 ||
4118 !AddRHS->getOperand(0)->isAllOnesValue())
4119 return nullptr;
4121 return AddRHS->getOperand(1);
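// In SCEV form ~A is (-1 + (-1 * A)), so e.g. (-1 + (-1 * %x)) matches and
// returns %x, while (-2 + (-1 * %x)) does not match (illustration only).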
4124 /// Return a SCEV corresponding to ~V = -1-V
4125 const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
4126 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
4127 return getConstant(
4128 cast<ConstantInt>(ConstantExpr::getNot(VC->getValue())));
4130 // Fold ~(u|s)(min|max)(~x, ~y) to (u|s)(max|min)(x, y)
4131 if (const SCEVMinMaxExpr *MME = dyn_cast<SCEVMinMaxExpr>(V)) {
4132 auto MatchMinMaxNegation = [&](const SCEVMinMaxExpr *MME) {
4133 SmallVector<const SCEV *, 2> MatchedOperands;
4134 for (const SCEV *Operand : MME->operands()) {
4135 const SCEV *Matched = MatchNotExpr(Operand);
4136 if (!Matched)
4137 return (const SCEV *)nullptr;
4138 MatchedOperands.push_back(Matched);
4140 return getMinMaxExpr(SCEVMinMaxExpr::negate(MME->getSCEVType()),
4141 MatchedOperands);
4143 if (const SCEV *Replaced = MatchMinMaxNegation(MME))
4144 return Replaced;
4147 Type *Ty = V->getType();
4148 Ty = getEffectiveSCEVType(Ty);
4149 return getMinusSCEV(getMinusOne(Ty), V);
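// For example (illustrative): getNotSCEV(smax(~%x, ~%y)) folds to
// smin(%x, %y), the SCEV analogue of De Morgan's law for min/max.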
4152 /// Compute an expression equivalent to S - getPointerBase(S).
4153 static const SCEV *removePointerBase(ScalarEvolution *SE, const SCEV *P) {
4154 assert(P->getType()->isPointerTy());
4156 if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(P)) {
4157 // The base of an AddRec is the first operand.
4158 SmallVector<const SCEV *> Ops{AddRec->operands()};
4159 Ops[0] = removePointerBase(SE, Ops[0]);
4160 // Don't try to transfer nowrap flags for now. We could in some cases
4161 // (for example, if pointer operand of the AddRec is a SCEVUnknown).
4162 return SE->getAddRecExpr(Ops, AddRec->getLoop(), SCEV::FlagAnyWrap);
4164 if (auto *Add = dyn_cast<SCEVAddExpr>(P)) {
4165 // The base of an Add is the pointer operand.
4166 SmallVector<const SCEV *> Ops{Add->operands()};
4167 const SCEV **PtrOp = nullptr;
4168 for (const SCEV *&AddOp : Ops) {
4169 if (AddOp->getType()->isPointerTy()) {
4170 // If we find an Add with multiple pointer operands, treat it as a
4171 // pointer base to be consistent with getPointerBase. Eventually
4172 // we should be able to assert this is impossible.
4173 if (PtrOp)
4174 return SE->getZero(P->getType());
4175 PtrOp = &AddOp;
4178 *PtrOp = removePointerBase(SE, *PtrOp);
4179 // Don't try to transfer nowrap flags for now. We could in some cases
4180 // (for example, if the pointer operand of the Add is a SCEVUnknown).
4181 return SE->getAddExpr(Ops);
4183 // Any other expression must be a pointer base.
4184 return SE->getZero(P->getType());
4187 const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
4188 SCEV::NoWrapFlags Flags,
4189 unsigned Depth) {
4190 // Fast path: X - X --> 0.
4191 if (LHS == RHS)
4192 return getZero(LHS->getType());
4194 // If we subtract two pointers with different pointer bases, bail.
4195 // Eventually, we're going to add an assertion to getMulExpr that we
4196 // can't multiply by a pointer.
4197 if (RHS->getType()->isPointerTy()) {
4198 if (!LHS->getType()->isPointerTy() ||
4199 getPointerBase(LHS) != getPointerBase(RHS))
4200 return getCouldNotCompute();
4201 LHS = removePointerBase(this, LHS);
4202 RHS = removePointerBase(this, RHS);
4205 // We represent LHS - RHS as LHS + (-1)*RHS. This transformation
4206 // makes it so that we cannot make much use of NUW.
4207 auto AddFlags = SCEV::FlagAnyWrap;
4208 const bool RHSIsNotMinSigned =
4209 !getSignedRangeMin(RHS).isMinSignedValue();
4210 if (maskFlags(Flags, SCEV::FlagNSW) == SCEV::FlagNSW) {
4211 // Let M be the minimum representable signed value. Then (-1)*RHS
4212 // signed-wraps if and only if RHS is M. That can happen even for
4213 // a NSW subtraction because e.g. (-1)*M signed-wraps even though
4214 // -1 - M does not. So to transfer NSW from LHS - RHS to LHS +
4215 // (-1)*RHS, we need to prove that RHS != M.
4217 // If LHS is non-negative and we know that LHS - RHS does not
4218 // signed-wrap, then RHS cannot be M. So we can rule out signed-wrap
4219 // either by proving that RHS > M or that LHS >= 0.
4220 if (RHSIsNotMinSigned || isKnownNonNegative(LHS)) {
4221 AddFlags = SCEV::FlagNSW;
4225 // FIXME: Find a correct way to transfer NSW to (-1)*M when LHS -
4226 // RHS is NSW and LHS >= 0.
4228 // The difficulty here is that the NSW flag may have been proven
4229 // relative to a loop that is to be found in a recurrence in LHS and
4230 // not in RHS. Applying NSW to (-1)*M may then let the NSW have a
4231 // larger scope than intended.
4232 auto NegFlags = RHSIsNotMinSigned ? SCEV::FlagNSW : SCEV::FlagAnyWrap;
4234 return getAddExpr(LHS, getNegativeSCEV(RHS, NegFlags), AddFlags, Depth);
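// Example of the NSW reasoning above (illustrative, i8): if the caller
// passes <nsw> and RHS is known not to be -128 (say its signed range is
// [-127, 127]), then (-1)*RHS cannot wrap, so both the negation and the
// resulting add keep <nsw>.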
4237 const SCEV *ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty,
4238 unsigned Depth) {
4239 Type *SrcTy = V->getType();
4240 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4241 "Cannot truncate or zero extend with non-integer arguments!");
4242 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4243 return V; // No conversion
4244 if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
4245 return getTruncateExpr(V, Ty, Depth);
4246 return getZeroExtendExpr(V, Ty, Depth);
4249 const SCEV *ScalarEvolution::getTruncateOrSignExtend(const SCEV *V, Type *Ty,
4250 unsigned Depth) {
4251 Type *SrcTy = V->getType();
4252 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4253 "Cannot truncate or zero extend with non-integer arguments!");
4254 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4255 return V; // No conversion
4256 if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
4257 return getTruncateExpr(V, Ty, Depth);
4258 return getSignExtendExpr(V, Ty, Depth);
4261 const SCEV *
4262 ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) {
4263 Type *SrcTy = V->getType();
4264 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4265 "Cannot noop or zero extend with non-integer arguments!");
4266 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
4267 "getNoopOrZeroExtend cannot truncate!");
4268 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4269 return V; // No conversion
4270 return getZeroExtendExpr(V, Ty);
4273 const SCEV *
4274 ScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) {
4275 Type *SrcTy = V->getType();
4276 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4277 "Cannot noop or sign extend with non-integer arguments!");
4278 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
4279 "getNoopOrSignExtend cannot truncate!");
4280 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4281 return V; // No conversion
4282 return getSignExtendExpr(V, Ty);
4285 const SCEV *
4286 ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) {
4287 Type *SrcTy = V->getType();
4288 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4289 "Cannot noop or any extend with non-integer arguments!");
4290 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
4291 "getNoopOrAnyExtend cannot truncate!");
4292 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4293 return V; // No conversion
4294 return getAnyExtendExpr(V, Ty);
4297 const SCEV *
4298 ScalarEvolution::getTruncateOrNoop(const SCEV *V, Type *Ty) {
4299 Type *SrcTy = V->getType();
4300 assert(SrcTy->isIntOrPtrTy() && Ty->isIntOrPtrTy() &&
4301 "Cannot truncate or noop with non-integer arguments!");
4302 assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
4303 "getTruncateOrNoop cannot extend!");
4304 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
4305 return V; // No conversion
4306 return getTruncateExpr(V, Ty);
4309 const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS,
4310 const SCEV *RHS) {
4311 const SCEV *PromotedLHS = LHS;
4312 const SCEV *PromotedRHS = RHS;
4314 if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
4315 PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
4316 else
4317 PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
4319 return getUMaxExpr(PromotedLHS, PromotedRHS);
4322 const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS,
4323 const SCEV *RHS) {
4324 SmallVector<const SCEV *, 2> Ops = { LHS, RHS };
4325 return getUMinFromMismatchedTypes(Ops);
4328 const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(
4329 SmallVectorImpl<const SCEV *> &Ops) {
4330 assert(!Ops.empty() && "At least one operand must be!");
4331 // Trivial case.
4332 if (Ops.size() == 1)
4333 return Ops[0];
4335 // Find the max type first.
4336 Type *MaxType = nullptr;
4337 for (auto *S : Ops)
4338 if (MaxType)
4339 MaxType = getWiderType(MaxType, S->getType());
4340 else
4341 MaxType = S->getType();
4342 assert(MaxType && "Failed to find maximum type!");
4344 // Extend all ops to max type.
4345 SmallVector<const SCEV *, 2> PromotedOps;
4346 for (auto *S : Ops)
4347 PromotedOps.push_back(getNoopOrZeroExtend(S, MaxType));
4349 // Generate umin.
4350 return getUMinExpr(PromotedOps);
4353 const SCEV *ScalarEvolution::getPointerBase(const SCEV *V) {
4354 // A pointer operand may evaluate to a nonpointer expression, such as null.
4355 if (!V->getType()->isPointerTy())
4356 return V;
4358 while (true) {
4359 if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
4360 V = AddRec->getStart();
4361 } else if (auto *Add = dyn_cast<SCEVAddExpr>(V)) {
4362 const SCEV *PtrOp = nullptr;
4363 for (const SCEV *AddOp : Add->operands()) {
4364 if (AddOp->getType()->isPointerTy()) {
4365 // Cannot find the base of an expression with multiple pointer ops.
4366 if (PtrOp)
4367 return V;
4368 PtrOp = AddOp;
4371 if (!PtrOp) // All operands were non-pointer.
4372 return V;
4373 V = PtrOp;
4374 } else // Not something we can look further into.
4375 return V;
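// For example (illustrative): getPointerBase({(4 + %p),+,4}<L>) walks to
// the start (4 + %p), then into its single pointer operand, returning %p.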
4379 /// Push users of the given Instruction onto the given Worklist.
4380 static void
4381 PushDefUseChildren(Instruction *I,
4382 SmallVectorImpl<Instruction *> &Worklist) {
4383 // Push the def-use children onto the Worklist stack.
4384 for (User *U : I->users())
4385 Worklist.push_back(cast<Instruction>(U));
4388 void ScalarEvolution::forgetSymbolicName(Instruction *PN, const SCEV *SymName) {
4389 SmallVector<Instruction *, 16> Worklist;
4390 PushDefUseChildren(PN, Worklist);
4392 SmallPtrSet<Instruction *, 8> Visited;
4393 Visited.insert(PN);
4394 while (!Worklist.empty()) {
4395 Instruction *I = Worklist.pop_back_val();
4396 if (!Visited.insert(I).second)
4397 continue;
4399 auto It = ValueExprMap.find_as(static_cast<Value *>(I));
4400 if (It != ValueExprMap.end()) {
4401 const SCEV *Old = It->second;
4403 // Short-circuit the def-use traversal if the symbolic name
4404 // ceases to appear in expressions.
4405 if (Old != SymName && !hasOperand(Old, SymName))
4406 continue;
4408 // SCEVUnknown for a PHI either means that it has an unrecognized
4409 // structure, it's a PHI that's in the process of being computed
4410 // by createNodeForPHI, or it's a single-value PHI. In the first case,
4411 // additional loop trip count information isn't going to change anything.
4412 // In the second case, createNodeForPHI will perform the necessary
4413 // updates on its own when it gets to that point. In the third, we do
4414 // want to forget the SCEVUnknown.
4415 if (!isa<PHINode>(I) ||
4416 !isa<SCEVUnknown>(Old) ||
4417 (I != PN && Old == SymName)) {
4418 eraseValueFromMap(It->first);
4419 forgetMemoizedResults(Old);
4423 PushDefUseChildren(I, Worklist);
4427 namespace {
4429 /// Takes SCEV S and Loop L. For each AddRec sub-expression whose loop is L,
4430 /// use its start expression. If the loop is not L, then: if IgnoreOtherLoops
4431 /// is true, use the AddRec itself; otherwise the rewrite cannot be done.
4432 /// If the SCEV contains a loop-variant SCEVUnknown, the rewrite cannot be
4433 /// done either.
4434 class SCEVInitRewriter : public SCEVRewriteVisitor<SCEVInitRewriter> {
4435 public:
4436 static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE,
4437 bool IgnoreOtherLoops = true) {
4438 SCEVInitRewriter Rewriter(L, SE);
4439 const SCEV *Result = Rewriter.visit(S);
4440 if (Rewriter.hasSeenLoopVariantSCEVUnknown())
4441 return SE.getCouldNotCompute();
4442 return Rewriter.hasSeenOtherLoops() && !IgnoreOtherLoops
4443 ? SE.getCouldNotCompute()
4444 : Result;
4447 const SCEV *visitUnknown(const SCEVUnknown *Expr) {
4448 if (!SE.isLoopInvariant(Expr, L))
4449 SeenLoopVariantSCEVUnknown = true;
4450 return Expr;
4453 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
4454 // Only re-write AddRecExprs for this loop.
4455 if (Expr->getLoop() == L)
4456 return Expr->getStart();
4457 SeenOtherLoops = true;
4458 return Expr;
4461 bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; }
4463 bool hasSeenOtherLoops() { return SeenOtherLoops; }
4465 private:
4466 explicit SCEVInitRewriter(const Loop *L, ScalarEvolution &SE)
4467 : SCEVRewriteVisitor(SE), L(L) {}
4469 const Loop *L;
4470 bool SeenLoopVariantSCEVUnknown = false;
4471 bool SeenOtherLoops = false;
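// Illustrative example (assuming %a, %b, %c and %x are invariant in L):
//   rewrite({%a,+,%b}<L> + %c, L, SE)        --> (%a + %c)
//   rewrite({%x,+,1}<L2>, L, SE)             --> {%x,+,1}<L2>
//   rewrite({%x,+,1}<L2>, L, SE, false)      --> CouldNotCompute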
4474 /// Takes SCEV S and Loop L. For each AddRec sub-expression, use its
4475 /// post-increment expression if its Loop is L; if the AddRec is on some
4476 /// other loop, use the AddRec itself.
4477 /// If the SCEV contains a loop-variant SCEVUnknown, the rewrite cannot be done.
4478 class SCEVPostIncRewriter : public SCEVRewriteVisitor<SCEVPostIncRewriter> {
4479 public:
4480 static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE) {
4481 SCEVPostIncRewriter Rewriter(L, SE);
4482 const SCEV *Result = Rewriter.visit(S);
4483 return Rewriter.hasSeenLoopVariantSCEVUnknown()
4484 ? SE.getCouldNotCompute()
4485 : Result;
4488 const SCEV *visitUnknown(const SCEVUnknown *Expr) {
4489 if (!SE.isLoopInvariant(Expr, L))
4490 SeenLoopVariantSCEVUnknown = true;
4491 return Expr;
4494 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
4495 // Only re-write AddRecExprs for this loop.
4496 if (Expr->getLoop() == L)
4497 return Expr->getPostIncExpr(SE);
4498 SeenOtherLoops = true;
4499 return Expr;
4502 bool hasSeenLoopVariantSCEVUnknown() { return SeenLoopVariantSCEVUnknown; }
4504 bool hasSeenOtherLoops() { return SeenOtherLoops; }
4506 private:
4507 explicit SCEVPostIncRewriter(const Loop *L, ScalarEvolution &SE)
4508 : SCEVRewriteVisitor(SE), L(L) {}
4510 const Loop *L;
4511 bool SeenLoopVariantSCEVUnknown = false;
4512 bool SeenOtherLoops = false;
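// Illustrative example: rewrite({%a,+,%s}<L>, L, SE) yields the
// post-increment form {(%a + %s),+,%s}<L>; AddRecs on other loops are
// left as-is, and a loop-variant SCEVUnknown anywhere in S makes the
// whole rewrite return CouldNotCompute.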
4515 /// This class evaluates the compare condition by matching it against the
4516 /// condition of the loop latch. If there is a match, we assume a true value
4517 /// for the condition while building SCEV nodes.
4518 class SCEVBackedgeConditionFolder
4519 : public SCEVRewriteVisitor<SCEVBackedgeConditionFolder> {
4520 public:
4521 static const SCEV *rewrite(const SCEV *S, const Loop *L,
4522 ScalarEvolution &SE) {
4523 bool IsPosBECond = false;
4524 Value *BECond = nullptr;
4525 if (BasicBlock *Latch = L->getLoopLatch()) {
4526 BranchInst *BI = dyn_cast<BranchInst>(Latch->getTerminator());
4527 if (BI && BI->isConditional()) {
4528 assert(BI->getSuccessor(0) != BI->getSuccessor(1) &&
4529 "Both outgoing branches should not target same header!");
4530 BECond = BI->getCondition();
4531 IsPosBECond = BI->getSuccessor(0) == L->getHeader();
4532 } else {
4533 return S;
4536 SCEVBackedgeConditionFolder Rewriter(L, BECond, IsPosBECond, SE);
4537 return Rewriter.visit(S);
4540 const SCEV *visitUnknown(const SCEVUnknown *Expr) {
4541 const SCEV *Result = Expr;
4542 bool InvariantF = SE.isLoopInvariant(Expr, L);
4544 if (!InvariantF) {
4545 Instruction *I = cast<Instruction>(Expr->getValue());
4546 switch (I->getOpcode()) {
4547 case Instruction::Select: {
4548 SelectInst *SI = cast<SelectInst>(I);
4549 Optional<const SCEV *> Res =
4550 compareWithBackedgeCondition(SI->getCondition());
4551 if (Res.hasValue()) {
4552 bool IsOne = cast<SCEVConstant>(Res.getValue())->getValue()->isOne();
4553 Result = SE.getSCEV(IsOne ? SI->getTrueValue() : SI->getFalseValue());
4555 break;
4557 default: {
4558 Optional<const SCEV *> Res = compareWithBackedgeCondition(I);
4559 if (Res.hasValue())
4560 Result = Res.getValue();
4561 break;
4565 return Result;
4568 private:
4569 explicit SCEVBackedgeConditionFolder(const Loop *L, Value *BECond,
4570 bool IsPosBECond, ScalarEvolution &SE)
4571 : SCEVRewriteVisitor(SE), L(L), BackedgeCond(BECond),
4572 IsPositiveBECond(IsPosBECond) {}
4574 Optional<const SCEV *> compareWithBackedgeCondition(Value *IC);
4576 const Loop *L;
4577 /// Loop backedge condition.
4578 Value *BackedgeCond = nullptr;
4579 /// Set to true if the backedge is taken when the branch condition is true.
4580 bool IsPositiveBECond;
4583 Optional<const SCEV *>
4584 SCEVBackedgeConditionFolder::compareWithBackedgeCondition(Value *IC) {
4586 // If the value matches the backedge condition of the loop latch,
4587 // then return a constant evolution node based on whether the
4588 // backedge branch is taken.
4589 if (BackedgeCond == IC)
4590 return IsPositiveBECond ? SE.getOne(Type::getInt1Ty(SE.getContext()))
4591 : SE.getZero(Type::getInt1Ty(SE.getContext()));
4592 return None;
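// Illustrative example (hypothetical IR): for a latch ending in
//   br i1 %cmp, label %header, label %exit
// the backedge is taken when %cmp is true (IsPositiveBECond), so folding
//   %v = select i1 %cmp, i32 %x, i32 %y
// under SCEVBackedgeConditionFolder yields the SCEV of %x.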
4595 class SCEVShiftRewriter : public SCEVRewriteVisitor<SCEVShiftRewriter> {
4596 public:
4597 static const SCEV *rewrite(const SCEV *S, const Loop *L,
4598 ScalarEvolution &SE) {
4599 SCEVShiftRewriter Rewriter(L, SE);
4600 const SCEV *Result = Rewriter.visit(S);
4601 return Rewriter.isValid() ? Result : SE.getCouldNotCompute();
4604 const SCEV *visitUnknown(const SCEVUnknown *Expr) {
4605 // Only allow AddRecExprs for this loop.
4606 if (!SE.isLoopInvariant(Expr, L))
4607 Valid = false;
4608 return Expr;
4611 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) {
4612 if (Expr->getLoop() == L && Expr->isAffine())
4613 return SE.getMinusSCEV(Expr, Expr->getStepRecurrence(SE));
4614 Valid = false;
4615 return Expr;
4618 bool isValid() { return Valid; }
4620 private:
4621 explicit SCEVShiftRewriter(const Loop *L, ScalarEvolution &SE)
4622 : SCEVRewriteVisitor(SE), L(L) {}
4624 const Loop *L;
4625 bool Valid = true;
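// Illustrative example: SCEVShiftRewriter shifts affine AddRecs on L back
// by one iteration, e.g. {1,+,1}<L> - 1 == {0,+,1}<L>; createAddRecFromPHI
// uses this below to recognize PHI(f(0), f({1,+,1})) as f({0,+,1}).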
4628 } // end anonymous namespace
4630 SCEV::NoWrapFlags
4631 ScalarEvolution::proveNoWrapViaConstantRanges(const SCEVAddRecExpr *AR) {
4632 if (!AR->isAffine())
4633 return SCEV::FlagAnyWrap;
4635 using OBO = OverflowingBinaryOperator;
4637 SCEV::NoWrapFlags Result = SCEV::FlagAnyWrap;
4639 if (!AR->hasNoSignedWrap()) {
4640 ConstantRange AddRecRange = getSignedRange(AR);
4641 ConstantRange IncRange = getSignedRange(AR->getStepRecurrence(*this));
4643 auto NSWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
4644 Instruction::Add, IncRange, OBO::NoSignedWrap);
4645 if (NSWRegion.contains(AddRecRange))
4646 Result = ScalarEvolution::setFlags(Result, SCEV::FlagNSW);
4649 if (!AR->hasNoUnsignedWrap()) {
4650 ConstantRange AddRecRange = getUnsignedRange(AR);
4651 ConstantRange IncRange = getUnsignedRange(AR->getStepRecurrence(*this));
4653 auto NUWRegion = ConstantRange::makeGuaranteedNoWrapRegion(
4654 Instruction::Add, IncRange, OBO::NoUnsignedWrap);
4655 if (NUWRegion.contains(AddRecRange))
4656 Result = ScalarEvolution::setFlags(Result, SCEV::FlagNUW);
4659 return Result;
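// Illustrative example: for AR = {0,+,1}<L> with a known signed range of
// [0, 100) and step range [1, 2), makeGuaranteedNoWrapRegion above yields
// the set of values to which adding the step cannot sign-overflow; since
// the whole range of AR, [0, 100), lies inside it, FlagNSW can be set.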
4662 SCEV::NoWrapFlags
4663 ScalarEvolution::proveNoSignedWrapViaInduction(const SCEVAddRecExpr *AR) {
4664 SCEV::NoWrapFlags Result = AR->getNoWrapFlags();
4666 if (AR->hasNoSignedWrap())
4667 return Result;
4669 if (!AR->isAffine())
4670 return Result;
4672 const SCEV *Step = AR->getStepRecurrence(*this);
4673 const Loop *L = AR->getLoop();
4675 // Check whether the backedge-taken count is SCEVCouldNotCompute.
4676 // Note that this serves two purposes: It filters out loops that are
4677 // simply not analyzable, and it covers the case where this code is
4678 // being called from within backedge-taken count analysis, such that
4679 // attempting to ask for the backedge-taken count would likely result
4680 // in infinite recursion. In the latter case, the analysis code will
4681 // cope with a conservative value, and it will take care to purge
4682 // that value once it has finished.
4683 const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
4685 // Normally, in the cases we can prove no-overflow via a
4686 // backedge guarding condition, we can also compute a backedge
4687 // taken count for the loop. The exceptions are assumptions and
4688 // guards present in the loop -- SCEV is not great at exploiting
4689 // these to compute max backedge taken counts, but can still use
4690 // these to prove lack of overflow. Use this fact to avoid
4691 // doing extra work that may not pay off.
4693 if (isa<SCEVCouldNotCompute>(MaxBECount) && !HasGuards &&
4694 AC.assumptions().empty())
4695 return Result;
4697 // If the backedge is guarded by a comparison with the pre-inc value the
4698 // addrec is safe. Also, if the entry is guarded by a comparison with the
4699 // start value and the backedge is guarded by a comparison with the post-inc
4700 // value, the addrec is safe.
4701 ICmpInst::Predicate Pred;
4702 const SCEV *OverflowLimit =
4703 getSignedOverflowLimitForStep(Step, &Pred, this);
4704 if (OverflowLimit &&
4705 (isLoopBackedgeGuardedByCond(L, Pred, AR, OverflowLimit) ||
4706 isKnownOnEveryIteration(Pred, AR, OverflowLimit))) {
4707 Result = setFlags(Result, SCEV::FlagNSW);
4709 return Result;
4711 SCEV::NoWrapFlags
4712 ScalarEvolution::proveNoUnsignedWrapViaInduction(const SCEVAddRecExpr *AR) {
4713 SCEV::NoWrapFlags Result = AR->getNoWrapFlags();
4715 if (AR->hasNoUnsignedWrap())
4716 return Result;
4718 if (!AR->isAffine())
4719 return Result;
4721 const SCEV *Step = AR->getStepRecurrence(*this);
4722 unsigned BitWidth = getTypeSizeInBits(AR->getType());
4723 const Loop *L = AR->getLoop();
4725 // Check whether the backedge-taken count is SCEVCouldNotCompute.
4726 // Note that this serves two purposes: It filters out loops that are
4727 // simply not analyzable, and it covers the case where this code is
4728 // being called from within backedge-taken count analysis, such that
4729 // attempting to ask for the backedge-taken count would likely result
4730 // in infinite recursion. In the latter case, the analysis code will
4731 // cope with a conservative value, and it will take care to purge
4732 // that value once it has finished.
4733 const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(L);
4735 // Normally, in the cases we can prove no-overflow via a
4736 // backedge guarding condition, we can also compute a backedge
4737 // taken count for the loop. The exceptions are assumptions and
4738 // guards present in the loop -- SCEV is not great at exploiting
4739 // these to compute max backedge taken counts, but can still use
4740 // these to prove lack of overflow. Use this fact to avoid
4741 // doing extra work that may not pay off.
4743 if (isa<SCEVCouldNotCompute>(MaxBECount) && !HasGuards &&
4744 AC.assumptions().empty())
4745 return Result;
4747 // If the backedge is guarded by a comparison with the pre-inc value the
4748 // addrec is safe. Also, if the entry is guarded by a comparison with the
4749 // start value and the backedge is guarded by a comparison with the post-inc
4750 // value, the addrec is safe.
4751 if (isKnownPositive(Step)) {
4752 const SCEV *N = getConstant(APInt::getMinValue(BitWidth) -
4753 getUnsignedRangeMax(Step));
4754 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) ||
4755 isKnownOnEveryIteration(ICmpInst::ICMP_ULT, AR, N)) {
4756 Result = setFlags(Result, SCEV::FlagNUW);
4760 return Result;
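// Illustrative example: for AR = {%start,+,1}<L> on i8, N above is
// 0 - 1 == 255, so a latch guard of the form (%iv ult 255) -- the
// pre-increment value never reaching the last representable value --
// proves the increment cannot wrap, and FlagNUW is set.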
4763 namespace {
4765 /// Represents an abstract binary operation. This may exist as a
4766 /// normal instruction or constant expression, or may have been
4767 /// derived from an expression tree.
4768 struct BinaryOp {
4769 unsigned Opcode;
4770 Value *LHS;
4771 Value *RHS;
4772 bool IsNSW = false;
4773 bool IsNUW = false;
4775 /// Op is set if this BinaryOp corresponds to a concrete LLVM instruction or
4776 /// constant expression.
4777 Operator *Op = nullptr;
4779 explicit BinaryOp(Operator *Op)
4780 : Opcode(Op->getOpcode()), LHS(Op->getOperand(0)), RHS(Op->getOperand(1)),
4781 Op(Op) {
4782 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(Op)) {
4783 IsNSW = OBO->hasNoSignedWrap();
4784 IsNUW = OBO->hasNoUnsignedWrap();
4788 explicit BinaryOp(unsigned Opcode, Value *LHS, Value *RHS, bool IsNSW = false,
4789 bool IsNUW = false)
4790 : Opcode(Opcode), LHS(LHS), RHS(RHS), IsNSW(IsNSW), IsNUW(IsNUW) {}
4793 } // end anonymous namespace
4795 /// Try to map \p V into a BinaryOp, and return \c None on failure.
4796 static Optional<BinaryOp> MatchBinaryOp(Value *V, DominatorTree &DT) {
4797 auto *Op = dyn_cast<Operator>(V);
4798 if (!Op)
4799 return None;
4801 // Implementation detail: all the cleverness here should happen without
4802 // creating new SCEV expressions -- our caller knows tricks to avoid creating
4803 // SCEV expressions when possible, and we should not break that.
4805 switch (Op->getOpcode()) {
4806 case Instruction::Add:
4807 case Instruction::Sub:
4808 case Instruction::Mul:
4809 case Instruction::UDiv:
4810 case Instruction::URem:
4811 case Instruction::And:
4812 case Instruction::Or:
4813 case Instruction::AShr:
4814 case Instruction::Shl:
4815 return BinaryOp(Op);
4817 case Instruction::Xor:
4818 if (auto *RHSC = dyn_cast<ConstantInt>(Op->getOperand(1)))
4819 // If the RHS of the xor is a signmask, then this is just an add.
4820 // Instcombine turns add of signmask into xor as a strength reduction step.
4821 if (RHSC->getValue().isSignMask())
4822 return BinaryOp(Instruction::Add, Op->getOperand(0), Op->getOperand(1));
4823 return BinaryOp(Op);
4825 case Instruction::LShr:
4826 // Turn a logical shift right by a constant into an unsigned divide.
4827 if (ConstantInt *SA = dyn_cast<ConstantInt>(Op->getOperand(1))) {
4828 uint32_t BitWidth = cast<IntegerType>(Op->getType())->getBitWidth();
4830 // If the shift count is not less than the bitwidth, the result of
4831 // the shift is undefined. Don't try to analyze it, because the
4832 // resolution chosen here may differ from the resolution chosen in
4833 // other parts of the compiler.
4834 if (SA->getValue().ult(BitWidth)) {
4835 Constant *X =
4836 ConstantInt::get(SA->getContext(),
4837 APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
4838 return BinaryOp(Instruction::UDiv, Op->getOperand(0), X);
4841 return BinaryOp(Op);
4843 case Instruction::ExtractValue: {
4844 auto *EVI = cast<ExtractValueInst>(Op);
4845 if (EVI->getNumIndices() != 1 || EVI->getIndices()[0] != 0)
4846 break;
4848 auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand());
4849 if (!WO)
4850 break;
4852 Instruction::BinaryOps BinOp = WO->getBinaryOp();
4853 bool Signed = WO->isSigned();
4854 // TODO: Should add nuw/nsw flags for mul as well.
4855 if (BinOp == Instruction::Mul || !isOverflowIntrinsicNoWrap(WO, DT))
4856 return BinaryOp(BinOp, WO->getLHS(), WO->getRHS());
4858 // Now that we know that all uses of the arithmetic-result component of
4859 // WO are guarded by the overflow check, we can go ahead and pretend
4860 // that the arithmetic is non-overflowing.
4861 return BinaryOp(BinOp, WO->getLHS(), WO->getRHS(),
4862 /* IsNSW = */ Signed, /* IsNUW = */ !Signed);
4865 default:
4866 break;
4869 // Recognize the intrinsic loop.decrement.reg; as it has exactly the same
4870 // semantics as a Sub, return a binary sub expression.
4871 if (auto *II = dyn_cast<IntrinsicInst>(V))
4872 if (II->getIntrinsicID() == Intrinsic::loop_decrement_reg)
4873 return BinaryOp(Instruction::Sub, II->getOperand(0), II->getOperand(1));
4875 return None;
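// Illustrative examples of the folds above:
//   xor i32 %x, -2147483648    ; sign-mask xor, matched as
//                              ; add i32 %x, -2147483648
//   lshr i32 %x, 3             ; matched as udiv i32 %x, 8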
4878 /// Helper function to createAddRecFromPHIWithCasts. We have a phi
4879 /// node whose symbolic (unknown) SCEV is \p SymbolicPHI, which is updated via
4880 /// the loop backedge by a SCEVAddExpr, possibly also with a few casts on the
4881 /// way. This function checks if \p Op, an operand of this SCEVAddExpr,
4882 /// follows one of the following patterns:
4883 /// Op == (SExt ix (Trunc iy (%SymbolicPHI) to ix) to iy)
4884 /// Op == (ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy)
4885 /// If the SCEV expression of \p Op conforms with one of the expected patterns
4886 /// we return the type of the truncation operation, and indicate whether the
4887 /// truncated type should be treated as signed/unsigned by setting
4888 /// \p Signed to true/false, respectively.
4889 static Type *isSimpleCastedPHI(const SCEV *Op, const SCEVUnknown *SymbolicPHI,
4890 bool &Signed, ScalarEvolution &SE) {
4891 // The case where Op == SymbolicPHI (that is, with no type conversions on
4892 // the way) is handled by the regular add recurrence creating logic and
4893 // would have already been triggered in createAddRecForPHI. Reaching it here
4894 // means that createAddRecFromPHI had failed for this PHI before (e.g.,
4895 // because one of the other operands of the SCEVAddExpr updating this PHI is
4896 // not invariant).
4898 // Here we look for the case where Op = (ext(trunc(SymbolicPHI))), and in
4899 // this case predicates that allow us to prove that Op == SymbolicPHI will
4900 // be added.
4901 if (Op == SymbolicPHI)
4902 return nullptr;
4904 unsigned SourceBits = SE.getTypeSizeInBits(SymbolicPHI->getType());
4905 unsigned NewBits = SE.getTypeSizeInBits(Op->getType());
4906 if (SourceBits != NewBits)
4907 return nullptr;
4909 const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(Op);
4910 const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(Op);
4911 if (!SExt && !ZExt)
4912 return nullptr;
4913 const SCEVTruncateExpr *Trunc =
4914 SExt ? dyn_cast<SCEVTruncateExpr>(SExt->getOperand())
4915 : dyn_cast<SCEVTruncateExpr>(ZExt->getOperand());
4916 if (!Trunc)
4917 return nullptr;
4918 const SCEV *X = Trunc->getOperand();
4919 if (X != SymbolicPHI)
4920 return nullptr;
4921 Signed = SExt != nullptr;
4922 return Trunc->getType();
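// Illustrative example: for an i64 phi %X,
//   Op == (SExt i32 (Trunc i64 (%X) to i32) to i64)
// returns the i32 truncation type with Signed == true, and the ZExt form
// returns the same type with Signed == false.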
4925 static const Loop *isIntegerLoopHeaderPHI(const PHINode *PN, LoopInfo &LI) {
4926 if (!PN->getType()->isIntegerTy())
4927 return nullptr;
4928 const Loop *L = LI.getLoopFor(PN->getParent());
4929 if (!L || L->getHeader() != PN->getParent())
4930 return nullptr;
4931 return L;
4934 // Analyze \p SymbolicPHI, a SCEV expression of a phi node, and check if the
4935 // computation that updates the phi follows the following pattern:
4936 // (SExt/ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) + InvariantAccum
4937 // which corresponds to a phi->trunc->sext/zext->add->phi update chain.
4938 // If so, try to see if it can be rewritten as an AddRecExpr under some
4939 // Predicates. If successful, return them as a pair. Also cache the results
4940 // of the analysis.
4942 // Example usage scenario:
4943 // Say the Rewriter is called for the following SCEV:
4944 // 8 * ((sext i32 (trunc i64 %X to i32) to i64) + %Step)
4945 // where:
4946 // %X = phi i64 (%Start, %BEValue)
4947 // It will visitMul->visitAdd->visitSExt->visitTrunc->visitUnknown(%X),
4948 // and call this function with %SymbolicPHI = %X.
4950 // The analysis will find that the value coming around the backedge has
4951 // the following SCEV:
4952 // BEValue = ((sext i32 (trunc i64 %X to i32) to i64) + %Step)
4953 // Upon concluding that this matches the desired pattern, the function
4954 // will return the pair {NewAddRec, SmallPredsVec} where:
4955 // NewAddRec = {%Start,+,%Step}
4956 // SmallPredsVec = {P1, P2, P3} as follows:
4957 // P1(WrapPred): AR: {trunc(%Start),+,(trunc %Step)}<nsw> Flags: <nssw>
4958 // P2(EqualPred): %Start == (sext i32 (trunc i64 %Start to i32) to i64)
4959 // P3(EqualPred): %Step == (sext i32 (trunc i64 %Step to i32) to i64)
4960 // The returned pair means that SymbolicPHI can be rewritten into NewAddRec
4961 // under the predicates {P1,P2,P3}.
4962 // This predicated rewrite will be cached in PredicatedSCEVRewrites:
4963 // PredicatedSCEVRewrites[{%X,L}] = {NewAddRec, {P1,P2,P3}}
4965 // TODO's:
4967 // 1) Extend the Induction descriptor to also support inductions that involve
4968 // casts: When needed (namely, when we are called in the context of the
4969 // vectorizer induction analysis), a Set of cast instructions will be
4970 // populated by this method, and provided back to isInductionPHI. This is
4971 // needed to allow the vectorizer to properly record them to be ignored by
4972 // the cost model and to avoid vectorizing them (otherwise these casts,
4973 // which are redundant under the runtime overflow checks, will be
4974 // vectorized, which can be costly).
4976 // 2) Support additional induction/PHISCEV patterns: We also want to support
4977 // inductions where the sext-trunc / zext-trunc operations (partly) occur
4978 // after the induction update operation (the induction increment):
4980 // (Trunc iy (SExt/ZExt ix (%SymbolicPHI + InvariantAccum) to iy) to ix)
4981 // which corresponds to a phi->add->trunc->sext/zext->phi update chain.
4983 // (Trunc iy ((SExt/ZExt ix (%SymbolicPhi) to iy) + InvariantAccum) to ix)
4984 // which corresponds to a phi->trunc->add->sext/zext->phi update chain.
4986 // 3) Outline common code with createAddRecFromPHI to avoid duplication.
4987 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
4988 ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI) {
4989 SmallVector<const SCEVPredicate *, 3> Predicates;
4991 // *** Part1: Analyze if we have a phi-with-cast pattern for which we can
4992 // return an AddRec expression under some predicate.
4994 auto *PN = cast<PHINode>(SymbolicPHI->getValue());
4995 const Loop *L = isIntegerLoopHeaderPHI(PN, LI);
4996 assert(L && "Expecting an integer loop header phi");
4998 // The loop may have multiple entrances or multiple exits; we can analyze
4999 // this phi as an addrec if it has a unique entry value and a unique
5000 // backedge value.
5001 Value *BEValueV = nullptr, *StartValueV = nullptr;
5002 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
5003 Value *V = PN->getIncomingValue(i);
5004 if (L->contains(PN->getIncomingBlock(i))) {
5005 if (!BEValueV) {
5006 BEValueV = V;
5007 } else if (BEValueV != V) {
5008 BEValueV = nullptr;
5009 break;
5011 } else if (!StartValueV) {
5012 StartValueV = V;
5013 } else if (StartValueV != V) {
5014 StartValueV = nullptr;
5015 break;
5018 if (!BEValueV || !StartValueV)
5019 return None;
5021 const SCEV *BEValue = getSCEV(BEValueV);
5023 // If the value coming around the backedge is an add with the symbolic
5024 // value we just inserted, possibly with casts that we can ignore under
5025 // an appropriate runtime guard, then we found a simple induction variable!
5026 const auto *Add = dyn_cast<SCEVAddExpr>(BEValue);
5027 if (!Add)
5028 return None;
5030 // If there is a single occurrence of the symbolic value, possibly
5031 // casted, replace it with a recurrence.
5032 unsigned FoundIndex = Add->getNumOperands();
5033 Type *TruncTy = nullptr;
5034 bool Signed;
5035 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
5036 if ((TruncTy =
5037 isSimpleCastedPHI(Add->getOperand(i), SymbolicPHI, Signed, *this)))
5038 if (FoundIndex == e) {
5039 FoundIndex = i;
5040 break;
5043 if (FoundIndex == Add->getNumOperands())
5044 return None;
5046 // Create an add with everything but the specified operand.
5047 SmallVector<const SCEV *, 8> Ops;
5048 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
5049 if (i != FoundIndex)
5050 Ops.push_back(Add->getOperand(i));
5051 const SCEV *Accum = getAddExpr(Ops);
5053 // The runtime checks will not be valid if the step amount is
5054 // varying inside the loop.
5055 if (!isLoopInvariant(Accum, L))
5056 return None;
5058 // *** Part2: Create the predicates
5060 // Analysis was successful: we have a phi-with-cast pattern for which we
5061 // can return an AddRec expression under the following predicates:
5063 // P1: A Wrap predicate that guarantees that Trunc(Start) + i*Trunc(Accum)
5064 // fits within the truncated type (does not overflow) for i = 0 to n-1.
5065 // P2: An Equal predicate that guarantees that
5066 // Start = (Ext ix (Trunc iy (Start) to ix) to iy)
5067 // P3: An Equal predicate that guarantees that
5068 // Accum = (Ext ix (Trunc iy (Accum) to ix) to iy)
5070 // As we next prove, the above predicates guarantee that:
5071 // Start + i*Accum = (Ext ix (Trunc iy ( Start + i*Accum ) to ix) to iy)
5074 // More formally, we want to prove that:
5075 // Expr(i+1) = Start + (i+1) * Accum
5076 // = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum
5078 // Given that:
5079 // 1) Expr(0) = Start
5080 // 2) Expr(1) = Start + Accum
5081 // = (Ext ix (Trunc iy (Start) to ix) to iy) + Accum :: from P2
5082 // 3) Induction hypothesis (step i):
5083 // Expr(i) = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum
5085 // Proof:
5086 // Expr(i+1) =
5087 // = Start + (i+1)*Accum
5088 // = (Start + i*Accum) + Accum
5089 // = Expr(i) + Accum
5090 // = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum + Accum
5091 // :: from step i
5093 // = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy) + Accum + Accum
5095 // = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy)
5096 // + (Ext ix (Trunc iy (Accum) to ix) to iy)
5097 // + Accum :: from P3
5099 // = (Ext ix (Trunc iy ((Start + (i-1)*Accum) + Accum) to ix) to iy)
5100 // + Accum :: from P1: Ext(x)+Ext(y)=>Ext(x+y)
5102 // = (Ext ix (Trunc iy (Start + i*Accum) to ix) to iy) + Accum
5103 // = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum
5105 // By induction, the same applies to all iterations 1<=i<n:
5108 // Create a truncated addrec for which we will add a no overflow check (P1).
5109 const SCEV *StartVal = getSCEV(StartValueV);
5110 const SCEV *PHISCEV =
5111 getAddRecExpr(getTruncateExpr(StartVal, TruncTy),
5112 getTruncateExpr(Accum, TruncTy), L, SCEV::FlagAnyWrap);
5114 // PHISCEV can be either a SCEVConstant or a SCEVAddRecExpr.
5115 // e.g., if the truncated Accum is 0 and StartVal is a constant, then PHISCEV
5116 // will be constant.
5118 // If PHISCEV is a constant, then P1 degenerates into P2 or P3, so we don't
5119 // add P1.
5120 if (const auto *AR = dyn_cast<SCEVAddRecExpr>(PHISCEV)) {
5121 SCEVWrapPredicate::IncrementWrapFlags AddedFlags =
5122 Signed ? SCEVWrapPredicate::IncrementNSSW
5123 : SCEVWrapPredicate::IncrementNUSW;
5124 const SCEVPredicate *AddRecPred = getWrapPredicate(AR, AddedFlags);
5125 Predicates.push_back(AddRecPred);
5128 // Create the Equal Predicates P2,P3:
5130 // It is possible that the predicates P2 and/or P3 are computable at
5131 // compile time due to StartVal and/or Accum being constants.
5132 // If either one is, then we can check that now and escape if either P2
5133 // or P3 is false.
5135 // Construct the extended SCEV: (Ext ix (Trunc iy (Expr) to ix) to iy)
5136 // for each of StartVal and Accum
5137 auto getExtendedExpr = [&](const SCEV *Expr,
5138 bool CreateSignExtend) -> const SCEV * {
5139 assert(isLoopInvariant(Expr, L) && "Expr is expected to be invariant");
5140 const SCEV *TruncatedExpr = getTruncateExpr(Expr, TruncTy);
5141 const SCEV *ExtendedExpr =
5142 CreateSignExtend ? getSignExtendExpr(TruncatedExpr, Expr->getType())
5143 : getZeroExtendExpr(TruncatedExpr, Expr->getType());
5144 return ExtendedExpr;
5147 // Given:
5148 // ExtendedExpr = (Ext ix (Trunc iy (Expr) to ix) to iy)
5149 // = getExtendedExpr(Expr)
5150 // Determine whether the predicate P: Expr == ExtendedExpr
5151 // is known to be false at compile time
5152 auto PredIsKnownFalse = [&](const SCEV *Expr,
5153 const SCEV *ExtendedExpr) -> bool {
5154 return Expr != ExtendedExpr &&
5155 isKnownPredicate(ICmpInst::ICMP_NE, Expr, ExtendedExpr);
5158 const SCEV *StartExtended = getExtendedExpr(StartVal, Signed);
5159 if (PredIsKnownFalse(StartVal, StartExtended)) {
5160 LLVM_DEBUG(dbgs() << "P2 is compile-time false\n";);
5161 return None;
5164 // The Step is always Signed (because the overflow checks are either
5165 // NSSW or NUSW)
5166 const SCEV *AccumExtended = getExtendedExpr(Accum, /*CreateSignExtend=*/true);
5167 if (PredIsKnownFalse(Accum, AccumExtended)) {
5168 LLVM_DEBUG(dbgs() << "P3 is compile-time false\n";);
5169 return None;
5172 auto AppendPredicate = [&](const SCEV *Expr,
5173 const SCEV *ExtendedExpr) -> void {
5174 if (Expr != ExtendedExpr &&
5175 !isKnownPredicate(ICmpInst::ICMP_EQ, Expr, ExtendedExpr)) {
5176 const SCEVPredicate *Pred = getEqualPredicate(Expr, ExtendedExpr);
5177 LLVM_DEBUG(dbgs() << "Added Predicate: " << *Pred);
5178 Predicates.push_back(Pred);
5182 AppendPredicate(StartVal, StartExtended);
5183 AppendPredicate(Accum, AccumExtended);
5185 // *** Part3: Predicates are ready. Now go ahead and create the new addrec in
5186 // which the casts had been folded away. The caller can rewrite SymbolicPHI
5187 // into NewAR if it will also add the runtime overflow checks specified in
5188 // Predicates.
5189 auto *NewAR = getAddRecExpr(StartVal, Accum, L, SCEV::FlagAnyWrap);
5191 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> PredRewrite =
5192 std::make_pair(NewAR, Predicates);
5193 // Remember the result of the analysis for this SCEV at this location.
5194 PredicatedSCEVRewrites[{SymbolicPHI, L}] = PredRewrite;
5195 return PredRewrite;
5198 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
5199 ScalarEvolution::createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI) {
5200 auto *PN = cast<PHINode>(SymbolicPHI->getValue());
5201 const Loop *L = isIntegerLoopHeaderPHI(PN, LI);
5202 if (!L)
5203 return None;
5205 // Check to see if we already analyzed this PHI.
5206 auto I = PredicatedSCEVRewrites.find({SymbolicPHI, L});
5207 if (I != PredicatedSCEVRewrites.end()) {
5208 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> Rewrite =
5209 I->second;
5210 // Analysis was done before and failed to create an AddRec:
5211 if (Rewrite.first == SymbolicPHI)
5212 return None;
5213 // Analysis was done before and succeeded in creating an AddRec under
5214 // a predicate:
5215 assert(isa<SCEVAddRecExpr>(Rewrite.first) && "Expected an AddRec");
5216 assert(!(Rewrite.second).empty() && "Expected to find Predicates");
5217 return Rewrite;
5220 Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
5221 Rewrite = createAddRecFromPHIWithCastsImpl(SymbolicPHI);
5223 // Record in the cache that the analysis failed
5224 if (!Rewrite) {
5225 SmallVector<const SCEVPredicate *, 3> Predicates;
5226 PredicatedSCEVRewrites[{SymbolicPHI, L}] = {SymbolicPHI, Predicates};
5227 return None;
5230 return Rewrite;
5233 // FIXME: This utility is currently required because the Rewriter currently
5234 // does not rewrite this expression:
5235 // {0, +, (sext ix (trunc iy to ix) to iy)}
5236 // into {0, +, %step},
5237 // even when the following Equal predicate exists:
5238 // "%step == (sext ix (trunc iy to ix) to iy)".
5239 bool PredicatedScalarEvolution::areAddRecsEqualWithPreds(
5240 const SCEVAddRecExpr *AR1, const SCEVAddRecExpr *AR2) const {
5241 if (AR1 == AR2)
5242 return true;
5244 auto areExprsEqual = [&](const SCEV *Expr1, const SCEV *Expr2) -> bool {
5245 if (Expr1 != Expr2 && !Preds.implies(SE.getEqualPredicate(Expr1, Expr2)) &&
5246 !Preds.implies(SE.getEqualPredicate(Expr2, Expr1)))
5247 return false;
5248 return true;
5251 if (!areExprsEqual(AR1->getStart(), AR2->getStart()) ||
5252 !areExprsEqual(AR1->getStepRecurrence(SE), AR2->getStepRecurrence(SE)))
5253 return false;
5254 return true;
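// Illustrative example: AR1 == {0,+,%step} and
// AR2 == {0,+,(sext i32 (trunc i64 %step to i32) to i64)} are treated as
// equal here once the predicate set contains
// "%step == (sext i32 (trunc i64 %step to i32) to i64)".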
5257 /// A helper function for createAddRecFromPHI to handle simple cases.
5259 /// This function tries to find an AddRec expression for the simplest (yet most
5260 /// common) cases: PN = PHI(Start, OP(Self, LoopInvariant)).
5261 /// If it fails, createAddRecFromPHI will use a more general, but slow,
5262 /// technique for finding the AddRec expression.
5263 const SCEV *ScalarEvolution::createSimpleAffineAddRec(PHINode *PN,
5264 Value *BEValueV,
5265 Value *StartValueV) {
5266 const Loop *L = LI.getLoopFor(PN->getParent());
5267 assert(L && L->getHeader() == PN->getParent());
5268 assert(BEValueV && StartValueV);
5270 auto BO = MatchBinaryOp(BEValueV, DT);
5271 if (!BO)
5272 return nullptr;
5274 if (BO->Opcode != Instruction::Add)
5275 return nullptr;
5277 const SCEV *Accum = nullptr;
5278 if (BO->LHS == PN && L->isLoopInvariant(BO->RHS))
5279 Accum = getSCEV(BO->RHS);
5280 else if (BO->RHS == PN && L->isLoopInvariant(BO->LHS))
5281 Accum = getSCEV(BO->LHS);
5283 if (!Accum)
5284 return nullptr;
5286 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
5287 if (BO->IsNUW)
5288 Flags = setFlags(Flags, SCEV::FlagNUW);
5289 if (BO->IsNSW)
5290 Flags = setFlags(Flags, SCEV::FlagNSW);
5292 const SCEV *StartVal = getSCEV(StartValueV);
5293 const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);
5295 ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
5297 // We can add Flags to the post-inc expression only if we
5298 // know that it is *undefined behavior* for BEValueV to
5299 // overflow.
5300 if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
5301 if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
5302 (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);
5304 return PHISCEV;
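// Illustrative example (hypothetical IR) of the simple case handled above:
//   %iv = phi i64 [ %start, %preheader ], [ %iv.next, %latch ]
//   %iv.next = add nuw nsw i64 %iv, %stride   ; %stride loop-invariant
// yields PHISCEV == {%start,+,%stride}<nuw><nsw> for the loop.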
5307 const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) {
5308 const Loop *L = LI.getLoopFor(PN->getParent());
5309 if (!L || L->getHeader() != PN->getParent())
5310 return nullptr;
5312 // The loop may have multiple entrances or multiple exits; we can analyze
5313 // this phi as an addrec if it has a unique entry value and a unique
5314 // backedge value.
5315 Value *BEValueV = nullptr, *StartValueV = nullptr;
5316 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
5317 Value *V = PN->getIncomingValue(i);
5318 if (L->contains(PN->getIncomingBlock(i))) {
5319 if (!BEValueV) {
5320 BEValueV = V;
5321 } else if (BEValueV != V) {
5322 BEValueV = nullptr;
5323 break;
5325 } else if (!StartValueV) {
5326 StartValueV = V;
5327 } else if (StartValueV != V) {
5328 StartValueV = nullptr;
5329 break;
5332 if (!BEValueV || !StartValueV)
5333 return nullptr;
5335 assert(ValueExprMap.find_as(PN) == ValueExprMap.end() &&
5336 "PHI node already processed?");
5338 // First, try to find an AddRec expression without creating a fictitious
5339 // symbolic value for PN.
5340 if (auto *S = createSimpleAffineAddRec(PN, BEValueV, StartValueV))
5341 return S;
5343 // Handle PHI node value symbolically.
5344 const SCEV *SymbolicName = getUnknown(PN);
5345 ValueExprMap.insert({SCEVCallbackVH(PN, this), SymbolicName});
5347 // Using this symbolic name for the PHI, analyze the value coming around
5348 // the back-edge.
5349 const SCEV *BEValue = getSCEV(BEValueV);
5351 // NOTE: If BEValue is loop invariant, we know that the PHI node just
5352 // has a special value for the first iteration of the loop.
5354 // If the value coming around the backedge is an add with the symbolic
5355 // value we just inserted, then we found a simple induction variable!
5356 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
5357 // If there is a single occurrence of the symbolic value, replace it
5358 // with a recurrence.
5359 unsigned FoundIndex = Add->getNumOperands();
5360 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
5361 if (Add->getOperand(i) == SymbolicName)
5362 if (FoundIndex == e) {
5363 FoundIndex = i;
5364 break;
5367 if (FoundIndex != Add->getNumOperands()) {
5368 // Create an add with everything but the specified operand.
5369 SmallVector<const SCEV *, 8> Ops;
5370 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
5371 if (i != FoundIndex)
5372 Ops.push_back(SCEVBackedgeConditionFolder::rewrite(Add->getOperand(i),
5373 L, *this));
5374 const SCEV *Accum = getAddExpr(Ops);
5376 // This is not a valid addrec if the step amount is varying each
5377 // loop iteration, but is not itself an addrec in this loop.
5378 if (isLoopInvariant(Accum, L) ||
5379 (isa<SCEVAddRecExpr>(Accum) &&
5380 cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
5381 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
5383 if (auto BO = MatchBinaryOp(BEValueV, DT)) {
5384 if (BO->Opcode == Instruction::Add && BO->LHS == PN) {
5385 if (BO->IsNUW)
5386 Flags = setFlags(Flags, SCEV::FlagNUW);
5387 if (BO->IsNSW)
5388 Flags = setFlags(Flags, SCEV::FlagNSW);
5390 } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(BEValueV)) {
5391 // If the increment is an inbounds GEP, then we know the address
5392 // space cannot be wrapped around. We cannot make any guarantee
5393 // about signed or unsigned overflow because pointers are
5394 // unsigned but we may have a negative index from the base
5395 // pointer. We can guarantee that no unsigned wrap occurs if the
5396 // indices form a positive value.
5397 if (GEP->isInBounds() && GEP->getOperand(0) == PN) {
5398 Flags = setFlags(Flags, SCEV::FlagNW);
5400 const SCEV *Ptr = getSCEV(GEP->getPointerOperand());
5401 if (isKnownPositive(getMinusSCEV(getSCEV(GEP), Ptr)))
5402 Flags = setFlags(Flags, SCEV::FlagNUW);
5405 // We cannot transfer nuw and nsw flags from subtraction
5406 // operations -- sub nuw X, Y is not the same as add nuw X, -Y
5407 // for instance.
5410 const SCEV *StartVal = getSCEV(StartValueV);
5411 const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);
5413 // Okay, for the entire analysis of this edge we assumed the PHI
5414 // to be symbolic. We now need to go back and purge all of the
5415 // entries for the scalars that use the symbolic expression.
5416 forgetSymbolicName(PN, SymbolicName);
5417 ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
5419 // We can add Flags to the post-inc expression only if we
5420 // know that it is *undefined behavior* for BEValueV to
5421 // overflow.
5422 if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
5423 if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
5424 (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);
5426 return PHISCEV;
5429 } else {
5430 // Otherwise, this could be a loop like this:
5431 // i = 0; for (j = 1; ..; ++j) { .... i = j; }
5432 // In this case, j = {1,+,1} and BEValue is j.
5433 // Because the other in-value of i (0) fits the evolution of BEValue,
5434 // i really is an addrec evolution.
5436 // We can generalize this by saying that i is the shifted value of BEValue
5437 // by one iteration:
5438 // PHI(f(0), f({1,+,1})) --> f({0,+,1})
5439 const SCEV *Shifted = SCEVShiftRewriter::rewrite(BEValue, L, *this);
5440 const SCEV *Start = SCEVInitRewriter::rewrite(Shifted, L, *this, false);
5441 if (Shifted != getCouldNotCompute() &&
5442 Start != getCouldNotCompute()) {
5443 const SCEV *StartVal = getSCEV(StartValueV);
5444 if (Start == StartVal) {
5445 // Okay, for the entire analysis of this edge we assumed the PHI
5446 // to be symbolic. We now need to go back and purge all of the
5447 // entries for the scalars that use the symbolic expression.
5448 forgetSymbolicName(PN, SymbolicName);
5449 ValueExprMap[SCEVCallbackVH(PN, this)] = Shifted;
5450 return Shifted;
5455 // Remove the temporary PHI node SCEV that has been inserted while intending
5456 // to create an AddRecExpr for this PHI node. We cannot keep this temporary,
5457 // as it would prevent later (possibly simpler) SCEV expressions from being
5458 // added to the ValueExprMap.
5459 eraseValueFromMap(PN);
5461 return nullptr;
5464 // Checks if the SCEV S is available at BB. S is considered available at BB
5465 // if S can be materialized at BB without introducing a fault.
5466 static bool IsAvailableOnEntry(const Loop *L, DominatorTree &DT, const SCEV *S,
5467 BasicBlock *BB) {
5468 struct CheckAvailable {
5469 bool TraversalDone = false;
5470 bool Available = true;
5472 const Loop *L = nullptr; // The loop BB is in (can be nullptr)
5473 BasicBlock *BB = nullptr;
5474 DominatorTree &DT;
5476 CheckAvailable(const Loop *L, BasicBlock *BB, DominatorTree &DT)
5477 : L(L), BB(BB), DT(DT) {}
5479 bool setUnavailable() {
5480 TraversalDone = true;
5481 Available = false;
5482 return false;
5485 bool follow(const SCEV *S) {
5486 switch (S->getSCEVType()) {
5487 case scConstant:
5488 case scPtrToInt:
5489 case scTruncate:
5490 case scZeroExtend:
5491 case scSignExtend:
5492 case scAddExpr:
5493 case scMulExpr:
5494 case scUMaxExpr:
5495 case scSMaxExpr:
5496 case scUMinExpr:
5497 case scSMinExpr:
5498 // These expressions are available if their operand(s) is/are.
5499 return true;
5501 case scAddRecExpr: {
5502 // We allow add recurrences on the loop that BB is in, or on some
5503 // outer loop. This guarantees availability because the value of the
5504 // add recurrence at BB is simply the "current" value of the induction
5505 // variable. We can relax this in the future; for instance an add
5506 // recurrence on a sibling dominating loop is also available at BB.
5507 const auto *ARLoop = cast<SCEVAddRecExpr>(S)->getLoop();
5508 if (L && (ARLoop == L || ARLoop->contains(L)))
5509 return true;
5511 return setUnavailable();
5514 case scUnknown: {
5515 // For SCEVUnknown, we check for simple dominance.
5516 const auto *SU = cast<SCEVUnknown>(S);
5517 Value *V = SU->getValue();
5519 if (isa<Argument>(V))
5520 return false;
5522 if (isa<Instruction>(V) && DT.dominates(cast<Instruction>(V), BB))
5523 return false;
5525 return setUnavailable();
5528 case scUDivExpr:
5529 case scCouldNotCompute:
5530 // We do not try to be smart about these at all.
5531 return setUnavailable();
5533 llvm_unreachable("Unknown SCEV kind!");
5536 bool isDone() { return TraversalDone; }
5539 CheckAvailable CA(L, BB, DT);
5540 SCEVTraversal<CheckAvailable> ST(CA);
5542 ST.visitAll(S);
5543 return CA.Available;
5546 // Try to match a control flow sequence that branches out at BI and merges back
5547 // at Merge into a "C ? LHS : RHS" select pattern. Return true on a successful
5548 // match.
5549 static bool BrPHIToSelect(DominatorTree &DT, BranchInst *BI, PHINode *Merge,
5550 Value *&C, Value *&LHS, Value *&RHS) {
5551 C = BI->getCondition();
5553 BasicBlockEdge LeftEdge(BI->getParent(), BI->getSuccessor(0));
5554 BasicBlockEdge RightEdge(BI->getParent(), BI->getSuccessor(1));
5556 if (!LeftEdge.isSingleEdge())
5557 return false;
5559 assert(RightEdge.isSingleEdge() && "Follows from LeftEdge.isSingleEdge()");
5561 Use &LeftUse = Merge->getOperandUse(0);
5562 Use &RightUse = Merge->getOperandUse(1);
5564 if (DT.dominates(LeftEdge, LeftUse) && DT.dominates(RightEdge, RightUse)) {
5565 LHS = LeftUse;
5566 RHS = RightUse;
5567 return true;
5570 if (DT.dominates(LeftEdge, RightUse) && DT.dominates(RightEdge, LeftUse)) {
5571 LHS = RightUse;
5572 RHS = LeftUse;
5573 return true;
5576 return false;
5579 const SCEV *ScalarEvolution::createNodeFromSelectLikePHI(PHINode *PN) {
5580 auto IsReachable =
5581 [&](BasicBlock *BB) { return DT.isReachableFromEntry(BB); };
5582 if (PN->getNumIncomingValues() == 2 && all_of(PN->blocks(), IsReachable)) {
5583 const Loop *L = LI.getLoopFor(PN->getParent());
5585 // We don't want to break LCSSA, even in a SCEV expression tree.
5586 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
5587 if (LI.getLoopFor(PN->getIncomingBlock(i)) != L)
5588 return nullptr;
5590 // Try to match
5592 // br %cond, label %left, label %right
5593 // left:
5594 // br label %merge
5595 // right:
5596 // br label %merge
5597 // merge:
5598 // V = phi [ %x, %left ], [ %y, %right ]
5600 // as "select %cond, %x, %y"
5602 BasicBlock *IDom = DT[PN->getParent()]->getIDom()->getBlock();
5603 assert(IDom && "At least the entry block should dominate PN");
5605 auto *BI = dyn_cast<BranchInst>(IDom->getTerminator());
5606 Value *Cond = nullptr, *LHS = nullptr, *RHS = nullptr;
5608 if (BI && BI->isConditional() &&
5609 BrPHIToSelect(DT, BI, PN, Cond, LHS, RHS) &&
5610 IsAvailableOnEntry(L, DT, getSCEV(LHS), PN->getParent()) &&
5611 IsAvailableOnEntry(L, DT, getSCEV(RHS), PN->getParent()))
5612 return createNodeForSelectOrPHI(PN, Cond, LHS, RHS);
5615 return nullptr;
5618 const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
5619 if (const SCEV *S = createAddRecFromPHI(PN))
5620 return S;
5622 if (const SCEV *S = createNodeFromSelectLikePHI(PN))
5623 return S;
5625 // If the PHI has a single incoming value, follow that value, unless the
5626 // PHI's incoming blocks are in a different loop, in which case doing so
5627 // risks breaking LCSSA form. Instcombine would normally zap these, but
5628 // it doesn't have DominatorTree information, so it may miss cases.
5629 if (Value *V = SimplifyInstruction(PN, {getDataLayout(), &TLI, &DT, &AC}))
5630 if (LI.replacementPreservesLCSSAForm(PN, V))
5631 return getSCEV(V);
5633 // If it's not a loop phi, we can't handle it yet.
5634 return getUnknown(PN);
5637 const SCEV *ScalarEvolution::createNodeForSelectOrPHI(Instruction *I,
5638 Value *Cond,
5639 Value *TrueVal,
5640 Value *FalseVal) {
5641 // Handle "constant" branch or select. This can occur for instance when a
5642 // loop pass transforms an inner loop and moves on to process the outer loop.
5643 if (auto *CI = dyn_cast<ConstantInt>(Cond))
5644 return getSCEV(CI->isOne() ? TrueVal : FalseVal);
5646 // Try to match some simple smax or umax patterns.
5647 auto *ICI = dyn_cast<ICmpInst>(Cond);
5648 if (!ICI)
5649 return getUnknown(I);
5651 Value *LHS = ICI->getOperand(0);
5652 Value *RHS = ICI->getOperand(1);
5654 switch (ICI->getPredicate()) {
5655 case ICmpInst::ICMP_SLT:
5656 case ICmpInst::ICMP_SLE:
5657 case ICmpInst::ICMP_ULT:
5658 case ICmpInst::ICMP_ULE:
5659 std::swap(LHS, RHS);
5660 LLVM_FALLTHROUGH;
5661 case ICmpInst::ICMP_SGT:
5662 case ICmpInst::ICMP_SGE:
5663 case ICmpInst::ICMP_UGT:
5664 case ICmpInst::ICMP_UGE:
5665 // a > b ? a+x : b+x -> max(a, b)+x
5666 // a > b ? b+x : a+x -> min(a, b)+x
5667 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType())) {
5668 bool Signed = ICI->isSigned();
5669 const SCEV *LA = getSCEV(TrueVal);
5670 const SCEV *RA = getSCEV(FalseVal);
5671 const SCEV *LS = getSCEV(LHS);
5672 const SCEV *RS = getSCEV(RHS);
5673 if (LA->getType()->isPointerTy()) {
5674 // FIXME: Handle cases where LS/RS are pointers not equal to LA/RA.
5675 // Need to make sure we can't produce weird expressions involving
5676 // negated pointers.
5677 if (LA == LS && RA == RS)
5678 return Signed ? getSMaxExpr(LS, RS) : getUMaxExpr(LS, RS);
5679 if (LA == RS && RA == LS)
5680 return Signed ? getSMinExpr(LS, RS) : getUMinExpr(LS, RS);
5682 auto CoerceOperand = [&](const SCEV *Op) -> const SCEV * {
5683 if (Op->getType()->isPointerTy()) {
5684 Op = getLosslessPtrToIntExpr(Op);
5685 if (isa<SCEVCouldNotCompute>(Op))
5686 return Op;
5688 if (Signed)
5689 Op = getNoopOrSignExtend(Op, I->getType());
5690 else
5691 Op = getNoopOrZeroExtend(Op, I->getType());
5692 return Op;
5694 LS = CoerceOperand(LS);
5695 RS = CoerceOperand(RS);
5696 if (isa<SCEVCouldNotCompute>(LS) || isa<SCEVCouldNotCompute>(RS))
5697 break;
5698 const SCEV *LDiff = getMinusSCEV(LA, LS);
5699 const SCEV *RDiff = getMinusSCEV(RA, RS);
5700 if (LDiff == RDiff)
5701 return getAddExpr(Signed ? getSMaxExpr(LS, RS) : getUMaxExpr(LS, RS),
5702 LDiff);
5703 LDiff = getMinusSCEV(LA, RS);
5704 RDiff = getMinusSCEV(RA, LS);
5705 if (LDiff == RDiff)
5706 return getAddExpr(Signed ? getSMinExpr(LS, RS) : getUMinExpr(LS, RS),
5707 LDiff);
5709 break;
5710 case ICmpInst::ICMP_NE:
5711 // n != 0 ? n+x : 1+x -> umax(n, 1)+x
5712 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) &&
5713 isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) {
5714 const SCEV *One = getOne(I->getType());
5715 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType());
5716 const SCEV *LA = getSCEV(TrueVal);
5717 const SCEV *RA = getSCEV(FalseVal);
5718 const SCEV *LDiff = getMinusSCEV(LA, LS);
5719 const SCEV *RDiff = getMinusSCEV(RA, One);
5720 if (LDiff == RDiff)
5721 return getAddExpr(getUMaxExpr(One, LS), LDiff);
5723 break;
5724 case ICmpInst::ICMP_EQ:
5725 // n == 0 ? 1+x : n+x -> umax(n, 1)+x
5726 if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) &&
5727 isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) {
5728 const SCEV *One = getOne(I->getType());
5729 const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType());
5730 const SCEV *LA = getSCEV(TrueVal);
5731 const SCEV *RA = getSCEV(FalseVal);
5732 const SCEV *LDiff = getMinusSCEV(LA, One);
5733 const SCEV *RDiff = getMinusSCEV(RA, LS);
5734 if (LDiff == RDiff)
5735 return getAddExpr(getUMaxExpr(One, LS), LDiff);
5737 break;
5738 default:
5739 break;
5742 return getUnknown(I);
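// Illustrative examples of the matches above:
//   select (icmp sgt i32 %a, %b), i32 %a, i32 %b   --> (%a smax %b)
//   select (icmp ne i32 %n, 0), i32 %n, i32 1      --> (1 umax %n)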
5745 /// Expand GEP instructions into add and multiply operations. This allows them
5746 /// to be analyzed by regular SCEV code.
5747 const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
5748 // Don't attempt to analyze GEPs over unsized objects.
5749 if (!GEP->getSourceElementType()->isSized())
5750 return getUnknown(GEP);
5752 SmallVector<const SCEV *, 4> IndexExprs;
5753 for (Value *Index : GEP->indices())
5754 IndexExprs.push_back(getSCEV(Index));
5755 return getGEPExpr(GEP, IndexExprs);
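// Illustrative example: on a target with 64-bit pointers,
//   getelementptr inbounds [10 x i32], [10 x i32]* %p, i64 %i, i64 %j
// expands to the SCEV ((40 * %i) + (4 * %j) + %p).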
5758 uint32_t ScalarEvolution::GetMinTrailingZerosImpl(const SCEV *S) {
5759 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
5760 return C->getAPInt().countTrailingZeros();
5762 if (const SCEVPtrToIntExpr *I = dyn_cast<SCEVPtrToIntExpr>(S))
5763 return GetMinTrailingZeros(I->getOperand());
5765 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S))
5766 return std::min(GetMinTrailingZeros(T->getOperand()),
5767 (uint32_t)getTypeSizeInBits(T->getType()));
5769 if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) {
5770 uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
5771 return OpRes == getTypeSizeInBits(E->getOperand()->getType())
5772 ? getTypeSizeInBits(E->getType())
5773 : OpRes;
5776 if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) {
5777 uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
5778 return OpRes == getTypeSizeInBits(E->getOperand()->getType())
5779 ? getTypeSizeInBits(E->getType())
5780 : OpRes;
5783 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) {
5784 // The result is the min of all operands' results.
5785 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
5786 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
5787 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
5788 return MinOpRes;
5791 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
5792 // The result is the sum of all operands' results.
5793 uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0));
5794 uint32_t BitWidth = getTypeSizeInBits(M->getType());
5795 for (unsigned i = 1, e = M->getNumOperands();
5796 SumOpRes != BitWidth && i != e; ++i)
5797 SumOpRes =
5798 std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)), BitWidth);
5799 return SumOpRes;
5802 if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
5803 // The result is the min of all operands' results.
5804 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
5805 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
5806 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
5807 return MinOpRes;
5810 if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) {
5811 // The result is the min of all operands' results.
5812 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
5813 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
5814 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
5815 return MinOpRes;
5818 if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) {
5819 // The result is the min of all operands' results.
5820 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
5821 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
5822 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
5823 return MinOpRes;
5826 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
5827 // For a SCEVUnknown, ask ValueTracking.
5828 KnownBits Known = computeKnownBits(U->getValue(), getDataLayout(), 0, &AC, nullptr, &DT);
5829 return Known.countMinTrailingZeros();
5832 // SCEVUDivExpr
5833 return 0;
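// Illustrative example: GetMinTrailingZeros((8 * %x) + 16) computes
// min(3 + TZ(%x), 4): the mul sums its operands' counts (the constant 8
// contributes 3), and the add takes the minimum across its operands.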
5836 uint32_t ScalarEvolution::GetMinTrailingZeros(const SCEV *S) {
5837 auto I = MinTrailingZerosCache.find(S);
5838 if (I != MinTrailingZerosCache.end())
5839 return I->second;
5841 uint32_t Result = GetMinTrailingZerosImpl(S);
5842 auto InsertPair = MinTrailingZerosCache.insert({S, Result});
5843 assert(InsertPair.second && "Should insert a new key");
5844 return InsertPair.first->second;
5847 /// Helper method to assign a range to V from metadata present in the IR.
5848 static Optional<ConstantRange> GetRangeFromMetadata(Value *V) {
5849 if (Instruction *I = dyn_cast<Instruction>(V))
5850 if (MDNode *MD = I->getMetadata(LLVMContext::MD_range))
5851 return getConstantRangeFromMetadata(*MD);
5853 return None;
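// Illustrative example:
//   %v = load i32, i32* %p, !range !0
//   !0 = !{i32 0, i32 100}
// yields the half-open constant range [0, 100).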
5856 void ScalarEvolution::setNoWrapFlags(SCEVAddRecExpr *AddRec,
5857 SCEV::NoWrapFlags Flags) {
5858 if (AddRec->getNoWrapFlags(Flags) != Flags) {
5859 AddRec->setNoWrapFlags(Flags);
5860 UnsignedRanges.erase(AddRec);
5861 SignedRanges.erase(AddRec);
5865 ConstantRange ScalarEvolution::
5866 getRangeForUnknownRecurrence(const SCEVUnknown *U) {
5867 const DataLayout &DL = getDataLayout();
5869 unsigned BitWidth = getTypeSizeInBits(U->getType());
5870 const ConstantRange FullSet(BitWidth, /*isFullSet=*/true);
5872 // Match a simple recurrence of the form: <start, ShiftOp, Step>, and then
5873 // use information about the trip count to improve our available range. Note
5874 // that the trip count independent cases are already handled by known bits.
5875 // WARNING: The definition of recurrence used here is subtly different from
5876 // the one used by AddRec (and thus most of this file). Step is allowed to
5877 // be arbitrarily loop varying here, where AddRec allows only loop invariant
5878 // and other addrecs in the same loop (for non-affine addrecs). The code
5879 // below intentionally handles the case where step is not loop invariant.
5880 auto *P = dyn_cast<PHINode>(U->getValue());
5881 if (!P)
5882 return FullSet;
5884 // Make sure that no Phi input comes from an unreachable block. Otherwise,
5885 // even the values that are not available in these blocks may come from them,
5886 // and this leads to a false-positive recurrence test.
5887 for (auto *Pred : predecessors(P->getParent()))
5888 if (!DT.isReachableFromEntry(Pred))
5889 return FullSet;
5891 BinaryOperator *BO;
5892 Value *Start, *Step;
5893 if (!matchSimpleRecurrence(P, BO, Start, Step))
5894 return FullSet;
5896 // If we found a recurrence in reachable code, we must be in a loop. Note
5897 // that BO might be in some subloop of L, and that's completely okay.
5898 auto *L = LI.getLoopFor(P->getParent());
5899 assert(L && L->getHeader() == P->getParent());
5900 if (!L->contains(BO->getParent()))
5901 // NOTE: This bailout should be an assert instead. However, asserting
5902 // the condition here exposes a case where LoopFusion is querying SCEV
5903 // with malformed loop information in the midst of the transform.
5904 // There doesn't appear to be an obvious fix, so for the moment bail out
5905 // until the caller issue can be fixed. PR49566 tracks the bug.
5906 return FullSet;
5908 // TODO: Extend to other opcodes, such as mul and div
5909 switch (BO->getOpcode()) {
5910 default:
5911 return FullSet;
5912 case Instruction::AShr:
5913 case Instruction::LShr:
5914 case Instruction::Shl:
5915 break;
5918 if (BO->getOperand(0) != P)
5919 // TODO: Handle the power function forms some day.
5920 return FullSet;
5922 unsigned TC = getSmallConstantMaxTripCount(L);
5923 if (!TC || TC >= BitWidth)
5924 return FullSet;
5926 auto KnownStart = computeKnownBits(Start, DL, 0, &AC, nullptr, &DT);
5927 auto KnownStep = computeKnownBits(Step, DL, 0, &AC, nullptr, &DT);
5928 assert(KnownStart.getBitWidth() == BitWidth &&
5929 KnownStep.getBitWidth() == BitWidth);
5931 // Compute total shift amount, being careful of overflow and bitwidths.
5932 auto MaxShiftAmt = KnownStep.getMaxValue();
5933 APInt TCAP(BitWidth, TC-1);
5934 bool Overflow = false;
5935 auto TotalShift = MaxShiftAmt.umul_ov(TCAP, Overflow);
5936 if (Overflow)
5937 return FullSet;
5939 switch (BO->getOpcode()) {
5940 default:
5941 llvm_unreachable("filtered out above");
5942 case Instruction::AShr: {
5943 // For each ashr, three cases:
5944 // shift = 0 => unchanged value
5945 // saturation => 0 or -1
5946 // other => a value closer to zero (of the same sign)
5947 // Thus, the end value is closer to zero than the start.
5948 auto KnownEnd = KnownBits::ashr(KnownStart,
5949 KnownBits::makeConstant(TotalShift));
5950 if (KnownStart.isNonNegative())
5951 // Analogous to lshr (simply not yet canonicalized)
5952 return ConstantRange::getNonEmpty(KnownEnd.getMinValue(),
5953 KnownStart.getMaxValue() + 1);
5954 if (KnownStart.isNegative())
5955 // End >=u Start && End <=s Start
5956 return ConstantRange::getNonEmpty(KnownStart.getMinValue(),
5957 KnownEnd.getMaxValue() + 1);
5958 break;
5960 case Instruction::LShr: {
5961 // For each lshr, three cases:
5962 // shift = 0 => unchanged value
5963 // saturation => 0
5964 // other => a smaller positive number
5965 // Thus, the low end of the unsigned range is the last value produced.
5966 auto KnownEnd = KnownBits::lshr(KnownStart,
5967 KnownBits::makeConstant(TotalShift));
5968 return ConstantRange::getNonEmpty(KnownEnd.getMinValue(),
5969 KnownStart.getMaxValue() + 1);
5971 case Instruction::Shl: {
5972 // Iff no bits are shifted out, value increases on every shift.
5973 auto KnownEnd = KnownBits::shl(KnownStart,
5974 KnownBits::makeConstant(TotalShift));
5975 if (TotalShift.ult(KnownStart.countMinLeadingZeros()))
5976 return ConstantRange(KnownStart.getMinValue(),
5977 KnownEnd.getMaxValue() + 1);
5978 break;
5981 return FullSet;
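// Illustrative example (hypothetical IR) for the lshr case above:
//   %p = phi i8 [ %start, %ph ], [ %p.next, %latch ]
//   %p.next = lshr i8 %p, 1
// with a constant max trip count of 3 and the sign bit of %start known
// zero, the total shift is at most 2 and the returned range is
// [KnownEnd.min, 128) rather than the full set.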
5984 /// Determine the range for a particular SCEV. If SignHint is
5985 /// HINT_RANGE_UNSIGNED (resp. HINT_RANGE_SIGNED) then getRange prefers ranges
5986 /// with a "cleaner" unsigned (resp. signed) representation.
5987 const ConstantRange &
5988 ScalarEvolution::getRangeRef(const SCEV *S,
5989 ScalarEvolution::RangeSignHint SignHint) {
5990 DenseMap<const SCEV *, ConstantRange> &Cache =
5991 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges
5992 : SignedRanges;
5993 ConstantRange::PreferredRangeType RangeType =
5994 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED
5995 ? ConstantRange::Unsigned : ConstantRange::Signed;
5997 // See if we've computed this range already.
5998 DenseMap<const SCEV *, ConstantRange>::iterator I = Cache.find(S);
5999 if (I != Cache.end())
6000 return I->second;
6002 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
6003 return setRange(C, SignHint, ConstantRange(C->getAPInt()));
6005 unsigned BitWidth = getTypeSizeInBits(S->getType());
6006 ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true);
6007 using OBO = OverflowingBinaryOperator;
6009 // If the value has known zeros, the maximum value will have those known zeros
6010 // as well.
6011 uint32_t TZ = GetMinTrailingZeros(S);
6012 if (TZ != 0) {
6013 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED)
6014 ConservativeResult =
6015 ConstantRange(APInt::getMinValue(BitWidth),
6016 APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1);
6017 else
6018 ConservativeResult = ConstantRange(
6019 APInt::getSignedMinValue(BitWidth),
6020 APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1);
6023 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
6024 ConstantRange X = getRangeRef(Add->getOperand(0), SignHint);
6025 unsigned WrapType = OBO::AnyWrap;
6026 if (Add->hasNoSignedWrap())
6027 WrapType |= OBO::NoSignedWrap;
6028 if (Add->hasNoUnsignedWrap())
6029 WrapType |= OBO::NoUnsignedWrap;
6030 for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
6031 X = X.addWithNoWrap(getRangeRef(Add->getOperand(i), SignHint),
6032 WrapType, RangeType);
6033 return setRange(Add, SignHint,
6034 ConservativeResult.intersectWith(X, RangeType));
6037 if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
6038 ConstantRange X = getRangeRef(Mul->getOperand(0), SignHint);
6039 for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
6040 X = X.multiply(getRangeRef(Mul->getOperand(i), SignHint));
6041 return setRange(Mul, SignHint,
6042 ConservativeResult.intersectWith(X, RangeType));
6045 if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
6046 ConstantRange X = getRangeRef(SMax->getOperand(0), SignHint);
6047 for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
6048 X = X.smax(getRangeRef(SMax->getOperand(i), SignHint));
6049 return setRange(SMax, SignHint,
6050 ConservativeResult.intersectWith(X, RangeType));
6053 if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
6054 ConstantRange X = getRangeRef(UMax->getOperand(0), SignHint);
6055 for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
6056 X = X.umax(getRangeRef(UMax->getOperand(i), SignHint));
6057 return setRange(UMax, SignHint,
6058 ConservativeResult.intersectWith(X, RangeType));
6061 if (const SCEVSMinExpr *SMin = dyn_cast<SCEVSMinExpr>(S)) {
6062 ConstantRange X = getRangeRef(SMin->getOperand(0), SignHint);
6063 for (unsigned i = 1, e = SMin->getNumOperands(); i != e; ++i)
6064 X = X.smin(getRangeRef(SMin->getOperand(i), SignHint));
6065 return setRange(SMin, SignHint,
6066 ConservativeResult.intersectWith(X, RangeType));
6069 if (const SCEVUMinExpr *UMin = dyn_cast<SCEVUMinExpr>(S)) {
6070 ConstantRange X = getRangeRef(UMin->getOperand(0), SignHint);
6071 for (unsigned i = 1, e = UMin->getNumOperands(); i != e; ++i)
6072 X = X.umin(getRangeRef(UMin->getOperand(i), SignHint));
6073 return setRange(UMin, SignHint,
6074 ConservativeResult.intersectWith(X, RangeType));
6077 if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
6078 ConstantRange X = getRangeRef(UDiv->getLHS(), SignHint);
6079 ConstantRange Y = getRangeRef(UDiv->getRHS(), SignHint);
6080 return setRange(UDiv, SignHint,
6081 ConservativeResult.intersectWith(X.udiv(Y), RangeType));
6084 if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
6085 ConstantRange X = getRangeRef(ZExt->getOperand(), SignHint);
6086 return setRange(ZExt, SignHint,
6087 ConservativeResult.intersectWith(X.zeroExtend(BitWidth),
6088 RangeType));
6091 if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
6092 ConstantRange X = getRangeRef(SExt->getOperand(), SignHint);
6093 return setRange(SExt, SignHint,
6094 ConservativeResult.intersectWith(X.signExtend(BitWidth),
6095 RangeType));
6098 if (const SCEVPtrToIntExpr *PtrToInt = dyn_cast<SCEVPtrToIntExpr>(S)) {
6099 ConstantRange X = getRangeRef(PtrToInt->getOperand(), SignHint);
6100 return setRange(PtrToInt, SignHint, X);
6103 if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
6104 ConstantRange X = getRangeRef(Trunc->getOperand(), SignHint);
6105 return setRange(Trunc, SignHint,
6106 ConservativeResult.intersectWith(X.truncate(BitWidth),
6107 RangeType));
6110 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
6111 // If there's no unsigned wrap, the value will never be less than its
6112 // initial value.
6113 if (AddRec->hasNoUnsignedWrap()) {
6114 APInt UnsignedMinValue = getUnsignedRangeMin(AddRec->getStart());
6115 if (!UnsignedMinValue.isNullValue())
6116 ConservativeResult = ConservativeResult.intersectWith(
6117 ConstantRange(UnsignedMinValue, APInt(BitWidth, 0)), RangeType);
6120 // If there's no signed wrap, and all the operands except initial value have
6121 // the same sign or zero, the value won't ever be:
6122 // 1: smaller than initial value if operands are non-negative,
6123 // 2: bigger than initial value if operands are non-positive.
6124 // In both cases, the value cannot cross the signed min/max boundary.
6125 if (AddRec->hasNoSignedWrap()) {
6126 bool AllNonNeg = true;
6127 bool AllNonPos = true;
6128 for (unsigned i = 1, e = AddRec->getNumOperands(); i != e; ++i) {
6129 if (!isKnownNonNegative(AddRec->getOperand(i)))
6130 AllNonNeg = false;
6131 if (!isKnownNonPositive(AddRec->getOperand(i)))
6132 AllNonPos = false;
6134 if (AllNonNeg)
6135 ConservativeResult = ConservativeResult.intersectWith(
6136 ConstantRange::getNonEmpty(getSignedRangeMin(AddRec->getStart()),
6137 APInt::getSignedMinValue(BitWidth)),
6138 RangeType);
6139 else if (AllNonPos)
6140 ConservativeResult = ConservativeResult.intersectWith(
6141 ConstantRange::getNonEmpty(
6142 APInt::getSignedMinValue(BitWidth),
6143 getSignedRangeMax(AddRec->getStart()) + 1),
6144 RangeType);
6147 // TODO: non-affine addrec
6148 if (AddRec->isAffine()) {
6149 const SCEV *MaxBECount = getConstantMaxBackedgeTakenCount(AddRec->getLoop());
6150 if (!isa<SCEVCouldNotCompute>(MaxBECount) &&
6151 getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) {
6152 auto RangeFromAffine = getRangeForAffineAR(
6153 AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount,
6154 BitWidth);
6155 ConservativeResult =
6156 ConservativeResult.intersectWith(RangeFromAffine, RangeType);
6158 auto RangeFromFactoring = getRangeViaFactoring(
6159 AddRec->getStart(), AddRec->getStepRecurrence(*this), MaxBECount,
6160 BitWidth);
6161 ConservativeResult =
6162 ConservativeResult.intersectWith(RangeFromFactoring, RangeType);
6165 // Now try symbolic BE count and more powerful methods.
6166 if (UseExpensiveRangeSharpening) {
6167 const SCEV *SymbolicMaxBECount =
6168 getSymbolicMaxBackedgeTakenCount(AddRec->getLoop());
6169 if (!isa<SCEVCouldNotCompute>(SymbolicMaxBECount) &&
6170 getTypeSizeInBits(MaxBECount->getType()) <= BitWidth &&
6171 AddRec->hasNoSelfWrap()) {
6172 auto RangeFromAffineNew = getRangeForAffineNoSelfWrappingAR(
6173 AddRec, SymbolicMaxBECount, BitWidth, SignHint);
6174 ConservativeResult =
6175 ConservativeResult.intersectWith(RangeFromAffineNew, RangeType);
6180 return setRange(AddRec, SignHint, std::move(ConservativeResult));
6183 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
6185 // Check if the IR explicitly contains !range metadata.
6186 Optional<ConstantRange> MDRange = GetRangeFromMetadata(U->getValue());
6187 if (MDRange.hasValue())
6188 ConservativeResult = ConservativeResult.intersectWith(MDRange.getValue(),
6189 RangeType);
6191 // Use facts about recurrences in the underlying IR. Note that add
6192 // recurrences are AddRecExprs and thus don't hit this path. This
6193 // primarily handles shift recurrences.
6194 auto CR = getRangeForUnknownRecurrence(U);
6195 ConservativeResult = ConservativeResult.intersectWith(CR);
6197 // See if ValueTracking can give us a useful range.
6198 const DataLayout &DL = getDataLayout();
6199 KnownBits Known = computeKnownBits(U->getValue(), DL, 0, &AC, nullptr, &DT);
6200 if (Known.getBitWidth() != BitWidth)
6201 Known = Known.zextOrTrunc(BitWidth);
6203 // ValueTracking may be able to compute a tighter result for the number of
6204 // sign bits than for the value of those sign bits.
6205 unsigned NS = ComputeNumSignBits(U->getValue(), DL, 0, &AC, nullptr, &DT);
6206 if (U->getType()->isPointerTy()) {
6207 // If the pointer size is larger than the index type size, this can cause
6208 // NS to be larger than BitWidth. So compensate for this.
6209 unsigned ptrSize = DL.getPointerTypeSizeInBits(U->getType());
6210 int ptrIdxDiff = ptrSize - BitWidth;
6211 if (ptrIdxDiff > 0 && ptrSize > BitWidth && NS > (unsigned)ptrIdxDiff)
6212 NS -= ptrIdxDiff;
6215 if (NS > 1) {
6216 // If we know any of the sign bits, we know all of the sign bits.
6217 if (!Known.Zero.getHiBits(NS).isNullValue())
6218 Known.Zero.setHighBits(NS);
6219 if (!Known.One.getHiBits(NS).isNullValue())
6220 Known.One.setHighBits(NS);
6223 if (Known.getMinValue() != Known.getMaxValue() + 1)
6224 ConservativeResult = ConservativeResult.intersectWith(
6225 ConstantRange(Known.getMinValue(), Known.getMaxValue() + 1),
6226 RangeType);
6227 if (NS > 1)
6228 ConservativeResult = ConservativeResult.intersectWith(
6229 ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
6230 APInt::getSignedMaxValue(BitWidth).ashr(NS - 1) + 1),
6231 RangeType);
6233 // The range of a Phi is a subset of the union of the ranges of its inputs.
6234 if (const PHINode *Phi = dyn_cast<PHINode>(U->getValue())) {
6235 // Make sure that we do not loop forever on cyclic Phis.
6236 if (PendingPhiRanges.insert(Phi).second) {
6237 ConstantRange RangeFromOps(BitWidth, /*isFullSet=*/false);
6238 for (auto &Op : Phi->operands()) {
6239 auto OpRange = getRangeRef(getSCEV(Op), SignHint);
6240 RangeFromOps = RangeFromOps.unionWith(OpRange);
6241 // No point in continuing if we already have a full set.
6242 if (RangeFromOps.isFullSet())
6243 break;
6245 ConservativeResult =
6246 ConservativeResult.intersectWith(RangeFromOps, RangeType);
6247 bool Erased = PendingPhiRanges.erase(Phi);
6248 assert(Erased && "Failed to erase Phi properly?");
6249 (void) Erased;
6253 return setRange(U, SignHint, std::move(ConservativeResult));
6256 return setRange(S, SignHint, std::move(ConservativeResult));
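// The RangeType passed to intersectWith above matters when the exact
// intersection of two ranges is not itself representable as a single
// ConstantRange. A small sketch with assumed i8 values (guarded out of the
// build):
#if 0
  ConstantRange A(APInt(8, 200), APInt(8, 100)); // wrapped: [200,255]+[0,99]
  ConstantRange B(APInt(8, 50), APInt(8, 250));  // [50, 250)
  // Both calls return sound over-approximations of A & B; the preferred
  // range type decides which candidate answer is kept when they differ.
  ConstantRange U = A.intersectWith(B, ConstantRange::Unsigned);
  ConstantRange S = A.intersectWith(B, ConstantRange::Signed);
#endif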
6259 // Given a StartRange, Step and MaxBECount for an expression compute a range of
6260 // values that the expression can take. Initially, the expression has a value
6261 // from StartRange and then is changed by Step up to MaxBECount times. Signed
6262 // argument defines if we treat Step as signed or unsigned.
6263 static ConstantRange getRangeForAffineARHelper(APInt Step,
6264 const ConstantRange &StartRange,
6265 const APInt &MaxBECount,
6266 unsigned BitWidth, bool Signed) {
6267 // If either Step or MaxBECount is 0, then the expression won't change, and we
6268 // just need to return the initial range.
6269 if (Step == 0 || MaxBECount == 0)
6270 return StartRange;
6272 // If we don't know anything about the initial value (i.e. StartRange is
6273 // FullRange), then we don't know anything about the final range either.
6274 // Return FullRange.
6275 if (StartRange.isFullSet())
6276 return ConstantRange::getFull(BitWidth);
6278 // If Step is signed and negative, then we use its absolute value, but we also
6279 // note that we're moving in the opposite direction.
6280 bool Descending = Signed && Step.isNegative();
6282 if (Signed)
6283 // This is correct even for INT_SMIN. Let's look at i8 to illustrate this:
6284 // abs(INT_SMIN) = abs(-128) = abs(0x80) = -0x80 = 0x80 = 128.
6285 // These equations hold true due to the well-defined wrap-around behavior of
6286 // APInt.
6287 Step = Step.abs();
6289 // Check if Offset is more than full span of BitWidth. If it is, the
6290 // expression is guaranteed to overflow.
6291 if (APInt::getMaxValue(StartRange.getBitWidth()).udiv(Step).ult(MaxBECount))
6292 return ConstantRange::getFull(BitWidth);
6294 // Offset is by how much the expression can change. Checks above guarantee no
6295 // overflow here.
6296 APInt Offset = Step * MaxBECount;
6298 // Minimum value of the final range will match the minimal value of StartRange
6299 // if the expression is increasing and will be decreased by Offset otherwise.
6300 // Maximum value of the final range will match the maximal value of StartRange
6301 // if the expression is decreasing and will be increased by Offset otherwise.
6302 APInt StartLower = StartRange.getLower();
6303 APInt StartUpper = StartRange.getUpper() - 1;
6304 APInt MovedBoundary = Descending ? (StartLower - std::move(Offset))
6305 : (StartUpper + std::move(Offset));
6307 // It's possible that the new minimum/maximum value will fall into the initial
6308 // range (due to wrap around). This means that the expression can take any
6309 // value in this bitwidth, and we have to return full range.
6310 if (StartRange.contains(MovedBoundary))
6311 return ConstantRange::getFull(BitWidth);
6313 APInt NewLower =
6314 Descending ? std::move(MovedBoundary) : std::move(StartLower);
6315 APInt NewUpper =
6316 Descending ? std::move(StartUpper) : std::move(MovedBoundary);
6317 NewUpper += 1;
6319 // No overflow detected; return the computed [NewLower, NewUpper) range.
6320 return ConstantRange::getNonEmpty(std::move(NewLower), std::move(NewUpper));
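// Worked example for this helper (illustrative numbers only, guarded out of
// the build):
#if 0
  // StartRange = [10, 20), Step = 2, MaxBECount = 5, ascending:
  //   Offset        = 2 * 5 = 10
  //   MovedBoundary = (20 - 1) + 10 = 29
  //   Result        = [10, 30)
  ConstantRange StartRange(APInt(8, 10), APInt(8, 20));
  APInt Offset = APInt(8, 2) * APInt(8, 5);
  APInt NewUpper = StartRange.getUpper() - 1 + Offset + 1; // 30
  ConstantRange Result =
      ConstantRange::getNonEmpty(StartRange.getLower(), NewUpper);
#endif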
6323 ConstantRange ScalarEvolution::getRangeForAffineAR(const SCEV *Start,
6324 const SCEV *Step,
6325 const SCEV *MaxBECount,
6326 unsigned BitWidth) {
6327 assert(!isa<SCEVCouldNotCompute>(MaxBECount) &&
6328 getTypeSizeInBits(MaxBECount->getType()) <= BitWidth &&
6329 "Precondition!");
6331 MaxBECount = getNoopOrZeroExtend(MaxBECount, Start->getType());
6332 APInt MaxBECountValue = getUnsignedRangeMax(MaxBECount);
6334 // First, consider step signed.
6335 ConstantRange StartSRange = getSignedRange(Start);
6336 ConstantRange StepSRange = getSignedRange(Step);
6338 // If Step can be both positive and negative, we need to find ranges for the
6339 // maximum absolute step values in both directions and union them.
6340 ConstantRange SR =
6341 getRangeForAffineARHelper(StepSRange.getSignedMin(), StartSRange,
6342 MaxBECountValue, BitWidth, /* Signed = */ true);
6343 SR = SR.unionWith(getRangeForAffineARHelper(StepSRange.getSignedMax(),
6344 StartSRange, MaxBECountValue,
6345 BitWidth, /* Signed = */ true));
6347 // Next, consider step unsigned.
6348 ConstantRange UR = getRangeForAffineARHelper(
6349 getUnsignedRangeMax(Step), getUnsignedRange(Start),
6350 MaxBECountValue, BitWidth, /* Signed = */ false);
6352 // Finally, intersect signed and unsigned ranges.
6353 return SR.intersectWith(UR, ConstantRange::Smallest);
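// E.g. (illustrative numbers): with a signed step range of [-1, 2) the two
// helper calls above use the extreme steps -1 and +1. For a start range of
// [10, 20) and MaxBECount = 5 this gives [5, 20) union [10, 25) = [5, 25),
// which is then intersected with the unsigned estimate.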
6356 ConstantRange ScalarEvolution::getRangeForAffineNoSelfWrappingAR(
6357 const SCEVAddRecExpr *AddRec, const SCEV *MaxBECount, unsigned BitWidth,
6358 ScalarEvolution::RangeSignHint SignHint) {
6359 assert(AddRec->isAffine() && "Non-affine AddRecs are not supported!");
6360 assert(AddRec->hasNoSelfWrap() &&
6361 "This only works for non-self-wrapping AddRecs!");
6362 const bool IsSigned = SignHint == HINT_RANGE_SIGNED;
6363 const SCEV *Step = AddRec->getStepRecurrence(*this);
6364 // Only deal with constant step to save compile time.
6365 if (!isa<SCEVConstant>(Step))
6366 return ConstantRange::getFull(BitWidth);
6367 // Let's make sure that we can prove that we do not self-wrap during
6368 // MaxBECount iterations. We need this because MaxBECount is a maximum
6369 // iteration count estimate, and we might infer nw from some exit for which we
6370 // do not know max exit count (or any other side reasoning).
6371 // TODO: Turn into assert at some point.
6372 if (getTypeSizeInBits(MaxBECount->getType()) >
6373 getTypeSizeInBits(AddRec->getType()))
6374 return ConstantRange::getFull(BitWidth);
6375 MaxBECount = getNoopOrZeroExtend(MaxBECount, AddRec->getType());
6376 const SCEV *RangeWidth = getMinusOne(AddRec->getType());
6377 const SCEV *StepAbs = getUMinExpr(Step, getNegativeSCEV(Step));
6378 const SCEV *MaxItersWithoutWrap = getUDivExpr(RangeWidth, StepAbs);
6379 if (!isKnownPredicateViaConstantRanges(ICmpInst::ICMP_ULE, MaxBECount,
6380 MaxItersWithoutWrap))
6381 return ConstantRange::getFull(BitWidth);
6383 ICmpInst::Predicate LEPred =
6384 IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
6385 ICmpInst::Predicate GEPred =
6386 IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
6387 const SCEV *End = AddRec->evaluateAtIteration(MaxBECount, *this);
6389 // We know that there is no self-wrap. Let's take Start and End values and
6390 // look at all intermediate values V1, V2, ..., Vn that IndVar takes during
6391 // the iteration. They either lie inside the range [Min(Start, End),
6392 // Max(Start, End)] or outside it:
6394 // Case 1: RangeMin ... Start V1 ... VN End ... RangeMax;
6395 // Case 2: RangeMin Vk ... V1 Start ... End Vn ... Vk + 1 RangeMax;
6397 // No self wrap flag guarantees that the intermediate values cannot be BOTH
6398 // outside and inside the range [Min(Start, End), Max(Start, End)]. Using that
6399 // knowledge, let's try to prove that we are dealing with Case 1. It is so if
6400 // Start <= End and step is positive, or Start >= End and step is negative.
6401 const SCEV *Start = AddRec->getStart();
6402 ConstantRange StartRange = getRangeRef(Start, SignHint);
6403 ConstantRange EndRange = getRangeRef(End, SignHint);
6404 ConstantRange RangeBetween = StartRange.unionWith(EndRange);
6405 // If they already cover full iteration space, we will know nothing useful
6406 // even if we prove what we want to prove.
6407 if (RangeBetween.isFullSet())
6408 return RangeBetween;
6409 // Only deal with ranges that do not wrap (i.e. RangeMin < RangeMax).
6410 bool IsWrappedSet = IsSigned ? RangeBetween.isSignWrappedSet()
6411 : RangeBetween.isWrappedSet();
6412 if (IsWrappedSet)
6413 return ConstantRange::getFull(BitWidth);
6415 if (isKnownPositive(Step) &&
6416 isKnownPredicateViaConstantRanges(LEPred, Start, End))
6417 return RangeBetween;
6418 else if (isKnownNegative(Step) &&
6419 isKnownPredicateViaConstantRanges(GEPred, Start, End))
6420 return RangeBetween;
6421 return ConstantRange::getFull(BitWidth);
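// E.g. (illustrative): for {0,+,1}<nw> on i8 with a symbolic max
// backedge-taken count provably <=u 100, End is 100 and RangeBetween is
// [0, 101). The step is known positive and Start <= End is provable, so
// [0, 101) is returned instead of the full set.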
6424 ConstantRange ScalarEvolution::getRangeViaFactoring(const SCEV *Start,
6425 const SCEV *Step,
6426 const SCEV *MaxBECount,
6427 unsigned BitWidth) {
6428 // RangeOf({C?A:B,+,C?P:Q}) == RangeOf(C?{A,+,P}:{B,+,Q})
6429 // == RangeOf({A,+,P}) union RangeOf({B,+,Q})
6431 struct SelectPattern {
6432 Value *Condition = nullptr;
6433 APInt TrueValue;
6434 APInt FalseValue;
6436 explicit SelectPattern(ScalarEvolution &SE, unsigned BitWidth,
6437 const SCEV *S) {
6438 Optional<unsigned> CastOp;
6439 APInt Offset(BitWidth, 0);
6441 assert(SE.getTypeSizeInBits(S->getType()) == BitWidth &&
6442 "Should be!");
6444 // Peel off a constant offset:
6445 if (auto *SA = dyn_cast<SCEVAddExpr>(S)) {
6446 // In the future we could consider being smarter here and handle
6447 // {Start+Step,+,Step} too.
6448 if (SA->getNumOperands() != 2 || !isa<SCEVConstant>(SA->getOperand(0)))
6449 return;
6451 Offset = cast<SCEVConstant>(SA->getOperand(0))->getAPInt();
6452 S = SA->getOperand(1);
6455 // Peel off a cast operation
6456 if (auto *SCast = dyn_cast<SCEVIntegralCastExpr>(S)) {
6457 CastOp = SCast->getSCEVType();
6458 S = SCast->getOperand();
6461 using namespace llvm::PatternMatch;
6463 auto *SU = dyn_cast<SCEVUnknown>(S);
6464 const APInt *TrueVal, *FalseVal;
6465 if (!SU ||
6466 !match(SU->getValue(), m_Select(m_Value(Condition), m_APInt(TrueVal),
6467 m_APInt(FalseVal)))) {
6468 Condition = nullptr;
6469 return;
6472 TrueValue = *TrueVal;
6473 FalseValue = *FalseVal;
6475 // Re-apply the cast we peeled off earlier
6476 if (CastOp.hasValue())
6477 switch (*CastOp) {
6478 default:
6479 llvm_unreachable("Unknown SCEV cast type!");
6481 case scTruncate:
6482 TrueValue = TrueValue.trunc(BitWidth);
6483 FalseValue = FalseValue.trunc(BitWidth);
6484 break;
6485 case scZeroExtend:
6486 TrueValue = TrueValue.zext(BitWidth);
6487 FalseValue = FalseValue.zext(BitWidth);
6488 break;
6489 case scSignExtend:
6490 TrueValue = TrueValue.sext(BitWidth);
6491 FalseValue = FalseValue.sext(BitWidth);
6492 break;
6495 // Re-apply the constant offset we peeled off earlier
6496 TrueValue += Offset;
6497 FalseValue += Offset;
6500 bool isRecognized() { return Condition != nullptr; }
6503 SelectPattern StartPattern(*this, BitWidth, Start);
6504 if (!StartPattern.isRecognized())
6505 return ConstantRange::getFull(BitWidth);
6507 SelectPattern StepPattern(*this, BitWidth, Step);
6508 if (!StepPattern.isRecognized())
6509 return ConstantRange::getFull(BitWidth);
6511 if (StartPattern.Condition != StepPattern.Condition) {
6512 // We don't handle this case today; but we could, by considering four
6513 // possibilities below instead of two. I'm not sure if there are cases where
6514 // that will help over what getRange already does, though.
6515 return ConstantRange::getFull(BitWidth);
6518 // NB! Calling ScalarEvolution::getConstant is fine, but we should not try to
6519 // construct arbitrary general SCEV expressions here. This function is called
6520 // from deep in the call stack, and calling getSCEV (on a sext instruction,
6521 // say) can end up caching a suboptimal value.
6523 // FIXME: without the explicit `this` receiver below, MSVC errors out with
6524 // C2352 and C2512 (otherwise it isn't needed).
6526 const SCEV *TrueStart = this->getConstant(StartPattern.TrueValue);
6527 const SCEV *TrueStep = this->getConstant(StepPattern.TrueValue);
6528 const SCEV *FalseStart = this->getConstant(StartPattern.FalseValue);
6529 const SCEV *FalseStep = this->getConstant(StepPattern.FalseValue);
6531 ConstantRange TrueRange =
6532 this->getRangeForAffineAR(TrueStart, TrueStep, MaxBECount, BitWidth);
6533 ConstantRange FalseRange =
6534 this->getRangeForAffineAR(FalseStart, FalseStep, MaxBECount, BitWidth);
6536 return TrueRange.unionWith(FalseRange);
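// E.g. (illustrative): with Start == (%c ? 0 : 8), Step == (%c ? 1 : 2) and
// MaxBECount = 3, the two affine ranges are RangeOf({0,+,1}) = [0, 4) and
// RangeOf({8,+,2}) = [8, 15), so the result is their union instead of the
// full set that the unfactored start/step ranges would give.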
6539 SCEV::NoWrapFlags ScalarEvolution::getNoWrapFlagsFromUB(const Value *V) {
6540 if (isa<ConstantExpr>(V)) return SCEV::FlagAnyWrap;
6541 const BinaryOperator *BinOp = cast<BinaryOperator>(V);
6543 // Return early if there are no flags to propagate to the SCEV.
6544 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
6545 if (BinOp->hasNoUnsignedWrap())
6546 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNUW);
6547 if (BinOp->hasNoSignedWrap())
6548 Flags = ScalarEvolution::setFlags(Flags, SCEV::FlagNSW);
6549 if (Flags == SCEV::FlagAnyWrap)
6550 return SCEV::FlagAnyWrap;
6552 return isSCEVExprNeverPoison(BinOp) ? Flags : SCEV::FlagAnyWrap;
6555 bool ScalarEvolution::isSCEVExprNeverPoison(const Instruction *I) {
6556 // Here we check that I is in the header of the innermost loop containing I,
6557 // since we only deal with instructions in the loop header. The actual loop we
6558 // need to check later will come from an add recurrence, but getting that
6559 // requires computing the SCEV of the operands, which can be expensive. We
6560 // can do this check cheaply to rule out some cases early.
6561 Loop *InnermostContainingLoop = LI.getLoopFor(I->getParent());
6562 if (InnermostContainingLoop == nullptr ||
6563 InnermostContainingLoop->getHeader() != I->getParent())
6564 return false;
6566 // Only proceed if we can prove that I does not yield poison.
6567 if (!programUndefinedIfPoison(I))
6568 return false;
6570 // At this point we know that if I is executed, then it does not wrap
6571 // according to at least one of NSW or NUW. If I is not executed, then we do
6572 // not know if the calculation that I represents would wrap. Multiple
6573 // instructions can map to the same SCEV. If we apply NSW or NUW from I to
6574 // the SCEV, we must guarantee no wrapping for that SCEV also when it is
6575 // derived from other instructions that map to the same SCEV. We cannot make
6576 // that guarantee for cases where I is not executed. So we need to find the
6577 // loop that I is considered in relation to and prove that I is executed for
6578 // every iteration of that loop. That implies that the value that I
6579 // calculates does not wrap anywhere in the loop, so then we can apply the
6580 // flags to the SCEV.
6582 // We check isLoopInvariant to disambiguate in case we are adding recurrences
6583 // from different loops, so that we know which loop to prove that I is
6584 // executed in.
6585 for (unsigned OpIndex = 0; OpIndex < I->getNumOperands(); ++OpIndex) {
6586 // I could be an extractvalue from a call to an overflow intrinsic.
6587 // TODO: We can do better here in some cases.
6588 if (!isSCEVable(I->getOperand(OpIndex)->getType()))
6589 return false;
6590 const SCEV *Op = getSCEV(I->getOperand(OpIndex));
6591 if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
6592 bool AllOtherOpsLoopInvariant = true;
6593 for (unsigned OtherOpIndex = 0; OtherOpIndex < I->getNumOperands();
6594 ++OtherOpIndex) {
6595 if (OtherOpIndex != OpIndex) {
6596 const SCEV *OtherOp = getSCEV(I->getOperand(OtherOpIndex));
6597 if (!isLoopInvariant(OtherOp, AddRec->getLoop())) {
6598 AllOtherOpsLoopInvariant = false;
6599 break;
6603 if (AllOtherOpsLoopInvariant &&
6604 isGuaranteedToExecuteForEveryIteration(I, AddRec->getLoop()))
6605 return true;
6608 return false;
6611 bool ScalarEvolution::isAddRecNeverPoison(const Instruction *I, const Loop *L) {
6612 // If we know that \c I can never be poison period, then that's enough.
6613 if (isSCEVExprNeverPoison(I))
6614 return true;
6616 // For an add recurrence specifically, we assume that infinite loops without
6617 // side effects are undefined behavior, and then reason as follows:
6619 // If the add recurrence is poison in any iteration, it is poison on all
6620 // future iterations (since incrementing poison yields poison). If the result
6621 // of the add recurrence is fed into the loop latch condition and the loop
6622 // does not contain any throws or exiting blocks other than the latch, we now
6623 // have the ability to "choose" whether the backedge is taken or not (by
6624 // choosing a sufficiently evil value for the poison feeding into the branch)
6625 // for every iteration including and after the one in which \p I first became
6626 // poison. There are two possibilities (let's call the iteration in which \p
6627 // I first became poison as K):
6629 // 1. In the set of iterations including and after K, the loop body executes
6630 // no side effects. In this case executing the backedge an infinite number
6631 // of times will yield undefined behavior.
6633 // 2. In the set of iterations including and after K, the loop body executes
6634 // at least one side effect. In this case, that specific instance of side
6635 // effect is control dependent on poison, which also yields undefined
6636 // behavior.
6638 auto *ExitingBB = L->getExitingBlock();
6639 auto *LatchBB = L->getLoopLatch();
6640 if (!ExitingBB || !LatchBB || ExitingBB != LatchBB)
6641 return false;
6643 SmallPtrSet<const Instruction *, 16> Pushed;
6644 SmallVector<const Instruction *, 8> PoisonStack;
6646 // We start by assuming \c I, the post-inc add recurrence, is poison. Only
6647 // things that are known to be poison under that assumption go on the
6648 // PoisonStack.
6649 Pushed.insert(I);
6650 PoisonStack.push_back(I);
6652 bool LatchControlDependentOnPoison = false;
6653 while (!PoisonStack.empty() && !LatchControlDependentOnPoison) {
6654 const Instruction *Poison = PoisonStack.pop_back_val();
6656 for (auto *PoisonUser : Poison->users()) {
6657 if (propagatesPoison(cast<Operator>(PoisonUser))) {
6658 if (Pushed.insert(cast<Instruction>(PoisonUser)).second)
6659 PoisonStack.push_back(cast<Instruction>(PoisonUser));
6660 } else if (auto *BI = dyn_cast<BranchInst>(PoisonUser)) {
6661 assert(BI->isConditional() && "Only possibility!");
6662 if (BI->getParent() == LatchBB) {
6663 LatchControlDependentOnPoison = true;
6664 break;
6670 return LatchControlDependentOnPoison && loopHasNoAbnormalExits(L);
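// E.g. (illustrative IR): in
//   loop:
//     %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
//     %iv.next = add nsw i32 %iv, 1
//     %cond = icmp slt i32 %iv.next, %n
//     br i1 %cond, label %loop, label %exit
// a poison %iv.next would feed the latch branch through %cond, so if the
// loop has no abnormal exits the nsw flag may be applied to the addrec.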
6673 ScalarEvolution::LoopProperties
6674 ScalarEvolution::getLoopProperties(const Loop *L) {
6675 using LoopProperties = ScalarEvolution::LoopProperties;
6677 auto Itr = LoopPropertiesCache.find(L);
6678 if (Itr == LoopPropertiesCache.end()) {
6679 auto HasSideEffects = [](Instruction *I) {
6680 if (auto *SI = dyn_cast<StoreInst>(I))
6681 return !SI->isSimple();
6683 return I->mayThrow() || I->mayWriteToMemory();
6686 LoopProperties LP = {/* HasNoAbnormalExits */ true,
6687 /*HasNoSideEffects*/ true};
6689 for (auto *BB : L->getBlocks())
6690 for (auto &I : *BB) {
6691 if (!isGuaranteedToTransferExecutionToSuccessor(&I))
6692 LP.HasNoAbnormalExits = false;
6693 if (HasSideEffects(&I))
6694 LP.HasNoSideEffects = false;
6695 if (!LP.HasNoAbnormalExits && !LP.HasNoSideEffects)
6696 break; // We're already as pessimistic as we can get.
6699 auto InsertPair = LoopPropertiesCache.insert({L, LP});
6700 assert(InsertPair.second && "We just checked!");
6701 Itr = InsertPair.first;
6704 return Itr->second;
6707 bool ScalarEvolution::loopIsFiniteByAssumption(const Loop *L) {
6708 // A mustprogress loop without side effects must be finite.
6709 // TODO: The check used here is very conservative. Only *specific*
6710 // side effects are well defined in infinite loops.
6711 return isMustProgress(L) && loopHasNoSideEffects(L);
6714 const SCEV *ScalarEvolution::createSCEV(Value *V) {
6715 if (!isSCEVable(V->getType()))
6716 return getUnknown(V);
6718 if (Instruction *I = dyn_cast<Instruction>(V)) {
6719 // Don't attempt to analyze instructions in blocks that aren't
6720 // reachable. Such instructions don't matter, and they aren't required
6721 // to obey basic rules for definitions dominating uses which this
6722 // analysis depends on.
6723 if (!DT.isReachableFromEntry(I->getParent()))
6724 return getUnknown(UndefValue::get(V->getType()));
6725 } else if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
6726 return getConstant(CI);
6727 else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V))
6728 return GA->isInterposable() ? getUnknown(V) : getSCEV(GA->getAliasee());
6729 else if (!isa<ConstantExpr>(V))
6730 return getUnknown(V);
6732 Operator *U = cast<Operator>(V);
6733 if (auto BO = MatchBinaryOp(U, DT)) {
6734 switch (BO->Opcode) {
6735 case Instruction::Add: {
6736 // The simple thing to do would be to just call getSCEV on both operands
6737 // and call getAddExpr with the result. However if we're looking at a
6738 // bunch of things all added together, this can be quite inefficient,
6739 // because it leads to N-1 getAddExpr calls for N ultimate operands.
6740 // Instead, gather up all the operands and make a single getAddExpr call.
6741 // LLVM IR canonical form means we need only traverse the left operands.
6742 SmallVector<const SCEV *, 4> AddOps;
6743 do {
6744 if (BO->Op) {
6745 if (auto *OpSCEV = getExistingSCEV(BO->Op)) {
6746 AddOps.push_back(OpSCEV);
6747 break;
6750 // If a NUW or NSW flag can be applied to the SCEV for this
6751 // addition, then compute the SCEV for this addition by itself
6752 // with a separate call to getAddExpr. We need to do that
6753 // instead of pushing the operands of the addition onto AddOps,
6754 // since the flags are only known to apply to this particular
6755 // addition - they may not apply to other additions that can be
6756 // formed with operands from AddOps.
6757 const SCEV *RHS = getSCEV(BO->RHS);
6758 SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op);
6759 if (Flags != SCEV::FlagAnyWrap) {
6760 const SCEV *LHS = getSCEV(BO->LHS);
6761 if (BO->Opcode == Instruction::Sub)
6762 AddOps.push_back(getMinusSCEV(LHS, RHS, Flags));
6763 else
6764 AddOps.push_back(getAddExpr(LHS, RHS, Flags));
6765 break;
6769 if (BO->Opcode == Instruction::Sub)
6770 AddOps.push_back(getNegativeSCEV(getSCEV(BO->RHS)));
6771 else
6772 AddOps.push_back(getSCEV(BO->RHS));
6774 auto NewBO = MatchBinaryOp(BO->LHS, DT);
6775 if (!NewBO || (NewBO->Opcode != Instruction::Add &&
6776 NewBO->Opcode != Instruction::Sub)) {
6777 AddOps.push_back(getSCEV(BO->LHS));
6778 break;
6780 BO = NewBO;
6781 } while (true);
6783 return getAddExpr(AddOps);
6786 case Instruction::Mul: {
6787 SmallVector<const SCEV *, 4> MulOps;
6788 do {
6789 if (BO->Op) {
6790 if (auto *OpSCEV = getExistingSCEV(BO->Op)) {
6791 MulOps.push_back(OpSCEV);
6792 break;
6795 SCEV::NoWrapFlags Flags = getNoWrapFlagsFromUB(BO->Op);
6796 if (Flags != SCEV::FlagAnyWrap) {
6797 MulOps.push_back(
6798 getMulExpr(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags));
6799 break;
6803 MulOps.push_back(getSCEV(BO->RHS));
6804 auto NewBO = MatchBinaryOp(BO->LHS, DT);
6805 if (!NewBO || NewBO->Opcode != Instruction::Mul) {
6806 MulOps.push_back(getSCEV(BO->LHS));
6807 break;
6809 BO = NewBO;
6810 } while (true);
6812 return getMulExpr(MulOps);
6814 case Instruction::UDiv:
6815 return getUDivExpr(getSCEV(BO->LHS), getSCEV(BO->RHS));
6816 case Instruction::URem:
6817 return getURemExpr(getSCEV(BO->LHS), getSCEV(BO->RHS));
6818 case Instruction::Sub: {
6819 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
6820 if (BO->Op)
6821 Flags = getNoWrapFlagsFromUB(BO->Op);
6822 return getMinusSCEV(getSCEV(BO->LHS), getSCEV(BO->RHS), Flags);
6824 case Instruction::And:
6825 // For an expression like x&255 that merely masks off the high bits,
6826 // use zext(trunc(x)) as the SCEV expression.
6827 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) {
6828 if (CI->isZero())
6829 return getSCEV(BO->RHS);
6830 if (CI->isMinusOne())
6831 return getSCEV(BO->LHS);
6832 const APInt &A = CI->getValue();
6834 // Instcombine's ShrinkDemandedConstant may strip bits out of
6835 // constants, obscuring what would otherwise be a low-bits mask.
6836 // Use computeKnownBits to compute what ShrinkDemandedConstant
6837 // knew about to reconstruct a low-bits mask value.
6838 unsigned LZ = A.countLeadingZeros();
6839 unsigned TZ = A.countTrailingZeros();
6840 unsigned BitWidth = A.getBitWidth();
6841 KnownBits Known(BitWidth);
6842 computeKnownBits(BO->LHS, Known, getDataLayout(),
6843 0, &AC, nullptr, &DT);
6845 APInt EffectiveMask =
6846 APInt::getLowBitsSet(BitWidth, BitWidth - LZ - TZ).shl(TZ);
6847 if ((LZ != 0 || TZ != 0) && !((~A & ~Known.Zero) & EffectiveMask)) {
6848 const SCEV *MulCount = getConstant(APInt::getOneBitSet(BitWidth, TZ));
6849 const SCEV *LHS = getSCEV(BO->LHS);
6850 const SCEV *ShiftedLHS = nullptr;
6851 if (auto *LHSMul = dyn_cast<SCEVMulExpr>(LHS)) {
6852 if (auto *OpC = dyn_cast<SCEVConstant>(LHSMul->getOperand(0))) {
6853 // For an expression like (x * 8) & 8, simplify the multiply.
6854 unsigned MulZeros = OpC->getAPInt().countTrailingZeros();
6855 unsigned GCD = std::min(MulZeros, TZ);
6856 APInt DivAmt = APInt::getOneBitSet(BitWidth, TZ - GCD);
6857 SmallVector<const SCEV*, 4> MulOps;
6858 MulOps.push_back(getConstant(OpC->getAPInt().lshr(GCD)));
6859 MulOps.append(LHSMul->op_begin() + 1, LHSMul->op_end());
6860 auto *NewMul = getMulExpr(MulOps, LHSMul->getNoWrapFlags());
6861 ShiftedLHS = getUDivExpr(NewMul, getConstant(DivAmt));
6864 if (!ShiftedLHS)
6865 ShiftedLHS = getUDivExpr(LHS, MulCount);
6866 return getMulExpr(
6867 getZeroExtendExpr(
6868 getTruncateExpr(ShiftedLHS,
6869 IntegerType::get(getContext(), BitWidth - LZ - TZ)),
6870 BO->LHS->getType()),
6871 MulCount);
6874 break;
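// E.g. (illustrative): 'and i32 %x, 255' becomes
// (zext i8 (trunc i32 %x to i8) to i32).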
6876 case Instruction::Or:
6877 // If the RHS of the Or is a constant, we may have something like:
6878 // X*4+1 which got turned into X*4|1. Handle this as an Add so loop
6879 // optimizations will transparently handle this case.
6881 // In order for this transformation to be safe, the LHS must be of the
6882 // form X*(2^n) and the Or constant must be less than 2^n.
6883 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) {
6884 const SCEV *LHS = getSCEV(BO->LHS);
6885 const APInt &CIVal = CI->getValue();
6886 if (GetMinTrailingZeros(LHS) >=
6887 (CIVal.getBitWidth() - CIVal.countLeadingZeros())) {
6888 // Build a plain add SCEV.
6889 return getAddExpr(LHS, getSCEV(CI),
6890 (SCEV::NoWrapFlags)(SCEV::FlagNUW | SCEV::FlagNSW));
6893 break;
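// E.g. (illustrative): '%y = or i32 %x4, 1' with %x4 = 4 * %x (at least
// two known trailing zeros) becomes (1 + 4 * %x)<nuw><nsw>.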
6895 case Instruction::Xor:
6896 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) {
6897 // If the RHS of xor is -1, then this is a not operation.
6898 if (CI->isMinusOne())
6899 return getNotSCEV(getSCEV(BO->LHS));
6901 // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask.
6902 // This is a variant of the check for xor with -1, and it handles
6903 // the case where instcombine has trimmed non-demanded bits out
6904 // of an xor with -1.
6905 if (auto *LBO = dyn_cast<BinaryOperator>(BO->LHS))
6906 if (ConstantInt *LCI = dyn_cast<ConstantInt>(LBO->getOperand(1)))
6907 if (LBO->getOpcode() == Instruction::And &&
6908 LCI->getValue() == CI->getValue())
6909 if (const SCEVZeroExtendExpr *Z =
6910 dyn_cast<SCEVZeroExtendExpr>(getSCEV(BO->LHS))) {
6911 Type *UTy = BO->LHS->getType();
6912 const SCEV *Z0 = Z->getOperand();
6913 Type *Z0Ty = Z0->getType();
6914 unsigned Z0TySize = getTypeSizeInBits(Z0Ty);
6916 // If C is a low-bits mask, the zero extend is serving to
6917 // mask off the high bits. Complement the operand and
6918 // re-apply the zext.
6919 if (CI->getValue().isMask(Z0TySize))
6920 return getZeroExtendExpr(getNotSCEV(Z0), UTy);
6922 // If C is a single bit, it may be in the sign-bit position
6923 // before the zero-extend. In this case, represent the xor
6924 // using an add, which is equivalent, and re-apply the zext.
6925 APInt Trunc = CI->getValue().trunc(Z0TySize);
6926 if (Trunc.zext(getTypeSizeInBits(UTy)) == CI->getValue() &&
6927 Trunc.isSignMask())
6928 return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)),
6929 UTy);
6932 break;
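// E.g. (illustrative): with %m = and i32 %x, 255, 'xor i32 %m, 255'
// complements just the low byte, so it is modeled as the zero-extension of
// the complemented truncated value, zext(not(trunc %x to i8)) to i32.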
6934 case Instruction::Shl:
6935 // Turn shift left of a constant amount into a multiply.
6936 if (ConstantInt *SA = dyn_cast<ConstantInt>(BO->RHS)) {
6937 uint32_t BitWidth = cast<IntegerType>(SA->getType())->getBitWidth();
6939 // If the shift count is not less than the bitwidth, the result of
6940 // the shift is undefined. Don't try to analyze it, because the
6941 // resolution chosen here may differ from the resolution chosen in
6942 // other parts of the compiler.
6943 if (SA->getValue().uge(BitWidth))
6944 break;
6946 // We can safely preserve the nuw flag in all cases. It's also safe to
6947 // turn a nuw nsw shl into a nuw nsw mul. However, nsw in isolation
6948 // requires special handling. It can be preserved as long as we're not
6949 // left shifting by bitwidth - 1.
6950 auto Flags = SCEV::FlagAnyWrap;
6951 if (BO->Op) {
6952 auto MulFlags = getNoWrapFlagsFromUB(BO->Op);
6953 if ((MulFlags & SCEV::FlagNSW) &&
6954 ((MulFlags & SCEV::FlagNUW) || SA->getValue().ult(BitWidth - 1)))
6955 Flags = (SCEV::NoWrapFlags)(Flags | SCEV::FlagNSW);
6956 if (MulFlags & SCEV::FlagNUW)
6957 Flags = (SCEV::NoWrapFlags)(Flags | SCEV::FlagNUW);
6960 Constant *X = ConstantInt::get(
6961 getContext(), APInt::getOneBitSet(BitWidth, SA->getZExtValue()));
6962 return getMulExpr(getSCEV(BO->LHS), getSCEV(X), Flags);
6964 break;
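// E.g. (illustrative): 'shl nsw i32 %x, 3' becomes (8 * %x)<nsw>; the nsw
// transfer is allowed because the shift amount 3 is less than bitwidth - 1.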
6966 case Instruction::AShr: {
6967 // AShr X, C, where C is a constant.
6968 ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS);
6969 if (!CI)
6970 break;
6972 Type *OuterTy = BO->LHS->getType();
6973 uint64_t BitWidth = getTypeSizeInBits(OuterTy);
6974 // If the shift count is not less than the bitwidth, the result of
6975 // the shift is undefined. Don't try to analyze it, because the
6976 // resolution chosen here may differ from the resolution chosen in
6977 // other parts of the compiler.
6978 if (CI->getValue().uge(BitWidth))
6979 break;
6981 if (CI->isZero())
6982 return getSCEV(BO->LHS); // shift by zero --> noop
6984 uint64_t AShrAmt = CI->getZExtValue();
6985 Type *TruncTy = IntegerType::get(getContext(), BitWidth - AShrAmt);
6987 Operator *L = dyn_cast<Operator>(BO->LHS);
6988 if (L && L->getOpcode() == Instruction::Shl) {
6989 // X = Shl A, n
6990 // Y = AShr X, m
6991 // Both n and m are constant.
6993 const SCEV *ShlOp0SCEV = getSCEV(L->getOperand(0));
6994 if (L->getOperand(1) == BO->RHS)
6995 // For a two-shift sext-inreg, i.e. n = m,
6996 // use sext(trunc(x)) as the SCEV expression.
6997 return getSignExtendExpr(
6998 getTruncateExpr(ShlOp0SCEV, TruncTy), OuterTy);
7000 ConstantInt *ShlAmtCI = dyn_cast<ConstantInt>(L->getOperand(1));
7001 if (ShlAmtCI && ShlAmtCI->getValue().ult(BitWidth)) {
7002 uint64_t ShlAmt = ShlAmtCI->getZExtValue();
7003 if (ShlAmt > AShrAmt) {
7004 // When n > m, use sext(mul(trunc(x), 2^(n-m)))) as the SCEV
7005 // expression. We already checked that ShlAmt < BitWidth, so
7006 // the multiplier, 1 << (ShlAmt - AShrAmt), fits into TruncTy as
7007 // ShlAmt - AShrAmt < BitWidth - AShrAmt.
7008 APInt Mul = APInt::getOneBitSet(BitWidth - AShrAmt,
7009 ShlAmt - AShrAmt);
7010 return getSignExtendExpr(
7011 getMulExpr(getTruncateExpr(ShlOp0SCEV, TruncTy),
7012 getConstant(Mul)), OuterTy);
7016 break;
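// E.g. (illustrative): for '%s = shl i32 %x, 24' followed by
// 'ashr i32 %s, 24' (n == m), this yields the classic sext-inreg form
// sext(trunc %x to i8) to i32; with n = 26 and m = 24 it yields
// sext(4 * (trunc %x to i8)) to i32 instead.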
7021 switch (U->getOpcode()) {
7022 case Instruction::Trunc:
7023 return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType());
7025 case Instruction::ZExt:
7026 return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType());
7028 case Instruction::SExt:
7029 if (auto BO = MatchBinaryOp(U->getOperand(0), DT)) {
7030 // The NSW flag of a subtract does not always survive the conversion to
7031 // A + (-1)*B. By pushing sign extension onto its operands we are much
7032 // more likely to preserve NSW and allow later AddRec optimisations.
7034 // NOTE: This is effectively duplicating this logic from getSignExtend:
7035 // sext((A + B + ...)<nsw>) --> (sext(A) + sext(B) + ...)<nsw>
7036 // but by that point the NSW information has potentially been lost.
7037 if (BO->Opcode == Instruction::Sub && BO->IsNSW) {
7038 Type *Ty = U->getType();
7039 auto *V1 = getSignExtendExpr(getSCEV(BO->LHS), Ty);
7040 auto *V2 = getSignExtendExpr(getSCEV(BO->RHS), Ty);
7041 return getMinusSCEV(V1, V2, SCEV::FlagNSW);
7044 return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType());
7046 case Instruction::BitCast:
7047 // BitCasts are no-op casts so we just eliminate the cast.
7048 if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType()))
7049 return getSCEV(U->getOperand(0));
7050 break;
7052 case Instruction::PtrToInt: {
7053 // Pointer to integer cast is straightforward, so do model it.
7054 const SCEV *Op = getSCEV(U->getOperand(0));
7055 Type *DstIntTy = U->getType();
7056 // But only if effective SCEV (integer) type is wide enough to represent
7057 // all possible pointer values.
7058 const SCEV *IntOp = getPtrToIntExpr(Op, DstIntTy);
7059 if (isa<SCEVCouldNotCompute>(IntOp))
7060 return getUnknown(V);
7061 return IntOp;
7063 case Instruction::IntToPtr:
7064 // Just don't deal with inttoptr casts.
7065 return getUnknown(V);
7067 case Instruction::SDiv:
7068 // If both operands are non-negative, this is just an udiv.
7069 if (isKnownNonNegative(getSCEV(U->getOperand(0))) &&
7070 isKnownNonNegative(getSCEV(U->getOperand(1))))
7071 return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(U->getOperand(1)));
7072 break;
7074 case Instruction::SRem:
7075 // If both operands are non-negative, this is just an urem.
7076 if (isKnownNonNegative(getSCEV(U->getOperand(0))) &&
7077 isKnownNonNegative(getSCEV(U->getOperand(1))))
7078 return getURemExpr(getSCEV(U->getOperand(0)), getSCEV(U->getOperand(1)));
7079 break;
7081 case Instruction::GetElementPtr:
7082 return createNodeForGEP(cast<GEPOperator>(U));
7084 case Instruction::PHI:
7085 return createNodeForPHI(cast<PHINode>(U));
7087 case Instruction::Select:
7088 // U can also be a select constant expr, which we let fall through. Since
7089 // createNodeForSelect only works for a condition that is an `ICmpInst`, and
7090 // constant expressions cannot have instructions as operands, we'd have
7091 // returned getUnknown for a select constant expression anyway.
7092 if (isa<Instruction>(U))
7093 return createNodeForSelectOrPHI(cast<Instruction>(U), U->getOperand(0),
7094 U->getOperand(1), U->getOperand(2));
7095 break;
7097 case Instruction::Call:
7098 case Instruction::Invoke:
7099 if (Value *RV = cast<CallBase>(U)->getReturnedArgOperand())
7100 return getSCEV(RV);
7102 if (auto *II = dyn_cast<IntrinsicInst>(U)) {
7103 switch (II->getIntrinsicID()) {
7104 case Intrinsic::abs:
7105 return getAbsExpr(
7106 getSCEV(II->getArgOperand(0)),
7107 /*IsNSW=*/cast<ConstantInt>(II->getArgOperand(1))->isOne());
7108 case Intrinsic::umax:
7109 return getUMaxExpr(getSCEV(II->getArgOperand(0)),
7110 getSCEV(II->getArgOperand(1)));
7111 case Intrinsic::umin:
7112 return getUMinExpr(getSCEV(II->getArgOperand(0)),
7113 getSCEV(II->getArgOperand(1)));
7114 case Intrinsic::smax:
7115 return getSMaxExpr(getSCEV(II->getArgOperand(0)),
7116 getSCEV(II->getArgOperand(1)));
7117 case Intrinsic::smin:
7118 return getSMinExpr(getSCEV(II->getArgOperand(0)),
7119 getSCEV(II->getArgOperand(1)));
7120 case Intrinsic::usub_sat: {
7121 const SCEV *X = getSCEV(II->getArgOperand(0));
7122 const SCEV *Y = getSCEV(II->getArgOperand(1));
7123 const SCEV *ClampedY = getUMinExpr(X, Y);
7124 return getMinusSCEV(X, ClampedY, SCEV::FlagNUW);
7126 case Intrinsic::uadd_sat: {
7127 const SCEV *X = getSCEV(II->getArgOperand(0));
7128 const SCEV *Y = getSCEV(II->getArgOperand(1));
7129 const SCEV *ClampedX = getUMinExpr(X, getNotSCEV(Y));
7130 return getAddExpr(ClampedX, Y, SCEV::FlagNUW);
7132 case Intrinsic::start_loop_iterations:
7133 // A start_loop_iterations is just equivalent to the first operand for
7134 // SCEV purposes.
7135 return getSCEV(II->getArgOperand(0));
7136 default:
7137 break;
7140 break;
7143 return getUnknown(V);
7146 //===----------------------------------------------------------------------===//
7147 // Iteration Count Computation Code
7150 const SCEV *ScalarEvolution::getTripCountFromExitCount(const SCEV *ExitCount) {
7151 // Get the trip count from the BE count by adding 1. Overflow results
7152 // in zero, which means "unknown".
7153 return getAddExpr(ExitCount, getOne(ExitCount->getType()));
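// E.g. (illustrative): an i8 backedge-taken count of -1 (255) wraps to a
// trip count of 0, which consumers such as getSmallConstantTripMultiple
// below explicitly treat as "unknown".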
7156 static unsigned getConstantTripCount(const SCEVConstant *ExitCount) {
7157 if (!ExitCount)
7158 return 0;
7160 ConstantInt *ExitConst = ExitCount->getValue();
7162 // Guard against huge trip counts.
7163 if (ExitConst->getValue().getActiveBits() > 32)
7164 return 0;
7166 // In case of integer overflow, this returns 0, which is correct.
7167 return ((unsigned)ExitConst->getZExtValue()) + 1;
7170 unsigned ScalarEvolution::getSmallConstantTripCount(const Loop *L) {
7171 auto *ExitCount = dyn_cast<SCEVConstant>(getBackedgeTakenCount(L, Exact));
7172 return getConstantTripCount(ExitCount);
7175 unsigned
7176 ScalarEvolution::getSmallConstantTripCount(const Loop *L,
7177 const BasicBlock *ExitingBlock) {
7178 assert(ExitingBlock && "Must pass a non-null exiting block!");
7179 assert(L->isLoopExiting(ExitingBlock) &&
7180 "Exiting block must actually branch out of the loop!");
7181 const SCEVConstant *ExitCount =
7182 dyn_cast<SCEVConstant>(getExitCount(L, ExitingBlock));
7183 return getConstantTripCount(ExitCount);
7186 unsigned ScalarEvolution::getSmallConstantMaxTripCount(const Loop *L) {
7187 const auto *MaxExitCount =
7188 dyn_cast<SCEVConstant>(getConstantMaxBackedgeTakenCount(L));
7189 return getConstantTripCount(MaxExitCount);
7192 unsigned ScalarEvolution::getSmallConstantTripMultiple(const Loop *L) {
7193 SmallVector<BasicBlock *, 8> ExitingBlocks;
7194 L->getExitingBlocks(ExitingBlocks);
7196 Optional<unsigned> Res = None;
7197 for (auto *ExitingBB : ExitingBlocks) {
7198 unsigned Multiple = getSmallConstantTripMultiple(L, ExitingBB);
7199 if (!Res)
7200 Res = Multiple;
7201 Res = (unsigned)GreatestCommonDivisor64(*Res, Multiple);
7203 return Res.getValueOr(1);
7206 unsigned ScalarEvolution::getSmallConstantTripMultiple(const Loop *L,
7207 const SCEV *ExitCount) {
7208 if (ExitCount == getCouldNotCompute())
7209 return 1;
7211 // Get the trip count
7212 const SCEV *TCExpr = getTripCountFromExitCount(ExitCount);
7214 const SCEVConstant *TC = dyn_cast<SCEVConstant>(TCExpr);
7215 if (!TC)
7216 // Attempt to factor more general cases. Returns the greatest power of
7217 // two divisor. If overflow happens, the trip count expression is still
7218 // divisible by the greatest power of 2 divisor returned.
7219 return 1U << std::min((uint32_t)31,
7220 GetMinTrailingZeros(applyLoopGuards(TCExpr, L)));
7222 ConstantInt *Result = TC->getValue();
7224 // Guard against huge trip counts (this requires checking
7225 // for zero to handle the case where the trip count == -1 and the
7226 // addition wraps).
7227 if (!Result || Result->getValue().getActiveBits() > 32 ||
7228 Result->getValue().getActiveBits() == 0)
7229 return 1;
7231 return (unsigned)Result->getZExtValue();
7234 /// Returns the largest constant divisor of the trip count of this loop as a
7235 /// normal unsigned value, if possible. This means that the actual trip count is
7236 /// always a multiple of the returned value (don't forget the trip count could
7237 /// very well be zero as well!).
7239 /// Returns 1 if the trip count is unknown or not guaranteed to be a
7240 /// multiple of a constant (which is also the case if the trip count is simply
7241 /// constant; use getSmallConstantTripCount for that case). It will also return
7242 /// 1 if the trip count is very large (>= 2^32).
7244 /// As explained in the comments for getSmallConstantTripCount, this assumes
7245 /// that control exits the loop via ExitingBlock.
7246 unsigned
7247 ScalarEvolution::getSmallConstantTripMultiple(const Loop *L,
7248 const BasicBlock *ExitingBlock) {
7249 assert(ExitingBlock && "Must pass a non-null exiting block!");
7250 assert(L->isLoopExiting(ExitingBlock) &&
7251 "Exiting block must actually branch out of the loop!");
7252 const SCEV *ExitCount = getExitCount(L, ExitingBlock);
7253 return getSmallConstantTripMultiple(L, ExitCount);
7256 const SCEV *ScalarEvolution::getExitCount(const Loop *L,
7257 const BasicBlock *ExitingBlock,
7258 ExitCountKind Kind) {
7259 switch (Kind) {
7260 case Exact:
7261 case SymbolicMaximum:
7262 return getBackedgeTakenInfo(L).getExact(ExitingBlock, this);
7263 case ConstantMaximum:
7264 return getBackedgeTakenInfo(L).getConstantMax(ExitingBlock, this);
7266 llvm_unreachable("Invalid ExitCountKind!");
7269 const SCEV *
7270 ScalarEvolution::getPredicatedBackedgeTakenCount(const Loop *L,
7271 SCEVUnionPredicate &Preds) {
7272 return getPredicatedBackedgeTakenInfo(L).getExact(L, this, &Preds);
7275 const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L,
7276 ExitCountKind Kind) {
7277 switch (Kind) {
7278 case Exact:
7279 return getBackedgeTakenInfo(L).getExact(L, this);
7280 case ConstantMaximum:
7281 return getBackedgeTakenInfo(L).getConstantMax(this);
7282 case SymbolicMaximum:
7283 return getBackedgeTakenInfo(L).getSymbolicMax(L, this);
7285 llvm_unreachable("Invalid ExitCountKind!");
7288 bool ScalarEvolution::isBackedgeTakenCountMaxOrZero(const Loop *L) {
7289 return getBackedgeTakenInfo(L).isConstantMaxOrZero(this);
7292 /// Push PHI nodes in the header of the given loop onto the given Worklist.
7293 static void
7294 PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) {
7295 BasicBlock *Header = L->getHeader();
7297 // Push all Loop-header PHIs onto the Worklist stack.
7298 for (PHINode &PN : Header->phis())
7299 Worklist.push_back(&PN);
7302 const ScalarEvolution::BackedgeTakenInfo &
7303 ScalarEvolution::getPredicatedBackedgeTakenInfo(const Loop *L) {
7304 auto &BTI = getBackedgeTakenInfo(L);
7305 if (BTI.hasFullInfo())
7306 return BTI;
7308 auto Pair = PredicatedBackedgeTakenCounts.insert({L, BackedgeTakenInfo()});
7310 if (!Pair.second)
7311 return Pair.first->second;
7313 BackedgeTakenInfo Result =
7314 computeBackedgeTakenCount(L, /*AllowPredicates=*/true);
7316 return PredicatedBackedgeTakenCounts.find(L)->second = std::move(Result);
7319 ScalarEvolution::BackedgeTakenInfo &
7320 ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
7321 // Initially insert an invalid entry for this loop. If the insertion
7322 // succeeds, proceed to actually compute a backedge-taken count and
7323 // update the value. The temporary CouldNotCompute value tells SCEV
7324 // code elsewhere that it shouldn't attempt to request a new
7325 // backedge-taken count, which could result in infinite recursion.
7326 std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair =
7327 BackedgeTakenCounts.insert({L, BackedgeTakenInfo()});
7328 if (!Pair.second)
7329 return Pair.first->second;
7331 // computeBackedgeTakenCount may allocate memory for its result. Inserting it
7332 // into the BackedgeTakenCounts map transfers ownership. Otherwise, the result
7333 // must be cleared in this scope.
7334 BackedgeTakenInfo Result = computeBackedgeTakenCount(L);
7336 // In a release build the statistics are unused; cast to void to avoid warnings.
7337 (void)NumTripCountsComputed;
7338 (void)NumTripCountsNotComputed;
7339 #if LLVM_ENABLE_STATS || !defined(NDEBUG)
7340 const SCEV *BEExact = Result.getExact(L, this);
7341 if (BEExact != getCouldNotCompute()) {
7342 assert(isLoopInvariant(BEExact, L) &&
7343 isLoopInvariant(Result.getConstantMax(this), L) &&
7344 "Computed backedge-taken count isn't loop invariant for loop!");
7345 ++NumTripCountsComputed;
7346 } else if (Result.getConstantMax(this) == getCouldNotCompute() &&
7347 isa<PHINode>(L->getHeader()->begin())) {
7348 // Only count loops that have phi nodes as not being computable.
7349 ++NumTripCountsNotComputed;
7351 #endif // LLVM_ENABLE_STATS || !defined(NDEBUG)
7353 // Now that we know more about the trip count for this loop, forget any
7354 // existing SCEV values for PHI nodes in this loop since they are only
7355 // conservative estimates made without the benefit of trip count
7356 // information. This is similar to the code in forgetLoop, except that
7357 // it handles SCEVUnknown PHI nodes specially.
7358 if (Result.hasAnyInfo()) {
7359 SmallVector<Instruction *, 16> Worklist;
7360 PushLoopPHIs(L, Worklist);
7362 SmallPtrSet<Instruction *, 8> Discovered;
7363 while (!Worklist.empty()) {
7364 Instruction *I = Worklist.pop_back_val();
7366 ValueExprMapType::iterator It =
7367 ValueExprMap.find_as(static_cast<Value *>(I));
7368 if (It != ValueExprMap.end()) {
7369 const SCEV *Old = It->second;
7371 // SCEVUnknown for a PHI either means that it has an unrecognized
7372 // structure, or it's a PHI that's in the process of being computed
7373 // by createNodeForPHI. In the former case, additional loop trip
7374 // count information isn't going to change anything. In the latter
7375 // case, createNodeForPHI will perform the necessary updates on its
7376 // own when it gets to that point.
7377 if (!isa<PHINode>(I) || !isa<SCEVUnknown>(Old)) {
7378 eraseValueFromMap(It->first);
7379 forgetMemoizedResults(Old);
7381 if (PHINode *PN = dyn_cast<PHINode>(I))
7382 ConstantEvolutionLoopExitValue.erase(PN);
7385 // Since we don't need to invalidate anything for correctness and we're
7386 // only invalidating to make SCEV's results more precise, we get to stop
7387 // early to avoid invalidating too much. This is especially important in
7388 // cases like:
7390 // %v = f(pn0, pn1) // pn0 and pn1 used through some other phi node
7391 // loop0:
7392 // %pn0 = phi
7393 // ...
7394 // loop1:
7395 // %pn1 = phi
7396 // ...
7398 // where both loop0 and loop1's backedge taken count uses the SCEV
7399 // expression for %v. If we don't have the early stop below then in cases
7400 // like the above, getBackedgeTakenInfo(loop1) will clear out the trip
7401 // count for loop0 and getBackedgeTakenInfo(loop0) will clear out the trip
7402 // count for loop1, effectively nullifying SCEV's trip count cache.
7403 for (auto *U : I->users())
7404 if (auto *I = dyn_cast<Instruction>(U)) {
7405 auto *LoopForUser = LI.getLoopFor(I->getParent());
7406 if (LoopForUser && L->contains(LoopForUser) &&
7407 Discovered.insert(I).second)
7408 Worklist.push_back(I);
7413 // Re-lookup the insert position, since the call to
7414 // computeBackedgeTakenCount above could result in a
7415 // recursive call to getBackedgeTakenInfo (on a different
7416 // loop), which would invalidate the iterator computed
7417 // earlier.
7418 return BackedgeTakenCounts.find(L)->second = std::move(Result);
7421 void ScalarEvolution::forgetAllLoops() {
7422 // This method is intended to forget all info about loops. It should
7423 // invalidate caches as if the following happened:
7424 // - The trip counts of all loops have changed arbitrarily
7425 // - Every llvm::Value has been updated in place to produce a different
7426 // result.
7427 BackedgeTakenCounts.clear();
7428 PredicatedBackedgeTakenCounts.clear();
7429 LoopPropertiesCache.clear();
7430 ConstantEvolutionLoopExitValue.clear();
7431 ValueExprMap.clear();
7432 ValuesAtScopes.clear();
7433 LoopDispositions.clear();
7434 BlockDispositions.clear();
7435 UnsignedRanges.clear();
7436 SignedRanges.clear();
7437 ExprValueMap.clear();
7438 HasRecMap.clear();
7439 MinTrailingZerosCache.clear();
7440 PredicatedSCEVRewrites.clear();
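// For illustration, a minimal usage sketch (the function and helper names
// below are hypothetical; forgetAllLoops and forgetLoop are the real entry
// points): a transform that rewrites IR in place without tracking which loops
// it touched should drop all cached SCEV state, while a transform confined to
// a single loop can use the cheaper, targeted forgetLoop below.
//
//   void rewriteExitConditions(Function &F, ScalarEvolution &SE) {
//     mutateConditionsInPlace(F); // hypothetical in-place IR rewrite
//     SE.forgetAllLoops();        // every cached trip count may now be stale
//   }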
7443 void ScalarEvolution::forgetLoop(const Loop *L) {
7444 SmallVector<const Loop *, 16> LoopWorklist(1, L);
7445 SmallVector<Instruction *, 32> Worklist;
7446 SmallPtrSet<Instruction *, 16> Visited;
7448 // Iterate over all the loops and sub-loops to drop SCEV information.
7449 while (!LoopWorklist.empty()) {
7450 auto *CurrL = LoopWorklist.pop_back_val();
7452 // Drop any stored trip count value.
7453 BackedgeTakenCounts.erase(CurrL);
7454 PredicatedBackedgeTakenCounts.erase(CurrL);
7456 // Drop information about predicated SCEV rewrites for this loop.
7457 for (auto I = PredicatedSCEVRewrites.begin();
7458 I != PredicatedSCEVRewrites.end();) {
7459 std::pair<const SCEV *, const Loop *> Entry = I->first;
7460 if (Entry.second == CurrL)
7461 PredicatedSCEVRewrites.erase(I++);
7462 else
7463 ++I;
7466 auto LoopUsersItr = LoopUsers.find(CurrL);
7467 if (LoopUsersItr != LoopUsers.end()) {
7468 for (auto *S : LoopUsersItr->second)
7469 forgetMemoizedResults(S);
7470 LoopUsers.erase(LoopUsersItr);
7473 // Drop information about expressions based on loop-header PHIs.
7474 PushLoopPHIs(CurrL, Worklist);
7476 while (!Worklist.empty()) {
7477 Instruction *I = Worklist.pop_back_val();
7478 if (!Visited.insert(I).second)
7479 continue;
7481 ValueExprMapType::iterator It =
7482 ValueExprMap.find_as(static_cast<Value *>(I));
7483 if (It != ValueExprMap.end()) {
7484 eraseValueFromMap(It->first);
7485 forgetMemoizedResults(It->second);
7486 if (PHINode *PN = dyn_cast<PHINode>(I))
7487 ConstantEvolutionLoopExitValue.erase(PN);
7490 PushDefUseChildren(I, Worklist);
7493 LoopPropertiesCache.erase(CurrL);
7494 // Forget all contained loops too, to avoid dangling entries in the
7495 // ValuesAtScopes map.
7496 LoopWorklist.append(CurrL->begin(), CurrL->end());
7500 void ScalarEvolution::forgetTopmostLoop(const Loop *L) {
7501 while (Loop *Parent = L->getParentLoop())
7502 L = Parent;
7503 forgetLoop(L);
7506 void ScalarEvolution::forgetValue(Value *V) {
7507 Instruction *I = dyn_cast<Instruction>(V);
7508 if (!I) return;
7510 // Drop information about expressions based on loop-header PHIs.
7511 SmallVector<Instruction *, 16> Worklist;
7512 Worklist.push_back(I);
7514 SmallPtrSet<Instruction *, 8> Visited;
7515 while (!Worklist.empty()) {
7516 I = Worklist.pop_back_val();
7517 if (!Visited.insert(I).second)
7518 continue;
7520 ValueExprMapType::iterator It =
7521 ValueExprMap.find_as(static_cast<Value *>(I));
7522 if (It != ValueExprMap.end()) {
7523 eraseValueFromMap(It->first);
7524 forgetMemoizedResults(It->second);
7525 if (PHINode *PN = dyn_cast<PHINode>(I))
7526 ConstantEvolutionLoopExitValue.erase(PN);
7529 PushDefUseChildren(I, Worklist);
7533 void ScalarEvolution::forgetLoopDispositions(const Loop *L) {
7534 LoopDispositions.clear();
7537 /// Get the exact loop backedge taken count considering all loop exits. A
7538 /// computable result can only be returned for loops with all exiting blocks
7539 /// dominating the latch. howFarToZero assumes that the limit of each loop test
7540 /// is never skipped. This is a valid assumption as long as the loop exits via
7541 /// that test. For precise results, it is the caller's responsibility to specify
7542 /// the relevant loop exiting block using getExact(ExitingBlock, SE).
7543 const SCEV *
7544 ScalarEvolution::BackedgeTakenInfo::getExact(const Loop *L, ScalarEvolution *SE,
7545 SCEVUnionPredicate *Preds) const {
7546 // If any exits were not computable, the loop is not computable.
7547 if (!isComplete() || ExitNotTaken.empty())
7548 return SE->getCouldNotCompute();
7550 const BasicBlock *Latch = L->getLoopLatch();
7551 // All exiting blocks we have collected must dominate the only backedge.
7552 if (!Latch)
7553 return SE->getCouldNotCompute();
7555 // All exiting blocks we have gathered dominate loop's latch, so exact trip
7556 // count is simply a minimum out of all these calculated exit counts.
7557 SmallVector<const SCEV *, 2> Ops;
7558 for (auto &ENT : ExitNotTaken) {
7559 const SCEV *BECount = ENT.ExactNotTaken;
7560 assert(BECount != SE->getCouldNotCompute() && "Bad exit SCEV!");
7561 assert(SE->DT.dominates(ENT.ExitingBlock, Latch) &&
7562 "We should only have known counts for exiting blocks that dominate "
7563 "latch!");
7565 Ops.push_back(BECount);
7567 if (Preds && !ENT.hasAlwaysTruePredicate())
7568 Preds->add(ENT.Predicate.get());
7570 assert((Preds || ENT.hasAlwaysTruePredicate()) &&
7571 "Predicate should be always true!");
7574 return SE->getUMinFromMismatchedTypes(Ops);
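// For example (illustrative), in a loop where both exiting blocks dominate
// the latch:
//
//   for (unsigned i = 0; i != 100; ++i) {
//     if (i == n) break; // this exit's not-taken count is n
//     ...                // the i != 100 exit's not-taken count is 100
//   }
//
// both counts are computable, and the exact backedge-taken count is their
// unsigned minimum, umin(100, n).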
7577 /// Get the exact not taken count for this loop exit.
7578 const SCEV *
7579 ScalarEvolution::BackedgeTakenInfo::getExact(const BasicBlock *ExitingBlock,
7580 ScalarEvolution *SE) const {
7581 for (auto &ENT : ExitNotTaken)
7582 if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate())
7583 return ENT.ExactNotTaken;
7585 return SE->getCouldNotCompute();
7588 const SCEV *ScalarEvolution::BackedgeTakenInfo::getConstantMax(
7589 const BasicBlock *ExitingBlock, ScalarEvolution *SE) const {
7590 for (auto &ENT : ExitNotTaken)
7591 if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate())
7592 return ENT.MaxNotTaken;
7594 return SE->getCouldNotCompute();
7597 /// getConstantMax - Get the constant max backedge taken count for the loop.
7598 const SCEV *
7599 ScalarEvolution::BackedgeTakenInfo::getConstantMax(ScalarEvolution *SE) const {
7600 auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) {
7601 return !ENT.hasAlwaysTruePredicate();
7604 if (any_of(ExitNotTaken, PredicateNotAlwaysTrue) || !getConstantMax())
7605 return SE->getCouldNotCompute();
7607 assert((isa<SCEVCouldNotCompute>(getConstantMax()) ||
7608 isa<SCEVConstant>(getConstantMax())) &&
7609 "No point in having a non-constant max backedge taken count!");
7610 return getConstantMax();
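// To illustrate the contrast with getExact (illustrative): for
//
//   for (unsigned i = 0; i != n; ++i) ...
//
// the exact backedge-taken count is the symbolic expression n, while the
// constant max must be a SCEVConstant upper bound derived from n's value
// range -- absent other facts, possibly as weak as UINT32_MAX for an i32 n.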
7613 const SCEV *
7614 ScalarEvolution::BackedgeTakenInfo::getSymbolicMax(const Loop *L,
7615 ScalarEvolution *SE) {
7616 if (!SymbolicMax)
7617 SymbolicMax = SE->computeSymbolicMaxBackedgeTakenCount(L);
7618 return SymbolicMax;
7621 bool ScalarEvolution::BackedgeTakenInfo::isConstantMaxOrZero(
7622 ScalarEvolution *SE) const {
7623 auto PredicateNotAlwaysTrue = [](const ExitNotTakenInfo &ENT) {
7624 return !ENT.hasAlwaysTruePredicate();
7626 return MaxOrZero && !any_of(ExitNotTaken, PredicateNotAlwaysTrue);
7629 bool ScalarEvolution::BackedgeTakenInfo::hasOperand(const SCEV *S) const {
7630 return Operands.contains(S);
7633 ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E)
7634 : ExitLimit(E, E, false, None) {
7637 ScalarEvolution::ExitLimit::ExitLimit(
7638 const SCEV *E, const SCEV *M, bool MaxOrZero,
7639 ArrayRef<const SmallPtrSetImpl<const SCEVPredicate *> *> PredSetList)
7640 : ExactNotTaken(E), MaxNotTaken(M), MaxOrZero(MaxOrZero) {
7641 assert((isa<SCEVCouldNotCompute>(ExactNotTaken) ||
7642 !isa<SCEVCouldNotCompute>(MaxNotTaken)) &&
7643 "Exact is not allowed to be less precise than Max");
7644 assert((isa<SCEVCouldNotCompute>(MaxNotTaken) ||
7645 isa<SCEVConstant>(MaxNotTaken)) &&
7646 "No point in having a non-constant max backedge taken count!");
7647 for (auto *PredSet : PredSetList)
7648 for (auto *P : *PredSet)
7649 addPredicate(P);
7650 assert((isa<SCEVCouldNotCompute>(E) || !E->getType()->isPointerTy()) &&
7651 "Backedge count should be int");
7652 assert((isa<SCEVCouldNotCompute>(M) || !M->getType()->isPointerTy()) &&
7653 "Max backedge count should be int");
7656 ScalarEvolution::ExitLimit::ExitLimit(
7657 const SCEV *E, const SCEV *M, bool MaxOrZero,
7658 const SmallPtrSetImpl<const SCEVPredicate *> &PredSet)
7659 : ExitLimit(E, M, MaxOrZero, {&PredSet}) {
7662 ScalarEvolution::ExitLimit::ExitLimit(const SCEV *E, const SCEV *M,
7663 bool MaxOrZero)
7664 : ExitLimit(E, M, MaxOrZero, None) {
7667 class SCEVRecordOperands {
7668 SmallPtrSetImpl<const SCEV *> &Operands;
7670 public:
7671 SCEVRecordOperands(SmallPtrSetImpl<const SCEV *> &Operands)
7672 : Operands(Operands) {}
7673 bool follow(const SCEV *S) {
7674 Operands.insert(S);
7675 return true;
7677 bool isDone() { return false; }
7680 /// Allocate memory for BackedgeTakenInfo and copy the not-taken count of each
7681 /// computable exit into a persistent ExitNotTakenInfo array.
7682 ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo(
7683 ArrayRef<ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo> ExitCounts,
7684 bool IsComplete, const SCEV *ConstantMax, bool MaxOrZero)
7685 : ConstantMax(ConstantMax), IsComplete(IsComplete), MaxOrZero(MaxOrZero) {
7686 using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;
7688 ExitNotTaken.reserve(ExitCounts.size());
7689 std::transform(
7690 ExitCounts.begin(), ExitCounts.end(), std::back_inserter(ExitNotTaken),
7691 [&](const EdgeExitInfo &EEI) {
7692 BasicBlock *ExitBB = EEI.first;
7693 const ExitLimit &EL = EEI.second;
7694 if (EL.Predicates.empty())
7695 return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, EL.MaxNotTaken,
7696 nullptr);
7698 std::unique_ptr<SCEVUnionPredicate> Predicate(new SCEVUnionPredicate);
7699 for (auto *Pred : EL.Predicates)
7700 Predicate->add(Pred);
7702 return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken, EL.MaxNotTaken,
7703 std::move(Predicate));
7705 assert((isa<SCEVCouldNotCompute>(ConstantMax) ||
7706 isa<SCEVConstant>(ConstantMax)) &&
7707 "No point in having a non-constant max backedge taken count!");
7709 SCEVRecordOperands RecordOperands(Operands);
7710 SCEVTraversal<SCEVRecordOperands> ST(RecordOperands);
7711 if (!isa<SCEVCouldNotCompute>(ConstantMax))
7712 ST.visitAll(ConstantMax);
7713 for (auto &ENT : ExitNotTaken)
7714 if (!isa<SCEVCouldNotCompute>(ENT.ExactNotTaken))
7715 ST.visitAll(ENT.ExactNotTaken);
7718 /// Compute the number of times the backedge of the specified loop will execute.
7719 ScalarEvolution::BackedgeTakenInfo
7720 ScalarEvolution::computeBackedgeTakenCount(const Loop *L,
7721 bool AllowPredicates) {
7722 SmallVector<BasicBlock *, 8> ExitingBlocks;
7723 L->getExitingBlocks(ExitingBlocks);
7725 using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;
7727 SmallVector<EdgeExitInfo, 4> ExitCounts;
7728 bool CouldComputeBECount = true;
7729 BasicBlock *Latch = L->getLoopLatch(); // may be NULL.
7730 const SCEV *MustExitMaxBECount = nullptr;
7731 const SCEV *MayExitMaxBECount = nullptr;
7732 bool MustExitMaxOrZero = false;
7734 // Compute the ExitLimit for each loop exit. Use this to populate ExitCounts
7735 // and compute maxBECount.
7736 // Do a union of all the predicates here.
7737 for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
7738 BasicBlock *ExitBB = ExitingBlocks[i];
7740 // We canonicalize untaken exits to br (constant); ignore them so that
7741 // proving an exit untaken doesn't negatively impact our ability to reason
7742 // about the loop as a whole.
7743 if (auto *BI = dyn_cast<BranchInst>(ExitBB->getTerminator()))
7744 if (auto *CI = dyn_cast<ConstantInt>(BI->getCondition())) {
7745 bool ExitIfTrue = !L->contains(BI->getSuccessor(0));
7746 if ((ExitIfTrue && CI->isZero()) || (!ExitIfTrue && CI->isOne()))
7747 continue;
7750 ExitLimit EL = computeExitLimit(L, ExitBB, AllowPredicates);
7752 assert((AllowPredicates || EL.Predicates.empty()) &&
7753 "Predicated exit limit when predicates are not allowed!");
7755 // 1. For each exit that can be computed, add an entry to ExitCounts.
7756 // CouldComputeBECount is true only if all exits can be computed.
7757 if (EL.ExactNotTaken == getCouldNotCompute())
7758 // We couldn't compute an exact value for this exit, so
7759 // we won't be able to compute an exact value for the loop.
7760 CouldComputeBECount = false;
7761 else
7762 ExitCounts.emplace_back(ExitBB, EL);
7764 // 2. Derive the loop's MaxBECount from each exit's max number of
7765 // non-exiting iterations. Partition the loop exits into two kinds:
7766 // LoopMustExits and LoopMayExits.
7768 // If the exit dominates the loop latch, it is a LoopMustExit; otherwise it
7769 // is a LoopMayExit. If any computable LoopMustExit is found, then
7770 // MaxBECount is the minimum EL.MaxNotTaken of computable
7771 // LoopMustExits. Otherwise, MaxBECount is conservatively the maximum
7772 // EL.MaxNotTaken, where CouldNotCompute is considered greater than any
7773 // computable EL.MaxNotTaken.
7774 if (EL.MaxNotTaken != getCouldNotCompute() && Latch &&
7775 DT.dominates(ExitBB, Latch)) {
7776 if (!MustExitMaxBECount) {
7777 MustExitMaxBECount = EL.MaxNotTaken;
7778 MustExitMaxOrZero = EL.MaxOrZero;
7779 } else {
7780 MustExitMaxBECount =
7781 getUMinFromMismatchedTypes(MustExitMaxBECount, EL.MaxNotTaken);
7783 } else if (MayExitMaxBECount != getCouldNotCompute()) {
7784 if (!MayExitMaxBECount || EL.MaxNotTaken == getCouldNotCompute())
7785 MayExitMaxBECount = EL.MaxNotTaken;
7786 else {
7787 MayExitMaxBECount =
7788 getUMaxFromMismatchedTypes(MayExitMaxBECount, EL.MaxNotTaken);
7792 const SCEV *MaxBECount = MustExitMaxBECount ? MustExitMaxBECount :
7793 (MayExitMaxBECount ? MayExitMaxBECount : getCouldNotCompute());
7794 // The loop backedge will be taken the maximum or zero times if there's
7795 // a single exit that must be taken the maximum or zero times.
7796 bool MaxOrZero = (MustExitMaxOrZero && ExitingBlocks.size() == 1);
7797 return BackedgeTakenInfo(std::move(ExitCounts), CouldComputeBECount,
7798 MaxBECount, MaxOrZero);
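// A sketch of the must-exit/may-exit partition above (illustrative):
//
//   for (unsigned i = 0; i != 100; ++i) { // exiting block dominates the
//     ...                                 // latch: a LoopMustExit, max 100
//     if (a[i] && b[i])
//       break;                            // block may not dominate the
//     ...                                 // latch: a LoopMayExit
//   }
//
// Here MaxBECount comes from the computable must-exit (100) and is not
// weakened by the may-exit, whose own max count may be unknown.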
7801 ScalarEvolution::ExitLimit
7802 ScalarEvolution::computeExitLimit(const Loop *L, BasicBlock *ExitingBlock,
7803 bool AllowPredicates) {
7804 assert(L->contains(ExitingBlock) && "Exit count for non-loop block?");
7805 // If our exiting block does not dominate the latch, then its connection with
7806 // the loop's exit limit may be far from trivial.
7807 const BasicBlock *Latch = L->getLoopLatch();
7808 if (!Latch || !DT.dominates(ExitingBlock, Latch))
7809 return getCouldNotCompute();
7811 bool IsOnlyExit = (L->getExitingBlock() != nullptr);
7812 Instruction *Term = ExitingBlock->getTerminator();
7813 if (BranchInst *BI = dyn_cast<BranchInst>(Term)) {
7814 assert(BI->isConditional() && "If unconditional, it can't be in loop!");
7815 bool ExitIfTrue = !L->contains(BI->getSuccessor(0));
7816 assert(ExitIfTrue == L->contains(BI->getSuccessor(1)) &&
7817 "It should have one successor in loop and one exit block!");
7818 // Proceed to the next level to examine the exit condition expression.
7819 return computeExitLimitFromCond(
7820 L, BI->getCondition(), ExitIfTrue,
7821 /*ControlsExit=*/IsOnlyExit, AllowPredicates);
7824 if (SwitchInst *SI = dyn_cast<SwitchInst>(Term)) {
7825 // For switch, make sure that there is a single exit from the loop.
7826 BasicBlock *Exit = nullptr;
7827 for (auto *SBB : successors(ExitingBlock))
7828 if (!L->contains(SBB)) {
7829 if (Exit) // Multiple exit successors.
7830 return getCouldNotCompute();
7831 Exit = SBB;
7833 assert(Exit && "Exiting block must have at least one exit");
7834 return computeExitLimitFromSingleExitSwitch(L, SI, Exit,
7835 /*ControlsExit=*/IsOnlyExit);
7838 return getCouldNotCompute();
7841 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCond(
7842 const Loop *L, Value *ExitCond, bool ExitIfTrue,
7843 bool ControlsExit, bool AllowPredicates) {
7844 ScalarEvolution::ExitLimitCacheTy Cache(L, ExitIfTrue, AllowPredicates);
7845 return computeExitLimitFromCondCached(Cache, L, ExitCond, ExitIfTrue,
7846 ControlsExit, AllowPredicates);
7849 Optional<ScalarEvolution::ExitLimit>
7850 ScalarEvolution::ExitLimitCache::find(const Loop *L, Value *ExitCond,
7851 bool ExitIfTrue, bool ControlsExit,
7852 bool AllowPredicates) {
7853 (void)this->L;
7854 (void)this->ExitIfTrue;
7855 (void)this->AllowPredicates;
7857 assert(this->L == L && this->ExitIfTrue == ExitIfTrue &&
7858 this->AllowPredicates == AllowPredicates &&
7859 "Variance in assumed invariant key components!");
7860 auto Itr = TripCountMap.find({ExitCond, ControlsExit});
7861 if (Itr == TripCountMap.end())
7862 return None;
7863 return Itr->second;
7866 void ScalarEvolution::ExitLimitCache::insert(const Loop *L, Value *ExitCond,
7867 bool ExitIfTrue,
7868 bool ControlsExit,
7869 bool AllowPredicates,
7870 const ExitLimit &EL) {
7871 assert(this->L == L && this->ExitIfTrue == ExitIfTrue &&
7872 this->AllowPredicates == AllowPredicates &&
7873 "Variance in assumed invariant key components!");
7875 auto InsertResult = TripCountMap.insert({{ExitCond, ControlsExit}, EL});
7876 assert(InsertResult.second && "Expected successful insertion!");
7877 (void)InsertResult;
7878 (void)ExitIfTrue;
7881 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondCached(
7882 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue,
7883 bool ControlsExit, bool AllowPredicates) {
7885 if (auto MaybeEL =
7886 Cache.find(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates))
7887 return *MaybeEL;
7889 ExitLimit EL = computeExitLimitFromCondImpl(Cache, L, ExitCond, ExitIfTrue,
7890 ControlsExit, AllowPredicates);
7891 Cache.insert(L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates, EL);
7892 return EL;
7895 ScalarEvolution::ExitLimit ScalarEvolution::computeExitLimitFromCondImpl(
7896 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue,
7897 bool ControlsExit, bool AllowPredicates) {
7898 // Handle BinOp conditions (And, Or).
7899 if (auto LimitFromBinOp = computeExitLimitFromCondFromBinOp(
7900 Cache, L, ExitCond, ExitIfTrue, ControlsExit, AllowPredicates))
7901 return *LimitFromBinOp;
7903 // With an icmp, it may be feasible to compute an exact backedge-taken count.
7904 // Proceed to the next level to examine the icmp.
7905 if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond)) {
7906 ExitLimit EL =
7907 computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit);
7908 if (EL.hasFullInfo() || !AllowPredicates)
7909 return EL;
7911 // Try again, but use SCEV predicates this time.
7912 return computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsExit,
7913 /*AllowPredicates=*/true);
7916 // Check for a constant condition. These are normally stripped out by
7917 // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to
7918 // preserve the CFG and is temporarily leaving constant conditions
7919 // in place.
7920 if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) {
7921 if (ExitIfTrue == !CI->getZExtValue())
7922 // The backedge is always taken.
7923 return getCouldNotCompute();
7924 else
7925 // The backedge is never taken.
7926 return getZero(CI->getType());
7929 // If it's not an integer or pointer comparison then compute it the hard way.
7930 return computeExitCountExhaustively(L, ExitCond, ExitIfTrue);
7933 Optional<ScalarEvolution::ExitLimit>
7934 ScalarEvolution::computeExitLimitFromCondFromBinOp(
7935 ExitLimitCacheTy &Cache, const Loop *L, Value *ExitCond, bool ExitIfTrue,
7936 bool ControlsExit, bool AllowPredicates) {
7937 // Check if the controlling expression for this loop is an And or Or.
7938 Value *Op0, *Op1;
7939 bool IsAnd = false;
7940 if (match(ExitCond, m_LogicalAnd(m_Value(Op0), m_Value(Op1))))
7941 IsAnd = true;
7942 else if (match(ExitCond, m_LogicalOr(m_Value(Op0), m_Value(Op1))))
7943 IsAnd = false;
7944 else
7945 return None;
7947 // EitherMayExit is true in these two cases:
7948 // br (and Op0 Op1), loop, exit
7949 // br (or Op0 Op1), exit, loop
7950 bool EitherMayExit = IsAnd ^ ExitIfTrue;
7951 ExitLimit EL0 = computeExitLimitFromCondCached(Cache, L, Op0, ExitIfTrue,
7952 ControlsExit && !EitherMayExit,
7953 AllowPredicates);
7954 ExitLimit EL1 = computeExitLimitFromCondCached(Cache, L, Op1, ExitIfTrue,
7955 ControlsExit && !EitherMayExit,
7956 AllowPredicates);
7958 // Be robust against unsimplified IR of the form "op i1 X, NeutralElement"
7959 const Constant *NeutralElement = ConstantInt::get(ExitCond->getType(), IsAnd);
7960 if (isa<ConstantInt>(Op1))
7961 return Op1 == NeutralElement ? EL0 : EL1;
7962 if (isa<ConstantInt>(Op0))
7963 return Op0 == NeutralElement ? EL1 : EL0;
7965 const SCEV *BECount = getCouldNotCompute();
7966 const SCEV *MaxBECount = getCouldNotCompute();
7967 if (EitherMayExit) {
7968 // Either condition can terminate the loop on its own; the loop continues
7969 // executing only while both allow it. Choose the less conservative count.
7970 // If ExitCond is a short-circuit form (select), using
7971 // umin(EL0.ExactNotTaken, EL1.ExactNotTaken) is unsafe in general.
7972 // To see the detailed examples, please see
7973 // test/Analysis/ScalarEvolution/exit-count-select.ll
7974 bool PoisonSafe = isa<BinaryOperator>(ExitCond);
7975 if (!PoisonSafe)
7976 // Even if ExitCond is select, we can safely derive BECount using both
7977 // EL0 and EL1 in these cases:
7978 // (1) EL0.ExactNotTaken is non-zero
7979 // (2) EL1.ExactNotTaken is non-poison
7980 // (3) EL0.ExactNotTaken is zero (BECount should be simply zero and
7981 // it cannot be umin(0, ..))
7982 // The PoisonSafe assignment below is simplified; the assertion after the
7983 // BECount calculation fully guarantees condition (3).
7984 PoisonSafe = isa<SCEVConstant>(EL0.ExactNotTaken) ||
7985 isa<SCEVConstant>(EL1.ExactNotTaken);
7986 if (EL0.ExactNotTaken != getCouldNotCompute() &&
7987 EL1.ExactNotTaken != getCouldNotCompute() && PoisonSafe) {
7988 BECount =
7989 getUMinFromMismatchedTypes(EL0.ExactNotTaken, EL1.ExactNotTaken);
7991 // If EL0.ExactNotTaken was zero and ExitCond was a short-circuit form,
7992 // it should have been simplified to zero (see the condition (3) above)
7993 assert(!isa<BinaryOperator>(ExitCond) || !EL0.ExactNotTaken->isZero() ||
7994 BECount->isZero());
7996 if (EL0.MaxNotTaken == getCouldNotCompute())
7997 MaxBECount = EL1.MaxNotTaken;
7998 else if (EL1.MaxNotTaken == getCouldNotCompute())
7999 MaxBECount = EL0.MaxNotTaken;
8000 else
8001 MaxBECount = getUMinFromMismatchedTypes(EL0.MaxNotTaken, EL1.MaxNotTaken);
8002 } else {
8003 // Both conditions must trigger the exit on the same iteration for the
8004 // loop to exit. For now, be conservative.
8005 if (EL0.ExactNotTaken == EL1.ExactNotTaken)
8006 BECount = EL0.ExactNotTaken;
8009 // There are cases (e.g. PR26207) where computeExitLimitFromCond is able
8010 // to be more aggressive when computing BECount than when computing
8011 // MaxBECount. In these cases it is possible for EL0.ExactNotTaken and
8012 // EL1.ExactNotTaken to match, but for EL0.MaxNotTaken and EL1.MaxNotTaken
8013 // to not.
8014 if (isa<SCEVCouldNotCompute>(MaxBECount) &&
8015 !isa<SCEVCouldNotCompute>(BECount))
8016 MaxBECount = getConstant(getUnsignedRangeMax(BECount));
8018 return ExitLimit(BECount, MaxBECount, false,
8019 { &EL0.Predicates, &EL1.Predicates });
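// For example (illustrative), with an 'and' controlling the backedge:
//
//   br (and (icmp ne i, n), (icmp ne j, m)), loop, exit
//
// either operand turning false exits, so when both operand counts are
// computable the result is BECount = umin(count(i != n), count(j != m)).
// For the short-circuit (select) form this umin is taken only in the
// poison-safe cases enumerated above.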
8022 ScalarEvolution::ExitLimit
8023 ScalarEvolution::computeExitLimitFromICmp(const Loop *L,
8024 ICmpInst *ExitCond,
8025 bool ExitIfTrue,
8026 bool ControlsExit,
8027 bool AllowPredicates) {
8028 // If the condition was exit on true, convert the condition to exit on false
8029 ICmpInst::Predicate Pred;
8030 if (!ExitIfTrue)
8031 Pred = ExitCond->getPredicate();
8032 else
8033 Pred = ExitCond->getInversePredicate();
8034 const ICmpInst::Predicate OriginalPred = Pred;
8036 // Handle common loops like: for (X = "string"; *X; ++X)
8037 if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0)))
8038 if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) {
8039 ExitLimit ItCnt =
8040 computeLoadConstantCompareExitLimit(LI, RHS, L, Pred);
8041 if (ItCnt.hasAnyInfo())
8042 return ItCnt;
8045 const SCEV *LHS = getSCEV(ExitCond->getOperand(0));
8046 const SCEV *RHS = getSCEV(ExitCond->getOperand(1));
8048 // Try to evaluate any dependencies out of the loop.
8049 LHS = getSCEVAtScope(LHS, L);
8050 RHS = getSCEVAtScope(RHS, L);
8052 // At this point, we would like to compute how many iterations of the
8053 // loop the predicate will return true for these inputs.
8054 if (isLoopInvariant(LHS, L) && !isLoopInvariant(RHS, L)) {
8055 // If there is a loop-invariant, force it into the RHS.
8056 std::swap(LHS, RHS);
8057 Pred = ICmpInst::getSwappedPredicate(Pred);
8060 // Simplify the operands before analyzing them.
8061 (void)SimplifyICmpOperands(Pred, LHS, RHS);
8063 // If we have a comparison of a chrec against a constant, try to use value
8064 // ranges to answer this query.
8065 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS))
8066 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS))
8067 if (AddRec->getLoop() == L) {
8068 // Form the constant range.
8069 ConstantRange CompRange =
8070 ConstantRange::makeExactICmpRegion(Pred, RHSC->getAPInt());
8072 const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this);
8073 if (!isa<SCEVCouldNotCompute>(Ret)) return Ret;
8076 switch (Pred) {
8077 case ICmpInst::ICMP_NE: { // while (X != Y)
8078 // Convert to: while (X-Y != 0)
8079 if (LHS->getType()->isPointerTy()) {
8080 LHS = getLosslessPtrToIntExpr(LHS);
8081 if (isa<SCEVCouldNotCompute>(LHS))
8082 return LHS;
8084 if (RHS->getType()->isPointerTy()) {
8085 RHS = getLosslessPtrToIntExpr(RHS);
8086 if (isa<SCEVCouldNotCompute>(RHS))
8087 return RHS;
8089 ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit,
8090 AllowPredicates);
8091 if (EL.hasAnyInfo()) return EL;
8092 break;
8094 case ICmpInst::ICMP_EQ: { // while (X == Y)
8095 // Convert to: while (X-Y == 0)
8096 if (LHS->getType()->isPointerTy()) {
8097 LHS = getLosslessPtrToIntExpr(LHS);
8098 if (isa<SCEVCouldNotCompute>(LHS))
8099 return LHS;
8101 if (RHS->getType()->isPointerTy()) {
8102 RHS = getLosslessPtrToIntExpr(RHS);
8103 if (isa<SCEVCouldNotCompute>(RHS))
8104 return RHS;
8106 ExitLimit EL = howFarToNonZero(getMinusSCEV(LHS, RHS), L);
8107 if (EL.hasAnyInfo()) return EL;
8108 break;
8110 case ICmpInst::ICMP_SLT:
8111 case ICmpInst::ICMP_ULT: { // while (X < Y)
8112 bool IsSigned = Pred == ICmpInst::ICMP_SLT;
8113 ExitLimit EL = howManyLessThans(LHS, RHS, L, IsSigned, ControlsExit,
8114 AllowPredicates);
8115 if (EL.hasAnyInfo()) return EL;
8116 break;
8118 case ICmpInst::ICMP_SGT:
8119 case ICmpInst::ICMP_UGT: { // while (X > Y)
8120 bool IsSigned = Pred == ICmpInst::ICMP_SGT;
8121 ExitLimit EL =
8122 howManyGreaterThans(LHS, RHS, L, IsSigned, ControlsExit,
8123 AllowPredicates);
8124 if (EL.hasAnyInfo()) return EL;
8125 break;
8127 default:
8128 break;
8131 auto *ExhaustiveCount =
8132 computeExitCountExhaustively(L, ExitCond, ExitIfTrue);
8134 if (!isa<SCEVCouldNotCompute>(ExhaustiveCount))
8135 return ExhaustiveCount;
8137 return computeShiftCompareExitLimit(ExitCond->getOperand(0),
8138 ExitCond->getOperand(1), L, OriginalPred);
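// A small worked example of the ICMP_NE case above (illustrative): for
//
//   while (i != n) ++i; // i starts at 0, so i is the addrec {0,+,1}
//
// the computation is howFarToZero({0,+,1} - n) = howFarToZero({-n,+,1}),
// which yields a backedge-taken count of n. Pointer operands are first
// converted via getLosslessPtrToIntExpr so the subtraction is integral.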
8141 ScalarEvolution::ExitLimit
8142 ScalarEvolution::computeExitLimitFromSingleExitSwitch(const Loop *L,
8143 SwitchInst *Switch,
8144 BasicBlock *ExitingBlock,
8145 bool ControlsExit) {
8146 assert(!L->contains(ExitingBlock) && "Not an exiting block!");
8148 // Give up if the exit is the default dest of a switch.
8149 if (Switch->getDefaultDest() == ExitingBlock)
8150 return getCouldNotCompute();
8152 assert(L->contains(Switch->getDefaultDest()) &&
8153 "Default case must not exit the loop!");
8154 const SCEV *LHS = getSCEVAtScope(Switch->getCondition(), L);
8155 const SCEV *RHS = getConstant(Switch->findCaseDest(ExitingBlock));
8157 // while (X != Y) --> while (X-Y != 0)
8158 ExitLimit EL = howFarToZero(getMinusSCEV(LHS, RHS), L, ControlsExit);
8159 if (EL.hasAnyInfo())
8160 return EL;
8162 return getCouldNotCompute();
8165 static ConstantInt *
8166 EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C,
8167 ScalarEvolution &SE) {
8168 const SCEV *InVal = SE.getConstant(C);
8169 const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE);
8170 assert(isa<SCEVConstant>(Val) &&
8171 "Evaluation of SCEV at constant didn't fold correctly?");
8172 return cast<SCEVConstant>(Val)->getValue();
8175 /// Given an exit condition of 'icmp op load X, cst', try to see if we can
8176 /// compute the backedge execution count.
8177 ScalarEvolution::ExitLimit
8178 ScalarEvolution::computeLoadConstantCompareExitLimit(
8179 LoadInst *LI,
8180 Constant *RHS,
8181 const Loop *L,
8182 ICmpInst::Predicate predicate) {
8183 if (LI->isVolatile()) return getCouldNotCompute();
8185 // Check to see if the loaded pointer is a getelementptr of a global.
8186 // TODO: Use SCEV instead of manually grubbing with GEPs.
8187 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0));
8188 if (!GEP) return getCouldNotCompute();
8190 // Make sure that it is really a constant global we are gepping, with an
8191 // initializer, and make sure the first IDX is really 0.
8192 GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0));
8193 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
8194 GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) ||
8195 !cast<Constant>(GEP->getOperand(1))->isNullValue())
8196 return getCouldNotCompute();
8198 // Okay, we allow one non-constant index into the GEP instruction.
8199 Value *VarIdx = nullptr;
8200 std::vector<Constant*> Indexes;
8201 unsigned VarIdxNum = 0;
8202 for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i)
8203 if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
8204 Indexes.push_back(CI);
8205 } else if (!isa<ConstantInt>(GEP->getOperand(i))) {
8206 if (VarIdx) return getCouldNotCompute(); // Multiple non-constant idx's.
8207 VarIdx = GEP->getOperand(i);
8208 VarIdxNum = i-2;
8209 Indexes.push_back(nullptr);
8212 // Loop-invariant loads may be a byproduct of loop optimization. Skip them.
8213 if (!VarIdx)
8214 return getCouldNotCompute();
8216 // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant.
8217 // Check to see if X is a loop variant variable value now.
8218 const SCEV *Idx = getSCEV(VarIdx);
8219 Idx = getSCEVAtScope(Idx, L);
8221 // We can only recognize very limited forms of loop index expressions, in
8222 // particular, only affine AddRec's like {C1,+,C2}<L>.
8223 const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx);
8224 if (!IdxExpr || IdxExpr->getLoop() != L || !IdxExpr->isAffine() ||
8225 isLoopInvariant(IdxExpr, L) ||
8226 !isa<SCEVConstant>(IdxExpr->getOperand(0)) ||
8227 !isa<SCEVConstant>(IdxExpr->getOperand(1)))
8228 return getCouldNotCompute();
8230 unsigned MaxSteps = MaxBruteForceIterations;
8231 for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) {
8232 ConstantInt *ItCst = ConstantInt::get(
8233 cast<IntegerType>(IdxExpr->getType()), IterationNum);
8234 ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this);
8236 // Form the GEP offset.
8237 Indexes[VarIdxNum] = Val;
8239 Constant *Result = ConstantFoldLoadThroughGEPIndices(GV->getInitializer(),
8240 Indexes);
8241 if (!Result) break; // Cannot compute!
8243 // Evaluate the condition for this iteration.
8244 Result = ConstantExpr::getICmp(predicate, Result, RHS);
8245 if (!isa<ConstantInt>(Result)) break; // Couldn't decide for sure
8246 if (cast<ConstantInt>(Result)->getValue().isMinValue()) {
8247 ++NumArrayLenItCounts;
8248 return getConstant(ItCst); // Found terminating iteration!
8251 return getCouldNotCompute();
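// For example (illustrative), scanning a constant global:
//
//   static const char s[] = "abcd";
//   for (i = 0; s[i] != 0; ++i) ...
//
// Each candidate iteration folds the load of s[i] and the compare against 0;
// the first iteration on which the continue-condition folds to false (here
// i == 4) becomes the constant backedge-taken count.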
8254 ScalarEvolution::ExitLimit ScalarEvolution::computeShiftCompareExitLimit(
8255 Value *LHS, Value *RHSV, const Loop *L, ICmpInst::Predicate Pred) {
8256 ConstantInt *RHS = dyn_cast<ConstantInt>(RHSV);
8257 if (!RHS)
8258 return getCouldNotCompute();
8260 const BasicBlock *Latch = L->getLoopLatch();
8261 if (!Latch)
8262 return getCouldNotCompute();
8264 const BasicBlock *Predecessor = L->getLoopPredecessor();
8265 if (!Predecessor)
8266 return getCouldNotCompute();
8268 // Return true if V is of the form "LHS `shift_op` <positive constant>".
8269 // Return LHS in OutLHS and the shift opcode in OutOpCode.
8270 auto MatchPositiveShift =
8271 [](Value *V, Value *&OutLHS, Instruction::BinaryOps &OutOpCode) {
8273 using namespace PatternMatch;
8275 ConstantInt *ShiftAmt;
8276 if (match(V, m_LShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
8277 OutOpCode = Instruction::LShr;
8278 else if (match(V, m_AShr(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
8279 OutOpCode = Instruction::AShr;
8280 else if (match(V, m_Shl(m_Value(OutLHS), m_ConstantInt(ShiftAmt))))
8281 OutOpCode = Instruction::Shl;
8282 else
8283 return false;
8285 return ShiftAmt->getValue().isStrictlyPositive();
8288 // Recognize a "shift recurrence" either of the form %iv or of %iv.shifted in
8290 // loop:
8291 // %iv = phi i32 [ %iv.shifted, %loop ], [ %val, %preheader ]
8292 // %iv.shifted = lshr i32 %iv, <positive constant>
8294 // Return true on a successful match. Return the corresponding PHI node (%iv
8295 // above) in PNOut and the opcode of the shift operation in OpCodeOut.
8296 auto MatchShiftRecurrence =
8297 [&](Value *V, PHINode *&PNOut, Instruction::BinaryOps &OpCodeOut) {
8298 Optional<Instruction::BinaryOps> PostShiftOpCode;
8301 Instruction::BinaryOps OpC;
8302 Value *V;
8304 // If we encounter a shift instruction, "peel off" the shift operation,
8305 // and remember that we did so. Later when we inspect %iv's backedge
8306 // value, we will make sure that the backedge value uses the same
8307 // operation.
8309 // Note: the peeled shift operation does not have to be the same
8310 // instruction as the one feeding into the PHI's backedge value. We only
8311 // really care about it being the same *kind* of shift instruction --
8312 // that's all that is required for our later inferences to hold.
8313 if (MatchPositiveShift(LHS, V, OpC)) {
8314 PostShiftOpCode = OpC;
8315 LHS = V;
8319 PNOut = dyn_cast<PHINode>(LHS);
8320 if (!PNOut || PNOut->getParent() != L->getHeader())
8321 return false;
8323 Value *BEValue = PNOut->getIncomingValueForBlock(Latch);
8324 Value *OpLHS;
8326 return
8327 // The backedge value for the PHI node must be a shift by a positive
8328 // amount
8329 MatchPositiveShift(BEValue, OpLHS, OpCodeOut) &&
8331 // of the PHI node itself
8332 OpLHS == PNOut &&
8334 // and the kind of shift should match the kind of shift we peeled
8335 // off, if any.
8336 (!PostShiftOpCode.hasValue() || *PostShiftOpCode == OpCodeOut);
8339 PHINode *PN;
8340 Instruction::BinaryOps OpCode;
8341 if (!MatchShiftRecurrence(LHS, PN, OpCode))
8342 return getCouldNotCompute();
8344 const DataLayout &DL = getDataLayout();
8346 // The key rationale for this optimization is that for some kinds of shift
8347 // recurrences, the value of the recurrence "stabilizes" to either 0 or -1
8348 // within a finite number of iterations. If the condition guarding the
8349 // backedge (in the sense that the backedge is taken if the condition is true)
8350 // is false for the value the shift recurrence stabilizes to, then we know
8351 // that the backedge is taken only a finite number of times.
8353 ConstantInt *StableValue = nullptr;
8354 switch (OpCode) {
8355 default:
8356 llvm_unreachable("Impossible case!");
8358 case Instruction::AShr: {
8359 // {K,ashr,<positive-constant>} stabilizes to signum(K) in at most
8360 // bitwidth(K) iterations.
8361 Value *FirstValue = PN->getIncomingValueForBlock(Predecessor);
8362 KnownBits Known = computeKnownBits(FirstValue, DL, 0, &AC,
8363 Predecessor->getTerminator(), &DT);
8364 auto *Ty = cast<IntegerType>(RHS->getType());
8365 if (Known.isNonNegative())
8366 StableValue = ConstantInt::get(Ty, 0);
8367 else if (Known.isNegative())
8368 StableValue = ConstantInt::get(Ty, -1, true);
8369 else
8370 return getCouldNotCompute();
8372 break;
8374 case Instruction::LShr:
8375 case Instruction::Shl:
8376 // Both {K,lshr,<positive-constant>} and {K,shl,<positive-constant>}
8377 // stabilize to 0 in at most bitwidth(K) iterations.
8378 StableValue = ConstantInt::get(cast<IntegerType>(RHS->getType()), 0);
8379 break;
8382 auto *Result =
8383 ConstantFoldCompareInstOperands(Pred, StableValue, RHS, DL, &TLI);
8384 assert(Result->getType()->isIntegerTy(1) &&
8385 "Otherwise cannot be an operand to a branch instruction");
8387 if (Result->isZeroValue()) {
8388 unsigned BitWidth = getTypeSizeInBits(RHS->getType());
8389 const SCEV *UpperBound =
8390 getConstant(getEffectiveSCEVType(RHS->getType()), BitWidth);
8391 return ExitLimit(getCouldNotCompute(), UpperBound, false);
8394 return getCouldNotCompute();
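// For example (illustrative):
//
//   while (v != 0) v = v >> 1; // unsigned v: {v0,lshr,1} stabilizes to 0
//
// The recurrence reaches its stable value 0 within bitwidth(v) iterations,
// and the continue-condition is false for that stable value, so even though
// the exact count stays unknown, the max backedge-taken count is bounded by
// the bit width (32 for an i32 value).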
8397 /// Return true if we can constant fold an instruction of the specified type,
8398 /// assuming that all operands were constants.
8399 static bool CanConstantFold(const Instruction *I) {
8400 if (isa<BinaryOperator>(I) || isa<CmpInst>(I) ||
8401 isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) ||
8402 isa<LoadInst>(I) || isa<ExtractValueInst>(I))
8403 return true;
8405 if (const CallInst *CI = dyn_cast<CallInst>(I))
8406 if (const Function *F = CI->getCalledFunction())
8407 return canConstantFoldCallTo(CI, F);
8408 return false;
8411 /// Determine whether this instruction can constant evolve within this loop
8412 /// assuming its operands can all constant evolve.
8413 static bool canConstantEvolve(Instruction *I, const Loop *L) {
8414 // An instruction outside of the loop can't be derived from a loop PHI.
8415 if (!L->contains(I)) return false;
8417 if (isa<PHINode>(I)) {
8418 // We don't currently keep track of the control flow needed to evaluate
8419 // PHIs, so we cannot handle PHIs inside of loops.
8420 return L->getHeader() == I->getParent();
8423 // If we won't be able to constant fold this expression even if the operands
8424 // are constants, bail early.
8425 return CanConstantFold(I);
8428 /// getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by
8429 /// recursing through each instruction operand until reaching a loop header phi.
8430 static PHINode *
8431 getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L,
8432 DenseMap<Instruction *, PHINode *> &PHIMap,
8433 unsigned Depth) {
8434 if (Depth > MaxConstantEvolvingDepth)
8435 return nullptr;
8437 // Otherwise, we can evaluate this instruction if all of its operands are
8438 // constant or derived from a PHI node themselves.
8439 PHINode *PHI = nullptr;
8440 for (Value *Op : UseInst->operands()) {
8441 if (isa<Constant>(Op)) continue;
8443 Instruction *OpInst = dyn_cast<Instruction>(Op);
8444 if (!OpInst || !canConstantEvolve(OpInst, L)) return nullptr;
8446 PHINode *P = dyn_cast<PHINode>(OpInst);
8447 if (!P)
8448 // If this operand is already visited, reuse the prior result.
8449 // We may have P != PHI if this is the deepest point at which the
8450 // inconsistent paths meet.
8451 P = PHIMap.lookup(OpInst);
8452 if (!P) {
8453 // Recurse and memoize the results, whether a phi is found or not.
8454 // This recursive call invalidates pointers into PHIMap.
8455 P = getConstantEvolvingPHIOperands(OpInst, L, PHIMap, Depth + 1);
8456 PHIMap[OpInst] = P;
8458 if (!P)
8459 return nullptr; // Not evolving from PHI
8460 if (PHI && PHI != P)
8461 return nullptr; // Evolving from multiple different PHIs.
8462 PHI = P;
8464 // This is an expression evolving from a constant PHI!
8465 return PHI;
8468 /// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node
8469 /// in the loop that V is derived from. We allow arbitrary operations along the
8470 /// way, but the operands of an operation must either be constants or a value
8471 /// derived from a constant PHI. If this expression does not fit with these
8472 /// constraints, return null.
8473 static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
8474 Instruction *I = dyn_cast<Instruction>(V);
8475 if (!I || !canConstantEvolve(I, L)) return nullptr;
8477 if (PHINode *PN = dyn_cast<PHINode>(I))
8478 return PN;
8480 // Record non-constant instructions contained by the loop.
8481 DenseMap<Instruction *, PHINode *> PHIMap;
8482 return getConstantEvolvingPHIOperands(I, L, PHIMap, 0);
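// For example (illustrative), this qualifies:
//
//   p = 1;
//   do { p = 3 * p + 1; } while (...); // p's update uses only p and
//                                      // constants
//
// whereas an update that mixes two different header PHIs, or that reads a
// non-constant value defined outside the loop, makes this return null.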
8485 /// EvaluateExpression - Given an expression that passes the
8486 /// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node
8487 /// in the loop has the value PHIVal. If we can't fold this expression for some
8488 /// reason, return null.
8489 static Constant *EvaluateExpression(Value *V, const Loop *L,
8490 DenseMap<Instruction *, Constant *> &Vals,
8491 const DataLayout &DL,
8492 const TargetLibraryInfo *TLI) {
8493 // Convenient constant check, but redundant for recursive calls.
8494 if (Constant *C = dyn_cast<Constant>(V)) return C;
8495 Instruction *I = dyn_cast<Instruction>(V);
8496 if (!I) return nullptr;
8498 if (Constant *C = Vals.lookup(I)) return C;
8500 // An instruction inside the loop depends on a value outside the loop that we
8501 // weren't given a mapping for, or a value such as a call inside the loop.
8502 if (!canConstantEvolve(I, L)) return nullptr;
8504 // An unmapped PHI can be due to a branch or another loop inside this loop,
8505 // or due to this not being the initial iteration through a loop where we
8506 // couldn't compute the evolution of this particular PHI last time.
8507 if (isa<PHINode>(I)) return nullptr;
8509 std::vector<Constant*> Operands(I->getNumOperands());
8511 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
8512 Instruction *Operand = dyn_cast<Instruction>(I->getOperand(i));
8513 if (!Operand) {
8514 Operands[i] = dyn_cast<Constant>(I->getOperand(i));
8515 if (!Operands[i]) return nullptr;
8516 continue;
8518 Constant *C = EvaluateExpression(Operand, L, Vals, DL, TLI);
8519 Vals[Operand] = C;
8520 if (!C) return nullptr;
8521 Operands[i] = C;
8524 if (CmpInst *CI = dyn_cast<CmpInst>(I))
8525 return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
8526 Operands[1], DL, TLI);
8527 if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
8528 if (!LI->isVolatile())
8529 return ConstantFoldLoadFromConstPtr(Operands[0], LI->getType(), DL);
8531 return ConstantFoldInstOperands(I, Operands, DL, TLI);
8535 // If every incoming value to PN except the one for BB is a specific Constant,
8536 // return that, else return nullptr.
8537 static Constant *getOtherIncomingValue(PHINode *PN, BasicBlock *BB) {
8538 Constant *IncomingVal = nullptr;
8540 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
8541 if (PN->getIncomingBlock(i) == BB)
8542 continue;
8544 auto *CurrentVal = dyn_cast<Constant>(PN->getIncomingValue(i));
8545 if (!CurrentVal)
8546 return nullptr;
8548 if (IncomingVal != CurrentVal) {
8549 if (IncomingVal)
8550 return nullptr;
8551 IncomingVal = CurrentVal;
8555 return IncomingVal;
8558 /// getConstantEvolutionLoopExitValue - If we know that the specified Phi is
8559 /// in the header of its containing loop, we know the loop executes a
8560 /// constant number of times, and the PHI node is just a recurrence
8561 /// involving constants, fold it.
8562 Constant *
8563 ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
8564 const APInt &BEs,
8565 const Loop *L) {
8566 auto I = ConstantEvolutionLoopExitValue.find(PN);
8567 if (I != ConstantEvolutionLoopExitValue.end())
8568 return I->second;
8570 if (BEs.ugt(MaxBruteForceIterations))
8571 return ConstantEvolutionLoopExitValue[PN] = nullptr; // Not going to evaluate it.
8573 Constant *&RetVal = ConstantEvolutionLoopExitValue[PN];
8575 DenseMap<Instruction *, Constant *> CurrentIterVals;
8576 BasicBlock *Header = L->getHeader();
8577 assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");
8579 BasicBlock *Latch = L->getLoopLatch();
8580 if (!Latch)
8581 return nullptr;
8583 for (PHINode &PHI : Header->phis()) {
8584 if (auto *StartCST = getOtherIncomingValue(&PHI, Latch))
8585 CurrentIterVals[&PHI] = StartCST;
8587 if (!CurrentIterVals.count(PN))
8588 return RetVal = nullptr;
8590 Value *BEValue = PN->getIncomingValueForBlock(Latch);
8592 // Execute the loop symbolically to determine the exit value.
8593 assert(BEs.getActiveBits() < CHAR_BIT * sizeof(unsigned) &&
8594 "BEs is <= MaxBruteForceIterations which is an 'unsigned'!");
8596 unsigned NumIterations = BEs.getZExtValue(); // must be in range
8597 unsigned IterationNum = 0;
8598 const DataLayout &DL = getDataLayout();
8599 for (; ; ++IterationNum) {
8600 if (IterationNum == NumIterations)
8601 return RetVal = CurrentIterVals[PN]; // Got exit value!
8603 // Compute the value of the PHIs for the next iteration.
8604 // EvaluateExpression adds non-phi values to the CurrentIterVals map.
8605 DenseMap<Instruction *, Constant *> NextIterVals;
8606 Constant *NextPHI =
8607 EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
8608 if (!NextPHI)
8609 return nullptr; // Couldn't evaluate!
8610 NextIterVals[PN] = NextPHI;
8612 bool StoppedEvolving = NextPHI == CurrentIterVals[PN];
8614 // Also evaluate the other PHI nodes. However, we don't get to stop if we
8615 // cease to be able to evaluate one of them or if they stop evolving,
8616 // because that doesn't necessarily prevent us from computing PN.
8617 SmallVector<std::pair<PHINode *, Constant *>, 8> PHIsToCompute;
8618 for (const auto &I : CurrentIterVals) {
8619 PHINode *PHI = dyn_cast<PHINode>(I.first);
8620 if (!PHI || PHI == PN || PHI->getParent() != Header) continue;
8621 PHIsToCompute.emplace_back(PHI, I.second);
8623 // We use two distinct loops because EvaluateExpression may invalidate any
8624 // iterators into CurrentIterVals.
8625 for (const auto &I : PHIsToCompute) {
8626 PHINode *PHI = I.first;
8627 Constant *&NextPHI = NextIterVals[PHI];
8628 if (!NextPHI) { // Not already computed.
8629 Value *BEValue = PHI->getIncomingValueForBlock(Latch);
8630 NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
8632 if (NextPHI != I.second)
8633 StoppedEvolving = false;
8636 // If all entries in CurrentIterVals match NextIterVals, then we can stop
8637 // iterating; the loop state can't continue to change.
8638 if (StoppedEvolving)
8639 return RetVal = CurrentIterVals[PN];
8641 CurrentIterVals.swap(NextIterVals);
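// A worked example (illustrative): given a header PHI p = phi(0, p + 3) and
// a known backedge-taken count of 4, the symbolic execution above steps the
// PHI four times (0 -> 3 -> 6 -> 9 -> 12) and folds the loop-exit value of p
// to the constant 12.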
8645 const SCEV *ScalarEvolution::computeExitCountExhaustively(const Loop *L,
8646 Value *Cond,
8647 bool ExitWhen) {
8648 PHINode *PN = getConstantEvolvingPHI(Cond, L);
8649 if (!PN) return getCouldNotCompute();
8651 // If the loop is canonicalized, the PHI will have exactly two entries.
8652 // That's the only form we support here.
8653 if (PN->getNumIncomingValues() != 2) return getCouldNotCompute();
8655 DenseMap<Instruction *, Constant *> CurrentIterVals;
8656 BasicBlock *Header = L->getHeader();
8657 assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");
8659 BasicBlock *Latch = L->getLoopLatch();
8660 assert(Latch && "Should follow from NumIncomingValues == 2!");
8662 for (PHINode &PHI : Header->phis()) {
8663 if (auto *StartCST = getOtherIncomingValue(&PHI, Latch))
8664 CurrentIterVals[&PHI] = StartCST;
8666 if (!CurrentIterVals.count(PN))
8667 return getCouldNotCompute();
8669 // Okay, we found a PHI node that defines the trip count of this loop. Execute
8670 // the loop symbolically to determine when the condition gets a value of
8671 // "ExitWhen".
8672 unsigned MaxIterations = MaxBruteForceIterations; // Limit analysis.
8673 const DataLayout &DL = getDataLayout();
8674 for (unsigned IterationNum = 0; IterationNum != MaxIterations;++IterationNum){
8675 auto *CondVal = dyn_cast_or_null<ConstantInt>(
8676 EvaluateExpression(Cond, L, CurrentIterVals, DL, &TLI));
8678 // Couldn't symbolically evaluate.
8679 if (!CondVal) return getCouldNotCompute();
8681 if (CondVal->getValue() == uint64_t(ExitWhen)) {
8682 ++NumBruteForceTripCountsComputed;
8683 return getConstant(Type::getInt32Ty(getContext()), IterationNum);
8686 // Update all the PHI nodes for the next iteration.
8687 DenseMap<Instruction *, Constant *> NextIterVals;
8689 // Create a list of which PHIs we need to compute. We want to do this before
8690 // calling EvaluateExpression on them because that may invalidate iterators
8691 // into CurrentIterVals.
8692 SmallVector<PHINode *, 8> PHIsToCompute;
8693 for (const auto &I : CurrentIterVals) {
8694 PHINode *PHI = dyn_cast<PHINode>(I.first);
8695 if (!PHI || PHI->getParent() != Header) continue;
8696 PHIsToCompute.push_back(PHI);
8698 for (PHINode *PHI : PHIsToCompute) {
8699 Constant *&NextPHI = NextIterVals[PHI];
8700 if (NextPHI) continue; // Already computed!
8702 Value *BEValue = PHI->getIncomingValueForBlock(Latch);
8703 NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI);
8705 CurrentIterVals.swap(NextIterVals);
8708 // Too many iterations were needed to evaluate.
8709 return getCouldNotCompute();
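// A worked example (illustrative): for
//
//   p = 3;
//   while (p <= 20) p = p * 2; // p: 3, 6, 12, 24
//
// the multiplicative recurrence {3,*,2} has no add-recurrence closed form,
// but brute-force evaluation finds the exit condition first true on
// iteration 3, so the backedge-taken count is the constant 3 (assuming the
// header PHI has the required two incoming values).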
8712 const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
8713 SmallVector<std::pair<const Loop *, const SCEV *>, 2> &Values =
8714 ValuesAtScopes[V];
8715 // Check to see if we've folded this expression at this loop before.
8716 for (auto &LS : Values)
8717 if (LS.first == L)
8718 return LS.second ? LS.second : V;
8720 Values.emplace_back(L, nullptr);
8722 // Otherwise compute it.
8723 const SCEV *C = computeSCEVAtScope(V, L);
8724 for (auto &LS : reverse(ValuesAtScopes[V]))
8725 if (LS.first == L) {
8726 LS.second = C;
8727 break;
8729 return C;
8732 /// This builds up a Constant using the ConstantExpr interface. That way, we
8733 /// will return Constants for objects which aren't represented by a
8734 /// SCEVConstant, because SCEVConstant is restricted to ConstantInt.
8735 /// Returns NULL if the SCEV isn't representable as a Constant.
8736 static Constant *BuildConstantFromSCEV(const SCEV *V) {
8737 switch (V->getSCEVType()) {
8738 case scCouldNotCompute:
8739 case scAddRecExpr:
8740 return nullptr;
8741 case scConstant:
8742 return cast<SCEVConstant>(V)->getValue();
8743 case scUnknown:
8744 return dyn_cast<Constant>(cast<SCEVUnknown>(V)->getValue());
8745 case scSignExtend: {
8746 const SCEVSignExtendExpr *SS = cast<SCEVSignExtendExpr>(V);
8747 if (Constant *CastOp = BuildConstantFromSCEV(SS->getOperand()))
8748 return ConstantExpr::getSExt(CastOp, SS->getType());
8749 return nullptr;
8751 case scZeroExtend: {
8752 const SCEVZeroExtendExpr *SZ = cast<SCEVZeroExtendExpr>(V);
8753 if (Constant *CastOp = BuildConstantFromSCEV(SZ->getOperand()))
8754 return ConstantExpr::getZExt(CastOp, SZ->getType());
8755 return nullptr;
8757 case scPtrToInt: {
8758 const SCEVPtrToIntExpr *P2I = cast<SCEVPtrToIntExpr>(V);
8759 if (Constant *CastOp = BuildConstantFromSCEV(P2I->getOperand()))
8760 return ConstantExpr::getPtrToInt(CastOp, P2I->getType());
8762 return nullptr;
8764 case scTruncate: {
8765 const SCEVTruncateExpr *ST = cast<SCEVTruncateExpr>(V);
8766 if (Constant *CastOp = BuildConstantFromSCEV(ST->getOperand()))
8767 return ConstantExpr::getTrunc(CastOp, ST->getType());
8768 return nullptr;
8770 case scAddExpr: {
8771 const SCEVAddExpr *SA = cast<SCEVAddExpr>(V);
8772 if (Constant *C = BuildConstantFromSCEV(SA->getOperand(0))) {
8773 if (PointerType *PTy = dyn_cast<PointerType>(C->getType())) {
8774 unsigned AS = PTy->getAddressSpace();
8775 Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS);
8776 C = ConstantExpr::getBitCast(C, DestPtrTy);
8778 for (unsigned i = 1, e = SA->getNumOperands(); i != e; ++i) {
8779 Constant *C2 = BuildConstantFromSCEV(SA->getOperand(i));
8780 if (!C2)
8781 return nullptr;
8783 // First pointer!
8784 if (!C->getType()->isPointerTy() && C2->getType()->isPointerTy()) {
8785 unsigned AS = C2->getType()->getPointerAddressSpace();
8786 std::swap(C, C2);
8787 Type *DestPtrTy = Type::getInt8PtrTy(C->getContext(), AS);
8788 // The offsets have been converted to bytes. We can add bytes to an
8789 // i8* by GEP with the byte count in the first index.
8790 C = ConstantExpr::getBitCast(C, DestPtrTy);
8793 // Don't bother trying to sum two pointers. We probably can't
8794 // statically compute a load that results from it anyway.
8795 if (C2->getType()->isPointerTy())
8796 return nullptr;
8798 if (C->getType()->isPointerTy()) {
8799 C = ConstantExpr::getGetElementPtr(Type::getInt8Ty(C->getContext()),
8800 C, C2);
8801 } else {
8802 C = ConstantExpr::getAdd(C, C2);
8805 return C;
8807 return nullptr;
8809 case scMulExpr: {
8810 const SCEVMulExpr *SM = cast<SCEVMulExpr>(V);
8811 if (Constant *C = BuildConstantFromSCEV(SM->getOperand(0))) {
8812 // Don't bother with pointers at all.
8813 if (C->getType()->isPointerTy())
8814 return nullptr;
8815 for (unsigned i = 1, e = SM->getNumOperands(); i != e; ++i) {
8816 Constant *C2 = BuildConstantFromSCEV(SM->getOperand(i));
8817 if (!C2 || C2->getType()->isPointerTy())
8818 return nullptr;
8819 C = ConstantExpr::getMul(C, C2);
8821 return C;
8823 return nullptr;
8825 case scUDivExpr: {
8826 const SCEVUDivExpr *SU = cast<SCEVUDivExpr>(V);
8827 if (Constant *LHS = BuildConstantFromSCEV(SU->getLHS()))
8828 if (Constant *RHS = BuildConstantFromSCEV(SU->getRHS()))
8829 if (LHS->getType() == RHS->getType())
8830 return ConstantExpr::getUDiv(LHS, RHS);
8831 return nullptr;
8833 case scSMaxExpr:
8834 case scUMaxExpr:
8835 case scSMinExpr:
8836 case scUMinExpr:
8837 return nullptr; // TODO: smax, umax, smin, umin.
8839 llvm_unreachable("Unknown SCEV kind!");
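// For example (illustrative), an scAddExpr of a constant offset and a global,
// (8 + @g), becomes the ConstantExpr gep(i8, bitcast(@g to i8*), 8): the
// integer operand is applied as a byte offset through an i8 GEP, exactly as
// the scAddExpr case above constructs it.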
8842 const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
8843 if (isa<SCEVConstant>(V)) return V;
8845 // If this instruction is evolved from a constant-evolving PHI, compute the
8846 // exit value from the loop without using SCEVs.
8847 if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) {
8848 if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) {
8849 if (PHINode *PN = dyn_cast<PHINode>(I)) {
8850 const Loop *CurrLoop = this->LI[I->getParent()];
8851 // Looking for loop exit value.
8852 if (CurrLoop && CurrLoop->getParentLoop() == L &&
8853 PN->getParent() == CurrLoop->getHeader()) {
8854 // Okay, there is no closed form solution for the PHI node. Check
8855 // to see if the loop that contains it has a known backedge-taken
8856 // count. If so, we may be able to force computation of the exit
8857 // value.
8858 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(CurrLoop);
8859 // This trivial case can show up in some degenerate cases where
8860 // the incoming IR has not yet been fully simplified.
8861 if (BackedgeTakenCount->isZero()) {
8862 Value *InitValue = nullptr;
8863 bool MultipleInitValues = false;
8864 for (unsigned i = 0; i < PN->getNumIncomingValues(); i++) {
8865 if (!CurrLoop->contains(PN->getIncomingBlock(i))) {
8866 if (!InitValue)
8867 InitValue = PN->getIncomingValue(i);
8868 else if (InitValue != PN->getIncomingValue(i)) {
8869 MultipleInitValues = true;
8870 break;
8874 if (!MultipleInitValues && InitValue)
8875 return getSCEV(InitValue);
8877 // Do we have a loop invariant value flowing around the backedge
8878 // for a loop which must execute the backedge?
8879 if (!isa<SCEVCouldNotCompute>(BackedgeTakenCount) &&
8880 isKnownPositive(BackedgeTakenCount) &&
8881 PN->getNumIncomingValues() == 2) {
8883 unsigned InLoopPred =
8884 CurrLoop->contains(PN->getIncomingBlock(0)) ? 0 : 1;
8885 Value *BackedgeVal = PN->getIncomingValue(InLoopPred);
8886 if (CurrLoop->isLoopInvariant(BackedgeVal))
8887 return getSCEV(BackedgeVal);
8889 if (auto *BTCC = dyn_cast<SCEVConstant>(BackedgeTakenCount)) {
8890 // Okay, we know how many times the containing loop executes. If
8891 // this is a constant evolving PHI node, get the final value at
8892 // the specified iteration number.
8893 Constant *RV = getConstantEvolutionLoopExitValue(
8894 PN, BTCC->getAPInt(), CurrLoop);
8895 if (RV) return getSCEV(RV);
8899 // If there is a single-input Phi, evaluate it at our scope. If we can
8900 // prove that this replacement does not break LCSSA form, use the new value.
8901 if (PN->getNumOperands() == 1) {
8902 const SCEV *Input = getSCEV(PN->getOperand(0));
8903 const SCEV *InputAtScope = getSCEVAtScope(Input, L);
8904 // TODO: We can generalize it using LI.replacementPreservesLCSSAForm,
8905 // for the simplest case just support constants.
8906 if (isa<SCEVConstant>(InputAtScope)) return InputAtScope;
8910 // Okay, this is an expression that we cannot symbolically evaluate
8911 // into a SCEV. Check to see if it's possible to symbolically evaluate
8912 // the arguments into constants, and if so, try to constant propagate the
8913 // result. This is particularly useful for computing loop exit values.
8914 if (CanConstantFold(I)) {
8915 SmallVector<Constant *, 4> Operands;
8916 bool MadeImprovement = false;
8917 for (Value *Op : I->operands()) {
8918 if (Constant *C = dyn_cast<Constant>(Op)) {
8919 Operands.push_back(C);
8920 continue;
8923 // If any operand is non-constant and non-SCEVable (i.e., neither
8924 // integer nor pointer), don't even try to analyze it with SCEV
8925 // techniques.
8926 if (!isSCEVable(Op->getType()))
8927 return V;
8929 const SCEV *OrigV = getSCEV(Op);
8930 const SCEV *OpV = getSCEVAtScope(OrigV, L);
8931 MadeImprovement |= OrigV != OpV;
8933 Constant *C = BuildConstantFromSCEV(OpV);
8934 if (!C) return V;
8935 if (C->getType() != Op->getType())
8936 C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
8937 Op->getType(),
8938 false),
8939 C, Op->getType());
8940 Operands.push_back(C);
8943 // Check to see if getSCEVAtScope actually made an improvement.
8944 if (MadeImprovement) {
8945 Constant *C = nullptr;
8946 const DataLayout &DL = getDataLayout();
8947 if (const CmpInst *CI = dyn_cast<CmpInst>(I))
8948 C = ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
8949 Operands[1], DL, &TLI);
8950 else if (const LoadInst *Load = dyn_cast<LoadInst>(I)) {
8951 if (!Load->isVolatile())
8952 C = ConstantFoldLoadFromConstPtr(Operands[0], Load->getType(),
8953 DL);
8954 } else
8955 C = ConstantFoldInstOperands(I, Operands, DL, &TLI);
8956 if (!C) return V;
8957 return getSCEV(C);
8962     // This is some other type of SCEVUnknown; just return it.
8963 return V;
8966 if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) {
8967 // Avoid performing the look-up in the common case where the specified
8968 // expression has no loop-variant portions.
8969 for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) {
8970 const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
8971 if (OpAtScope != Comm->getOperand(i)) {
8972 // Okay, at least one of these operands is loop variant but might be
8973 // foldable. Build a new instance of the folded commutative expression.
8974 SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(),
8975 Comm->op_begin()+i);
8976 NewOps.push_back(OpAtScope);
8978 for (++i; i != e; ++i) {
8979 OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
8980 NewOps.push_back(OpAtScope);
8982 if (isa<SCEVAddExpr>(Comm))
8983 return getAddExpr(NewOps, Comm->getNoWrapFlags());
8984 if (isa<SCEVMulExpr>(Comm))
8985 return getMulExpr(NewOps, Comm->getNoWrapFlags());
8986 if (isa<SCEVMinMaxExpr>(Comm))
8987 return getMinMaxExpr(Comm->getSCEVType(), NewOps);
8988 llvm_unreachable("Unknown commutative SCEV type!");
8991 // If we got here, all operands are loop invariant.
8992 return Comm;
8995 if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) {
8996 const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L);
8997 const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L);
8998 if (LHS == Div->getLHS() && RHS == Div->getRHS())
8999 return Div; // must be loop invariant
9000 return getUDivExpr(LHS, RHS);
9003 // If this is a loop recurrence for a loop that does not contain L, then we
9004 // are dealing with the final value computed by the loop.
9005 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
9006 // First, attempt to evaluate each operand.
9007 // Avoid performing the look-up in the common case where the specified
9008 // expression has no loop-variant portions.
9009 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
9010 const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L);
9011 if (OpAtScope == AddRec->getOperand(i))
9012 continue;
9014 // Okay, at least one of these operands is loop variant but might be
9015 // foldable. Build a new instance of the folded commutative expression.
9016 SmallVector<const SCEV *, 8> NewOps(AddRec->op_begin(),
9017 AddRec->op_begin()+i);
9018 NewOps.push_back(OpAtScope);
9019 for (++i; i != e; ++i)
9020 NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L));
9022 const SCEV *FoldedRec =
9023 getAddRecExpr(NewOps, AddRec->getLoop(),
9024 AddRec->getNoWrapFlags(SCEV::FlagNW));
9025 AddRec = dyn_cast<SCEVAddRecExpr>(FoldedRec);
9026 // The addrec may be folded to a nonrecurrence, for example, if the
9027 // induction variable is multiplied by zero after constant folding. Go
9028 // ahead and return the folded value.
9029 if (!AddRec)
9030 return FoldedRec;
9031 break;
9034 // If the scope is outside the addrec's loop, evaluate it by using the
9035 // loop exit value of the addrec.
9036 if (!AddRec->getLoop()->contains(L)) {
9037 // To evaluate this recurrence, we need to know how many times the AddRec
9038 // loop iterates. Compute this now.
9039 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop());
9040 if (BackedgeTakenCount == getCouldNotCompute()) return AddRec;
9042 // Then, evaluate the AddRec.
9043 return AddRec->evaluateAtIteration(BackedgeTakenCount, *this);
9046 return AddRec;
9049 if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) {
9050 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
9051 if (Op == Cast->getOperand())
9052 return Cast; // must be loop invariant
9053 return getZeroExtendExpr(Op, Cast->getType());
9056 if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) {
9057 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
9058 if (Op == Cast->getOperand())
9059 return Cast; // must be loop invariant
9060 return getSignExtendExpr(Op, Cast->getType());
9063 if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) {
9064 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
9065 if (Op == Cast->getOperand())
9066 return Cast; // must be loop invariant
9067 return getTruncateExpr(Op, Cast->getType());
9070 if (const SCEVPtrToIntExpr *Cast = dyn_cast<SCEVPtrToIntExpr>(V)) {
9071 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
9072 if (Op == Cast->getOperand())
9073 return Cast; // must be loop invariant
9074 return getPtrToIntExpr(Op, Cast->getType());
9077 llvm_unreachable("Unknown SCEV type!");
9080 const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) {
9081 return getSCEVAtScope(getSCEV(V), L);
9084 const SCEV *ScalarEvolution::stripInjectiveFunctions(const SCEV *S) const {
9085 if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S))
9086 return stripInjectiveFunctions(ZExt->getOperand());
9087 if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S))
9088 return stripInjectiveFunctions(SExt->getOperand());
9089 return S;
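// For example (illustrative): stripInjectiveFunctions turns
// (zext i16 (sext i8 %x to i16) to i32) into %x; both extensions are
// injective, so a root of the stripped expression corresponds to a unique
// root of the original one.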
9092 /// Finds the minimum unsigned root of the following equation:
9094 /// A * X = B (mod N)
9096 /// where N = 2^BW and BW is the common bit width of A and B. The signedness of
9097 /// A and B isn't important.
9099 /// If the equation does not have a solution, SCEVCouldNotCompute is returned.
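///
/// A worked example (illustrative, not exercised by this file): with BW = 4,
/// A = 4 and B = 8, we have D = gcd(4, 16) = 4, and B is divisible by D.
/// Then I = (A/D)^-1 mod (N/D) = 1^-1 mod 4 = 1, and the minimum root is
/// (I * B mod N) / D = (8 mod 16) / 4 = 2; indeed 4 * 2 == 8 (mod 16).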
9100 static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const SCEV *B,
9101 ScalarEvolution &SE) {
9102 uint32_t BW = A.getBitWidth();
9103 assert(BW == SE.getTypeSizeInBits(B->getType()));
9104 assert(A != 0 && "A must be non-zero.");
9106 // 1. D = gcd(A, N)
9108   // The gcd of A and N may have only one prime factor: 2. The number of
9109   // trailing zeros in A is its multiplicity.
9110 uint32_t Mult2 = A.countTrailingZeros();
9111 // D = 2^Mult2
9113 // 2. Check if B is divisible by D.
9115   // B is divisible by D if and only if the multiplicity of prime factor 2 for B
9116   // is not less than the multiplicity of this prime factor for D.
9117 if (SE.GetMinTrailingZeros(B) < Mult2)
9118 return SE.getCouldNotCompute();
9120 // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic
9121 // modulo (N / D).
9123 // If D == 1, (N / D) == N == 2^BW, so we need one extra bit to represent
9124 // (N / D) in general. The inverse itself always fits into BW bits, though,
9125 // so we immediately truncate it.
9126 APInt AD = A.lshr(Mult2).zext(BW + 1); // AD = A / D
9127 APInt Mod(BW + 1, 0);
9128 Mod.setBit(BW - Mult2); // Mod = N / D
9129 APInt I = AD.multiplicativeInverse(Mod).trunc(BW);
9131 // 4. Compute the minimum unsigned root of the equation:
9132 // I * (B / D) mod (N / D)
9133 // To simplify the computation, we factor out the divide by D:
9134 // (I * B mod N) / D
9135 const SCEV *D = SE.getConstant(APInt::getOneBitSet(BW, Mult2));
9136 return SE.getUDivExactExpr(SE.getMulExpr(B, SE.getConstant(I)), D);
9139 /// For a given quadratic addrec, generate coefficients of the corresponding
9140 /// quadratic equation, multiplied by a common value to ensure that they are
9141 /// integers.
9142 /// The returned value is a tuple { A, B, C, M, BitWidth }, where
9143 /// Ax^2 + Bx + C is the quadratic function, M is the value that A, B and C
9144 /// were multiplied by, and BitWidth is the bit width of the original addrec
9145 /// coefficients.
9146 /// This function returns None if the addrec coefficients are not compile-
9147 /// time constants.
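///
/// For example (illustrative): for the addrec {1,+,3,+,2}, the accumulated
/// value after n iterations is 1 + 3n + n(n-1)/2 * 2 = (n+1)^2, so the
/// returned tuple is { A = 2, B = 4, C = 2, M = 2, BitWidth }, encoding
/// 2n^2 + 4n + 2 = 2 * (n+1)^2 = 0 as the equation to solve.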
9148 static Optional<std::tuple<APInt, APInt, APInt, APInt, unsigned>>
9149 GetQuadraticEquation(const SCEVAddRecExpr *AddRec) {
9150 assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!");
9151 const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0));
9152 const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1));
9153 const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2));
9154 LLVM_DEBUG(dbgs() << __func__ << ": analyzing quadratic addrec: "
9155 << *AddRec << '\n');
9157 // We currently can only solve this if the coefficients are constants.
9158 if (!LC || !MC || !NC) {
9159 LLVM_DEBUG(dbgs() << __func__ << ": coefficients are not constant\n");
9160 return None;
9163 APInt L = LC->getAPInt();
9164 APInt M = MC->getAPInt();
9165 APInt N = NC->getAPInt();
9166 assert(!N.isNullValue() && "This is not a quadratic addrec");
9168 unsigned BitWidth = LC->getAPInt().getBitWidth();
9169 unsigned NewWidth = BitWidth + 1;
9170 LLVM_DEBUG(dbgs() << __func__ << ": addrec coeff bw: "
9171 << BitWidth << '\n');
9172 // The sign-extension (as opposed to a zero-extension) here matches the
9173 // extension used in SolveQuadraticEquationWrap (with the same motivation).
9174 N = N.sext(NewWidth);
9175 M = M.sext(NewWidth);
9176 L = L.sext(NewWidth);
9178 // The increments are M, M+N, M+2N, ..., so the accumulated values are
9179 // L+M, (L+M)+(M+N), (L+M)+(M+N)+(M+2N), ..., that is,
9180 // L+M, L+2M+N, L+3M+3N, ...
9181 // After n iterations the accumulated value Acc is L + nM + n(n-1)/2 N.
9183 // The equation Acc = 0 is then
9184 // L + nM + n(n-1)/2 N = 0, or 2L + 2M n + n(n-1) N = 0.
9185 // In a quadratic form it becomes:
9186 // N n^2 + (2M-N) n + 2L = 0.
9188 APInt A = N;
9189 APInt B = 2 * M - A;
9190 APInt C = 2 * L;
9191 APInt T = APInt(NewWidth, 2);
9192 LLVM_DEBUG(dbgs() << __func__ << ": equation " << A << "x^2 + " << B
9193 << "x + " << C << ", coeff bw: " << NewWidth
9194 << ", multiplied by " << T << '\n');
9195 return std::make_tuple(A, B, C, T, BitWidth);
9198 /// Helper function to compare optional APInts:
9199 /// (a) if X and Y both exist, return min(X, Y),
9200 /// (b) if neither X nor Y exist, return None,
9201 /// (c) if exactly one of X and Y exists, return that value.
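///
/// For instance, MinOptional(-1 (as i8), 3 (as i9)) sign-extends both values
/// to 9 bits, compares -1 s< 3, and returns the original -1 (still 8 bits
/// wide), while MinOptional(None, 3) returns 3.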
9202 static Optional<APInt> MinOptional(Optional<APInt> X, Optional<APInt> Y) {
9203 if (X.hasValue() && Y.hasValue()) {
9204 unsigned W = std::max(X->getBitWidth(), Y->getBitWidth());
9205 APInt XW = X->sextOrSelf(W);
9206 APInt YW = Y->sextOrSelf(W);
9207 return XW.slt(YW) ? *X : *Y;
9209 if (!X.hasValue() && !Y.hasValue())
9210 return None;
9211 return X.hasValue() ? *X : *Y;
9214 /// Helper function to truncate an optional APInt to a given BitWidth.
9215 /// When solving addrec-related equations, it is preferable to return a value
9216 /// that has the same bit width as the original addrec's coefficients. If the
9217 /// solution fits in the original bit width, truncate it (except for i1).
9218 /// Returning a value of a different bit width may inhibit some optimizations.
9220 /// In general, a solution to a quadratic equation generated from an addrec
9221 /// may require BW+1 bits, where BW is the bit width of the addrec's
9222 /// coefficients. The reason is that the coefficients of the quadratic
9223 /// equation are BW+1 bits wide (to avoid truncation when converting from
9224 /// the addrec to the equation).
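///
/// For instance, a 33-bit solution with value 7 is truncated to 32 bits when
/// BitWidth is 32, while a 33-bit solution with value 2^32 is returned
/// unchanged because it does not fit into 32 bits.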
9225 static Optional<APInt> TruncIfPossible(Optional<APInt> X, unsigned BitWidth) {
9226 if (!X.hasValue())
9227 return None;
9228 unsigned W = X->getBitWidth();
9229 if (BitWidth > 1 && BitWidth < W && X->isIntN(BitWidth))
9230 return X->trunc(BitWidth);
9231 return X;
9234 /// Let c(n) be the value of the quadratic chrec {L,+,M,+,N} after n
9235 /// iterations. The values L, M, N are assumed to be signed, and they
9236 /// should all have the same bit widths.
9237 /// Find the least n >= 0 such that c(n) = 0 in the arithmetic modulo 2^BW,
9238 /// where BW is the bit width of the addrec's coefficients.
9239 /// If the calculated value is a BW-bit integer (for BW > 1), it will be
9240 /// returned as such, otherwise the bit width of the returned value may
9241 /// be greater than BW.
9243 /// This function returns None if
9244 /// (a) the addrec coefficients are not constant, or
9245 /// (b) SolveQuadraticEquationWrap was unable to find a solution. For cases
9246 ///     like x^2 = 5, no integer solutions exist; in other cases an integer
9247 /// solution may exist, but SolveQuadraticEquationWrap may fail to find it.
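///
/// A worked example (illustrative): for the chrec {-6,+,2,+,2},
/// c(n) = -6 + 2n + n(n-1) = n^2 + n - 6, so c(2) = 0 and the function
/// returns 2 (truncated back to the addrec's bit width when possible).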
9248 static Optional<APInt>
9249 SolveQuadraticAddRecExact(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
9250 APInt A, B, C, M;
9251 unsigned BitWidth;
9252 auto T = GetQuadraticEquation(AddRec);
9253 if (!T.hasValue())
9254 return None;
9256 std::tie(A, B, C, M, BitWidth) = *T;
9257 LLVM_DEBUG(dbgs() << __func__ << ": solving for unsigned overflow\n");
9258 Optional<APInt> X = APIntOps::SolveQuadraticEquationWrap(A, B, C, BitWidth+1);
9259 if (!X.hasValue())
9260 return None;
9262 ConstantInt *CX = ConstantInt::get(SE.getContext(), *X);
9263 ConstantInt *V = EvaluateConstantChrecAtConstant(AddRec, CX, SE);
9264 if (!V->isZero())
9265 return None;
9267 return TruncIfPossible(X, BitWidth);
9270 /// Let c(n) be the value of the quadratic chrec {0,+,M,+,N} after n
9271 /// iterations. The values M, N are assumed to be signed, and they
9272 /// should all have the same bit widths.
9273 /// Find the least n such that c(n) does not belong to the given range,
9274 /// while c(n-1) does.
9276 /// This function returns None if
9277 /// (a) the addrec coefficients are not constant, or
9278 /// (b) SolveQuadraticEquationWrap was unable to find a solution for the
9279 /// bounds of the range.
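///
/// A worked example (illustrative): for the chrec {0,+,3,+,2} and the range
/// [-10, 10), c(n) = 3n + n(n-1) = n^2 + 2n takes the values 0, 3, 8, 15,
/// ..., so c(2) = 8 is still inside the range, c(3) = 15 is the first value
/// outside it, and the answer is 3.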
9280 static Optional<APInt>
9281 SolveQuadraticAddRecRange(const SCEVAddRecExpr *AddRec,
9282 const ConstantRange &Range, ScalarEvolution &SE) {
9283 assert(AddRec->getOperand(0)->isZero() &&
9284 "Starting value of addrec should be 0");
9285 LLVM_DEBUG(dbgs() << __func__ << ": solving boundary crossing for range "
9286 << Range << ", addrec " << *AddRec << '\n');
9287 // This case is handled in getNumIterationsInRange. Here we can assume that
9288 // we start in the range.
9289 assert(Range.contains(APInt(SE.getTypeSizeInBits(AddRec->getType()), 0)) &&
9290 "Addrec's initial value should be in range");
9292 APInt A, B, C, M;
9293 unsigned BitWidth;
9294 auto T = GetQuadraticEquation(AddRec);
9295 if (!T.hasValue())
9296 return None;
9298 // Be careful about the return value: there can be two reasons for not
9299 // returning an actual number. First, if no solutions to the equations
9300 // were found, and second, if the solutions don't leave the given range.
9301 // The first case means that the actual solution is "unknown", the second
9302 // means that it's known, but not valid. If the solution is unknown, we
9303 // cannot make any conclusions.
9304 // Return a pair: the optional solution and a flag indicating if the
9305 // solution was found.
9306 auto SolveForBoundary = [&](APInt Bound) -> std::pair<Optional<APInt>,bool> {
9307 // Solve for signed overflow and unsigned overflow, pick the lower
9308 // solution.
9309 LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: checking boundary "
9310 << Bound << " (before multiplying by " << M << ")\n");
9311 Bound *= M; // The quadratic equation multiplier.
9313 Optional<APInt> SO = None;
9314 if (BitWidth > 1) {
9315 LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for "
9316 "signed overflow\n");
9317 SO = APIntOps::SolveQuadraticEquationWrap(A, B, -Bound, BitWidth);
9319 LLVM_DEBUG(dbgs() << "SolveQuadraticAddRecRange: solving for "
9320 "unsigned overflow\n");
9321 Optional<APInt> UO = APIntOps::SolveQuadraticEquationWrap(A, B, -Bound,
9322 BitWidth+1);
9324 auto LeavesRange = [&] (const APInt &X) {
9325 ConstantInt *C0 = ConstantInt::get(SE.getContext(), X);
9326 ConstantInt *V0 = EvaluateConstantChrecAtConstant(AddRec, C0, SE);
9327 if (Range.contains(V0->getValue()))
9328 return false;
9329 // X should be at least 1, so X-1 is non-negative.
9330 ConstantInt *C1 = ConstantInt::get(SE.getContext(), X-1);
9331 ConstantInt *V1 = EvaluateConstantChrecAtConstant(AddRec, C1, SE);
9332 if (Range.contains(V1->getValue()))
9333 return true;
9334 return false;
9337 // If SolveQuadraticEquationWrap returns None, it means that there can
9338 // be a solution, but the function failed to find it. We cannot treat it
9339 // as "no solution".
9340 if (!SO.hasValue() || !UO.hasValue())
9341 return { None, false };
9343 // Check the smaller value first to see if it leaves the range.
9344 // At this point, both SO and UO must have values.
9345 Optional<APInt> Min = MinOptional(SO, UO);
9346 if (LeavesRange(*Min))
9347 return { Min, true };
9348 Optional<APInt> Max = Min == SO ? UO : SO;
9349 if (LeavesRange(*Max))
9350 return { Max, true };
9352 // Solutions were found, but were eliminated, hence the "true".
9353 return { None, true };
9356 std::tie(A, B, C, M, BitWidth) = *T;
9357 // Lower bound is inclusive, subtract 1 to represent the exiting value.
9358 APInt Lower = Range.getLower().sextOrSelf(A.getBitWidth()) - 1;
9359 APInt Upper = Range.getUpper().sextOrSelf(A.getBitWidth());
9360 auto SL = SolveForBoundary(Lower);
9361 auto SU = SolveForBoundary(Upper);
9362   // If any of the solutions was unknown, no meaningful conclusions can
9363 // be made.
9364 if (!SL.second || !SU.second)
9365 return None;
9367 // Claim: The correct solution is not some value between Min and Max.
9369 // Justification: Assuming that Min and Max are different values, one of
9370 // them is when the first signed overflow happens, the other is when the
9371 // first unsigned overflow happens. Crossing the range boundary is only
9372 // possible via an overflow (treating 0 as a special case of it, modeling
9373 // an overflow as crossing k*2^W for some k).
9375 // The interesting case here is when Min was eliminated as an invalid
9376 // solution, but Max was not. The argument is that if there was another
9377 // overflow between Min and Max, it would also have been eliminated if
9378 // it was considered.
9380 // For a given boundary, it is possible to have two overflows of the same
9381 // type (signed/unsigned) without having the other type in between: this
9382 // can happen when the vertex of the parabola is between the iterations
9383 // corresponding to the overflows. This is only possible when the two
9384 // overflows cross k*2^W for the same k. In such case, if the second one
9385 // left the range (and was the first one to do so), the first overflow
9386 // would have to enter the range, which would mean that either we had left
9387 // the range before or that we started outside of it. Both of these cases
9388 // are contradictions.
9390 // Claim: In the case where SolveForBoundary returns None, the correct
9391 // solution is not some value between the Max for this boundary and the
9392 // Min of the other boundary.
9394 // Justification: Assume that we had such Max_A and Min_B corresponding
9395 // to range boundaries A and B and such that Max_A < Min_B. If there was
9396 // a solution between Max_A and Min_B, it would have to be caused by an
9397 // overflow corresponding to either A or B. It cannot correspond to B,
9398 // since Min_B is the first occurrence of such an overflow. If it
9399 // corresponded to A, it would have to be either a signed or an unsigned
9400 // overflow that is larger than both eliminated overflows for A. But
9401 // between the eliminated overflows and this overflow, the values would
9402 // cover the entire value space, thus crossing the other boundary, which
9403 // is a contradiction.
9405 return TruncIfPossible(MinOptional(SL.first, SU.first), BitWidth);
9408 ScalarEvolution::ExitLimit
9409 ScalarEvolution::howFarToZero(const SCEV *V, const Loop *L, bool ControlsExit,
9410 bool AllowPredicates) {
9412   // This is only used for loops with an "x != y" exit test. The exit condition
9413   // is now expressed as a single expression, V = x-y. So the exit test is
9414   // effectively V != 0. We know and take advantage of the fact that this
9415   // expression is only used in a comparison-with-zero context.
9417 SmallPtrSet<const SCEVPredicate *, 4> Predicates;
9418 // If the value is a constant
9419 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
9420 // If the value is already zero, the branch will execute zero times.
9421 if (C->getValue()->isZero()) return C;
9422 return getCouldNotCompute(); // Otherwise it will loop infinitely.
9425 const SCEVAddRecExpr *AddRec =
9426 dyn_cast<SCEVAddRecExpr>(stripInjectiveFunctions(V));
9428 if (!AddRec && AllowPredicates)
9429 // Try to make this an AddRec using runtime tests, in the first X
9430 // iterations of this loop, where X is the SCEV expression found by the
9431 // algorithm below.
9432 AddRec = convertSCEVToAddRecWithPredicates(V, L, Predicates);
9434 if (!AddRec || AddRec->getLoop() != L)
9435 return getCouldNotCompute();
9437 // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
9438 // the quadratic equation to solve it.
9439 if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) {
9440 // We can only use this value if the chrec ends up with an exact zero
9441 // value at this index. When solving for "X*X != 5", for example, we
9442 // should not accept a root of 2.
9443 if (auto S = SolveQuadraticAddRecExact(AddRec, *this)) {
9444 const auto *R = cast<SCEVConstant>(getConstant(S.getValue()));
9445 return ExitLimit(R, R, false, Predicates);
9447 return getCouldNotCompute();
9450 // Otherwise we can only handle this if it is affine.
9451 if (!AddRec->isAffine())
9452 return getCouldNotCompute();
9454 // If this is an affine expression, the execution count of this branch is
9455 // the minimum unsigned root of the following equation:
9457 // Start + Step*N = 0 (mod 2^BW)
9459 // equivalent to:
9461 // Step*N = -Start (mod 2^BW)
9463 // where BW is the common bit width of Start and Step.
9465 // Get the initial value for the loop.
9466 const SCEV *Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop());
9467 const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop());
9469 // For now we handle only constant steps.
9471 // TODO: Handle a nonconstant Step given AddRec<NUW>. If the
9472   // AddRec is NUW, then (in an unsigned sense) it cannot be counting up to wrap
9473   // to 0; it must be counting down to equal 0. Consequently, N = Start / -Step.
9474 // We have not yet seen any such cases.
9475 const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step);
9476 if (!StepC || StepC->getValue()->isZero())
9477 return getCouldNotCompute();
9479 // For positive steps (counting up until unsigned overflow):
9480 // N = -Start/Step (as unsigned)
9481 // For negative steps (counting down to zero):
9482 // N = Start/-Step
9483 // First compute the unsigned distance from zero in the direction of Step.
9484 bool CountDown = StepC->getAPInt().isNegative();
9485 const SCEV *Distance = CountDown ? Start : getNegativeSCEV(Start);
9487 // Handle unitary steps, which cannot wraparound.
9488 // 1*N = -Start; -1*N = Start (mod 2^BW), so:
9489 // N = Distance (as unsigned)
9490 if (StepC->getValue()->isOne() || StepC->getValue()->isMinusOne()) {
9491 APInt MaxBECount = getUnsignedRangeMax(applyLoopGuards(Distance, L));
9492 APInt MaxBECountBase = getUnsignedRangeMax(Distance);
9493 if (MaxBECountBase.ult(MaxBECount))
9494 MaxBECount = MaxBECountBase;
9496 // When a loop like "for (int i = 0; i != n; ++i) { /* body */ }" is rotated,
9497 // we end up with a loop whose backedge-taken count is n - 1. Detect this
9498 // case, and see if we can improve the bound.
9500 // Explicitly handling this here is necessary because getUnsignedRange
9501 // isn't context-sensitive; it doesn't know that we only care about the
9502 // range inside the loop.
9503 const SCEV *Zero = getZero(Distance->getType());
9504 const SCEV *One = getOne(Distance->getType());
9505 const SCEV *DistancePlusOne = getAddExpr(Distance, One);
9506 if (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_NE, DistancePlusOne, Zero)) {
9507 // If Distance + 1 doesn't overflow, we can compute the maximum distance
9508 // as "unsigned_max(Distance + 1) - 1".
9509 ConstantRange CR = getUnsignedRange(DistancePlusOne);
9510 MaxBECount = APIntOps::umin(MaxBECount, CR.getUnsignedMax() - 1);
9512 return ExitLimit(Distance, getConstant(MaxBECount), false, Predicates);
9515 // If the condition controls loop exit (the loop exits only if the expression
9516 // is true) and the addition is no-wrap we can use unsigned divide to
9517 // compute the backedge count. In this case, the step may not divide the
9518 // distance, but we don't care because if the condition is "missed" the loop
9519 // will have undefined behavior due to wrapping.
9520 if (ControlsExit && AddRec->hasNoSelfWrap() &&
9521 loopHasNoAbnormalExits(AddRec->getLoop())) {
9522 const SCEV *Exact =
9523 getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step);
9524 const SCEV *Max = getCouldNotCompute();
9525 if (Exact != getCouldNotCompute()) {
9526 APInt MaxInt = getUnsignedRangeMax(applyLoopGuards(Exact, L));
9527 APInt BaseMaxInt = getUnsignedRangeMax(Exact);
9528 if (BaseMaxInt.ult(MaxInt))
9529 Max = getConstant(BaseMaxInt);
9530 else
9531 Max = getConstant(MaxInt);
9533 return ExitLimit(Exact, Max, false, Predicates);
9536 // Solve the general equation.
9537 const SCEV *E = SolveLinEquationWithOverflow(StepC->getAPInt(),
9538 getNegativeSCEV(Start), *this);
9539   const SCEV *M = E == getCouldNotCompute()
9540                       ? E
9541                       : getConstant(getUnsignedRangeMax(E));
9542 return ExitLimit(E, M, false, Predicates);
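// A worked example for the paths above (illustrative, for a hypothetical
// loop): an exit test "i != n" with i = {0,+,1} gives the exit expression
// V = i - n = {-n,+,1}. The step is the unitary constant 1, so
// Distance = -Start = n and the exact backedge-taken count is n itself.
// For a constant chrec such as {10,+,-2} the step counts down, so
// Distance = Start = 10 and both the udiv path and the general linear
// equation yield 10 /u 2 = 5 iterations.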
9545 ScalarEvolution::ExitLimit
9546 ScalarEvolution::howFarToNonZero(const SCEV *V, const Loop *L) {
9547 // Loops that look like: while (X == 0) are very strange indeed. We don't
9548 // handle them yet except for the trivial case. This could be expanded in the
9549 // future as needed.
9551 // If the value is a constant, check to see if it is known to be non-zero
9552 // already. If so, the backedge will execute zero times.
9553 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
9554 if (!C->getValue()->isZero())
9555 return getZero(C->getType());
9556 return getCouldNotCompute(); // Otherwise it will loop infinitely.
9559 // We could implement others, but I really doubt anyone writes loops like
9560 // this, and if they did, they would already be constant folded.
9561 return getCouldNotCompute();
9564 std::pair<const BasicBlock *, const BasicBlock *>
9565 ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(const BasicBlock *BB)
9566 const {
9567 // If the block has a unique predecessor, then there is no path from the
9568 // predecessor to the block that does not go through the direct edge
9569 // from the predecessor to the block.
9570 if (const BasicBlock *Pred = BB->getSinglePredecessor())
9571 return {Pred, BB};
9573 // A loop's header is defined to be a block that dominates the loop.
9574 // If the header has a unique predecessor outside the loop, it must be
9575 // a block that has exactly one successor that can reach the loop.
9576 if (const Loop *L = LI.getLoopFor(BB))
9577 return {L->getLoopPredecessor(), L->getHeader()};
9579 return {nullptr, nullptr};
9582 /// SCEV structural equivalence is usually sufficient for testing whether two
9583 /// expressions are equal, however for the purposes of looking for a condition
9584 /// guarding a loop, it can be useful to be a little more general, since a
9585 /// front-end may have replicated the controlling expression.
9586 static bool HasSameValue(const SCEV *A, const SCEV *B) {
9587 // Quick check to see if they are the same SCEV.
9588 if (A == B) return true;
9590 auto ComputesEqualValues = [](const Instruction *A, const Instruction *B) {
9591 // Not all instructions that are "identical" compute the same value. For
9592 // instance, two distinct alloca instructions allocating the same type are
9593     // identical and do not read memory, but compute distinct values.
9594 return A->isIdenticalTo(B) && (isa<BinaryOperator>(A) || isa<GetElementPtrInst>(A));
9597 // Otherwise, if they're both SCEVUnknown, it's possible that they hold
9598 // two different instructions with the same value. Check for this case.
9599 if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A))
9600 if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B))
9601 if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue()))
9602 if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue()))
9603 if (ComputesEqualValues(AI, BI))
9604 return true;
9606 // Otherwise assume they may have a different value.
9607 return false;
9610 bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred,
9611 const SCEV *&LHS, const SCEV *&RHS,
9612 unsigned Depth) {
9613 bool Changed = false;
9614 // Simplifies ICMP to trivial true or false by turning it into '0 == 0' or
9615 // '0 != 0'.
9616 auto TrivialCase = [&](bool TriviallyTrue) {
9617 LHS = RHS = getConstant(ConstantInt::getFalse(getContext()));
9618 Pred = TriviallyTrue ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE;
9619 return true;
9621 // If we hit the max recursion limit bail out.
9622 if (Depth >= 3)
9623 return false;
9625 // Canonicalize a constant to the right side.
9626 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
9627 // Check for both operands constant.
9628 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
9629 if (ConstantExpr::getICmp(Pred,
9630 LHSC->getValue(),
9631 RHSC->getValue())->isNullValue())
9632 return TrivialCase(false);
9633 else
9634 return TrivialCase(true);
9636 // Otherwise swap the operands to put the constant on the right.
9637 std::swap(LHS, RHS);
9638 Pred = ICmpInst::getSwappedPredicate(Pred);
9639 Changed = true;
9642 // If we're comparing an addrec with a value which is loop-invariant in the
9643 // addrec's loop, put the addrec on the left. Also make a dominance check,
9644 // as both operands could be addrecs loop-invariant in each other's loop.
9645 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) {
9646 const Loop *L = AR->getLoop();
9647 if (isLoopInvariant(LHS, L) && properlyDominates(LHS, L->getHeader())) {
9648 std::swap(LHS, RHS);
9649 Pred = ICmpInst::getSwappedPredicate(Pred);
9650 Changed = true;
9654 // If there's a constant operand, canonicalize comparisons with boundary
9655 // cases, and canonicalize *-or-equal comparisons to regular comparisons.
9656 if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) {
9657 const APInt &RA = RC->getAPInt();
9659 bool SimplifiedByConstantRange = false;
9661 if (!ICmpInst::isEquality(Pred)) {
9662 ConstantRange ExactCR = ConstantRange::makeExactICmpRegion(Pred, RA);
9663 if (ExactCR.isFullSet())
9664 return TrivialCase(true);
9665 else if (ExactCR.isEmptySet())
9666 return TrivialCase(false);
9668 APInt NewRHS;
9669 CmpInst::Predicate NewPred;
9670 if (ExactCR.getEquivalentICmp(NewPred, NewRHS) &&
9671 ICmpInst::isEquality(NewPred)) {
9672 // We were able to convert an inequality to an equality.
9673 Pred = NewPred;
9674 RHS = getConstant(NewRHS);
9675 Changed = SimplifiedByConstantRange = true;
9679 if (!SimplifiedByConstantRange) {
9680 switch (Pred) {
9681 default:
9682 break;
9683 case ICmpInst::ICMP_EQ:
9684 case ICmpInst::ICMP_NE:
9685 // Fold ((-1) * %a) + %b == 0 (equivalent to %b-%a == 0) into %a == %b.
9686 if (!RA)
9687 if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(LHS))
9688 if (const SCEVMulExpr *ME =
9689 dyn_cast<SCEVMulExpr>(AE->getOperand(0)))
9690 if (AE->getNumOperands() == 2 && ME->getNumOperands() == 2 &&
9691 ME->getOperand(0)->isAllOnesValue()) {
9692 RHS = AE->getOperand(1);
9693 LHS = ME->getOperand(1);
9694 Changed = true;
9696 break;
9699 // The "Should have been caught earlier!" messages refer to the fact
9700 // that the ExactCR.isFullSet() or ExactCR.isEmptySet() check above
9701 // should have fired on the corresponding cases, and canonicalized the
9702       // check to the trivial case.
9704 case ICmpInst::ICMP_UGE:
9705 assert(!RA.isMinValue() && "Should have been caught earlier!");
9706 Pred = ICmpInst::ICMP_UGT;
9707 RHS = getConstant(RA - 1);
9708 Changed = true;
9709 break;
9710 case ICmpInst::ICMP_ULE:
9711 assert(!RA.isMaxValue() && "Should have been caught earlier!");
9712 Pred = ICmpInst::ICMP_ULT;
9713 RHS = getConstant(RA + 1);
9714 Changed = true;
9715 break;
9716 case ICmpInst::ICMP_SGE:
9717 assert(!RA.isMinSignedValue() && "Should have been caught earlier!");
9718 Pred = ICmpInst::ICMP_SGT;
9719 RHS = getConstant(RA - 1);
9720 Changed = true;
9721 break;
9722 case ICmpInst::ICMP_SLE:
9723 assert(!RA.isMaxSignedValue() && "Should have been caught earlier!");
9724 Pred = ICmpInst::ICMP_SLT;
9725 RHS = getConstant(RA + 1);
9726 Changed = true;
9727 break;
9732 // Check for obvious equality.
9733 if (HasSameValue(LHS, RHS)) {
9734 if (ICmpInst::isTrueWhenEqual(Pred))
9735 return TrivialCase(true);
9736 if (ICmpInst::isFalseWhenEqual(Pred))
9737 return TrivialCase(false);
9740 // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by
9741 // adding or subtracting 1 from one of the operands.
9742 switch (Pred) {
9743 case ICmpInst::ICMP_SLE:
9744 if (!getSignedRangeMax(RHS).isMaxSignedValue()) {
9745 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
9746 SCEV::FlagNSW);
9747 Pred = ICmpInst::ICMP_SLT;
9748 Changed = true;
9749 } else if (!getSignedRangeMin(LHS).isMinSignedValue()) {
9750 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS,
9751 SCEV::FlagNSW);
9752 Pred = ICmpInst::ICMP_SLT;
9753 Changed = true;
9755 break;
9756 case ICmpInst::ICMP_SGE:
9757 if (!getSignedRangeMin(RHS).isMinSignedValue()) {
9758 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS,
9759 SCEV::FlagNSW);
9760 Pred = ICmpInst::ICMP_SGT;
9761 Changed = true;
9762 } else if (!getSignedRangeMax(LHS).isMaxSignedValue()) {
9763 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
9764 SCEV::FlagNSW);
9765 Pred = ICmpInst::ICMP_SGT;
9766 Changed = true;
9768 break;
9769 case ICmpInst::ICMP_ULE:
9770 if (!getUnsignedRangeMax(RHS).isMaxValue()) {
9771 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
9772 SCEV::FlagNUW);
9773 Pred = ICmpInst::ICMP_ULT;
9774 Changed = true;
9775 } else if (!getUnsignedRangeMin(LHS).isMinValue()) {
9776 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS);
9777 Pred = ICmpInst::ICMP_ULT;
9778 Changed = true;
9780 break;
9781 case ICmpInst::ICMP_UGE:
9782 if (!getUnsignedRangeMin(RHS).isMinValue()) {
9783 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS);
9784 Pred = ICmpInst::ICMP_UGT;
9785 Changed = true;
9786 } else if (!getUnsignedRangeMax(LHS).isMaxValue()) {
9787 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
9788 SCEV::FlagNUW);
9789 Pred = ICmpInst::ICMP_UGT;
9790 Changed = true;
9792 break;
9793 default:
9794 break;
9797 // TODO: More simplifications are possible here.
9799 // Recursively simplify until we either hit a recursion limit or nothing
9800 // changes.
9801 if (Changed)
9802 return SimplifyICmpOperands(Pred, LHS, RHS, Depth+1);
9804 return Changed;
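// A few canonicalizations performed above (illustrative): a constant LHS
// such as "5 u> %x" is swapped to "%x u< 5"; "%x u<= 5" becomes "%x u< 6"
// (5 is not the maximum unsigned value, so adding 1 is safe); and
// "(-1 * %a) + %b == 0" is rewritten to "%a == %b".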
9807 bool ScalarEvolution::isKnownNegative(const SCEV *S) {
9808 return getSignedRangeMax(S).isNegative();
9811 bool ScalarEvolution::isKnownPositive(const SCEV *S) {
9812 return getSignedRangeMin(S).isStrictlyPositive();
9815 bool ScalarEvolution::isKnownNonNegative(const SCEV *S) {
9816 return !getSignedRangeMin(S).isNegative();
9819 bool ScalarEvolution::isKnownNonPositive(const SCEV *S) {
9820 return !getSignedRangeMax(S).isStrictlyPositive();
9823 bool ScalarEvolution::isKnownNonZero(const SCEV *S) {
9824 return getUnsignedRangeMin(S) != 0;
9827 std::pair<const SCEV *, const SCEV *>
9828 ScalarEvolution::SplitIntoInitAndPostInc(const Loop *L, const SCEV *S) {
9829 // Compute SCEV on entry of loop L.
9830 const SCEV *Start = SCEVInitRewriter::rewrite(S, L, *this);
9831 if (Start == getCouldNotCompute())
9832 return { Start, Start };
9833 // Compute post increment SCEV for loop L.
9834 const SCEV *PostInc = SCEVPostIncRewriter::rewrite(S, L, *this);
9835 assert(PostInc != getCouldNotCompute() && "Unexpected could not compute");
9836 return { Start, PostInc };
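// For example (illustrative): for S = {0,+,2}<L>, the returned pair is
// { 0, {2,+,2}<L> }: the value of S on entry to L, and its value after the
// first backedge is taken, respectively.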
9839 bool ScalarEvolution::isKnownViaInduction(ICmpInst::Predicate Pred,
9840 const SCEV *LHS, const SCEV *RHS) {
9841 // First collect all loops.
9842 SmallPtrSet<const Loop *, 8> LoopsUsed;
9843 getUsedLoops(LHS, LoopsUsed);
9844 getUsedLoops(RHS, LoopsUsed);
9846 if (LoopsUsed.empty())
9847 return false;
9849 // Domination relationship must be a linear order on collected loops.
9850 #ifndef NDEBUG
9851 for (auto *L1 : LoopsUsed)
9852 for (auto *L2 : LoopsUsed)
9853 assert((DT.dominates(L1->getHeader(), L2->getHeader()) ||
9854 DT.dominates(L2->getHeader(), L1->getHeader())) &&
9855 "Domination relationship is not a linear order");
9856 #endif
9858 const Loop *MDL =
9859 *std::max_element(LoopsUsed.begin(), LoopsUsed.end(),
9860 [&](const Loop *L1, const Loop *L2) {
9861 return DT.properlyDominates(L1->getHeader(), L2->getHeader());
9864 // Get init and post increment value for LHS.
9865 auto SplitLHS = SplitIntoInitAndPostInc(MDL, LHS);
9866   // If LHS contains an unknown non-invariant SCEV, then bail out.
9867 if (SplitLHS.first == getCouldNotCompute())
9868 return false;
9869 assert (SplitLHS.second != getCouldNotCompute() && "Unexpected CNC");
9870 // Get init and post increment value for RHS.
9871 auto SplitRHS = SplitIntoInitAndPostInc(MDL, RHS);
9872   // If RHS contains an unknown non-invariant SCEV, then bail out.
9873 if (SplitRHS.first == getCouldNotCompute())
9874 return false;
9875 assert (SplitRHS.second != getCouldNotCompute() && "Unexpected CNC");
9876 // It is possible that init SCEV contains an invariant load but it does
9877 // not dominate MDL and is not available at MDL loop entry, so we should
9878 // check it here.
9879 if (!isAvailableAtLoopEntry(SplitLHS.first, MDL) ||
9880 !isAvailableAtLoopEntry(SplitRHS.first, MDL))
9881 return false;
9883   // The backedge guard check appears to be faster than the entry one, so
9884   // checking it first can short-circuit and speed up the whole estimation.
9885 return isLoopBackedgeGuardedByCond(MDL, Pred, SplitLHS.second,
9886 SplitRHS.second) &&
9887 isLoopEntryGuardedByCond(MDL, Pred, SplitLHS.first, SplitRHS.first);
9890 bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred,
9891 const SCEV *LHS, const SCEV *RHS) {
9892 // Canonicalize the inputs first.
9893 (void)SimplifyICmpOperands(Pred, LHS, RHS);
9895 if (isKnownViaInduction(Pred, LHS, RHS))
9896 return true;
9898 if (isKnownPredicateViaSplitting(Pred, LHS, RHS))
9899 return true;
9901 // Otherwise see what can be done with some simple reasoning.
9902 return isKnownViaNonRecursiveReasoning(Pred, LHS, RHS);
9905 Optional<bool> ScalarEvolution::evaluatePredicate(ICmpInst::Predicate Pred,
9906 const SCEV *LHS,
9907 const SCEV *RHS) {
9908 if (isKnownPredicate(Pred, LHS, RHS))
9909 return true;
9910 else if (isKnownPredicate(ICmpInst::getInversePredicate(Pred), LHS, RHS))
9911 return false;
9912 return None;
9915 bool ScalarEvolution::isKnownPredicateAt(ICmpInst::Predicate Pred,
9916 const SCEV *LHS, const SCEV *RHS,
9917 const Instruction *Context) {
9918 // TODO: Analyze guards and assumes from Context's block.
9919 return isKnownPredicate(Pred, LHS, RHS) ||
9920 isBasicBlockEntryGuardedByCond(Context->getParent(), Pred, LHS, RHS);
9923 Optional<bool>
9924 ScalarEvolution::evaluatePredicateAt(ICmpInst::Predicate Pred, const SCEV *LHS,
9925 const SCEV *RHS,
9926 const Instruction *Context) {
9927 Optional<bool> KnownWithoutContext = evaluatePredicate(Pred, LHS, RHS);
9928 if (KnownWithoutContext)
9929 return KnownWithoutContext;
9931 if (isBasicBlockEntryGuardedByCond(Context->getParent(), Pred, LHS, RHS))
9932 return true;
9933 else if (isBasicBlockEntryGuardedByCond(Context->getParent(),
9934 ICmpInst::getInversePredicate(Pred),
9935 LHS, RHS))
9936 return false;
9937 return None;
9940 bool ScalarEvolution::isKnownOnEveryIteration(ICmpInst::Predicate Pred,
9941 const SCEVAddRecExpr *LHS,
9942 const SCEV *RHS) {
9943 const Loop *L = LHS->getLoop();
9944 return isLoopEntryGuardedByCond(L, Pred, LHS->getStart(), RHS) &&
9945 isLoopBackedgeGuardedByCond(L, Pred, LHS->getPostIncExpr(*this), RHS);
9948 Optional<ScalarEvolution::MonotonicPredicateType>
9949 ScalarEvolution::getMonotonicPredicateType(const SCEVAddRecExpr *LHS,
9950 ICmpInst::Predicate Pred) {
9951 auto Result = getMonotonicPredicateTypeImpl(LHS, Pred);
9953 #ifndef NDEBUG
9954 // Verify an invariant: inverting the predicate should turn a monotonically
9955 // increasing change to a monotonically decreasing one, and vice versa.
9956 if (Result) {
9957 auto ResultSwapped =
9958 getMonotonicPredicateTypeImpl(LHS, ICmpInst::getSwappedPredicate(Pred));
9960 assert(ResultSwapped.hasValue() && "should be able to analyze both!");
9961 assert(ResultSwapped.getValue() != Result.getValue() &&
9962 "monotonicity should flip as we flip the predicate");
9964 #endif
9966 return Result;
9969 Optional<ScalarEvolution::MonotonicPredicateType>
9970 ScalarEvolution::getMonotonicPredicateTypeImpl(const SCEVAddRecExpr *LHS,
9971 ICmpInst::Predicate Pred) {
9972 // A zero step value for LHS means the induction variable is essentially a
9973 // loop invariant value. We don't really depend on the predicate actually
9974 // flipping from false to true (for increasing predicates, and the other way
9975 // around for decreasing predicates), all we care about is that *if* the
9976 // predicate changes then it only changes from false to true.
9978 // A zero step value in itself is not very useful, but there may be places
9979 // where SCEV can prove X >= 0 but not prove X > 0, so it is helpful to be
9980 // as general as possible.
9982 // Only handle LE/LT/GE/GT predicates.
9983 if (!ICmpInst::isRelational(Pred))
9984 return None;
9986 bool IsGreater = ICmpInst::isGE(Pred) || ICmpInst::isGT(Pred);
9987 assert((IsGreater || ICmpInst::isLE(Pred) || ICmpInst::isLT(Pred)) &&
9988 "Should be greater or less!");
9990 // Check that AR does not wrap.
9991 if (ICmpInst::isUnsigned(Pred)) {
9992 if (!LHS->hasNoUnsignedWrap())
9993 return None;
9994 return IsGreater ? MonotonicallyIncreasing : MonotonicallyDecreasing;
9995 } else {
9996 assert(ICmpInst::isSigned(Pred) &&
9997 "Relational predicate is either signed or unsigned!");
9998 if (!LHS->hasNoSignedWrap())
9999 return None;
10001 const SCEV *Step = LHS->getStepRecurrence(*this);
10003 if (isKnownNonNegative(Step))
10004 return IsGreater ? MonotonicallyIncreasing : MonotonicallyDecreasing;
10006 if (isKnownNonPositive(Step))
10007 return !IsGreater ? MonotonicallyIncreasing : MonotonicallyDecreasing;
10009 return None;
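// For example (illustrative): for LHS = {0,+,1}<nuw> and Pred = u<, the
// result is MonotonicallyDecreasing, since "i u< n" can only change from
// true to false as i grows; for the same addrec with <nsw> and Pred = s>,
// the result is MonotonicallyIncreasing.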
10013 Optional<ScalarEvolution::LoopInvariantPredicate>
10014 ScalarEvolution::getLoopInvariantPredicate(ICmpInst::Predicate Pred,
10015 const SCEV *LHS, const SCEV *RHS,
10016 const Loop *L) {
10018 // If there is a loop-invariant, force it into the RHS, otherwise bail out.
10019 if (!isLoopInvariant(RHS, L)) {
10020 if (!isLoopInvariant(LHS, L))
10021 return None;
10023 std::swap(LHS, RHS);
10024 Pred = ICmpInst::getSwappedPredicate(Pred);
10027 const SCEVAddRecExpr *ArLHS = dyn_cast<SCEVAddRecExpr>(LHS);
10028 if (!ArLHS || ArLHS->getLoop() != L)
10029 return None;
10031 auto MonotonicType = getMonotonicPredicateType(ArLHS, Pred);
10032 if (!MonotonicType)
10033 return None;
10034 // If the predicate "ArLHS `Pred` RHS" monotonically increases from false to
10035 // true as the loop iterates, and the backedge is control dependent on
10036 // "ArLHS `Pred` RHS" == true then we can reason as follows:
10038 // * if the predicate was false in the first iteration then the predicate
10039 // is never evaluated again, since the loop exits without taking the
10040 // backedge.
10041 // * if the predicate was true in the first iteration then it will
10042 // continue to be true for all future iterations since it is
10043 // monotonically increasing.
10045 // For both the above possibilities, we can replace the loop varying
10046 // predicate with its value on the first iteration of the loop (which is
10047 // loop invariant).
10049 // A similar reasoning applies for a monotonically decreasing predicate, by
10050 // replacing true with false and false with true in the above two bullets.
10051 bool Increasing = *MonotonicType == ScalarEvolution::MonotonicallyIncreasing;
10052 auto P = Increasing ? Pred : ICmpInst::getInversePredicate(Pred);
10054 if (!isLoopBackedgeGuardedByCond(L, P, LHS, RHS))
10055 return None;
10057 return ScalarEvolution::LoopInvariantPredicate(Pred, ArLHS->getStart(), RHS);
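// For example (illustrative): with ArLHS = {0,+,1}<nsw> and a backedge
// guarded by "ArLHS s> %n", the predicate "ArLHS s> %n" is monotonically
// increasing, so its value anywhere in the loop equals its value on the
// first iteration, namely "0 s> %n".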
10060 Optional<ScalarEvolution::LoopInvariantPredicate>
10061 ScalarEvolution::getLoopInvariantExitCondDuringFirstIterations(
10062 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L,
10063 const Instruction *Context, const SCEV *MaxIter) {
10064 // Try to prove the following set of facts:
10065 // - The predicate is monotonic in the iteration space.
10066 // - If the check does not fail on the 1st iteration:
10067 // - No overflow will happen during first MaxIter iterations;
10068 // - It will not fail on the MaxIter'th iteration.
10069 // If the check does fail on the 1st iteration, we leave the loop and no
10070 // other checks matter.
10072 // If there is a loop-invariant, force it into the RHS, otherwise bail out.
10073 if (!isLoopInvariant(RHS, L)) {
10074 if (!isLoopInvariant(LHS, L))
10075 return None;
10077 std::swap(LHS, RHS);
10078 Pred = ICmpInst::getSwappedPredicate(Pred);
10081 auto *AR = dyn_cast<SCEVAddRecExpr>(LHS);
10082 if (!AR || AR->getLoop() != L)
10083 return None;
10085 // The predicate must be relational (i.e. <, <=, >=, >).
10086 if (!ICmpInst::isRelational(Pred))
10087 return None;
10089 // TODO: Support steps other than +/- 1.
10090 const SCEV *Step = AR->getStepRecurrence(*this);
10091 auto *One = getOne(Step->getType());
10092 auto *MinusOne = getNegativeSCEV(One);
10093 if (Step != One && Step != MinusOne)
10094 return None;
10096   // A type mismatch here means that MaxIter is potentially larger than the max
10097   // unsigned value in the start type, which means we cannot prove no-wrap for the
10098 // indvar.
10099 if (AR->getType() != MaxIter->getType())
10100 return None;
10102 // Value of IV on suggested last iteration.
10103 const SCEV *Last = AR->evaluateAtIteration(MaxIter, *this);
10104 // Does it still meet the requirement?
10105 if (!isLoopBackedgeGuardedByCond(L, Pred, Last, RHS))
10106 return None;
10107 // Because step is +/- 1 and MaxIter has same type as Start (i.e. it does
10108 // not exceed max unsigned value of this type), this effectively proves
10109 // that there is no wrap during the iteration. To prove that there is no
10110 // signed/unsigned wrap, we need to check that
10111 // Start <= Last for step = 1 or Start >= Last for step = -1.
10112 ICmpInst::Predicate NoOverflowPred =
10113 CmpInst::isSigned(Pred) ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
10114 if (Step == MinusOne)
10115 NoOverflowPred = CmpInst::getSwappedPredicate(NoOverflowPred);
10116 const SCEV *Start = AR->getStart();
10117 if (!isKnownPredicateAt(NoOverflowPred, Start, Last, Context))
10118 return None;
10120 // Everything is fine.
10121 return ScalarEvolution::LoopInvariantPredicate(Pred, Start, RHS);
10124 bool ScalarEvolution::isKnownPredicateViaConstantRanges(
10125 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS) {
10126 if (HasSameValue(LHS, RHS))
10127 return ICmpInst::isTrueWhenEqual(Pred);
10129 // This code is split out from isKnownPredicate because it is called from
10130 // within isLoopEntryGuardedByCond.
10132 auto CheckRanges = [&](const ConstantRange &RangeLHS,
10133 const ConstantRange &RangeRHS) {
10134 return RangeLHS.icmp(Pred, RangeRHS);
10137 // The check at the top of the function catches the case where the values are
10138 // known to be equal.
10139 if (Pred == CmpInst::ICMP_EQ)
10140 return false;
10142 if (Pred == CmpInst::ICMP_NE) {
10143 if (CheckRanges(getSignedRange(LHS), getSignedRange(RHS)) ||
10144 CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS)))
10145 return true;
10146 auto *Diff = getMinusSCEV(LHS, RHS);
10147 return !isa<SCEVCouldNotCompute>(Diff) && isKnownNonZero(Diff);
10150 if (CmpInst::isSigned(Pred))
10151 return CheckRanges(getSignedRange(LHS), getSignedRange(RHS));
10153 return CheckRanges(getUnsignedRange(LHS), getUnsignedRange(RHS));
10156 bool ScalarEvolution::isKnownPredicateViaNoOverflow(ICmpInst::Predicate Pred,
10157 const SCEV *LHS,
10158 const SCEV *RHS) {
10159 // Match X to (A + C1)<ExpectedFlags> and Y to (A + C2)<ExpectedFlags>, where
10160 // C1 and C2 are constant integers. If either X or Y are not add expressions,
10161 // consider them as X + 0 and Y + 0 respectively. C1 and C2 are returned via
10162 // OutC1 and OutC2.
10163 auto MatchBinaryAddToConst = [this](const SCEV *X, const SCEV *Y,
10164 APInt &OutC1, APInt &OutC2,
10165 SCEV::NoWrapFlags ExpectedFlags) {
10166 const SCEV *XNonConstOp, *XConstOp;
10167 const SCEV *YNonConstOp, *YConstOp;
10168 SCEV::NoWrapFlags XFlagsPresent;
10169 SCEV::NoWrapFlags YFlagsPresent;
10171 if (!splitBinaryAdd(X, XConstOp, XNonConstOp, XFlagsPresent)) {
10172 XConstOp = getZero(X->getType());
10173 XNonConstOp = X;
10174 XFlagsPresent = ExpectedFlags;
10176 if (!isa<SCEVConstant>(XConstOp) ||
10177 (XFlagsPresent & ExpectedFlags) != ExpectedFlags)
10178 return false;
10180 if (!splitBinaryAdd(Y, YConstOp, YNonConstOp, YFlagsPresent)) {
10181 YConstOp = getZero(Y->getType());
10182 YNonConstOp = Y;
10183 YFlagsPresent = ExpectedFlags;
10186 if (!isa<SCEVConstant>(YConstOp) ||
10187 (YFlagsPresent & ExpectedFlags) != ExpectedFlags)
10188 return false;
10190 if (YNonConstOp != XNonConstOp)
10191 return false;
10193 OutC1 = cast<SCEVConstant>(XConstOp)->getAPInt();
10194 OutC2 = cast<SCEVConstant>(YConstOp)->getAPInt();
10196 return true;
10199 APInt C1;
10200 APInt C2;
10202 switch (Pred) {
10203 default:
10204 break;
10206 case ICmpInst::ICMP_SGE:
10207 std::swap(LHS, RHS);
10208 LLVM_FALLTHROUGH;
10209 case ICmpInst::ICMP_SLE:
10210 // (X + C1)<nsw> s<= (X + C2)<nsw> if C1 s<= C2.
10211 if (MatchBinaryAddToConst(LHS, RHS, C1, C2, SCEV::FlagNSW) && C1.sle(C2))
10212 return true;
10214 break;
10216 case ICmpInst::ICMP_SGT:
10217 std::swap(LHS, RHS);
10218 LLVM_FALLTHROUGH;
10219 case ICmpInst::ICMP_SLT:
10220 // (X + C1)<nsw> s< (X + C2)<nsw> if C1 s< C2.
10221 if (MatchBinaryAddToConst(LHS, RHS, C1, C2, SCEV::FlagNSW) && C1.slt(C2))
10222 return true;
10224 break;
10226 case ICmpInst::ICMP_UGE:
10227 std::swap(LHS, RHS);
10228 LLVM_FALLTHROUGH;
10229 case ICmpInst::ICMP_ULE:
10230 // (X + C1)<nuw> u<= (X + C2)<nuw> for C1 u<= C2.
10231 if (MatchBinaryAddToConst(RHS, LHS, C2, C1, SCEV::FlagNUW) && C1.ule(C2))
10232 return true;
10234 break;
10236 case ICmpInst::ICMP_UGT:
10237 std::swap(LHS, RHS);
10238 LLVM_FALLTHROUGH;
10239 case ICmpInst::ICMP_ULT:
10240 // (X + C1)<nuw> u< (X + C2)<nuw> if C1 u< C2.
10241 if (MatchBinaryAddToConst(RHS, LHS, C2, C1, SCEV::FlagNUW) && C1.ult(C2))
10242 return true;
10243 break;
10246 return false;
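// For example (illustrative): (%x + 1)<nsw> s< (%x + 3)<nsw> is known to
// hold because both sides are nsw adds of the same base value and 1 s< 3;
// the analogous reasoning with <nuw> handles the unsigned predicates.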
10249 bool ScalarEvolution::isKnownPredicateViaSplitting(ICmpInst::Predicate Pred,
10250 const SCEV *LHS,
10251 const SCEV *RHS) {
10252 if (Pred != ICmpInst::ICMP_ULT || ProvingSplitPredicate)
10253 return false;
10255 // Allowing arbitrary number of activations of isKnownPredicateViaSplitting on
10256 // the stack can result in exponential time complexity.
10257 SaveAndRestore<bool> Restore(ProvingSplitPredicate, true);
10259 // If L >= 0 then I `ult` L <=> I >= 0 && I `slt` L
10261 // To prove L >= 0 we use isKnownNonNegative whereas to prove I >= 0 we use
10262 // isKnownPredicate. isKnownPredicate is more powerful, but also more
10263 // expensive; and using isKnownNonNegative(RHS) is sufficient for most of the
10264 // interesting cases seen in practice. We can consider "upgrading" L >= 0 to
10265 // use isKnownPredicate later if needed.
10266 return isKnownNonNegative(RHS) &&
10267 isKnownPredicate(CmpInst::ICMP_SGE, LHS, getZero(LHS->getType())) &&
10268 isKnownPredicate(CmpInst::ICMP_SLT, LHS, RHS);
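// For example (illustrative): to prove %i u< %n where %n is known to be
// non-negative, it suffices to prove 0 s<= %i and %i s< %n; these signed
// facts are often provable where the unsigned fact is not directly known.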
10271 bool ScalarEvolution::isImpliedViaGuard(const BasicBlock *BB,
10272 ICmpInst::Predicate Pred,
10273 const SCEV *LHS, const SCEV *RHS) {
10274 // No need to even try if we know the module has no guards.
10275 if (!HasGuards)
10276 return false;
10278 return any_of(*BB, [&](const Instruction &I) {
10279 using namespace llvm::PatternMatch;
10281 Value *Condition;
10282 return match(&I, m_Intrinsic<Intrinsic::experimental_guard>(
10283 m_Value(Condition))) &&
10284 isImpliedCond(Pred, LHS, RHS, Condition, false);
10288 /// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is
10289 /// protected by a conditional between LHS and RHS. This is used to
10290 /// eliminate casts.
10291 bool
10292 ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
10293 ICmpInst::Predicate Pred,
10294 const SCEV *LHS, const SCEV *RHS) {
10295 // Interpret a null as meaning no loop, where there is obviously no guard
10296 // (interprocedural conditions notwithstanding).
10297 if (!L) return true;
10299 if (VerifyIR)
10300 assert(!verifyFunction(*L->getHeader()->getParent(), &dbgs()) &&
10301 "This cannot be done on broken IR!");
10304 if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
10305 return true;
10307 BasicBlock *Latch = L->getLoopLatch();
10308 if (!Latch)
10309 return false;
10311 BranchInst *LoopContinuePredicate =
10312 dyn_cast<BranchInst>(Latch->getTerminator());
10313 if (LoopContinuePredicate && LoopContinuePredicate->isConditional() &&
10314 isImpliedCond(Pred, LHS, RHS,
10315 LoopContinuePredicate->getCondition(),
10316 LoopContinuePredicate->getSuccessor(0) != L->getHeader()))
10317 return true;
10319 // We don't want more than one activation of the following loops on the stack
10320 // -- that can lead to O(n!) time complexity.
10321 if (WalkingBEDominatingConds)
10322 return false;
10324 SaveAndRestore<bool> ClearOnExit(WalkingBEDominatingConds, true);
10326 // See if we can exploit a trip count to prove the predicate.
10327 const auto &BETakenInfo = getBackedgeTakenInfo(L);
10328 const SCEV *LatchBECount = BETakenInfo.getExact(Latch, this);
10329 if (LatchBECount != getCouldNotCompute()) {
10330 // We know that Latch branches back to the loop header exactly
10331     // LatchBECount times. This means the backedge condition at Latch is
10332 // equivalent to "{0,+,1} u< LatchBECount".
10333 Type *Ty = LatchBECount->getType();
10334 auto NoWrapFlags = SCEV::NoWrapFlags(SCEV::FlagNUW | SCEV::FlagNW);
10335 const SCEV *LoopCounter =
10336 getAddRecExpr(getZero(Ty), getOne(Ty), L, NoWrapFlags);
10337 if (isImpliedCond(Pred, LHS, RHS, ICmpInst::ICMP_ULT, LoopCounter,
10338 LatchBECount))
10339 return true;
10342 // Check conditions due to any @llvm.assume intrinsics.
10343 for (auto &AssumeVH : AC.assumptions()) {
10344 if (!AssumeVH)
10345 continue;
10346 auto *CI = cast<CallInst>(AssumeVH);
10347 if (!DT.dominates(CI, Latch->getTerminator()))
10348 continue;
10350 if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false))
10351 return true;
10354 // If the loop is not reachable from the entry block, we risk running into an
10355 // infinite loop as we walk up into the dom tree. These loops do not matter
10356 // anyway, so we just return a conservative answer when we see them.
10357 if (!DT.isReachableFromEntry(L->getHeader()))
10358 return false;
10360 if (isImpliedViaGuard(Latch, Pred, LHS, RHS))
10361 return true;
10363 for (DomTreeNode *DTN = DT[Latch], *HeaderDTN = DT[L->getHeader()];
10364 DTN != HeaderDTN; DTN = DTN->getIDom()) {
10365 assert(DTN && "should reach the loop header before reaching the root!");
10367 BasicBlock *BB = DTN->getBlock();
10368 if (isImpliedViaGuard(BB, Pred, LHS, RHS))
10369 return true;
10371 BasicBlock *PBB = BB->getSinglePredecessor();
10372 if (!PBB)
10373 continue;
10375 BranchInst *ContinuePredicate = dyn_cast<BranchInst>(PBB->getTerminator());
10376 if (!ContinuePredicate || !ContinuePredicate->isConditional())
10377 continue;
10379 Value *Condition = ContinuePredicate->getCondition();
10381 // If we have an edge `E` within the loop body that dominates the only
10382 // latch, the condition guarding `E` also guards the backedge. This
10383 // reasoning works only for loops with a single latch.
10385 BasicBlockEdge DominatingEdge(PBB, BB);
10386 if (DominatingEdge.isSingleEdge()) {
10387 // We're constructively (and conservatively) enumerating edges within the
10388 // loop body that dominate the latch. The dominator tree had better agree
10389 // with us on this:
10390 assert(DT.dominates(DominatingEdge, Latch) && "should be!");
10392 if (isImpliedCond(Pred, LHS, RHS, Condition,
10393 BB != ContinuePredicate->getSuccessor(0)))
10394 return true;
10395 }
10396 }
10398 return false;
10399 }
10401 bool ScalarEvolution::isBasicBlockEntryGuardedByCond(const BasicBlock *BB,
10402 ICmpInst::Predicate Pred,
10403 const SCEV *LHS,
10404 const SCEV *RHS) {
10405 if (VerifyIR)
10406 assert(!verifyFunction(*BB->getParent(), &dbgs()) &&
10407 "This cannot be done on broken IR!");
10409 // If we cannot prove strict comparison (e.g. a > b), maybe we can prove
10410 // the facts (a >= b && a != b) separately. A typical situation is when the
10411 // non-strict comparison is known from ranges and non-equality is known from
10412 // dominating predicates. If we are proving strict comparison, we always try
10413 // to prove non-equality and non-strict comparison separately.
10414 auto NonStrictPredicate = ICmpInst::getNonStrictPredicate(Pred);
10415 const bool ProvingStrictComparison = (Pred != NonStrictPredicate);
10416 bool ProvedNonStrictComparison = false;
10417 bool ProvedNonEquality = false;
10419 auto SplitAndProve =
10420 [&](std::function<bool(ICmpInst::Predicate)> Fn) -> bool {
10421 if (!ProvedNonStrictComparison)
10422 ProvedNonStrictComparison = Fn(NonStrictPredicate);
10423 if (!ProvedNonEquality)
10424 ProvedNonEquality = Fn(ICmpInst::ICMP_NE);
10425 if (ProvedNonStrictComparison && ProvedNonEquality)
10426 return true;
10427 return false;
10428 };
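// A made-up instance of the split: to prove "a u> b" when constant ranges
// give a in [5, 10] and b in [0, 5] (so "a u>= b" is known), a dominating
// guard "a != b" supplies the missing strictness, and SplitAndProve combines
// the two facts into "a u> b".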
10430 if (ProvingStrictComparison) {
10431 auto ProofFn = [&](ICmpInst::Predicate P) {
10432 return isKnownViaNonRecursiveReasoning(P, LHS, RHS);
10433 };
10434 if (SplitAndProve(ProofFn))
10435 return true;
10436 }
10438 // Try to prove (Pred, LHS, RHS) using isImpliedViaGuard.
10439 auto ProveViaGuard = [&](const BasicBlock *Block) {
10440 if (isImpliedViaGuard(Block, Pred, LHS, RHS))
10441 return true;
10442 if (ProvingStrictComparison) {
10443 auto ProofFn = [&](ICmpInst::Predicate P) {
10444 return isImpliedViaGuard(Block, P, LHS, RHS);
10445 };
10446 if (SplitAndProve(ProofFn))
10447 return true;
10448 }
10449 return false;
10450 };
10452 // Try to prove (Pred, LHS, RHS) using isImpliedCond.
10453 auto ProveViaCond = [&](const Value *Condition, bool Inverse) {
10454 const Instruction *Context = &BB->front();
10455 if (isImpliedCond(Pred, LHS, RHS, Condition, Inverse, Context))
10456 return true;
10457 if (ProvingStrictComparison) {
10458 auto ProofFn = [&](ICmpInst::Predicate P) {
10459 return isImpliedCond(P, LHS, RHS, Condition, Inverse, Context);
10460 };
10461 if (SplitAndProve(ProofFn))
10462 return true;
10463 }
10464 return false;
10465 };
10467 // Starting at the block's predecessor, climb up the predecessor chain as long
10468 // as we can find predecessors that have a unique successor leading to the
10469 // original block.
10470 const Loop *ContainingLoop = LI.getLoopFor(BB);
10471 const BasicBlock *PredBB;
10472 if (ContainingLoop && ContainingLoop->getHeader() == BB)
10473 PredBB = ContainingLoop->getLoopPredecessor();
10474 else
10475 PredBB = BB->getSinglePredecessor();
10476 for (std::pair<const BasicBlock *, const BasicBlock *> Pair(PredBB, BB);
10477 Pair.first; Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
10478 if (ProveViaGuard(Pair.first))
10479 return true;
10481 const BranchInst *LoopEntryPredicate =
10482 dyn_cast<BranchInst>(Pair.first->getTerminator());
10483 if (!LoopEntryPredicate ||
10484 LoopEntryPredicate->isUnconditional())
10485 continue;
10487 if (ProveViaCond(LoopEntryPredicate->getCondition(),
10488 LoopEntryPredicate->getSuccessor(0) != Pair.second))
10489 return true;
10490 }
10492 // Check conditions due to any @llvm.assume intrinsics.
10493 for (auto &AssumeVH : AC.assumptions()) {
10494 if (!AssumeVH)
10495 continue;
10496 auto *CI = cast<CallInst>(AssumeVH);
10497 if (!DT.dominates(CI, BB))
10498 continue;
10500 if (ProveViaCond(CI->getArgOperand(0), false))
10501 return true;
10502 }
10504 return false;
10505 }
10507 bool ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L,
10508 ICmpInst::Predicate Pred,
10509 const SCEV *LHS,
10510 const SCEV *RHS) {
10511 // Interpret a null as meaning no loop, where there is obviously no guard
10512 // (interprocedural conditions notwithstanding).
10513 if (!L)
10514 return false;
10516 // Both LHS and RHS must be available at loop entry.
10517 assert(isAvailableAtLoopEntry(LHS, L) &&
10518 "LHS is not available at Loop Entry");
10519 assert(isAvailableAtLoopEntry(RHS, L) &&
10520 "RHS is not available at Loop Entry");
10522 if (isKnownViaNonRecursiveReasoning(Pred, LHS, RHS))
10523 return true;
10525 return isBasicBlockEntryGuardedByCond(L->getHeader(), Pred, LHS, RHS);
10526 }
10528 bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS,
10529 const SCEV *RHS,
10530 const Value *FoundCondValue, bool Inverse,
10531 const Instruction *Context) {
10532 // A false condition implies anything. Do not bother analyzing it further.
10533 if (FoundCondValue ==
10534 ConstantInt::getBool(FoundCondValue->getContext(), Inverse))
10535 return true;
10537 if (!PendingLoopPredicates.insert(FoundCondValue).second)
10538 return false;
10540 auto ClearOnExit =
10541 make_scope_exit([&]() { PendingLoopPredicates.erase(FoundCondValue); });
10543 // Recursively handle And and Or conditions.
10544 const Value *Op0, *Op1;
10545 if (match(FoundCondValue, m_LogicalAnd(m_Value(Op0), m_Value(Op1)))) {
10546 if (!Inverse)
10547 return isImpliedCond(Pred, LHS, RHS, Op0, Inverse, Context) ||
10548 isImpliedCond(Pred, LHS, RHS, Op1, Inverse, Context);
10549 } else if (match(FoundCondValue, m_LogicalOr(m_Value(Op0), m_Value(Op1)))) {
10550 if (Inverse)
10551 return isImpliedCond(Pred, LHS, RHS, Op0, Inverse, Context) ||
10552 isImpliedCond(Pred, LHS, RHS, Op1, Inverse, Context);
10553 }
10555 const ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue);
10556 if (!ICI) return false;
10558 // We have found a conditional branch that dominates the loop or controls the
10559 // loop latch. Check to see if it is the comparison we are looking for.
10560 ICmpInst::Predicate FoundPred;
10561 if (Inverse)
10562 FoundPred = ICI->getInversePredicate();
10563 else
10564 FoundPred = ICI->getPredicate();
10566 const SCEV *FoundLHS = getSCEV(ICI->getOperand(0));
10567 const SCEV *FoundRHS = getSCEV(ICI->getOperand(1));
10569 return isImpliedCond(Pred, LHS, RHS, FoundPred, FoundLHS, FoundRHS, Context);
10570 }
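// Sketch of the Inverse flag (hypothetical IR): for
//   %cmp = icmp sgt i32 %x, %y
//   br i1 %cmp, label %exit, label %body
// reaching %body means %cmp was false, so the caller passes Inverse == true
// and the branch contributes the inverted fact "%x s<= %y".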
10572 bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred, const SCEV *LHS,
10573 const SCEV *RHS,
10574 ICmpInst::Predicate FoundPred,
10575 const SCEV *FoundLHS, const SCEV *FoundRHS,
10576 const Instruction *Context) {
10577 // Balance the types.
10578 if (getTypeSizeInBits(LHS->getType()) <
10579 getTypeSizeInBits(FoundLHS->getType())) {
10580 // For unsigned and equality predicates, try to prove that both found
10581 // operands fit into narrow unsigned range. If so, try to prove facts in
10582 // narrow types.
10583 if (!CmpInst::isSigned(FoundPred) && !FoundLHS->getType()->isPointerTy()) {
10584 auto *NarrowType = LHS->getType();
10585 auto *WideType = FoundLHS->getType();
10586 auto BitWidth = getTypeSizeInBits(NarrowType);
10587 const SCEV *MaxValue = getZeroExtendExpr(
10588 getConstant(APInt::getMaxValue(BitWidth)), WideType);
10589 if (isKnownPredicate(ICmpInst::ICMP_ULE, FoundLHS, MaxValue) &&
10590 isKnownPredicate(ICmpInst::ICMP_ULE, FoundRHS, MaxValue)) {
10591 const SCEV *TruncFoundLHS = getTruncateExpr(FoundLHS, NarrowType);
10592 const SCEV *TruncFoundRHS = getTruncateExpr(FoundRHS, NarrowType);
10593 if (isImpliedCondBalancedTypes(Pred, LHS, RHS, FoundPred, TruncFoundLHS,
10594 TruncFoundRHS, Context))
10595 return true;
10596 }
10597 }
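// Hypothetical instance of the narrowing above: to prove an i8 fact from an
// i32 guard "FoundLHS u< FoundRHS", it suffices that both i32 values are
// known to be u<= 255; then the guard survives truncation to i8 and can be
// compared against LHS/RHS without widening them.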
10599 if (LHS->getType()->isPointerTy())
10600 return false;
10601 if (CmpInst::isSigned(Pred)) {
10602 LHS = getSignExtendExpr(LHS, FoundLHS->getType());
10603 RHS = getSignExtendExpr(RHS, FoundLHS->getType());
10604 } else {
10605 LHS = getZeroExtendExpr(LHS, FoundLHS->getType());
10606 RHS = getZeroExtendExpr(RHS, FoundLHS->getType());
10607 }
10608 } else if (getTypeSizeInBits(LHS->getType()) >
10609 getTypeSizeInBits(FoundLHS->getType())) {
10610 if (FoundLHS->getType()->isPointerTy())
10611 return false;
10612 if (CmpInst::isSigned(FoundPred)) {
10613 FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType());
10614 FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType());
10615 } else {
10616 FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType());
10617 FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType());
10618 }
10619 }
10620 return isImpliedCondBalancedTypes(Pred, LHS, RHS, FoundPred, FoundLHS,
10621 FoundRHS, Context);
10622 }
10624 bool ScalarEvolution::isImpliedCondBalancedTypes(
10625 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS,
10626 ICmpInst::Predicate FoundPred, const SCEV *FoundLHS, const SCEV *FoundRHS,
10627 const Instruction *Context) {
10628 assert(getTypeSizeInBits(LHS->getType()) ==
10629 getTypeSizeInBits(FoundLHS->getType()) &&
10630 "Types should be balanced!");
10631 // Canonicalize the query to match the way instcombine will have
10632 // canonicalized the comparison.
10633 if (SimplifyICmpOperands(Pred, LHS, RHS))
10634 if (LHS == RHS)
10635 return CmpInst::isTrueWhenEqual(Pred);
10636 if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS))
10637 if (FoundLHS == FoundRHS)
10638 return CmpInst::isFalseWhenEqual(FoundPred);
10640 // Check to see if we can make the LHS or RHS match.
10641 if (LHS == FoundRHS || RHS == FoundLHS) {
10642 if (isa<SCEVConstant>(RHS)) {
10643 std::swap(FoundLHS, FoundRHS);
10644 FoundPred = ICmpInst::getSwappedPredicate(FoundPred);
10645 } else {
10646 std::swap(LHS, RHS);
10647 Pred = ICmpInst::getSwappedPredicate(Pred);
10648 }
10649 }
10651 // Check whether the found predicate is the same as the desired predicate.
10652 if (FoundPred == Pred)
10653 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, Context);
10655 // Check whether swapping the found predicate makes it the same as the
10656 // desired predicate.
10657 if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) {
10658 // We can write the implication
10659 // 0. LHS Pred RHS <- FoundLHS SwapPred FoundRHS
10660 // in one of the following ways:
10661 // 1. LHS Pred RHS <- FoundRHS Pred FoundLHS
10662 // 2. RHS SwapPred LHS <- FoundLHS SwapPred FoundRHS
10663 // 3. LHS Pred RHS <- ~FoundLHS Pred ~FoundRHS
10664 // 4. ~LHS SwapPred ~RHS <- FoundLHS SwapPred FoundRHS
10665 // Forms 1. and 2. require swapping the operands of one condition. Don't
10666 // do this if it would break canonical constant/addrec ordering.
10667 if (!isa<SCEVConstant>(RHS) && !isa<SCEVAddRecExpr>(LHS))
10668 return isImpliedCondOperands(FoundPred, RHS, LHS, FoundLHS, FoundRHS,
10669 Context);
10670 if (!isa<SCEVConstant>(FoundRHS) && !isa<SCEVAddRecExpr>(FoundLHS))
10671 return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS, Context);
10673 // Don't try to getNotSCEV pointers.
10674 if (LHS->getType()->isPointerTy() || FoundLHS->getType()->isPointerTy())
10675 return false;
10677 // There's no clear preference between forms 3. and 4., try both.
10678 return isImpliedCondOperands(FoundPred, getNotSCEV(LHS), getNotSCEV(RHS),
10679 FoundLHS, FoundRHS, Context) ||
10680 isImpliedCondOperands(Pred, LHS, RHS, getNotSCEV(FoundLHS),
10681 getNotSCEV(FoundRHS), Context);
10682 }
10684 // An unsigned comparison is the same as a signed comparison when both
10685 // operands are non-negative.
10686 if (CmpInst::isUnsigned(FoundPred) &&
10687 CmpInst::getSignedPredicate(FoundPred) == Pred &&
10688 isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS))
10689 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, Context);
10691 // Check if we can make progress by sharpening ranges.
10692 if (FoundPred == ICmpInst::ICMP_NE &&
10693 (isa<SCEVConstant>(FoundLHS) || isa<SCEVConstant>(FoundRHS))) {
10695 const SCEVConstant *C = nullptr;
10696 const SCEV *V = nullptr;
10698 if (isa<SCEVConstant>(FoundLHS)) {
10699 C = cast<SCEVConstant>(FoundLHS);
10700 V = FoundRHS;
10701 } else {
10702 C = cast<SCEVConstant>(FoundRHS);
10703 V = FoundLHS;
10704 }
10706 // The guarding predicate tells us that C != V. If the known range
10707 // of V is [C, t), we can sharpen the range to [C + 1, t). The
10708 // range we consider has to correspond to the same signedness as the
10709 // predicate we're interested in folding.
10711 APInt Min = ICmpInst::isSigned(Pred) ?
10712 getSignedRangeMin(V) : getUnsignedRangeMin(V);
10714 if (Min == C->getAPInt()) {
10715 // Given (V >= Min && V != Min) we conclude V >= (Min + 1).
10716 // This is true even if (Min + 1) wraps around -- in case of
10717 // wraparound, (Min + 1) < Min, so (V >= Min => V >= (Min + 1)).
10719 APInt SharperMin = Min + 1;
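// Worked example (range invented): if V has unsigned range [3, 20) and the
// guard says V != 3, then Min == C->getAPInt() == 3 and the cases below may
// use the sharper fact "V u>= 4", i.e. SharperMin.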
10721 switch (Pred) {
10722 case ICmpInst::ICMP_SGE:
10723 case ICmpInst::ICMP_UGE:
10724 // We know V `Pred` SharperMin. If this implies LHS `Pred`
10725 // RHS, we're done.
10726 if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(SharperMin),
10727 Context))
10728 return true;
10729 LLVM_FALLTHROUGH;
10731 case ICmpInst::ICMP_SGT:
10732 case ICmpInst::ICMP_UGT:
10733 // We know from the range information that (V `Pred` Min ||
10734 // V == Min). We know from the guarding condition that !(V
10735 // == Min). This gives us
10737 // V `Pred` Min || V == Min && !(V == Min)
10738 // => V `Pred` Min
10740 // If V `Pred` Min implies LHS `Pred` RHS, we're done.
10742 if (isImpliedCondOperands(Pred, LHS, RHS, V, getConstant(Min),
10743 Context))
10744 return true;
10745 break;
10747 // `LHS < RHS` and `LHS <= RHS` are handled in the same way as `RHS > LHS` and `RHS >= LHS` respectively.
10748 case ICmpInst::ICMP_SLE:
10749 case ICmpInst::ICMP_ULE:
10750 if (isImpliedCondOperands(CmpInst::getSwappedPredicate(Pred), RHS,
10751 LHS, V, getConstant(SharperMin), Context))
10752 return true;
10753 LLVM_FALLTHROUGH;
10755 case ICmpInst::ICMP_SLT:
10756 case ICmpInst::ICMP_ULT:
10757 if (isImpliedCondOperands(CmpInst::getSwappedPredicate(Pred), RHS,
10758 LHS, V, getConstant(Min), Context))
10759 return true;
10760 break;
10762 default:
10763 // No change
10764 break;
10765 }
10766 }
10767 }
10769 // Check whether the actual condition is beyond sufficient.
10770 if (FoundPred == ICmpInst::ICMP_EQ)
10771 if (ICmpInst::isTrueWhenEqual(Pred))
10772 if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, Context))
10773 return true;
10774 if (Pred == ICmpInst::ICMP_NE)
10775 if (!ICmpInst::isTrueWhenEqual(FoundPred))
10776 if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS,
10777 Context))
10778 return true;
10780 // Otherwise assume the worst.
10781 return false;
10782 }
10784 bool ScalarEvolution::splitBinaryAdd(const SCEV *Expr,
10785 const SCEV *&L, const SCEV *&R,
10786 SCEV::NoWrapFlags &Flags) {
10787 const auto *AE = dyn_cast<SCEVAddExpr>(Expr);
10788 if (!AE || AE->getNumOperands() != 2)
10789 return false;
10791 L = AE->getOperand(0);
10792 R = AE->getOperand(1);
10793 Flags = AE->getNoWrapFlags();
10794 return true;
10795 }
10797 Optional<APInt> ScalarEvolution::computeConstantDifference(const SCEV *More,
10798 const SCEV *Less) {
10799 // We avoid subtracting expressions here because this function is usually
10800 // fairly deep in the call stack (i.e. is called many times).
10802 // X - X = 0.
10803 if (More == Less)
10804 return APInt(getTypeSizeInBits(More->getType()), 0);
10806 if (isa<SCEVAddRecExpr>(Less) && isa<SCEVAddRecExpr>(More)) {
10807 const auto *LAR = cast<SCEVAddRecExpr>(Less);
10808 const auto *MAR = cast<SCEVAddRecExpr>(More);
10810 if (LAR->getLoop() != MAR->getLoop())
10811 return None;
10813 // We look at affine expressions only; not for correctness but to keep
10814 // getStepRecurrence cheap.
10815 if (!LAR->isAffine() || !MAR->isAffine())
10816 return None;
10818 if (LAR->getStepRecurrence(*this) != MAR->getStepRecurrence(*this))
10819 return None;
10821 Less = LAR->getStart();
10822 More = MAR->getStart();
10824 // fall through
10825 }
10827 if (isa<SCEVConstant>(Less) && isa<SCEVConstant>(More)) {
10828 const auto &M = cast<SCEVConstant>(More)->getAPInt();
10829 const auto &L = cast<SCEVConstant>(Less)->getAPInt();
10830 return M - L;
10831 }
10833 SCEV::NoWrapFlags Flags;
10834 const SCEV *LLess = nullptr, *RLess = nullptr;
10835 const SCEV *LMore = nullptr, *RMore = nullptr;
10836 const SCEVConstant *C1 = nullptr, *C2 = nullptr;
10837 // Compare (X + C1) vs X.
10838 if (splitBinaryAdd(Less, LLess, RLess, Flags))
10839 if ((C1 = dyn_cast<SCEVConstant>(LLess)))
10840 if (RLess == More)
10841 return -(C1->getAPInt());
10843 // Compare X vs (X + C2).
10844 if (splitBinaryAdd(More, LMore, RMore, Flags))
10845 if ((C2 = dyn_cast<SCEVConstant>(LMore)))
10846 if (RMore == Less)
10847 return C2->getAPInt();
10849 // Compare (X + C1) vs (X + C2).
10850 if (C1 && C2 && RLess == RMore)
10851 return C2->getAPInt() - C1->getAPInt();
10853 return None;
10854 }
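// Illustrative results (X, Y and the loop L are hypothetical):
//   computeConstantDifference((X + 13), (X + 6)) == 7
//   computeConstantDifference({5,+,3}<L>, {2,+,3}<L>) == 3
//   computeConstantDifference(X, Y) == None (no constant difference found)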
10856 bool ScalarEvolution::isImpliedCondOperandsViaAddRecStart(
10857 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS,
10858 const SCEV *FoundLHS, const SCEV *FoundRHS, const Instruction *Context) {
10859 // Try to recognize the following pattern:
10861 // FoundRHS = ...
10862 // ...
10863 // loop:
10864 // FoundLHS = {Start,+,W}
10865 // context_bb: // Basic block from the same loop
10866 // known(Pred, FoundLHS, FoundRHS)
10868 // If some predicate is known in the context of a loop, it is also known on
10869 // each iteration of this loop, including the first iteration. Therefore, in
10870 // this case, `FoundLHS Pred FoundRHS` implies `Start Pred FoundRHS`. Try to
10871 // prove the original pred using this fact.
10872 if (!Context)
10873 return false;
10874 const BasicBlock *ContextBB = Context->getParent();
10875 // Make sure AR varies in the context block.
10876 if (auto *AR = dyn_cast<SCEVAddRecExpr>(FoundLHS)) {
10877 const Loop *L = AR->getLoop();
10878 // Make sure that context belongs to the loop and executes on 1st iteration
10879 // (if it ever executes at all).
10880 if (!L->contains(ContextBB) || !DT.dominates(ContextBB, L->getLoopLatch()))
10881 return false;
10882 if (!isAvailableAtLoopEntry(FoundRHS, AR->getLoop()))
10883 return false;
10884 return isImpliedCondOperands(Pred, LHS, RHS, AR->getStart(), FoundRHS);
10885 }
10887 if (auto *AR = dyn_cast<SCEVAddRecExpr>(FoundRHS)) {
10888 const Loop *L = AR->getLoop();
10889 // Make sure that context belongs to the loop and executes on 1st iteration
10890 // (if it ever executes at all).
10891 if (!L->contains(ContextBB) || !DT.dominates(ContextBB, L->getLoopLatch()))
10892 return false;
10893 if (!isAvailableAtLoopEntry(FoundLHS, AR->getLoop()))
10894 return false;
10895 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, AR->getStart());
10896 }
10898 return false;
10899 }
10901 bool ScalarEvolution::isImpliedCondOperandsViaNoOverflow(
10902 ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS,
10903 const SCEV *FoundLHS, const SCEV *FoundRHS) {
10904 if (Pred != CmpInst::ICMP_SLT && Pred != CmpInst::ICMP_ULT)
10905 return false;
10907 const auto *AddRecLHS = dyn_cast<SCEVAddRecExpr>(LHS);
10908 if (!AddRecLHS)
10909 return false;
10911 const auto *AddRecFoundLHS = dyn_cast<SCEVAddRecExpr>(FoundLHS);
10912 if (!AddRecFoundLHS)
10913 return false;
10915 // We'd like to let SCEV reason about control dependencies, so we constrain
10916 // both the inequalities to be about add recurrences on the same loop. This
10917 // way we can use isLoopEntryGuardedByCond later.
10919 const Loop *L = AddRecFoundLHS->getLoop();
10920 if (L != AddRecLHS->getLoop())
10921 return false;
10923 // FoundLHS u< FoundRHS u< -C => (FoundLHS + C) u< (FoundRHS + C) ... (1)
10925 // FoundLHS s< FoundRHS s< INT_MIN - C => (FoundLHS + C) s< (FoundRHS + C)
10926 // ... (2)
10928 // Informal proof for (2), assuming (1) [*]:
10930 // We'll also assume (A s< B) <=> ((A + INT_MIN) u< (B + INT_MIN)) ... (3)[**]
10932 // Then
10934 // FoundLHS s< FoundRHS s< INT_MIN - C
10935 // <=> (FoundLHS + INT_MIN) u< (FoundRHS + INT_MIN) u< -C [ using (3) ]
10936 // <=> (FoundLHS + INT_MIN + C) u< (FoundRHS + INT_MIN + C) [ using (1) ]
10937 // <=> (FoundLHS + INT_MIN + C + INT_MIN) s<
10938 // (FoundRHS + INT_MIN + C + INT_MIN) [ using (3) ]
10939 // <=> FoundLHS + C s< FoundRHS + C
10941 // [*]: (1) can be proved by ruling out overflow.
10943 // [**]: This can be proved by analyzing all the four possibilities:
10944 // (A s< 0, B s< 0), (A s< 0, B s>= 0), (A s>= 0, B s< 0) and
10945 // (A s>= 0, B s>= 0).
10947 // Note:
10948 // Despite (2), "FoundRHS s< INT_MIN - C" does not mean that "FoundRHS + C"
10949 // will not sign underflow. For instance, say FoundLHS = (i8 -128), FoundRHS
10950 // = (i8 -127) and C = (i8 -100). Then INT_MIN - C = (i8 -28), and FoundRHS
10951 // s< (INT_MIN - C). Lack of sign overflow / underflow in "FoundRHS + C" is
10952 // neither necessary nor sufficient to prove "(FoundLHS + C) s< (FoundRHS +
10953 // C)".
10955 Optional<APInt> LDiff = computeConstantDifference(LHS, FoundLHS);
10956 Optional<APInt> RDiff = computeConstantDifference(RHS, FoundRHS);
10957 if (!LDiff || !RDiff || *LDiff != *RDiff)
10958 return false;
10960 if (LDiff->isMinValue())
10961 return true;
10963 APInt FoundRHSLimit;
10965 if (Pred == CmpInst::ICMP_ULT) {
10966 FoundRHSLimit = -(*RDiff);
10967 } else {
10968 assert(Pred == CmpInst::ICMP_SLT && "Checked above!");
10969 FoundRHSLimit = APInt::getSignedMinValue(getTypeSizeInBits(RHS->getType())) - *RDiff;
10970 }
10972 // Try to prove (1) or (2), as needed.
10973 return isAvailableAtLoopEntry(FoundRHS, L) &&
10974 isLoopEntryGuardedByCond(L, Pred, FoundRHS,
10975 getConstant(FoundRHSLimit));
10976 }
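// Numeric sketch of rule (1) in i8 (values invented): with C == 3, the
// antecedent "FoundLHS u< FoundRHS u< -3 (== 253)" holds for, say, 10 and
// 20, and indeed (10 + 3) u< (20 + 3) with no unsigned wrap.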
10978 bool ScalarEvolution::isImpliedViaMerge(ICmpInst::Predicate Pred,
10979 const SCEV *LHS, const SCEV *RHS,
10980 const SCEV *FoundLHS,
10981 const SCEV *FoundRHS, unsigned Depth) {
10982 const PHINode *LPhi = nullptr, *RPhi = nullptr;
10984 auto ClearOnExit = make_scope_exit([&]() {
10985 if (LPhi) {
10986 bool Erased = PendingMerges.erase(LPhi);
10987 assert(Erased && "Failed to erase LPhi!");
10988 (void)Erased;
10989 }
10990 if (RPhi) {
10991 bool Erased = PendingMerges.erase(RPhi);
10992 assert(Erased && "Failed to erase RPhi!");
10993 (void)Erased;
10994 }
10995 });
10997 // Find the respective Phis and check that they are not pending.
10998 if (const SCEVUnknown *LU = dyn_cast<SCEVUnknown>(LHS))
10999 if (auto *Phi = dyn_cast<PHINode>(LU->getValue())) {
11000 if (!PendingMerges.insert(Phi).second)
11001 return false;
11002 LPhi = Phi;
11003 }
11004 if (const SCEVUnknown *RU = dyn_cast<SCEVUnknown>(RHS))
11005 if (auto *Phi = dyn_cast<PHINode>(RU->getValue())) {
11006 // If we detect a loop of Phi nodes being processed by this method, for
11007 // example:
11009 // %a = phi i32 [ %some1, %preheader ], [ %b, %latch ]
11010 // %b = phi i32 [ %some2, %preheader ], [ %a, %latch ]
11012 // we don't want to deal with a case that complex, so return conservative
11013 // answer false.
11014 if (!PendingMerges.insert(Phi).second)
11015 return false;
11016 RPhi = Phi;
11017 }
11019 // If none of LHS, RHS is a Phi, nothing to do here.
11020 if (!LPhi && !RPhi)
11021 return false;
11023 // If there is a SCEVUnknown Phi we are interested in, make it left.
11024 if (!LPhi) {
11025 std::swap(LHS, RHS);
11026 std::swap(FoundLHS, FoundRHS);
11027 std::swap(LPhi, RPhi);
11028 Pred = ICmpInst::getSwappedPredicate(Pred);
11029 }
11031 assert(LPhi && "LPhi should definitely be a SCEVUnknown Phi!");
11032 const BasicBlock *LBB = LPhi->getParent();
11033 const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS);
11035 auto ProvedEasily = [&](const SCEV *S1, const SCEV *S2) {
11036 return isKnownViaNonRecursiveReasoning(Pred, S1, S2) ||
11037 isImpliedCondOperandsViaRanges(Pred, S1, S2, FoundLHS, FoundRHS) ||
11038 isImpliedViaOperations(Pred, S1, S2, FoundLHS, FoundRHS, Depth);
11039 };
11041 if (RPhi && RPhi->getParent() == LBB) {
11042 // Case one: RHS is also a SCEVUnknown Phi from the same basic block.
11043 // If we compare two Phis from the same block, and for each entry block
11044 // the predicate is true for incoming values from this block, then the
11045 // predicate is also true for the Phis.
11046 for (const BasicBlock *IncBB : predecessors(LBB)) {
11047 const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB));
11048 const SCEV *R = getSCEV(RPhi->getIncomingValueForBlock(IncBB));
11049 if (!ProvedEasily(L, R))
11050 return false;
11051 }
11052 } else if (RAR && RAR->getLoop()->getHeader() == LBB) {
11053 // Case two: RHS is also a Phi from the same basic block, and it is an
11054 // AddRec. This means that there is a loop which has both AddRec and Unknown
11055 // PHIs; for it, we can compare the incoming values of the AddRec from above
11056 // the loop and from the latch with the respective incoming values of LPhi.
11057 // TODO: Generalize to handle loops with many inputs in a header.
11058 if (LPhi->getNumIncomingValues() != 2) return false;
11060 auto *RLoop = RAR->getLoop();
11061 auto *Predecessor = RLoop->getLoopPredecessor();
11062 assert(Predecessor && "Loop with AddRec with no predecessor?");
11063 const SCEV *L1 = getSCEV(LPhi->getIncomingValueForBlock(Predecessor));
11064 if (!ProvedEasily(L1, RAR->getStart()))
11065 return false;
11066 auto *Latch = RLoop->getLoopLatch();
11067 assert(Latch && "Loop with AddRec with no latch?");
11068 const SCEV *L2 = getSCEV(LPhi->getIncomingValueForBlock(Latch));
11069 if (!ProvedEasily(L2, RAR->getPostIncExpr(*this)))
11070 return false;
11071 } else {
11072 // In all other cases go over the inputs of LHS and compare each of them to
11073 // RHS; the predicate is true for (LHS, RHS) if it is true for all such pairs.
11074 // At this point RHS is either a non-Phi, or it is a Phi from some block
11075 // different from LBB.
11076 for (const BasicBlock *IncBB : predecessors(LBB)) {
11077 // Check that RHS is available in this block.
11078 if (!dominates(RHS, IncBB))
11079 return false;
11080 const SCEV *L = getSCEV(LPhi->getIncomingValueForBlock(IncBB));
11081 // Make sure L does not refer to a value from a potentially previous
11082 // iteration of a loop.
11083 if (!properlyDominates(L, IncBB))
11084 return false;
11085 if (!ProvedEasily(L, RHS))
11086 return false;
11087 }
11088 }
11089 return true;
11090 }
11092 bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred,
11093 const SCEV *LHS, const SCEV *RHS,
11094 const SCEV *FoundLHS,
11095 const SCEV *FoundRHS,
11096 const Instruction *Context) {
11097 if (isImpliedCondOperandsViaRanges(Pred, LHS, RHS, FoundLHS, FoundRHS))
11098 return true;
11100 if (isImpliedCondOperandsViaNoOverflow(Pred, LHS, RHS, FoundLHS, FoundRHS))
11101 return true;
11103 if (isImpliedCondOperandsViaAddRecStart(Pred, LHS, RHS, FoundLHS, FoundRHS,
11104 Context))
11105 return true;
11107 return isImpliedCondOperandsHelper(Pred, LHS, RHS,
11108 FoundLHS, FoundRHS);
11109 }
11111 /// Is MaybeMinMaxExpr an (U|S)(Min|Max) of Candidate and some other values?
11112 template <typename MinMaxExprType>
11113 static bool IsMinMaxConsistingOf(const SCEV *MaybeMinMaxExpr,
11114 const SCEV *Candidate) {
11115 const MinMaxExprType *MinMaxExpr = dyn_cast<MinMaxExprType>(MaybeMinMaxExpr);
11116 if (!MinMaxExpr)
11117 return false;
11119 return is_contained(MinMaxExpr->operands(), Candidate);
11120 }
11122 static bool IsKnownPredicateViaAddRecStart(ScalarEvolution &SE,
11123 ICmpInst::Predicate Pred,
11124 const SCEV *LHS, const SCEV *RHS) {
11125 // If both sides are affine addrecs for the same loop, with equal
11126 // steps, and we know the recurrences don't wrap, then we only
11127 // need to check the predicate on the starting values.
11129 if (!ICmpInst::isRelational(Pred))
11130 return false;
11132 const SCEVAddRecExpr *LAR = dyn_cast<SCEVAddRecExpr>(LHS);
11133 if (!LAR)
11134 return false;
11135 const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS);
11136 if (!RAR)
11137 return false;
11138 if (LAR->getLoop() != RAR->getLoop())
11139 return false;
11140 if (!LAR->isAffine() || !RAR->isAffine())
11141 return false;
11143 if (LAR->getStepRecurrence(SE) != RAR->getStepRecurrence(SE))
11144 return false;
11146 SCEV::NoWrapFlags NW = ICmpInst::isSigned(Pred) ?
11147 SCEV::FlagNSW : SCEV::FlagNUW;
11148 if (!LAR->getNoWrapFlags(NW) || !RAR->getNoWrapFlags(NW))
11149 return false;
11151 return SE.isKnownPredicate(Pred, LAR->getStart(), RAR->getStart());
11152 }
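// For example (flags assumed): over one loop, {5,+,2}<nsw> s< {9,+,2}<nsw>
// follows from "5 s< 9" alone, since both recurrences advance in lockstep
// and neither may sign-wrap.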
11154 /// Is LHS `Pred` RHS true by virtue of LHS or RHS being a Min or Max
11155 /// expression?
11156 static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE,
11157 ICmpInst::Predicate Pred,
11158 const SCEV *LHS, const SCEV *RHS) {
11159 switch (Pred) {
11160 default:
11161 return false;
11163 case ICmpInst::ICMP_SGE:
11164 std::swap(LHS, RHS);
11165 LLVM_FALLTHROUGH;
11166 case ICmpInst::ICMP_SLE:
11167 return
11168 // min(A, ...) <= A
11169 IsMinMaxConsistingOf<SCEVSMinExpr>(LHS, RHS) ||
11170 // A <= max(A, ...)
11171 IsMinMaxConsistingOf<SCEVSMaxExpr>(RHS, LHS);
11173 case ICmpInst::ICMP_UGE:
11174 std::swap(LHS, RHS);
11175 LLVM_FALLTHROUGH;
11176 case ICmpInst::ICMP_ULE:
11177 return
11178 // min(A, ...) <= A
11179 IsMinMaxConsistingOf<SCEVUMinExpr>(LHS, RHS) ||
11180 // A <= max(A, ...)
11181 IsMinMaxConsistingOf<SCEVUMaxExpr>(RHS, LHS);
11182 }
11184 llvm_unreachable("covered switch fell through?!");
11185 }
11187 bool ScalarEvolution::isImpliedViaOperations(ICmpInst::Predicate Pred,
11188 const SCEV *LHS, const SCEV *RHS,
11189 const SCEV *FoundLHS,
11190 const SCEV *FoundRHS,
11191 unsigned Depth) {
11192 assert(getTypeSizeInBits(LHS->getType()) ==
11193 getTypeSizeInBits(RHS->getType()) &&
11194 "LHS and RHS have different sizes?");
11195 assert(getTypeSizeInBits(FoundLHS->getType()) ==
11196 getTypeSizeInBits(FoundRHS->getType()) &&
11197 "FoundLHS and FoundRHS have different sizes?");
11198 // We want to avoid hurting compile time by analyzing overly large trees.
11199 if (Depth > MaxSCEVOperationsImplicationDepth)
11200 return false;
11202 // We only want to work with GT comparison so far.
11203 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_SLT) {
11204 Pred = CmpInst::getSwappedPredicate(Pred);
11205 std::swap(LHS, RHS);
11206 std::swap(FoundLHS, FoundRHS);
11207 }
11209 // For unsigned, try to reduce it to corresponding signed comparison.
11210 if (Pred == ICmpInst::ICMP_UGT)
11211 // We can replace unsigned predicate with its signed counterpart if all
11212 // involved values are non-negative.
11213 // TODO: We could have better support for unsigned.
11214 if (isKnownNonNegative(FoundLHS) && isKnownNonNegative(FoundRHS)) {
11215 // Knowing that both FoundLHS and FoundRHS are non-negative, and knowing
11216 // FoundLHS >u FoundRHS, we also know that FoundLHS >s FoundRHS. Let us
11217 // use this fact to prove that LHS and RHS are non-negative.
11218 const SCEV *MinusOne = getMinusOne(LHS->getType());
11219 if (isImpliedCondOperands(ICmpInst::ICMP_SGT, LHS, MinusOne, FoundLHS,
11220 FoundRHS) &&
11221 isImpliedCondOperands(ICmpInst::ICMP_SGT, RHS, MinusOne, FoundLHS,
11222 FoundRHS))
11223 Pred = ICmpInst::ICMP_SGT;
11224 }
11226 if (Pred != ICmpInst::ICMP_SGT)
11227 return false;
11229 auto GetOpFromSExt = [&](const SCEV *S) {
11230 if (auto *Ext = dyn_cast<SCEVSignExtendExpr>(S))
11231 return Ext->getOperand();
11232 // TODO: If S is a SCEVConstant then you can cheaply "strip" the sext off
11233 // the constant in some cases.
11234 return S;
11235 };
11237 // Acquire values from extensions.
11238 auto *OrigLHS = LHS;
11239 auto *OrigFoundLHS = FoundLHS;
11240 LHS = GetOpFromSExt(LHS);
11241 FoundLHS = GetOpFromSExt(FoundLHS);
11243 // Check if the SGT predicate can be proved trivially or using the found context.
11244 auto IsSGTViaContext = [&](const SCEV *S1, const SCEV *S2) {
11245 return isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGT, S1, S2) ||
11246 isImpliedViaOperations(ICmpInst::ICMP_SGT, S1, S2, OrigFoundLHS,
11247 FoundRHS, Depth + 1);
11248 };
11250 if (auto *LHSAddExpr = dyn_cast<SCEVAddExpr>(LHS)) {
11251 // We want to avoid creation of any new non-constant SCEV. Since we are
11252 // going to compare the operands to RHS, we should be certain that we don't
11253 // need any size extensions for this. So let's decline all cases when the
11254 // sizes of types of LHS and RHS do not match.
11255 // TODO: Maybe try to get RHS from sext to catch more cases?
11256 if (getTypeSizeInBits(LHS->getType()) != getTypeSizeInBits(RHS->getType()))
11257 return false;
11259 // Should not overflow.
11260 if (!LHSAddExpr->hasNoSignedWrap())
11261 return false;
11263 auto *LL = LHSAddExpr->getOperand(0);
11264 auto *LR = LHSAddExpr->getOperand(1);
11265 auto *MinusOne = getMinusOne(RHS->getType());
11267 // Checks that S1 >= 0 && S2 > RHS, trivially or using the found context.
11268 auto IsSumGreaterThanRHS = [&](const SCEV *S1, const SCEV *S2) {
11269 return IsSGTViaContext(S1, MinusOne) && IsSGTViaContext(S2, RHS);
11270 };
11271 // Try to prove the following rule:
11272 // (LHS = LL + LR) && (LL >= 0) && (LR > RHS) => (LHS > RHS).
11273 // (LHS = LL + LR) && (LR >= 0) && (LL > RHS) => (LHS > RHS).
11274 if (IsSumGreaterThanRHS(LL, LR) || IsSumGreaterThanRHS(LR, LL))
11275 return true;
11276 } else if (auto *LHSUnknownExpr = dyn_cast<SCEVUnknown>(LHS)) {
11277 Value *LL, *LR;
11278 // FIXME: Once we have SDiv implemented, we can get rid of this matching.
11280 using namespace llvm::PatternMatch;
11282 if (match(LHSUnknownExpr->getValue(), m_SDiv(m_Value(LL), m_Value(LR)))) {
11283 // Rules for division.
11284 // We are going to perform some comparisons with Denominator and its
11285 // derivative expressions. In the general case, creating a SCEV for it may
11286 // lead to a complex analysis of the entire graph, and in particular it
11287 // can request trip count recalculation for the same loop. This would
11288 // cache as SCEVCouldNotCompute to avoid the infinite recursion. To avoid
11289 // this, we only want to create SCEVs that are constants in this section.
11290 // So we bail if Denominator is not a constant.
11291 if (!isa<ConstantInt>(LR))
11292 return false;
11294 auto *Denominator = cast<SCEVConstant>(getSCEV(LR));
11296 // We want to make sure that LHS = FoundLHS / Denominator. If it is so,
11297 // then a SCEV for the numerator already exists and matches with FoundLHS.
11298 auto *Numerator = getExistingSCEV(LL);
11299 if (!Numerator || Numerator->getType() != FoundLHS->getType())
11300 return false;
11302 // Make sure that the numerator matches with FoundLHS and the denominator
11303 // is positive.
11304 if (!HasSameValue(Numerator, FoundLHS) || !isKnownPositive(Denominator))
11305 return false;
11307 auto *DTy = Denominator->getType();
11308 auto *FRHSTy = FoundRHS->getType();
11309 if (DTy->isPointerTy() != FRHSTy->isPointerTy())
11310 // One of types is a pointer and another one is not. We cannot extend
11311 // them properly to a wider type, so let us just reject this case.
11312 // TODO: Usage of getEffectiveSCEVType for DTy, FRHSTy etc should help
11313 // to avoid this check.
11314 return false;
11316 // Given that:
11317 // FoundLHS > FoundRHS, LHS = FoundLHS / Denominator, Denominator > 0.
11318 auto *WTy = getWiderType(DTy, FRHSTy);
11319 auto *DenominatorExt = getNoopOrSignExtend(Denominator, WTy);
11320 auto *FoundRHSExt = getNoopOrSignExtend(FoundRHS, WTy);
11322 // Try to prove the following rule:
11323 // (FoundRHS > Denominator - 2) && (RHS <= 0) => (LHS > RHS).
11324 // For example, given that FoundLHS > 2, FoundLHS is at least 3. If we
11325 // divide it by a Denominator < 4, we will have at least 1.
11326 auto *DenomMinusTwo = getMinusSCEV(DenominatorExt, getConstant(WTy, 2));
11327 if (isKnownNonPositive(RHS) &&
11328 IsSGTViaContext(FoundRHSExt, DenomMinusTwo))
11329 return true;
11331 // Try to prove the following rule:
11332 // (FoundRHS > -1 - Denominator) && (RHS < 0) => (LHS > RHS).
11333 // For example, given that FoundLHS > -3, FoundLHS is at least -2.
11334 // If we divide it by Denominator > 2, then:
11335 // 1. If FoundLHS is negative, then the result is 0.
11336 // 2. If FoundLHS is non-negative, then the result is non-negative.
11337 // Either way, the result is non-negative.
11338 auto *MinusOne = getMinusOne(WTy);
11339 auto *NegDenomMinusOne = getMinusSCEV(MinusOne, DenominatorExt);
11340 if (isKnownNegative(RHS) &&
11341 IsSGTViaContext(FoundRHSExt, NegDenomMinusOne))
11342 return true;
11343 }
11344 }
11346 // If our expression contained SCEVUnknown Phis, and we split it down and now
11347 // need to prove something for them, try to prove the predicate for every
11348 // possible incoming values of those Phis.
11349 if (isImpliedViaMerge(Pred, OrigLHS, RHS, OrigFoundLHS, FoundRHS, Depth + 1))
11350 return true;
11352 return false;
11353 }
11355 static bool isKnownPredicateExtendIdiom(ICmpInst::Predicate Pred,
11356 const SCEV *LHS, const SCEV *RHS) {
11357 // zext x u<= sext x, sext x s<= zext x
11358 switch (Pred) {
11359 case ICmpInst::ICMP_SGE:
11360 std::swap(LHS, RHS);
11361 LLVM_FALLTHROUGH;
11362 case ICmpInst::ICMP_SLE: {
11363 // If operand >=s 0 then ZExt == SExt. If operand <s 0 then SExt <s ZExt.
11364 const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(LHS);
11365 const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(RHS);
11366 if (SExt && ZExt && SExt->getOperand() == ZExt->getOperand())
11367 return true;
11368 break;
11369 }
11370 case ICmpInst::ICMP_UGE:
11371 std::swap(LHS, RHS);
11372 LLVM_FALLTHROUGH;
11373 case ICmpInst::ICMP_ULE: {
11374 // If operand >=s 0 then ZExt == SExt. If operand <s 0 then ZExt <u SExt.
11375 const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(LHS);
11376 const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(RHS);
11377 if (SExt && ZExt && SExt->getOperand() == ZExt->getOperand())
11378 return true;
11379 break;
11380 }
11381 default:
11382 break;
11383 }
11384 return false;
11385 }
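// Worked i8 -> i16 instance: for x == -1, sext x == 0xFFFF and
// zext x == 0x00FF, so "zext x u<= sext x" holds; for x s>= 0 the two
// extensions agree. Dually, "sext x s<= zext x" since -1 s<= 255.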
11387 bool
11388 ScalarEvolution::isKnownViaNonRecursiveReasoning(ICmpInst::Predicate Pred,
11389 const SCEV *LHS, const SCEV *RHS) {
11390 return isKnownPredicateExtendIdiom(Pred, LHS, RHS) ||
11391 isKnownPredicateViaConstantRanges(Pred, LHS, RHS) ||
11392 IsKnownPredicateViaMinOrMax(*this, Pred, LHS, RHS) ||
11393 IsKnownPredicateViaAddRecStart(*this, Pred, LHS, RHS) ||
11394 isKnownPredicateViaNoOverflow(Pred, LHS, RHS);
11395 }
11397 bool
11398 ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
11399 const SCEV *LHS, const SCEV *RHS,
11400 const SCEV *FoundLHS,
11401 const SCEV *FoundRHS) {
11402 switch (Pred) {
11403 default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
11404 case ICmpInst::ICMP_EQ:
11405 case ICmpInst::ICMP_NE:
11406 if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS))
11407 return true;
11408 break;
11409 case ICmpInst::ICMP_SLT:
11410 case ICmpInst::ICMP_SLE:
11411 if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
11412 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, RHS, FoundRHS))
11413 return true;
11414 break;
11415 case ICmpInst::ICMP_SGT:
11416 case ICmpInst::ICMP_SGE:
11417 if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
11418 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_SLE, RHS, FoundRHS))
11419 return true;
11420 break;
11421 case ICmpInst::ICMP_ULT:
11422 case ICmpInst::ICMP_ULE:
11423 if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
11424 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, RHS, FoundRHS))
11425 return true;
11426 break;
11427 case ICmpInst::ICMP_UGT:
11428 case ICmpInst::ICMP_UGE:
11429 if (isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
11430 isKnownViaNonRecursiveReasoning(ICmpInst::ICMP_ULE, RHS, FoundRHS))
11431 return true;
11432 break;
11433 }
11435 // Maybe it can be proved via operations?
11436 if (isImpliedViaOperations(Pred, LHS, RHS, FoundLHS, FoundRHS))
11437 return true;
11439 return false;
11440 }
11442 bool ScalarEvolution::isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred,
11443 const SCEV *LHS,
11444 const SCEV *RHS,
11445 const SCEV *FoundLHS,
11446 const SCEV *FoundRHS) {
11447 if (!isa<SCEVConstant>(RHS) || !isa<SCEVConstant>(FoundRHS))
11448 // The restriction on `FoundRHS` can be lifted easily -- it exists only to
11449 // reduce the compile time impact of this optimization.
11450 return false;
11452 Optional<APInt> Addend = computeConstantDifference(LHS, FoundLHS);
11453 if (!Addend)
11454 return false;
11456 const APInt &ConstFoundRHS = cast<SCEVConstant>(FoundRHS)->getAPInt();
11458 // `FoundLHSRange` is the range we know `FoundLHS` to be in by virtue of the
11459 // antecedent "`FoundLHS` `Pred` `FoundRHS`".
11460 ConstantRange FoundLHSRange =
11461 ConstantRange::makeExactICmpRegion(Pred, ConstFoundRHS);
11463 // Since `LHS` is `FoundLHS` + `Addend`, we can compute a range for `LHS`:
11464 ConstantRange LHSRange = FoundLHSRange.add(ConstantRange(*Addend));
11466 // We can also compute the range of values for `LHS` that satisfy the
11467 // consequent, "`LHS` `Pred` `RHS`":
11468 const APInt &ConstRHS = cast<SCEVConstant>(RHS)->getAPInt();
11469 // The antecedent implies the consequent if every value of `LHS` that
11470 // satisfies the antecedent also satisfies the consequent.
11471 return LHSRange.icmp(Pred, ConstRHS);
11472 }
11474 bool ScalarEvolution::canIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride,
11475 bool IsSigned) {
11476 assert(isKnownPositive(Stride) && "Positive stride expected!");
11478 unsigned BitWidth = getTypeSizeInBits(RHS->getType());
11479 const SCEV *One = getOne(Stride->getType());
11481 if (IsSigned) {
11482 APInt MaxRHS = getSignedRangeMax(RHS);
11483 APInt MaxValue = APInt::getSignedMaxValue(BitWidth);
11484 APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One));
11486 // SMaxRHS + SMaxStrideMinusOne > SMaxValue => overflow!
11487 return (std::move(MaxValue) - MaxStrideMinusOne).slt(MaxRHS);
11488 }
11490 APInt MaxRHS = getUnsignedRangeMax(RHS);
11491 APInt MaxValue = APInt::getMaxValue(BitWidth);
11492 APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One));
11494 // UMaxRHS + UMaxStrideMinusOne > UMaxValue => overflow!
11495 return (std::move(MaxValue) - MaxStrideMinusOne).ult(MaxRHS);
11496 }
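// Unsigned i8 sketch (ranges invented): with MaxRHS == 250 and a stride of
// at most 10 (MaxStrideMinusOne == 9), 255 - 9 == 246 is u< 250, so an IV
// stepping toward RHS could step past 255 and wrap.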
11498 bool ScalarEvolution::canIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride,
11499 bool IsSigned) {
11501 unsigned BitWidth = getTypeSizeInBits(RHS->getType());
11502 const SCEV *One = getOne(Stride->getType());
11504 if (IsSigned) {
11505 APInt MinRHS = getSignedRangeMin(RHS);
11506 APInt MinValue = APInt::getSignedMinValue(BitWidth);
11507 APInt MaxStrideMinusOne = getSignedRangeMax(getMinusSCEV(Stride, One));
11509 // SMinRHS - SMaxStrideMinusOne < SMinValue => overflow!
11510 return (std::move(MinValue) + MaxStrideMinusOne).sgt(MinRHS);
11511 }
11513 APInt MinRHS = getUnsignedRangeMin(RHS);
11514 APInt MinValue = APInt::getMinValue(BitWidth);
11515 APInt MaxStrideMinusOne = getUnsignedRangeMax(getMinusSCEV(Stride, One));
11517 // UMinRHS - UMaxStrideMinusOne < UMinValue => overflow!
11518 return (std::move(MinValue) + MaxStrideMinusOne).ugt(MinRHS);
11519 }
11521 const SCEV *ScalarEvolution::getUDivCeilSCEV(const SCEV *N, const SCEV *D) {
11522 // umin(N, 1) + floor((N - umin(N, 1)) / D)
11523 // This is equivalent to "1 + floor((N - 1) / D)" for N != 0. The umin
11524 // expression fixes the case of N=0.
11525 const SCEV *MinNOne = getUMinExpr(N, getOne(N->getType()));
11526 const SCEV *NMinusOne = getMinusSCEV(N, MinNOne);
11527 return getAddExpr(MinNOne, getUDivExpr(NMinusOne, D));
11528 }
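// Worked values: N == 7, D == 3 gives umin(7,1) == 1 plus (7 - 1) /u 3 == 2,
// i.e. 3 == ceil(7/3); N == 0 gives 0 + 0 /u 3 == 0, so the umin term indeed
// fixes the N == 0 case.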
11530 const SCEV *ScalarEvolution::computeMaxBECountForLT(const SCEV *Start,
11531 const SCEV *Stride,
11532 const SCEV *End,
11533 unsigned BitWidth,
11534 bool IsSigned) {
11535 // The logic in this function assumes we can represent a positive stride.
11536 // If we can't, the backedge-taken count must be zero.
11537 if (IsSigned && BitWidth == 1)
11538 return getZero(Stride->getType());
11540 // Calculate the maximum backedge count based on the range of values
11541 // permitted by Start, End, and Stride.
11542 APInt MinStart =
11543 IsSigned ? getSignedRangeMin(Start) : getUnsignedRangeMin(Start);
11545 APInt MinStride =
11546 IsSigned ? getSignedRangeMin(Stride) : getUnsignedRangeMin(Stride);
11548 // We assume either the stride is positive, or the backedge-taken count
11549 // is zero. So force StrideForMaxBECount to be at least one.
11550 APInt One(BitWidth, 1);
11551 APInt StrideForMaxBECount = IsSigned ? APIntOps::smax(One, MinStride)
11552 : APIntOps::umax(One, MinStride);
11554 APInt MaxValue = IsSigned ? APInt::getSignedMaxValue(BitWidth)
11555 : APInt::getMaxValue(BitWidth);
11556 APInt Limit = MaxValue - (StrideForMaxBECount - 1);
11558 // Although End can be a MAX expression we estimate MaxEnd considering only
11559 // the case End = RHS of the loop termination condition. This is safe because
11560 // in the other case (End - Start) is zero, leading to a zero maximum backedge
11561 // taken count.
11562 APInt MaxEnd = IsSigned ? APIntOps::smin(getSignedRangeMax(End), Limit)
11563 : APIntOps::umin(getUnsignedRangeMax(End), Limit);
11565 // MaxBECount = ceil((max(MaxEnd, MinStart) - MinStart) / Stride)
11566 MaxEnd = IsSigned ? APIntOps::smax(MaxEnd, MinStart)
11567 : APIntOps::umax(MaxEnd, MinStart);
11569 return getUDivCeilSCEV(getConstant(MaxEnd - MinStart) /* Delta */,
11570 getConstant(StrideForMaxBECount) /* Step */);
11571 }
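// Hypothetical i8 instance: MinStart == 0, MinStride == 2 (so
// StrideForMaxBECount == 2), Limit == 255 - 1 == 254, and MaxEnd ==
// umin(100, 254) == 100, giving ceil((100 - 0) / 2) == 50 as the maximum
// backedge-taken count.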
11573 ScalarEvolution::ExitLimit
11574 ScalarEvolution::howManyLessThans(const SCEV *LHS, const SCEV *RHS,
11575 const Loop *L, bool IsSigned,
11576 bool ControlsExit, bool AllowPredicates) {
11577 SmallPtrSet<const SCEVPredicate *, 4> Predicates;
11579 const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
11580 bool PredicatedIV = false;
11582 if (!IV && AllowPredicates) {
11583 // Try to make this an AddRec using runtime tests, in the first X
11584 // iterations of this loop, where X is the SCEV expression found by the
11585 // algorithm below.
11586 IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates);
11587 PredicatedIV = true;
11588 }
11590 // Avoid weird loops
11591 if (!IV || IV->getLoop() != L || !IV->isAffine())
11592 return getCouldNotCompute();
11594 // A precondition of this method is that the condition being analyzed
11595 // reaches an exiting branch which dominates the latch. Given that, we can
11596 // assume that an increment which violates the nowrap specification and
11597 // produces poison must cause undefined behavior when the resulting poison
11598 // value is branched upon and thus we can conclude that the backedge is
11599 // taken no more often than would be required to produce that poison value.
11600 // Note that a well defined loop can exit on the iteration which violates
11601 // the nowrap specification if there is another exit (either explicit or
11602 // implicit/exceptional) which causes the loop to execute before the
11603 // exiting instruction we're analyzing would trigger UB.
11604 auto WrapType = IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW;
11605 bool NoWrap = ControlsExit && IV->getNoWrapFlags(WrapType);
11606 ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
11608 const SCEV *Stride = IV->getStepRecurrence(*this);
11610 bool PositiveStride = isKnownPositive(Stride);
11612 // Avoid negative or zero stride values.
11613 if (!PositiveStride) {
11614 // We can compute the correct backedge taken count for loops with unknown
11615 // strides if we can prove that the loop is not an infinite loop with side
11616 // effects. Here's the loop structure we are trying to handle -
11618 // i = start
11619 // do {
11620 // A[i] = i;
11621 // i += s;
11622 // } while (i < end);
11624 // The backedge taken count for such loops is evaluated as -
11625 // (max(end, start + stride) - start - 1) /u stride
11627 // The additional preconditions that we need to check to prove correctness
11628 // of the above formula is as follows -
11630 // a) IV is either nuw or nsw depending upon signedness (indicated by the
11631 // NoWrap flag).
11632 // b) loop is single exit with no side effects.
11635 // Precondition a) implies that if the stride is negative, this is a single
11636 // trip loop. The backedge taken count formula reduces to zero in this case.
11638 // Precondition b) implies that if rhs is invariant in L, then unknown
11639 // stride being zero means the backedge can't be taken without UB.
11641 // The positive stride case is the same as isKnownPositive(Stride) returning
11642 // true (original behavior of the function).
11644 // We want to make sure that the stride is truly unknown as there are edge
11645 // cases where ScalarEvolution propagates no wrap flags to the
11646 // post-increment/decrement IV even though the increment/decrement operation
11647 // itself is wrapping. The computed backedge taken count may be wrong in
11648 // such cases. This is prevented by checking that the stride is not known to
11649 // be either positive or non-positive. For example, no wrap flags are
11650 // propagated to the post-increment IV of this loop with a trip count of 2 -
11652 // unsigned char i;
11653 // for(i=127; i<128; i+=129)
11654 // A[i] = i;
11656 if (PredicatedIV || !NoWrap || isKnownNonPositive(Stride) ||
11657 !loopIsFiniteByAssumption(L))
11658 return getCouldNotCompute();
11660 if (!isKnownNonZero(Stride)) {
11661 // If we have a step of zero, and RHS isn't invariant in L, we don't know
11662 // if it might eventually be greater than start and if so, on which
11663 // iteration. We can't even produce a useful upper bound.
11664 if (!isLoopInvariant(RHS, L))
11665 return getCouldNotCompute();
11667 // We allow a potentially zero stride, but we need to divide by stride
11668 // below. Since the loop can't be infinite and this check must control
11669 // the sole exit, we can infer the exit must be taken on the first
11670 // iteration (e.g. backedge count = 0) if the stride is zero. Given that,
11671 // we know the numerator in the divides below must be zero, so we can
11672 // pick an arbitrary non-zero value for the denominator (e.g. stride)
11673 // and produce the right result.
11674 // FIXME: Handle the case where Stride is poison?
11675 auto wouldZeroStrideBeUB = [&]() {
11676 // Proof by contradiction. Suppose the stride were zero. If we can
11677 // prove that the backedge *is* taken on the first iteration, then since
11678 // we know this condition controls the sole exit, we must have an
11679 // infinite loop. We can't have a (well defined) infinite loop per
11680 // check just above.
11681 // Note: The (Start - Stride) term is used to get the start' term from
11682 // (start' + stride,+,stride). Remember that we only care about the
11683 // result of this expression when stride == 0 at runtime.
11684 auto *StartIfZero = getMinusSCEV(IV->getStart(), Stride);
11685 return isLoopEntryGuardedByCond(L, Cond, StartIfZero, RHS);
11686 };
11687 if (!wouldZeroStrideBeUB()) {
11688 Stride = getUMaxExpr(Stride, getOne(Stride->getType()));
11689 }
11690 }
11691 } else if (!Stride->isOne() && !NoWrap) {
11692 auto isUBOnWrap = [&]() {
11693 // Can we prove this loop *must* be UB if overflow of IV occurs?
11694 // Reasoning goes as follows:
11695 // * Suppose the IV did self wrap.
11696 // * If Stride evenly divides the iteration space, then once wrap
11697 // occurs, the loop must revisit the same values.
11698 // * We know that RHS is invariant, and that none of those values
11699 // caused this exit to be taken previously. Thus, this exit is
11700 // dynamically dead.
11701 // * If this is the sole exit, then a dead exit implies the loop
11702 // must be infinite if there are no abnormal exits.
11703 // * If the loop were infinite, then it must either not be mustprogress
11704 // or have side effects. Otherwise, it must be UB.
11705 // * It can't (by assumption), be UB so we have contradicted our
11706 // premise and can conclude the IV did not in fact self-wrap.
11707 // From no-self-wrap, we need to then prove no-(un)signed-wrap. This
11708 // follows trivially from the fact that every (un)signed-wrapped, but
11709 // not self-wrapped value must be less than the last value before
11710 // (un)signed wrap. Since we know that last value didn't exit, neither
11711 // will any smaller one.
11713 if (!isLoopInvariant(RHS, L))
11714 return false;
11716 auto *StrideC = dyn_cast<SCEVConstant>(Stride);
11717 if (!StrideC || !StrideC->getAPInt().isPowerOf2())
11718 return false;
11720 if (!ControlsExit || !loopHasNoAbnormalExits(L))
11721 return false;
11723 return loopIsFiniteByAssumption(L);
11724 };
11726 // Avoid proven overflow cases: this will ensure that the backedge taken
11727 // count will not generate any unsigned overflow. Relaxed no-overflow
11728 // conditions exploit NoWrapFlags, allowing to optimize in presence of
11729 // undefined behaviors like the case of C language.
11730 if (canIVOverflowOnLT(RHS, Stride, IsSigned) && !isUBOnWrap())
11731 return getCouldNotCompute();
11732 }
11734 // On all paths just preceding, we established the following invariant:
11735 // IV can be assumed not to overflow up to and including the exiting
11736 // iteration. We proved this in one of two ways:
11737 // 1) We can show overflow doesn't occur before the exiting iteration
11738 // 1a) canIVOverflowOnLT, and 1b) step of one
11739 // 2) We can show that if overflow occurs, the loop must execute UB
11740 // before any possible exit.
11741 // Note that we have not yet proved RHS invariant (in general).
11743 const SCEV *Start = IV->getStart();
11745 // Preserve pointer-typed Start/RHS to pass to isLoopEntryGuardedByCond.
11746 // Use integer-typed versions for actual computation.
11747 const SCEV *OrigStart = Start;
11748 const SCEV *OrigRHS = RHS;
11749 if (Start->getType()->isPointerTy()) {
11750 Start = getLosslessPtrToIntExpr(Start);
11751 if (isa<SCEVCouldNotCompute>(Start))
11752 return Start;
11753 }
11754 if (RHS->getType()->isPointerTy()) {
11755 RHS = getLosslessPtrToIntExpr(RHS);
11756 if (isa<SCEVCouldNotCompute>(RHS))
11757 return RHS;
11758 }
11760 // When the RHS is not invariant, we do not know the end bound of the loop and
11761 // cannot calculate the ExactBECount needed by ExitLimit. However, we can
11762 // calculate the MaxBECount, given the start, stride and max value for the end
11763 // bound of the loop (RHS), and the fact that IV does not overflow (which is
11764 // checked above).
11765 if (!isLoopInvariant(RHS, L)) {
11766 const SCEV *MaxBECount = computeMaxBECountForLT(
11767 Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned);
11768 return ExitLimit(getCouldNotCompute() /* ExactNotTaken */, MaxBECount,
11769 false /*MaxOrZero*/, Predicates);
11770 }
11772 // We use the expression (max(End,Start)-Start)/Stride to describe the
11773 // backedge count, as if the backedge is taken at least once max(End,Start)
11774 // is End and so the result is as above, and if not max(End,Start) is Start
11775 // so we get a backedge count of zero.
11776 const SCEV *BECount = nullptr;
11777 auto *StartMinusStride = getMinusSCEV(OrigStart, Stride);
11778 // Can we prove max(RHS,Start) > Start - Stride?
11779 if (isLoopEntryGuardedByCond(L, Cond, StartMinusStride, Start) &&
11780 isLoopEntryGuardedByCond(L, Cond, StartMinusStride, RHS)) {
11781 // In this case, we can use a refined formula for computing backedge taken
11782 // count. The general formula remains:
11783 // "End-Start /uceiling Stride" where "End = max(RHS,Start)"
11784 // We want to use the alternate formula:
11785 // "((End - 1) - (Start - Stride)) /u Stride"
11786 // Let's do a quick case analysis to show these are equivalent under
11787 // our precondition that max(RHS,Start) > Start - Stride.
11788 // * For RHS <= Start, the backedge-taken count must be zero.
11789 // "((End - 1) - (Start - Stride)) /u Stride" reduces to
11790 // "((Start - 1) - (Start - Stride)) /u Stride" which simplies to
11791 // "Stride - 1 /u Stride" which is indeed zero for all non-zero values
11792 // of Stride. For 0 stride, we've used umax(1, Stride) above, reducing
11793 // this to the stride of 1 case.
11794 // * For RHS >= Start, the backedge count must be "RHS-Start /uceil Stride".
11795 // "((End - 1) - (Start - Stride)) /u Stride" reduces to
11796 // "((RHS - 1) - (Start - Stride)) /u Stride" reassociates to
11797 // "((RHS - (Start - Stride) - 1) /u Stride".
11798 // Our preconditions trivially imply no overflow in that form.
11799 const SCEV *MinusOne = getMinusOne(Stride->getType());
11800 const SCEV *Numerator =
11801 getMinusSCEV(getAddExpr(RHS, MinusOne), StartMinusStride);
11802 if (!isa<SCEVCouldNotCompute>(Numerator)) {
11803 BECount = getUDivExpr(Numerator, Stride);
11804 }
11805 }
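// Plugging in sample values: Start == 0, Stride == 4, RHS == 10 gives
// Numerator == (10 - 1) - (0 - 4) == 13 and BECount == 13 /u 4 == 3,
// matching {0,+,4}: the values 0, 4 and 8 each take the backedge, and 12
// exits the loop.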
11807 const SCEV *BECountIfBackedgeTaken = nullptr;
11808 if (!BECount) {
11809 auto canProveRHSGreaterThanEqualStart = [&]() {
11810 auto CondGE = IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
11811 if (isLoopEntryGuardedByCond(L, CondGE, OrigRHS, OrigStart))
11812 return true;
11814 // (RHS > Start - 1) implies RHS >= Start.
11815 // * "RHS >= Start" is trivially equivalent to "RHS > Start - 1" if
11816 // "Start - 1" doesn't overflow.
11817 // * For signed comparison, if Start - 1 does overflow, it's equal
11818 // to INT_MAX, and "RHS >s INT_MAX" is trivially false.
11819 // * For unsigned comparison, if Start - 1 does overflow, it's equal
11820 // to UINT_MAX, and "RHS >u UINT_MAX" is trivially false.
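// For instance (unsigned, illustrative): if Start == 0 then "Start - 1" wraps
// to UINT_MAX, and the premise "RHS >u UINT_MAX" is unsatisfiable, so the
// implication holds vacuously; in practice we simply fail to prove the guard.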
11822 // FIXME: Should isLoopEntryGuardedByCond do this for us?
11823 auto CondGT = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
11824 auto *StartMinusOne = getAddExpr(OrigStart,
11825 getMinusOne(OrigStart->getType()));
11826 return isLoopEntryGuardedByCond(L, CondGT, OrigRHS, StartMinusOne);
11829 // If we know that RHS >= Start in the context of the loop, then we know that
11830 // max(RHS, Start) = RHS at this point.
11831 const SCEV *End;
11832 if (canProveRHSGreaterThanEqualStart()) {
11833 End = RHS;
11834 } else {
11835 // If RHS < Start, the backedge will be taken zero times. So in
11836 // general, we can write the backedge-taken count as:
11838 // RHS >= Start ? ceil(RHS - Start) / Stride : 0
11840 // We convert it to the following to make it more convenient for SCEV:
11842 // ceil(max(RHS, Start) - Start) / Stride
11843 End = IsSigned ? getSMaxExpr(RHS, Start) : getUMaxExpr(RHS, Start);
11845 // See what would happen if we assume the backedge is taken. This is
11846 // used to compute MaxBECount.
11847 BECountIfBackedgeTaken = getUDivCeilSCEV(getMinusSCEV(RHS, Start), Stride);
11850 // At this point, we know:
11852 // 1. If IsSigned, Start <=s End; otherwise, Start <=u End
11853 // 2. The index variable doesn't overflow.
11855 // Therefore, we know N exists such that
11856 // (Start + Stride * N) >= End, and computing "(Start + Stride * N)"
11857 // doesn't overflow.
11859 // Using this information, try to prove whether the addition in
11860 // "(Start - End) + (Stride - 1)" has unsigned overflow.
11861 const SCEV *One = getOne(Stride->getType());
11862 bool MayAddOverflow = [&] {
11863 if (auto *StrideC = dyn_cast<SCEVConstant>(Stride)) {
11864 if (StrideC->getAPInt().isPowerOf2()) {
11865 // Suppose Stride is a power of two, and Start/End are unsigned
11866 // integers. Let UMAX be the largest representable unsigned
11867 // integer.
11869 // By the preconditions of this function, we know
11870 // "(Start + Stride * N) >= End", and this doesn't overflow.
11871 // As a formula:
11873 // End <= (Start + Stride * N) <= UMAX
11875 // Subtracting Start from all the terms:
11877 // End - Start <= Stride * N <= UMAX - Start
11879 // Since Start is unsigned, UMAX - Start <= UMAX. Therefore:
11881 // End - Start <= Stride * N <= UMAX
11883 // Stride * N is a multiple of Stride. Therefore,
11885 // End - Start <= Stride * N <= UMAX - (UMAX mod Stride)
11887 // Since Stride is a power of two, UMAX + 1 is divisible by Stride.
11888 // Therefore, UMAX mod Stride == Stride - 1. So we can write:
11890 // End - Start <= Stride * N <= UMAX - (Stride - 1)
11892 // Dropping the middle term:
11894 // End - Start <= UMAX - (Stride - 1)
11896 // Adding Stride - 1 to both sides:
11898 // (End - Start) + (Stride - 1) <= UMAX
11900 // In other words, the addition doesn't have unsigned overflow.
11902 // A similar proof works if we treat Start/End as signed values.
11903 // Just rewrite steps before "End - Start <= Stride * N <= UMAX" to
11904 // use signed max instead of unsigned max. Note that we're trying
11905 // to prove a lack of unsigned overflow in either case.
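// Concrete instance of the bound (illustrative, 8-bit): UMAX = 255 and
// Stride = 4. The largest multiple of 4 that fits is 252 = UMAX - (Stride - 1),
// so End - Start <= 252 and (End - Start) + 3 <= 255: no unsigned overflow.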
11906 return false;
11909 if (Start == Stride || Start == getMinusSCEV(Stride, One)) {
11910 // If Start is equal to Stride, (End - Start) + (Stride - 1) == End - 1.
11911 // If !IsSigned, 0 <u Stride == Start <=u End; so 0 <u End - 1 <u End.
11912 // If IsSigned, 0 <s Stride == Start <=s End; so 0 <s End - 1 <s End.
11914 // If Start is equal to Stride - 1, (End - Start) + Stride - 1 == End.
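// E.g. (illustrative values): Start = Stride = 4 and End = 20 give
// (End - Start) + (Stride - 1) = 16 + 3 = 19 == End - 1, which cannot wrap.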
11915 return false;
11917 return true;
11918 }();
11920 const SCEV *Delta = getMinusSCEV(End, Start);
11921 if (!MayAddOverflow) {
11922 // floor((D + (S - 1)) / S)
11923 // We prefer this formulation if it's legal because it's fewer operations.
11924 BECount =
11925 getUDivExpr(getAddExpr(Delta, getMinusSCEV(Stride, One)), Stride);
11926 } else {
11927 BECount = getUDivCeilSCEV(Delta, Stride);
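// Both formulations agree whenever the addition cannot wrap (illustrative:
// Delta = 8, Stride = 4 gives floor((8 + 3) / 4) = 2 == ceil(8 / 4)); the
// explicit ceiling form is only needed when Delta + (Stride - 1) might
// overflow.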
11931 const SCEV *MaxBECount;
11932 bool MaxOrZero = false;
11933 if (isa<SCEVConstant>(BECount)) {
11934 MaxBECount = BECount;
11935 } else if (BECountIfBackedgeTaken &&
11936 isa<SCEVConstant>(BECountIfBackedgeTaken)) {
11937 // If we know exactly how many times the backedge will be taken if it's
11938 // taken at least once, then the backedge count will either be that or
11939 // zero.
11940 MaxBECount = BECountIfBackedgeTaken;
11941 MaxOrZero = true;
11942 } else {
11943 MaxBECount = computeMaxBECountForLT(
11944 Start, Stride, RHS, getTypeSizeInBits(LHS->getType()), IsSigned);
11947 if (isa<SCEVCouldNotCompute>(MaxBECount) &&
11948 !isa<SCEVCouldNotCompute>(BECount))
11949 MaxBECount = getConstant(getUnsignedRangeMax(BECount));
11951 return ExitLimit(BECount, MaxBECount, MaxOrZero, Predicates);
11954 ScalarEvolution::ExitLimit
11955 ScalarEvolution::howManyGreaterThans(const SCEV *LHS, const SCEV *RHS,
11956 const Loop *L, bool IsSigned,
11957 bool ControlsExit, bool AllowPredicates) {
11958 SmallPtrSet<const SCEVPredicate *, 4> Predicates;
11959 // We handle only IV > Invariant
11960 if (!isLoopInvariant(RHS, L))
11961 return getCouldNotCompute();
11963 const SCEVAddRecExpr *IV = dyn_cast<SCEVAddRecExpr>(LHS);
11964 if (!IV && AllowPredicates)
11965 // Try to make this an AddRec using runtime tests, in the first X
11966 // iterations of this loop, where X is the SCEV expression found by the
11967 // algorithm below.
11968 IV = convertSCEVToAddRecWithPredicates(LHS, L, Predicates);
11970 // Avoid weird loops
11971 if (!IV || IV->getLoop() != L || !IV->isAffine())
11972 return getCouldNotCompute();
11974 auto WrapType = IsSigned ? SCEV::FlagNSW : SCEV::FlagNUW;
11975 bool NoWrap = ControlsExit && IV->getNoWrapFlags(WrapType);
11976 ICmpInst::Predicate Cond = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
11978 const SCEV *Stride = getNegativeSCEV(IV->getStepRecurrence(*this));
11980 // Avoid negative or zero stride values
11981 if (!isKnownPositive(Stride))
11982 return getCouldNotCompute();
11984 // Avoid proven overflow cases: this will ensure that the backedge taken count
11985 // will not generate any unsigned overflow. Relaxed no-overflow conditions
11986 // exploit NoWrapFlags, allowing us to optimize in the presence of
11987 // undefined behavior, as in the C language.
11988 if (!Stride->isOne() && !NoWrap)
11989 if (canIVOverflowOnGT(RHS, Stride, IsSigned))
11990 return getCouldNotCompute();
11992 const SCEV *Start = IV->getStart();
11993 const SCEV *End = RHS;
11994 if (!isLoopEntryGuardedByCond(L, Cond, getAddExpr(Start, Stride), RHS)) {
11995 // If we know that Start >= RHS in the context of the loop, then we know that
11996 // min(RHS, Start) = RHS at this point.
11997 if (isLoopEntryGuardedByCond(
11998 L, IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE, Start, RHS))
11999 End = RHS;
12000 else
12001 End = IsSigned ? getSMinExpr(RHS, Start) : getUMinExpr(RHS, Start);
12004 if (Start->getType()->isPointerTy()) {
12005 Start = getLosslessPtrToIntExpr(Start);
12006 if (isa<SCEVCouldNotCompute>(Start))
12007 return Start;
12009 if (End->getType()->isPointerTy()) {
12010 End = getLosslessPtrToIntExpr(End);
12011 if (isa<SCEVCouldNotCompute>(End))
12012 return End;
12015 // Compute ((Start - End) + (Stride - 1)) / Stride.
12016 // FIXME: This can overflow. Holding off on fixing this for now;
12017 // howManyGreaterThans will hopefully be gone soon.
12018 const SCEV *One = getOne(Stride->getType());
12019 const SCEV *BECount = getUDivExpr(
12020 getAddExpr(getMinusSCEV(Start, End), getMinusSCEV(Stride, One)), Stride);
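// Worked example (illustrative values): Start = 10, End = 2, Stride = 4 gives
// ((10 - 2) + 3) /u 4 = 2 backedges: the IV visits 10 and 6 (both > 2) and
// exits once it reaches 2.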
12022 APInt MaxStart = IsSigned ? getSignedRangeMax(Start)
12023 : getUnsignedRangeMax(Start);
12025 APInt MinStride = IsSigned ? getSignedRangeMin(Stride)
12026 : getUnsignedRangeMin(Stride);
12028 unsigned BitWidth = getTypeSizeInBits(LHS->getType());
12029 APInt Limit = IsSigned ? APInt::getSignedMinValue(BitWidth) + (MinStride - 1)
12030 : APInt::getMinValue(BitWidth) + (MinStride - 1);
12032 // Although End can be a MIN expression we estimate MinEnd considering only
12033 // the case End = RHS. This is safe because in the other case (Start - End)
12034 // is zero, leading to a zero maximum backedge taken count.
12035 APInt MinEnd =
12036 IsSigned ? APIntOps::smax(getSignedRangeMin(RHS), Limit)
12037 : APIntOps::umax(getUnsignedRangeMin(RHS), Limit);
12039 const SCEV *MaxBECount = isa<SCEVConstant>(BECount)
12040 ? BECount
12041 : getUDivCeilSCEV(getConstant(MaxStart - MinEnd),
12042 getConstant(MinStride));
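// E.g. (illustrative, unsigned, 8-bit): MaxStart = 100 and MinStride = 4 give
// Limit = 0 + 3 = 3; with getUnsignedRangeMin(RHS) = 3 this yields MinEnd = 3
// and MaxBECount = uceil((100 - 3) / 4) = 25.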
12044 if (isa<SCEVCouldNotCompute>(MaxBECount))
12045 MaxBECount = BECount;
12047 return ExitLimit(BECount, MaxBECount, false, Predicates);
12050 const SCEV *SCEVAddRecExpr::getNumIterationsInRange(const ConstantRange &Range,
12051 ScalarEvolution &SE) const {
12052 if (Range.isFullSet()) // Infinite loop.
12053 return SE.getCouldNotCompute();
12055 // If the start is a non-zero constant, shift the range to simplify things.
12056 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
12057 if (!SC->getValue()->isZero()) {
12058 SmallVector<const SCEV *, 4> Operands(operands());
12059 Operands[0] = SE.getZero(SC->getType());
12060 const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop(),
12061 getNoWrapFlags(FlagNW));
12062 if (const auto *ShiftedAddRec = dyn_cast<SCEVAddRecExpr>(Shifted))
12063 return ShiftedAddRec->getNumIterationsInRange(
12064 Range.subtract(SC->getAPInt()), SE);
12065 // This is strange and shouldn't happen.
12066 return SE.getCouldNotCompute();
12069 // The only time we can solve this is when we have all constant indices.
12070 // Otherwise, we cannot determine the overflow conditions.
12071 if (any_of(operands(), [](const SCEV *Op) { return !isa<SCEVConstant>(Op); }))
12072 return SE.getCouldNotCompute();
12074 // Okay at this point we know that all elements of the chrec are constants and
12075 // that the start element is zero.
12077 // First check to see if the range contains zero. If not, the first
12078 // iteration exits.
12079 unsigned BitWidth = SE.getTypeSizeInBits(getType());
12080 if (!Range.contains(APInt(BitWidth, 0)))
12081 return SE.getZero(getType());
12083 if (isAffine()) {
12084 // If this is an affine expression then we have this situation:
12085 // Solve {0,+,A} in Range === Ax in Range
12087 // We know that zero is in the range. If A is positive then we know that
12088 // the upper value of the range must be the first possible exit value.
12089 // If A is negative then the lower of the range is the last possible loop
12090 // value. Also note that we already checked for a full range.
12091 APInt A = cast<SCEVConstant>(getOperand(1))->getAPInt();
12092 APInt End = A.sge(1) ? (Range.getUpper() - 1) : Range.getLower();
12094 // The exit value should be (End+A)/A.
12095 APInt ExitVal = (End + A).udiv(A);
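// Worked example (illustrative): for {0,+,3} and Range = [0, 10), End = 9 and
// ExitVal = (9 + 3) /u 3 = 4; the chrec evaluates to 12 at iteration 4 (out of
// the range) and to 9 at iteration 3 (still inside), so 4 iterations are spent
// in the range.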
12096 ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal);
12098 // Evaluate at the exit value. If we really did fall out of the valid
12099 // range, then we computed our trip count, otherwise wrap around or other
12100 // things must have happened.
12101 ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE);
12102 if (Range.contains(Val->getValue()))
12103 return SE.getCouldNotCompute(); // Something strange happened
12105 // Ensure that the previous value is in the range. This is a sanity check.
12106 assert(Range.contains(
12107 EvaluateConstantChrecAtConstant(this,
12108 ConstantInt::get(SE.getContext(), ExitVal - 1), SE)->getValue()) &&
12109 "Linear scev computation is off in a bad way!");
12110 return SE.getConstant(ExitValue);
12113 if (isQuadratic()) {
12114 if (auto S = SolveQuadraticAddRecRange(this, Range, SE))
12115 return SE.getConstant(S.getValue());
12118 return SE.getCouldNotCompute();
12121 const SCEVAddRecExpr *
12122 SCEVAddRecExpr::getPostIncExpr(ScalarEvolution &SE) const {
12123 assert(getNumOperands() > 1 && "AddRec with zero step?");
12124 // There is a temptation to just call getAddExpr(this, getStepRecurrence(SE)),
12125 // but in this case we cannot guarantee that the value returned will be an
12126 // AddRec because SCEV does not have a fixed point where it stops
12127 // simplification: it is legal to return ({rec1} + {rec2}). For example, it
12128 // may happen if we reach the arithmetic depth limit while simplifying. So we
12129 // construct the returned value explicitly.
12130 SmallVector<const SCEV *, 3> Ops;
12131 // If this is {A,+,B,+,C,...,+,N}, then its step is {B,+,C,+,...,+,N}, and
12132 // (this + Step) is {A+B,+,B+C,+...,+,N}.
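// E.g. (illustrative): for {1,+,2,+,3} the step is {2,+,3}, and the post-inc
// expression built here is {1+2,+,2+3,+,3} = {3,+,5,+,3}.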
12133 for (unsigned i = 0, e = getNumOperands() - 1; i < e; ++i)
12134 Ops.push_back(SE.getAddExpr(getOperand(i), getOperand(i + 1)));
12135 // We know that the last operand is not a constant zero (otherwise it would
12136 // have been popped out earlier). This guarantees us that if the result has
12137 // the same last operand, then it will also not be popped out, meaning that
12138 // the returned value will be an AddRec.
12139 const SCEV *Last = getOperand(getNumOperands() - 1);
12140 assert(!Last->isZero() && "Recurrence with zero step?");
12141 Ops.push_back(Last);
12142 return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, getLoop(),
12143 SCEV::FlagAnyWrap));
12146 // Return true when S contains at least one undef value.
12147 static inline bool containsUndefs(const SCEV *S) {
12148 return SCEVExprContains(S, [](const SCEV *S) {
12149 if (const auto *SU = dyn_cast<SCEVUnknown>(S))
12150 return isa<UndefValue>(SU->getValue());
12151 return false;
12155 namespace {
12157 // Collect all steps of SCEV expressions.
12158 struct SCEVCollectStrides {
12159 ScalarEvolution &SE;
12160 SmallVectorImpl<const SCEV *> &Strides;
12162 SCEVCollectStrides(ScalarEvolution &SE, SmallVectorImpl<const SCEV *> &S)
12163 : SE(SE), Strides(S) {}
12165 bool follow(const SCEV *S) {
12166 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
12167 Strides.push_back(AR->getStepRecurrence(SE));
12168 return true;
12171 bool isDone() const { return false; }
12174 // Collect all SCEVUnknown, SCEVMulExpr and SCEVSignExtendExpr expressions.
12175 struct SCEVCollectTerms {
12176 SmallVectorImpl<const SCEV *> &Terms;
12178 SCEVCollectTerms(SmallVectorImpl<const SCEV *> &T) : Terms(T) {}
12180 bool follow(const SCEV *S) {
12181 if (isa<SCEVUnknown>(S) || isa<SCEVMulExpr>(S) ||
12182 isa<SCEVSignExtendExpr>(S)) {
12183 if (!containsUndefs(S))
12184 Terms.push_back(S);
12186 // Stop recursion: once we collected a term, do not walk its operands.
12187 return false;
12190 // Keep looking.
12191 return true;
12194 bool isDone() const { return false; }
12197 // Check if a SCEV contains an AddRecExpr.
12198 struct SCEVHasAddRec {
12199 bool &ContainsAddRec;
12201 SCEVHasAddRec(bool &ContainsAddRec) : ContainsAddRec(ContainsAddRec) {
12202 ContainsAddRec = false;
12205 bool follow(const SCEV *S) {
12206 if (isa<SCEVAddRecExpr>(S)) {
12207 ContainsAddRec = true;
12209 // Stop recursion: once we have found an AddRec, do not walk its operands.
12210 return false;
12213 // Keep looking.
12214 return true;
12217 bool isDone() const { return false; }
12220 // Find factors that are multiplied with an expression that (possibly as a
12221 // subexpression) contains an AddRecExpr. In the expression:
12223 // 8 * (100 + %p * %q * (%a + {0, +, 1}_loop))
12225 // "%p * %q" are factors multiplied by the expression "(%a + {0, +, 1}_loop)"
12226 // that contains the AddRec {0, +, 1}_loop. %p * %q are likely to be array size
12227 // parameters as they form a product with an induction variable.
12229 // This collector expects all array size parameters to be in the same MulExpr.
12230 // It might be necessary to later add support for collecting parameters that are
12231 // spread over different nested MulExpr.
12232 struct SCEVCollectAddRecMultiplies {
12233 SmallVectorImpl<const SCEV *> &Terms;
12234 ScalarEvolution &SE;
12236 SCEVCollectAddRecMultiplies(SmallVectorImpl<const SCEV *> &T, ScalarEvolution &SE)
12237 : Terms(T), SE(SE) {}
12239 bool follow(const SCEV *S) {
12240 if (auto *Mul = dyn_cast<SCEVMulExpr>(S)) {
12241 bool HasAddRec = false;
12242 SmallVector<const SCEV *, 0> Operands;
12243 for (auto Op : Mul->operands()) {
12244 const SCEVUnknown *Unknown = dyn_cast<SCEVUnknown>(Op);
12245 if (Unknown && !isa<CallInst>(Unknown->getValue())) {
12246 Operands.push_back(Op);
12247 } else if (Unknown) {
12248 HasAddRec = true;
12249 } else {
12250 bool ContainsAddRec = false;
12251 SCEVHasAddRec ContainsAddRecVisitor(ContainsAddRec);
12252 visitAll(Op, ContainsAddRecVisitor);
12253 HasAddRec |= ContainsAddRec;
12256 if (Operands.size() == 0)
12257 return true;
12259 if (!HasAddRec)
12260 return false;
12262 Terms.push_back(SE.getMulExpr(Operands));
12263 // Stop recursion: once we collected a term, do not walk its operands.
12264 return false;
12267 // Keep looking.
12268 return true;
12271 bool isDone() const { return false; }
12274 } // end anonymous namespace
12276 /// Find parametric terms in this SCEVAddRecExpr. We look for parameters in
12277 /// two places:
12278 /// 1) The strides of AddRec expressions.
12279 /// 2) Unknowns that are multiplied with AddRec expressions.
12280 void ScalarEvolution::collectParametricTerms(const SCEV *Expr,
12281 SmallVectorImpl<const SCEV *> &Terms) {
12282 SmallVector<const SCEV *, 4> Strides;
12283 SCEVCollectStrides StrideCollector(*this, Strides);
12284 visitAll(Expr, StrideCollector);
12286 LLVM_DEBUG({
12287 dbgs() << "Strides:\n";
12288 for (const SCEV *S : Strides)
12289 dbgs() << *S << "\n";
12292 for (const SCEV *S : Strides) {
12293 SCEVCollectTerms TermCollector(Terms);
12294 visitAll(S, TermCollector);
12297 LLVM_DEBUG({
12298 dbgs() << "Terms:\n";
12299 for (const SCEV *T : Terms)
12300 dbgs() << *T << "\n";
12303 SCEVCollectAddRecMultiplies MulCollector(Terms, *this);
12304 visitAll(Expr, MulCollector);
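// Recursive helper for findArrayDimensions. The last term acts as the
// GCD-like divisor for all the others. Sketch of one run (illustrative):
// with Terms = [%m * %o, %o], Step = %o divides both terms exactly, leaving
// [%m, 1]; the constant is erased, the recursion pushes %m, and finally %o is
// pushed, so Sizes = [%m, %o].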
12307 static bool findArrayDimensionsRec(ScalarEvolution &SE,
12308 SmallVectorImpl<const SCEV *> &Terms,
12309 SmallVectorImpl<const SCEV *> &Sizes) {
12310 int Last = Terms.size() - 1;
12311 const SCEV *Step = Terms[Last];
12313 // End of recursion.
12314 if (Last == 0) {
12315 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Step)) {
12316 SmallVector<const SCEV *, 2> Qs;
12317 for (const SCEV *Op : M->operands())
12318 if (!isa<SCEVConstant>(Op))
12319 Qs.push_back(Op);
12321 Step = SE.getMulExpr(Qs);
12324 Sizes.push_back(Step);
12325 return true;
12328 for (const SCEV *&Term : Terms) {
12329 // Normalize the terms before the next call to findArrayDimensionsRec.
12330 const SCEV *Q, *R;
12331 SCEVDivision::divide(SE, Term, Step, &Q, &R);
12333 // Bail out when GCD does not evenly divide one of the terms.
12334 if (!R->isZero())
12335 return false;
12337 Term = Q;
12340 // Remove all SCEVConstants.
12341 erase_if(Terms, [](const SCEV *E) { return isa<SCEVConstant>(E); });
12343 if (Terms.size() > 0)
12344 if (!findArrayDimensionsRec(SE, Terms, Sizes))
12345 return false;
12347 Sizes.push_back(Step);
12348 return true;
12351 // Returns true when one of the SCEVs of Terms contains a SCEVUnknown parameter.
12352 static inline bool containsParameters(SmallVectorImpl<const SCEV *> &Terms) {
12353 for (const SCEV *T : Terms)
12354 if (SCEVExprContains(T, [](const SCEV *S) { return isa<SCEVUnknown>(S); }))
12355 return true;
12357 return false;
12360 // Return the number of product terms in S.
12361 static inline int numberOfTerms(const SCEV *S) {
12362 if (const SCEVMulExpr *Expr = dyn_cast<SCEVMulExpr>(S))
12363 return Expr->getNumOperands();
12364 return 1;
12367 static const SCEV *removeConstantFactors(ScalarEvolution &SE, const SCEV *T) {
12368 if (isa<SCEVConstant>(T))
12369 return nullptr;
12371 if (isa<SCEVUnknown>(T))
12372 return T;
12374 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(T)) {
12375 SmallVector<const SCEV *, 2> Factors;
12376 for (const SCEV *Op : M->operands())
12377 if (!isa<SCEVConstant>(Op))
12378 Factors.push_back(Op);
12380 return SE.getMulExpr(Factors);
12383 return T;
12386 /// Return the size of an element read or written by Inst.
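/// For example (illustrative): for "store double %v, double* %p" this returns
/// the SCEV for sizeof(double), i.e. a constant 8 in the effective pointer
/// index type.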
12387 const SCEV *ScalarEvolution::getElementSize(Instruction *Inst) {
12388 Type *Ty;
12389 if (StoreInst *Store = dyn_cast<StoreInst>(Inst))
12390 Ty = Store->getValueOperand()->getType();
12391 else if (LoadInst *Load = dyn_cast<LoadInst>(Inst))
12392 Ty = Load->getType();
12393 else
12394 return nullptr;
12396 Type *ETy = getEffectiveSCEVType(PointerType::getUnqual(Ty));
12397 return getSizeOfExpr(ETy, Ty);
12400 void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms,
12401 SmallVectorImpl<const SCEV *> &Sizes,
12402 const SCEV *ElementSize) {
12403 if (Terms.size() < 1 || !ElementSize)
12404 return;
12406 // Early return when Terms do not contain parameters: we do not delinearize
12407 // non-parametric SCEVs.
12408 if (!containsParameters(Terms))
12409 return;
12411 LLVM_DEBUG({
12412 dbgs() << "Terms:\n";
12413 for (const SCEV *T : Terms)
12414 dbgs() << *T << "\n";
12417 // Remove duplicates.
12418 array_pod_sort(Terms.begin(), Terms.end());
12419 Terms.erase(std::unique(Terms.begin(), Terms.end()), Terms.end());
12421 // Put larger terms first.
12422 llvm::sort(Terms, [](const SCEV *LHS, const SCEV *RHS) {
12423 return numberOfTerms(LHS) > numberOfTerms(RHS);
12426 // Try to divide all terms by the element size. If a term is not divisible
12427 // by the element size, keep the original term.
12428 for (const SCEV *&Term : Terms) {
12429 const SCEV *Q, *R;
12430 SCEVDivision::divide(*this, Term, ElementSize, &Q, &R);
12431 if (!Q->isZero())
12432 Term = Q;
12435 SmallVector<const SCEV *, 4> NewTerms;
12437 // Remove constant factors.
12438 for (const SCEV *T : Terms)
12439 if (const SCEV *NewT = removeConstantFactors(*this, T))
12440 NewTerms.push_back(NewT);
12442 LLVM_DEBUG({
12443 dbgs() << "Terms after sorting:\n";
12444 for (const SCEV *T : NewTerms)
12445 dbgs() << *T << "\n";
12448 if (NewTerms.empty() || !findArrayDimensionsRec(*this, NewTerms, Sizes)) {
12449 Sizes.clear();
12450 return;
12453 // The last element to be pushed into Sizes is the size of an element.
12454 Sizes.push_back(ElementSize);
12456 LLVM_DEBUG({
12457 dbgs() << "Sizes:\n";
12458 for (const SCEV *S : Sizes)
12459 dbgs() << *S << "\n";
12463 void ScalarEvolution::computeAccessFunctions(
12464 const SCEV *Expr, SmallVectorImpl<const SCEV *> &Subscripts,
12465 SmallVectorImpl<const SCEV *> &Sizes) {
12466 // Early exit in case this SCEV is not an affine multivariate function.
12467 if (Sizes.empty())
12468 return;
12470 if (auto *AR = dyn_cast<SCEVAddRecExpr>(Expr))
12471 if (!AR->isAffine())
12472 return;
12474 const SCEV *Res = Expr;
12475 int Last = Sizes.size() - 1;
12476 for (int i = Last; i >= 0; i--) {
12477 const SCEV *Q, *R;
12478 SCEVDivision::divide(*this, Res, Sizes[i], &Q, &R);
12480 LLVM_DEBUG({
12481 dbgs() << "Res: " << *Res << "\n";
12482 dbgs() << "Sizes[i]: " << *Sizes[i] << "\n";
12483 dbgs() << "Res divided by Sizes[i]:\n";
12484 dbgs() << "Quotient: " << *Q << "\n";
12485 dbgs() << "Remainder: " << *R << "\n";
12488 Res = Q;
12490 // Do not record the last subscript corresponding to the size of elements in
12491 // the array.
12492 if (i == Last) {
12494 // Bail out if the remainder is too complex.
12495 if (isa<SCEVAddRecExpr>(R)) {
12496 Subscripts.clear();
12497 Sizes.clear();
12498 return;
12501 continue;
12504 // Record the access function for the current subscript.
12505 Subscripts.push_back(R);
12508 // Also push in last position the quotient left by the last division: after
12509 // the reversal below it is the access function of the outermost dimension.
12510 Subscripts.push_back(Res);
12512 std::reverse(Subscripts.begin(), Subscripts.end());
12514 LLVM_DEBUG({
12515 dbgs() << "Subscripts:\n";
12516 for (const SCEV *S : Subscripts)
12517 dbgs() << *S << "\n";
12521 /// Splits the SCEV into two vectors of SCEVs representing the subscripts and
12522 /// sizes of an array access. Returns the remainder of the delinearization that
12523 /// is the offset start of the array. The SCEV->delinearize algorithm computes
12524 /// the multiples of SCEV coefficients: that is a pattern matching of sub
12525 /// expressions in the stride and base of a SCEV corresponding to the
12526 /// computation of a GCD (greatest common divisor) of base and stride. When
12527 /// SCEV->delinearize fails, it returns the SCEV unchanged.
12529 /// For example: when analyzing the memory access A[i][j][k] in this loop nest
12531 /// void foo(long n, long m, long o, double A[n][m][o]) {
12533 /// for (long i = 0; i < n; i++)
12534 /// for (long j = 0; j < m; j++)
12535 /// for (long k = 0; k < o; k++)
12536 /// A[i][j][k] = 1.0;
12537 /// }
12539 /// the delinearization input is the following AddRec SCEV:
12541 /// AddRec: {{{%A,+,(8 * %m * %o)}<%for.i>,+,(8 * %o)}<%for.j>,+,8}<%for.k>
12543 /// From this SCEV, we are able to say that the base offset of the access is %A
12544 /// because it appears as an offset that does not divide any of the strides in
12545 /// the loops:
12547 /// CHECK: Base offset: %A
12549 /// and then SCEV->delinearize determines the size of some of the dimensions of
12550 /// the array as these are the multiples by which the strides are happening:
12552 /// CHECK: ArrayDecl[UnknownSize][%m][%o] with elements of sizeof(double) bytes.
12554 /// Note that the outermost dimension remains of UnknownSize because there are
12555 /// no strides that would help identify the size of the last dimension: when
12556 /// the array has been statically allocated, one could compute the size of that
12557 /// dimension by dividing the overall size of the array by the size of the known
12558 /// dimensions: %m * %o * 8.
12560 /// Finally delinearize provides the access functions for the array reference
12561 /// that does correspond to A[i][j][k] of the above C testcase:
12563 /// CHECK: ArrayRef[{0,+,1}<%for.i>][{0,+,1}<%for.j>][{0,+,1}<%for.k>]
12565 /// The testcases check the output of a function pass,
12566 /// DelinearizationPass, which walks through all loads and stores of a
12567 /// function, asks for the SCEV of each memory access with respect to all
12568 /// enclosing loops, calls SCEV->delinearize on that, and prints the results.
12569 void ScalarEvolution::delinearize(const SCEV *Expr,
12570 SmallVectorImpl<const SCEV *> &Subscripts,
12571 SmallVectorImpl<const SCEV *> &Sizes,
12572 const SCEV *ElementSize) {
12573 // First step: collect parametric terms.
12574 SmallVector<const SCEV *, 4> Terms;
12575 collectParametricTerms(Expr, Terms);
12577 if (Terms.empty())
12578 return;
12580 // Second step: find subscript sizes.
12581 findArrayDimensions(Terms, Sizes, ElementSize);
12583 if (Sizes.empty())
12584 return;
12586 // Third step: compute the access functions for each subscript.
12587 computeAccessFunctions(Expr, Subscripts, Sizes);
12589 if (Subscripts.empty())
12590 return;
12592 LLVM_DEBUG({
12593 dbgs() << "succeeded to delinearize " << *Expr << "\n";
12594 dbgs() << "ArrayDecl[UnknownSize]";
12595 for (const SCEV *S : Sizes)
12596 dbgs() << "[" << *S << "]";
12598 dbgs() << "\nArrayRef";
12599 for (const SCEV *S : Subscripts)
12600 dbgs() << "[" << *S << "]";
12601 dbgs() << "\n";
12605 bool ScalarEvolution::getIndexExpressionsFromGEP(
12606 const GetElementPtrInst *GEP, SmallVectorImpl<const SCEV *> &Subscripts,
12607 SmallVectorImpl<int> &Sizes) {
12608 assert(Subscripts.empty() && Sizes.empty() &&
12609 "Expected output lists to be empty on entry to this function.");
12610 assert(GEP && "getIndexExpressionsFromGEP called with a null GEP");
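// Illustrative example: for "getelementptr [8 x [10 x double]],
// [8 x [10 x double]]* %A, i64 0, i64 %i, i64 %j" the leading zero index is
// dropped, leaving Subscripts = [%i, %j] and Sizes = [10]; the outermost
// dimension's extent is deliberately not recorded.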
12611 Type *Ty = nullptr;
12612 bool DroppedFirstDim = false;
12613 for (unsigned i = 1; i < GEP->getNumOperands(); i++) {
12614 const SCEV *Expr = getSCEV(GEP->getOperand(i));
12615 if (i == 1) {
12616 Ty = GEP->getSourceElementType();
12617 if (auto *Const = dyn_cast<SCEVConstant>(Expr))
12618 if (Const->getValue()->isZero()) {
12619 DroppedFirstDim = true;
12620 continue;
12622 Subscripts.push_back(Expr);
12623 continue;
12626 auto *ArrayTy = dyn_cast<ArrayType>(Ty);
12627 if (!ArrayTy) {
12628 Subscripts.clear();
12629 Sizes.clear();
12630 return false;
12633 Subscripts.push_back(Expr);
12634 if (!(DroppedFirstDim && i == 2))
12635 Sizes.push_back(ArrayTy->getNumElements());
12637 Ty = ArrayTy->getElementType();
12639 return !Subscripts.empty();
12642 //===----------------------------------------------------------------------===//
12643 // SCEVCallbackVH Class Implementation
12644 //===----------------------------------------------------------------------===//
12646 void ScalarEvolution::SCEVCallbackVH::deleted() {
12647 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
12648 if (PHINode *PN = dyn_cast<PHINode>(getValPtr()))
12649 SE->ConstantEvolutionLoopExitValue.erase(PN);
12650 SE->eraseValueFromMap(getValPtr());
12651 // this now dangles!
12654 void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) {
12655 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
12657 // Forget all the expressions associated with users of the old value,
12658 // so that future queries will recompute the expressions using the new
12659 // value.
12660 Value *Old = getValPtr();
12661 SmallVector<User *, 16> Worklist(Old->users());
12662 SmallPtrSet<User *, 8> Visited;
12663 while (!Worklist.empty()) {
12664 User *U = Worklist.pop_back_val();
12665 // Deleting the Old value will cause this to dangle. Postpone
12666 // that until everything else is done.
12667 if (U == Old)
12668 continue;
12669 if (!Visited.insert(U).second)
12670 continue;
12671 if (PHINode *PN = dyn_cast<PHINode>(U))
12672 SE->ConstantEvolutionLoopExitValue.erase(PN);
12673 SE->eraseValueFromMap(U);
12674 llvm::append_range(Worklist, U->users());
12676 // Delete the Old value.
12677 if (PHINode *PN = dyn_cast<PHINode>(Old))
12678 SE->ConstantEvolutionLoopExitValue.erase(PN);
12679 SE->eraseValueFromMap(Old);
12680 // this now dangles!
12683 ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se)
12684 : CallbackVH(V), SE(se) {}
12686 //===----------------------------------------------------------------------===//
12687 // ScalarEvolution Class Implementation
12688 //===----------------------------------------------------------------------===//
12690 ScalarEvolution::ScalarEvolution(Function &F, TargetLibraryInfo &TLI,
12691 AssumptionCache &AC, DominatorTree &DT,
12692 LoopInfo &LI)
12693 : F(F), TLI(TLI), AC(AC), DT(DT), LI(LI),
12694 CouldNotCompute(new SCEVCouldNotCompute()), ValuesAtScopes(64),
12695 LoopDispositions(64), BlockDispositions(64) {
12696 // To use guards for proving predicates, we need to scan every instruction in
12697 // relevant basic blocks, and not just terminators. Doing this is a waste of
12698 // time if the IR does not actually contain any calls to
12699 // @llvm.experimental.guard, so do a quick check and remember this beforehand.
12701 // This pessimizes the case where a pass that preserves ScalarEvolution wants
12702 // to _add_ guards to the module when there weren't any before, and wants
12703 // ScalarEvolution to optimize based on those guards. For now we prefer to be
12704 // efficient in lieu of being smart in that rather obscure case.
12706 auto *GuardDecl = F.getParent()->getFunction(
12707 Intrinsic::getName(Intrinsic::experimental_guard));
12708 HasGuards = GuardDecl && !GuardDecl->use_empty();
12711 ScalarEvolution::ScalarEvolution(ScalarEvolution &&Arg)
12712 : F(Arg.F), HasGuards(Arg.HasGuards), TLI(Arg.TLI), AC(Arg.AC), DT(Arg.DT),
12713 LI(Arg.LI), CouldNotCompute(std::move(Arg.CouldNotCompute)),
12714 ValueExprMap(std::move(Arg.ValueExprMap)),
12715 PendingLoopPredicates(std::move(Arg.PendingLoopPredicates)),
12716 PendingPhiRanges(std::move(Arg.PendingPhiRanges)),
12717 PendingMerges(std::move(Arg.PendingMerges)),
12718 MinTrailingZerosCache(std::move(Arg.MinTrailingZerosCache)),
12719 BackedgeTakenCounts(std::move(Arg.BackedgeTakenCounts)),
12720 PredicatedBackedgeTakenCounts(
12721 std::move(Arg.PredicatedBackedgeTakenCounts)),
12722 ConstantEvolutionLoopExitValue(
12723 std::move(Arg.ConstantEvolutionLoopExitValue)),
12724 ValuesAtScopes(std::move(Arg.ValuesAtScopes)),
12725 LoopDispositions(std::move(Arg.LoopDispositions)),
12726 LoopPropertiesCache(std::move(Arg.LoopPropertiesCache)),
12727 BlockDispositions(std::move(Arg.BlockDispositions)),
12728 UnsignedRanges(std::move(Arg.UnsignedRanges)),
12729 SignedRanges(std::move(Arg.SignedRanges)),
12730 UniqueSCEVs(std::move(Arg.UniqueSCEVs)),
12731 UniquePreds(std::move(Arg.UniquePreds)),
12732 SCEVAllocator(std::move(Arg.SCEVAllocator)),
12733 LoopUsers(std::move(Arg.LoopUsers)),
12734 PredicatedSCEVRewrites(std::move(Arg.PredicatedSCEVRewrites)),
12735 FirstUnknown(Arg.FirstUnknown) {
12736 Arg.FirstUnknown = nullptr;
12739 ScalarEvolution::~ScalarEvolution() {
12740 // Iterate through all the SCEVUnknown instances and call their
12741 // destructors, so that they release their references to their values.
12742 for (SCEVUnknown *U = FirstUnknown; U;) {
12743 SCEVUnknown *Tmp = U;
12744 U = U->Next;
12745 Tmp->~SCEVUnknown();
12747 FirstUnknown = nullptr;
12749 ExprValueMap.clear();
12750 ValueExprMap.clear();
12751 HasRecMap.clear();
12752 BackedgeTakenCounts.clear();
12753 PredicatedBackedgeTakenCounts.clear();
12755 assert(PendingLoopPredicates.empty() && "isImpliedCond garbage");
12756 assert(PendingPhiRanges.empty() && "getRangeRef garbage");
12757 assert(PendingMerges.empty() && "isImpliedViaMerge garbage");
12758 assert(!WalkingBEDominatingConds && "isLoopBackedgeGuardedByCond garbage!");
12759 assert(!ProvingSplitPredicate && "ProvingSplitPredicate garbage!");
12762 bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) {
12763 return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L));
12766 static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE,
12767 const Loop *L) {
12768 // Print all inner loops first
12769 for (Loop *I : *L)
12770 PrintLoopInfo(OS, SE, I);
12772 OS << "Loop ";
12773 L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
12774 OS << ": ";
12776 SmallVector<BasicBlock *, 8> ExitingBlocks;
12777 L->getExitingBlocks(ExitingBlocks);
12778 if (ExitingBlocks.size() != 1)
12779 OS << "<multiple exits> ";
12781 if (SE->hasLoopInvariantBackedgeTakenCount(L))
12782 OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L) << "\n";
12783 else
12784 OS << "Unpredictable backedge-taken count.\n";
12786 if (ExitingBlocks.size() > 1)
12787 for (BasicBlock *ExitingBlock : ExitingBlocks) {
12788 OS << " exit count for " << ExitingBlock->getName() << ": "
12789 << *SE->getExitCount(L, ExitingBlock) << "\n";
12792 OS << "Loop ";
12793 L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
12794 OS << ": ";
12796 if (!isa<SCEVCouldNotCompute>(SE->getConstantMaxBackedgeTakenCount(L))) {
12797 OS << "max backedge-taken count is " << *SE->getConstantMaxBackedgeTakenCount(L);
12798 if (SE->isBackedgeTakenCountMaxOrZero(L))
12799 OS << ", actual taken count either this or zero.";
12800 } else {
12801 OS << "Unpredictable max backedge-taken count. ";
12804 OS << "\n"
12805 "Loop ";
12806 L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
12807 OS << ": ";
12809 SCEVUnionPredicate Pred;
12810 auto PBT = SE->getPredicatedBackedgeTakenCount(L, Pred);
12811 if (!isa<SCEVCouldNotCompute>(PBT)) {
12812 OS << "Predicated backedge-taken count is " << *PBT << "\n";
12813 OS << " Predicates:\n";
12814 Pred.print(OS, 4);
12815 } else {
12816 OS << "Unpredictable predicated backedge-taken count. ";
12818 OS << "\n";
12820 if (SE->hasLoopInvariantBackedgeTakenCount(L)) {
12821 OS << "Loop ";
12822 L->getHeader()->printAsOperand(OS, /*PrintType=*/false);
12823 OS << ": ";
12824 OS << "Trip multiple is " << SE->getSmallConstantTripMultiple(L) << "\n";
12828 static StringRef loopDispositionToStr(ScalarEvolution::LoopDisposition LD) {
12829 switch (LD) {
12830 case ScalarEvolution::LoopVariant:
12831 return "Variant";
12832 case ScalarEvolution::LoopInvariant:
12833 return "Invariant";
12834 case ScalarEvolution::LoopComputable:
12835 return "Computable";
12837 llvm_unreachable("Unknown ScalarEvolution::LoopDisposition kind!");
12840 void ScalarEvolution::print(raw_ostream &OS) const {
12841 // ScalarEvolution's implementation of the print method is to print
12842 // out SCEV values of all instructions that are interesting. Doing
12843 // this potentially causes it to create new SCEV objects though,
12844 // which technically conflicts with the const qualifier. This isn't
12845 // observable from outside the class though, so casting away the
12846 // const isn't dangerous.
12847 ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
12849 if (ClassifyExpressions) {
12850 OS << "Classifying expressions for: ";
12851 F.printAsOperand(OS, /*PrintType=*/false);
12852 OS << "\n";
12853 for (Instruction &I : instructions(F))
12854 if (isSCEVable(I.getType()) && !isa<CmpInst>(I)) {
12855 OS << I << '\n';
12856 OS << " --> ";
12857 const SCEV *SV = SE.getSCEV(&I);
12858 SV->print(OS);
12859 if (!isa<SCEVCouldNotCompute>(SV)) {
12860 OS << " U: ";
12861 SE.getUnsignedRange(SV).print(OS);
12862 OS << " S: ";
12863 SE.getSignedRange(SV).print(OS);
12866 const Loop *L = LI.getLoopFor(I.getParent());
12868 const SCEV *AtUse = SE.getSCEVAtScope(SV, L);
12869 if (AtUse != SV) {
12870 OS << " --> ";
12871 AtUse->print(OS);
12872 if (!isa<SCEVCouldNotCompute>(AtUse)) {
12873 OS << " U: ";
12874 SE.getUnsignedRange(AtUse).print(OS);
12875 OS << " S: ";
12876 SE.getSignedRange(AtUse).print(OS);
12880 if (L) {
12881 OS << "\t\t" "Exits: ";
12882 const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop());
12883 if (!SE.isLoopInvariant(ExitValue, L)) {
12884 OS << "<<Unknown>>";
12885 } else {
12886 OS << *ExitValue;
12889 bool First = true;
12890 for (auto *Iter = L; Iter; Iter = Iter->getParentLoop()) {
12891 if (First) {
12892 OS << "\t\t" "LoopDispositions: { ";
12893 First = false;
12894 } else {
12895 OS << ", ";
12898 Iter->getHeader()->printAsOperand(OS, /*PrintType=*/false);
12899 OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, Iter));
12902 for (auto *InnerL : depth_first(L)) {
12903 if (InnerL == L)
12904 continue;
12905 if (First) {
12906 OS << "\t\t" "LoopDispositions: { ";
12907 First = false;
12908 } else {
12909 OS << ", ";
12912 InnerL->getHeader()->printAsOperand(OS, /*PrintType=*/false);
12913 OS << ": " << loopDispositionToStr(SE.getLoopDisposition(SV, InnerL));
12916 OS << " }";
12919 OS << "\n";
12923 OS << "Determining loop execution counts for: ";
12924 F.printAsOperand(OS, /*PrintType=*/false);
12925 OS << "\n";
12926 for (Loop *I : LI)
12927 PrintLoopInfo(OS, &SE, I);
12930 ScalarEvolution::LoopDisposition
12931 ScalarEvolution::getLoopDisposition(const SCEV *S, const Loop *L) {
12932 auto &Values = LoopDispositions[S];
12933 for (auto &V : Values) {
12934 if (V.getPointer() == L)
12935 return V.getInt();
12937 Values.emplace_back(L, LoopVariant);
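// The LoopVariant entry just added is a conservative placeholder that doubles
// as a recursion guard: computeLoopDisposition below may re-enter
// getLoopDisposition and grow or rehash this cache, which is why the slot is
// looked up again afterwards rather than held across the call.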
12938 LoopDisposition D = computeLoopDisposition(S, L);
12939 auto &Values2 = LoopDispositions[S];
12940 for (auto &V : make_range(Values2.rbegin(), Values2.rend())) {
12941 if (V.getPointer() == L) {
12942 V.setInt(D);
12943 break;
12946 return D;
12949 ScalarEvolution::LoopDisposition
12950 ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) {
12951 switch (S->getSCEVType()) {
12952 case scConstant:
12953 return LoopInvariant;
12954 case scPtrToInt:
12955 case scTruncate:
12956 case scZeroExtend:
12957 case scSignExtend:
12958 return getLoopDisposition(cast<SCEVCastExpr>(S)->getOperand(), L);
12959 case scAddRecExpr: {
12960 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);
12962 // If L is the addrec's loop, it's computable.
12963 if (AR->getLoop() == L)
12964 return LoopComputable;
12966 // Add recurrences are never invariant in the function-body (null loop).
12967 if (!L)
12968 return LoopVariant;
12970 // Everything that is not defined at loop entry is variant.
12971 if (DT.dominates(L->getHeader(), AR->getLoop()->getHeader()))
12972 return LoopVariant;
12973 assert(!L->contains(AR->getLoop()) && "Containing loop's header does not"
12974 " dominate the contained loop's header?");
12976 // This recurrence is invariant w.r.t. L if AR's loop contains L.
12977 if (AR->getLoop()->contains(L))
12978 return LoopInvariant;
12980 // This recurrence is variant w.r.t. L if any of its operands
12981 // are variant.
12982 for (auto *Op : AR->operands())
12983 if (!isLoopInvariant(Op, L))
12984 return LoopVariant;
12986 // Otherwise it's loop-invariant.
12987 return LoopInvariant;
12989 case scAddExpr:
12990 case scMulExpr:
12991 case scUMaxExpr:
12992 case scSMaxExpr:
12993 case scUMinExpr:
12994 case scSMinExpr: {
12995 bool HasVarying = false;
12996 for (auto *Op : cast<SCEVNAryExpr>(S)->operands()) {
12997 LoopDisposition D = getLoopDisposition(Op, L);
12998 if (D == LoopVariant)
12999 return LoopVariant;
13000 if (D == LoopComputable)
13001 HasVarying = true;
13003 return HasVarying ? LoopComputable : LoopInvariant;
13005 case scUDivExpr: {
13006 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
13007 LoopDisposition LD = getLoopDisposition(UDiv->getLHS(), L);
13008 if (LD == LoopVariant)
13009 return LoopVariant;
13010 LoopDisposition RD = getLoopDisposition(UDiv->getRHS(), L);
13011 if (RD == LoopVariant)
13012 return LoopVariant;
13013 return (LD == LoopInvariant && RD == LoopInvariant) ?
13014 LoopInvariant : LoopComputable;
13016 case scUnknown:
13017 // All non-instruction values are loop invariant. All instructions are loop
13018 // invariant if they are not contained in the specified loop.
13019 // Instructions are never considered invariant in the function body
13020 // (null loop) because they are defined within the "loop".
13021 if (auto *I = dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue()))
13022 return (L && !L->contains(I)) ? LoopInvariant : LoopVariant;
13023 return LoopInvariant;
13024 case scCouldNotCompute:
13025 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
13027 llvm_unreachable("Unknown SCEV kind!");
13030 bool ScalarEvolution::isLoopInvariant(const SCEV *S, const Loop *L) {
13031 return getLoopDisposition(S, L) == LoopInvariant;
13034 bool ScalarEvolution::hasComputableLoopEvolution(const SCEV *S, const Loop *L) {
13035 return getLoopDisposition(S, L) == LoopComputable;
13038 ScalarEvolution::BlockDisposition
13039 ScalarEvolution::getBlockDisposition(const SCEV *S, const BasicBlock *BB) {
13040 auto &Values = BlockDispositions[S];
13041 for (auto &V : Values) {
13042 if (V.getPointer() == BB)
13043 return V.getInt();
13045 Values.emplace_back(BB, DoesNotDominateBlock);
13046 BlockDisposition D = computeBlockDisposition(S, BB);
13047 auto &Values2 = BlockDispositions[S];
13048 for (auto &V : make_range(Values2.rbegin(), Values2.rend())) {
13049 if (V.getPointer() == BB) {
13050 V.setInt(D);
13051 break;
13054 return D;
13057 ScalarEvolution::BlockDisposition
13058 ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) {
13059 switch (S->getSCEVType()) {
13060 case scConstant:
13061 return ProperlyDominatesBlock;
13062 case scPtrToInt:
13063 case scTruncate:
13064 case scZeroExtend:
13065 case scSignExtend:
13066 return getBlockDisposition(cast<SCEVCastExpr>(S)->getOperand(), BB);
13067 case scAddRecExpr: {
13068 // This uses a "dominates" query instead of "properly dominates" query
13069 // to test for proper dominance too, because the instruction which
13070 // produces the addrec's value is a PHI, and a PHI effectively properly
13071 // dominates its entire containing block.
13072 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);
13073 if (!DT.dominates(AR->getLoop()->getHeader(), BB))
13074 return DoesNotDominateBlock;
13076 // Fall through into SCEVNAryExpr handling.
13077 LLVM_FALLTHROUGH;
13079 case scAddExpr:
13080 case scMulExpr:
13081 case scUMaxExpr:
13082 case scSMaxExpr:
13083 case scUMinExpr:
13084 case scSMinExpr: {
13085 const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S);
13086 bool Proper = true;
13087 for (const SCEV *NAryOp : NAry->operands()) {
13088 BlockDisposition D = getBlockDisposition(NAryOp, BB);
13089 if (D == DoesNotDominateBlock)
13090 return DoesNotDominateBlock;
13091 if (D == DominatesBlock)
13092 Proper = false;
13094 return Proper ? ProperlyDominatesBlock : DominatesBlock;
13096 case scUDivExpr: {
13097 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
13098 const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS();
13099 BlockDisposition LD = getBlockDisposition(LHS, BB);
13100 if (LD == DoesNotDominateBlock)
13101 return DoesNotDominateBlock;
13102 BlockDisposition RD = getBlockDisposition(RHS, BB);
13103 if (RD == DoesNotDominateBlock)
13104 return DoesNotDominateBlock;
13105 return (LD == ProperlyDominatesBlock && RD == ProperlyDominatesBlock) ?
13106 ProperlyDominatesBlock : DominatesBlock;
13108 case scUnknown:
13109 if (Instruction *I =
13110 dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) {
13111 if (I->getParent() == BB)
13112 return DominatesBlock;
13113 if (DT.properlyDominates(I->getParent(), BB))
13114 return ProperlyDominatesBlock;
13115 return DoesNotDominateBlock;
13117 return ProperlyDominatesBlock;
13118 case scCouldNotCompute:
13119 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
13121 llvm_unreachable("Unknown SCEV kind!");
13124 bool ScalarEvolution::dominates(const SCEV *S, const BasicBlock *BB) {
13125 return getBlockDisposition(S, BB) >= DominatesBlock;
13128 bool ScalarEvolution::properlyDominates(const SCEV *S, const BasicBlock *BB) {
13129 return getBlockDisposition(S, BB) == ProperlyDominatesBlock;
13132 bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const {
13133 return SCEVExprContains(S, [&](const SCEV *Expr) { return Expr == Op; });
13136 void
13137 ScalarEvolution::forgetMemoizedResults(const SCEV *S) {
13138 ValuesAtScopes.erase(S);
13139 LoopDispositions.erase(S);
13140 BlockDispositions.erase(S);
13141 UnsignedRanges.erase(S);
13142 SignedRanges.erase(S);
13143 ExprValueMap.erase(S);
13144 HasRecMap.erase(S);
13145 MinTrailingZerosCache.erase(S);
13147 for (auto I = PredicatedSCEVRewrites.begin();
13148 I != PredicatedSCEVRewrites.end();) {
13149 std::pair<const SCEV *, const Loop *> Entry = I->first;
13150 if (Entry.first == S)
13151 PredicatedSCEVRewrites.erase(I++);
13152 else
13153 ++I;
13156 auto RemoveSCEVFromBackedgeMap =
13157 [S](DenseMap<const Loop *, BackedgeTakenInfo> &Map) {
13158 for (auto I = Map.begin(), E = Map.end(); I != E;) {
13159 BackedgeTakenInfo &BEInfo = I->second;
13160 if (BEInfo.hasOperand(S))
13161 Map.erase(I++);
13162 else
13163 ++I;
13167 RemoveSCEVFromBackedgeMap(BackedgeTakenCounts);
13168 RemoveSCEVFromBackedgeMap(PredicatedBackedgeTakenCounts);
13171 void
13172 ScalarEvolution::getUsedLoops(const SCEV *S,
13173 SmallPtrSetImpl<const Loop *> &LoopsUsed) {
13174 struct FindUsedLoops {
13175 FindUsedLoops(SmallPtrSetImpl<const Loop *> &LoopsUsed)
13176 : LoopsUsed(LoopsUsed) {}
13177 SmallPtrSetImpl<const Loop *> &LoopsUsed;
13178 bool follow(const SCEV *S) {
13179 if (auto *AR = dyn_cast<SCEVAddRecExpr>(S))
13180 LoopsUsed.insert(AR->getLoop());
13181 return true;
13184 bool isDone() const { return false; }
13187 FindUsedLoops F(LoopsUsed);
13188 SCEVTraversal<FindUsedLoops>(F).visitAll(S);
13191 void ScalarEvolution::addToLoopUseLists(const SCEV *S) {
13192 SmallPtrSet<const Loop *, 8> LoopsUsed;
13193 getUsedLoops(S, LoopsUsed);
13194 for (auto *L : LoopsUsed)
13195 LoopUsers[L].push_back(S);
13198 void ScalarEvolution::verify() const {
13199 ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
13200 ScalarEvolution SE2(F, TLI, AC, DT, LI);
13202 SmallVector<Loop *, 8> LoopStack(LI.begin(), LI.end());
13204 // Maps SCEV expressions from one ScalarEvolution "universe" to another.
13205 struct SCEVMapper : public SCEVRewriteVisitor<SCEVMapper> {
13206 SCEVMapper(ScalarEvolution &SE) : SCEVRewriteVisitor<SCEVMapper>(SE) {}
13208 const SCEV *visitConstant(const SCEVConstant *Constant) {
13209 return SE.getConstant(Constant->getAPInt());
13212 const SCEV *visitUnknown(const SCEVUnknown *Expr) {
13213 return SE.getUnknown(Expr->getValue());
13216 const SCEV *visitCouldNotCompute(const SCEVCouldNotCompute *Expr) {
13217 return SE.getCouldNotCompute();
13221 SCEVMapper SCM(SE2);
13223 while (!LoopStack.empty()) {
13224 auto *L = LoopStack.pop_back_val();
13225 llvm::append_range(LoopStack, *L);
13227 auto *CurBECount = SCM.visit(
13228 const_cast<ScalarEvolution *>(this)->getBackedgeTakenCount(L));
13229 auto *NewBECount = SE2.getBackedgeTakenCount(L);
13231 if (CurBECount == SE2.getCouldNotCompute() ||
13232 NewBECount == SE2.getCouldNotCompute()) {
13233 // NB! This situation is legal, but is very suspicious -- whatever pass
13234 // changed the loop to make a trip count go from could not compute to
13235 // computable or vice-versa *should have* invalidated SCEV. However, we
13236 // choose not to assert here (for now) since we don't want false
13237 // positives.
13238 continue;
13241 if (containsUndefs(CurBECount) || containsUndefs(NewBECount)) {
13242 // SCEV treats "undef" as an unknown but consistent value (i.e. it does
13243 // not propagate undef aggressively). This means we can (and do) fail
13244 // verification in cases where a transform makes the trip count of a loop
13245 // go from "undef" to "undef+1" (say). The transform is fine, since in
13246 // both cases the loop iterates "undef" times, but SCEV thinks we
13247 // increased the trip count of the loop by 1 incorrectly.
13248 continue;
13251 if (SE.getTypeSizeInBits(CurBECount->getType()) >
13252 SE.getTypeSizeInBits(NewBECount->getType()))
13253 NewBECount = SE2.getZeroExtendExpr(NewBECount, CurBECount->getType());
13254 else if (SE.getTypeSizeInBits(CurBECount->getType()) <
13255 SE.getTypeSizeInBits(NewBECount->getType()))
13256 CurBECount = SE2.getZeroExtendExpr(CurBECount, NewBECount->getType());
13258 const SCEV *Delta = SE2.getMinusSCEV(CurBECount, NewBECount);
13260 // Unless VerifySCEVStrict is set, we only compare constant deltas.
13261 if ((VerifySCEVStrict || isa<SCEVConstant>(Delta)) && !Delta->isZero()) {
13262 dbgs() << "Trip Count for " << *L << " Changed!\n";
13263 dbgs() << "Old: " << *CurBECount << "\n";
13264 dbgs() << "New: " << *NewBECount << "\n";
13265 dbgs() << "Delta: " << *Delta << "\n";
13266 std::abort();
13270 // Collect all valid loops currently in LoopInfo.
13271 SmallPtrSet<Loop *, 32> ValidLoops;
13272 SmallVector<Loop *, 32> Worklist(LI.begin(), LI.end());
13273 while (!Worklist.empty()) {
13274 Loop *L = Worklist.pop_back_val();
13275 if (ValidLoops.contains(L))
13276 continue;
13277 ValidLoops.insert(L);
13278 Worklist.append(L->begin(), L->end());
13280 // Check for SCEV expressions referencing invalid/deleted loops.
13281 for (auto &KV : ValueExprMap) {
13282 auto *AR = dyn_cast<SCEVAddRecExpr>(KV.second);
13283 if (!AR)
13284 continue;
13285 assert(ValidLoops.contains(AR->getLoop()) &&
13286 "AddRec references invalid loop");
13290 bool ScalarEvolution::invalidate(
13291 Function &F, const PreservedAnalyses &PA,
13292 FunctionAnalysisManager::Invalidator &Inv) {
13293 // Invalidate the ScalarEvolution object whenever it isn't preserved or one
13294 // of its dependencies is invalidated.
13295 auto PAC = PA.getChecker<ScalarEvolutionAnalysis>();
13296 return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) ||
13297 Inv.invalidate<AssumptionAnalysis>(F, PA) ||
13298 Inv.invalidate<DominatorTreeAnalysis>(F, PA) ||
13299 Inv.invalidate<LoopAnalysis>(F, PA);
13302 AnalysisKey ScalarEvolutionAnalysis::Key;
13304 ScalarEvolution ScalarEvolutionAnalysis::run(Function &F,
13305 FunctionAnalysisManager &AM) {
13306 return ScalarEvolution(F, AM.getResult<TargetLibraryAnalysis>(F),
13307 AM.getResult<AssumptionAnalysis>(F),
13308 AM.getResult<DominatorTreeAnalysis>(F),
13309 AM.getResult<LoopAnalysis>(F));
13312 PreservedAnalyses
13313 ScalarEvolutionVerifierPass::run(Function &F, FunctionAnalysisManager &AM) {
13314 AM.getResult<ScalarEvolutionAnalysis>(F).verify();
13315 return PreservedAnalyses::all();
13318 PreservedAnalyses
13319 ScalarEvolutionPrinterPass::run(Function &F, FunctionAnalysisManager &AM) {
13320 // For compatibility with opt's -analyze feature under legacy pass manager
13321 // which was not ported to NPM. This keeps tests using
13322 // update_analyze_test_checks.py working.
13323 OS << "Printing analysis 'Scalar Evolution Analysis' for function '"
13324 << F.getName() << "':\n";
13325 AM.getResult<ScalarEvolutionAnalysis>(F).print(OS);
13326 return PreservedAnalyses::all();
13329 INITIALIZE_PASS_BEGIN(ScalarEvolutionWrapperPass, "scalar-evolution",
13330 "Scalar Evolution Analysis", false, true)
13331 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
13332 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
13333 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
13334 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
13335 INITIALIZE_PASS_END(ScalarEvolutionWrapperPass, "scalar-evolution",
13336 "Scalar Evolution Analysis", false, true)
13338 char ScalarEvolutionWrapperPass::ID = 0;
13340 ScalarEvolutionWrapperPass::ScalarEvolutionWrapperPass() : FunctionPass(ID) {
13341 initializeScalarEvolutionWrapperPassPass(*PassRegistry::getPassRegistry());
13344 bool ScalarEvolutionWrapperPass::runOnFunction(Function &F) {
13345 SE.reset(new ScalarEvolution(
13346 F, getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F),
13347 getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F),
13348 getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
13349 getAnalysis<LoopInfoWrapperPass>().getLoopInfo()));
13350 return false;
13353 void ScalarEvolutionWrapperPass::releaseMemory() { SE.reset(); }
13355 void ScalarEvolutionWrapperPass::print(raw_ostream &OS, const Module *) const {
13356 SE->print(OS);
13359 void ScalarEvolutionWrapperPass::verifyAnalysis() const {
13360 if (!VerifySCEV)
13361 return;
13363 SE->verify();
13366 void ScalarEvolutionWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
13367 AU.setPreservesAll();
13368 AU.addRequiredTransitive<AssumptionCacheTracker>();
13369 AU.addRequiredTransitive<LoopInfoWrapperPass>();
13370 AU.addRequiredTransitive<DominatorTreeWrapperPass>();
13371 AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
13374 const SCEVPredicate *ScalarEvolution::getEqualPredicate(const SCEV *LHS,
13375 const SCEV *RHS) {
13376 FoldingSetNodeID ID;
13377 assert(LHS->getType() == RHS->getType() &&
13378 "Type mismatch between LHS and RHS");
13379 // Unique this node based on the arguments
13380 ID.AddInteger(SCEVPredicate::P_Equal);
13381 ID.AddPointer(LHS);
13382 ID.AddPointer(RHS);
13383 void *IP = nullptr;
13384 if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP))
13385 return S;
13386 SCEVEqualPredicate *Eq = new (SCEVAllocator)
13387 SCEVEqualPredicate(ID.Intern(SCEVAllocator), LHS, RHS);
13388 UniquePreds.InsertNode(Eq, IP);
13389 return Eq;
13392 const SCEVPredicate *ScalarEvolution::getWrapPredicate(
13393 const SCEVAddRecExpr *AR,
13394 SCEVWrapPredicate::IncrementWrapFlags AddedFlags) {
13395 FoldingSetNodeID ID;
13396 // Unique this node based on the arguments
13397 ID.AddInteger(SCEVPredicate::P_Wrap);
13398 ID.AddPointer(AR);
13399 ID.AddInteger(AddedFlags);
13400 void *IP = nullptr;
13401 if (const auto *S = UniquePreds.FindNodeOrInsertPos(ID, IP))
13402 return S;
13403 auto *OF = new (SCEVAllocator)
13404 SCEVWrapPredicate(ID.Intern(SCEVAllocator), AR, AddedFlags);
13405 UniquePreds.InsertNode(OF, IP);
13406 return OF;
13409 namespace {
13411 class SCEVPredicateRewriter : public SCEVRewriteVisitor<SCEVPredicateRewriter> {
13412 public:
13414 /// Rewrites \p S in the context of a loop L and the SCEV predication
13415 /// infrastructure.
13417 /// If \p Pred is non-null, the SCEV expression is rewritten to respect the
13418 /// equivalences present in \p Pred.
13420 /// If \p NewPreds is non-null, rewrite is free to add further predicates to
13421 /// \p NewPreds such that the result will be an AddRecExpr.
13422 static const SCEV *rewrite(const SCEV *S, const Loop *L, ScalarEvolution &SE,
13423 SmallPtrSetImpl<const SCEVPredicate *> *NewPreds,
13424 SCEVUnionPredicate *Pred) {
13425 SCEVPredicateRewriter Rewriter(L, SE, NewPreds, Pred);
13426 return Rewriter.visit(S);
  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    if (Pred) {
      auto ExprPreds = Pred->getPredicatesForExpr(Expr);
      for (auto *Pred : ExprPreds)
        if (const auto *IPred = dyn_cast<SCEVEqualPredicate>(Pred))
          if (IPred->getLHS() == Expr)
            return IPred->getRHS();
    }
    return convertToAddRecWithPreds(Expr);
  }

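  // The two extension visitors below share one idea: if the operand is an
  // affine AddRec in this loop whose extension could not be folded
  // statically, assume a wrap predicate (nusw for zext, nssw for sext) and
  // fold the extension as if the corresponding flag held.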
  const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) {
    const SCEV *Operand = visit(Expr->getOperand());
    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand);
    if (AR && AR->getLoop() == L && AR->isAffine()) {
      // This couldn't be folded because the operand didn't have the nuw
      // flag. Add the nusw flag as an assumption that we could make.
      const SCEV *Step = AR->getStepRecurrence(SE);
      Type *Ty = Expr->getType();
      if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNUSW))
        return SE.getAddRecExpr(SE.getZeroExtendExpr(AR->getStart(), Ty),
                                SE.getSignExtendExpr(Step, Ty), L,
                                AR->getNoWrapFlags());
    }
    return SE.getZeroExtendExpr(Operand, Expr->getType());
  }

  const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *Expr) {
    const SCEV *Operand = visit(Expr->getOperand());
    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Operand);
    if (AR && AR->getLoop() == L && AR->isAffine()) {
      // This couldn't be folded because the operand didn't have the nsw
      // flag. Add the nssw flag as an assumption that we could make.
      const SCEV *Step = AR->getStepRecurrence(SE);
      Type *Ty = Expr->getType();
      if (addOverflowAssumption(AR, SCEVWrapPredicate::IncrementNSSW))
        return SE.getAddRecExpr(SE.getSignExtendExpr(AR->getStart(), Ty),
                                SE.getSignExtendExpr(Step, Ty), L,
                                AR->getNoWrapFlags());
    }
    return SE.getSignExtendExpr(Operand, Expr->getType());
  }

private:
  explicit SCEVPredicateRewriter(const Loop *L, ScalarEvolution &SE,
                        SmallPtrSetImpl<const SCEVPredicate *> *NewPreds,
                        SCEVUnionPredicate *Pred)
      : SCEVRewriteVisitor(SE), NewPreds(NewPreds), Pred(Pred), L(L) {}

  bool addOverflowAssumption(const SCEVPredicate *P) {
    if (!NewPreds) {
      // Check if we've already made this assumption.
      return Pred && Pred->implies(P);
    }
    NewPreds->insert(P);
    return true;
  }

  bool addOverflowAssumption(const SCEVAddRecExpr *AR,
                             SCEVWrapPredicate::IncrementWrapFlags AddedFlags) {
    auto *A = SE.getWrapPredicate(AR, AddedFlags);
    return addOverflowAssumption(A);
  }

  // If \p Expr represents a PHINode, we try to see if it can be represented
  // as an AddRec, possibly under a predicate (PHISCEVPred). If it is possible
  // to add this predicate as a runtime overflow check, we return the AddRec.
  // If \p Expr does not meet these conditions (is not a PHI node, or we
  // couldn't create an AddRec for it, or couldn't add the predicate), we just
  // return \p Expr.
  const SCEV *convertToAddRecWithPreds(const SCEVUnknown *Expr) {
    if (!isa<PHINode>(Expr->getValue()))
      return Expr;
    Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
        PredicatedRewrite = SE.createAddRecFromPHIWithCasts(Expr);
    if (!PredicatedRewrite)
      return Expr;
    for (auto *P : PredicatedRewrite->second) {
      // Wrap predicates from outer loops are not supported.
      if (auto *WP = dyn_cast<const SCEVWrapPredicate>(P)) {
        auto *AR = cast<const SCEVAddRecExpr>(WP->getExpr());
        if (L != AR->getLoop())
          return Expr;
      }
      if (!addOverflowAssumption(P))
        return Expr;
    }
    return PredicatedRewrite->first;
  }

  SmallPtrSetImpl<const SCEVPredicate *> *NewPreds;
  SCEVUnionPredicate *Pred;
  const Loop *L;
};

} // end anonymous namespace

const SCEV *ScalarEvolution::rewriteUsingPredicate(const SCEV *S, const Loop *L,
                                                   SCEVUnionPredicate &Preds) {
  return SCEVPredicateRewriter::rewrite(S, L, *this, nullptr, &Preds);
}

const SCEVAddRecExpr *ScalarEvolution::convertSCEVToAddRecWithPredicates(
    const SCEV *S, const Loop *L,
    SmallPtrSetImpl<const SCEVPredicate *> &Preds) {
  SmallPtrSet<const SCEVPredicate *, 4> TransformPreds;
  S = SCEVPredicateRewriter::rewrite(S, L, *this, &TransformPreds, nullptr);
  auto *AddRec = dyn_cast<SCEVAddRecExpr>(S);

  if (!AddRec)
    return nullptr;

  // Since the transformation was successful, we can now transfer the SCEV
  // predicates.
  for (auto *P : TransformPreds)
    Preds.insert(P);

  return AddRec;
}

/// SCEV predicates
SCEVPredicate::SCEVPredicate(const FoldingSetNodeIDRef ID,
                             SCEVPredicateKind Kind)
    : FastID(ID), Kind(Kind) {}

SCEVEqualPredicate::SCEVEqualPredicate(const FoldingSetNodeIDRef ID,
                                       const SCEV *LHS, const SCEV *RHS)
    : SCEVPredicate(ID, P_Equal), LHS(LHS), RHS(RHS) {
  assert(LHS->getType() == RHS->getType() && "LHS and RHS types don't match");
  assert(LHS != RHS && "LHS and RHS are the same SCEV");
}

bool SCEVEqualPredicate::implies(const SCEVPredicate *N) const {
  const auto *Op = dyn_cast<SCEVEqualPredicate>(N);

  if (!Op)
    return false;

  return Op->LHS == LHS && Op->RHS == RHS;
}

bool SCEVEqualPredicate::isAlwaysTrue() const { return false; }

const SCEV *SCEVEqualPredicate::getExpr() const { return LHS; }

void SCEVEqualPredicate::print(raw_ostream &OS, unsigned Depth) const {
  OS.indent(Depth) << "Equal predicate: " << *LHS << " == " << *RHS << "\n";
}

SCEVWrapPredicate::SCEVWrapPredicate(const FoldingSetNodeIDRef ID,
                                     const SCEVAddRecExpr *AR,
                                     IncrementWrapFlags Flags)
    : SCEVPredicate(ID, P_Wrap), AR(AR), Flags(Flags) {}

const SCEV *SCEVWrapPredicate::getExpr() const { return AR; }

bool SCEVWrapPredicate::implies(const SCEVPredicate *N) const {
  const auto *Op = dyn_cast<SCEVWrapPredicate>(N);

  return Op && Op->AR == AR && setFlags(Flags, Op->Flags) == Flags;
}

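// A wrap predicate is trivially true when every flag it would add is already
// implied by the AddRec's static no-wrap flags; currently only NSW implying
// <nssw> is recognized here.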
bool SCEVWrapPredicate::isAlwaysTrue() const {
  SCEV::NoWrapFlags ScevFlags = AR->getNoWrapFlags();
  IncrementWrapFlags IFlags = Flags;

  if (ScalarEvolution::setFlags(ScevFlags, SCEV::FlagNSW) == ScevFlags)
    IFlags = clearFlags(IFlags, IncrementNSSW);

  return IFlags == IncrementAnyWrap;
}

void SCEVWrapPredicate::print(raw_ostream &OS, unsigned Depth) const {
  OS.indent(Depth) << *getExpr() << " Added Flags: ";
  if (SCEVWrapPredicate::IncrementNUSW & getFlags())
    OS << "<nusw>";
  if (SCEVWrapPredicate::IncrementNSSW & getFlags())
    OS << "<nssw>";
  OS << "\n";
}

SCEVWrapPredicate::IncrementWrapFlags
SCEVWrapPredicate::getImpliedFlags(const SCEVAddRecExpr *AR,
                                   ScalarEvolution &SE) {
  IncrementWrapFlags ImpliedFlags = IncrementAnyWrap;
  SCEV::NoWrapFlags StaticFlags = AR->getNoWrapFlags();

  // We can safely transfer the NSW flag as NSSW.
  if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNSW) == StaticFlags)
    ImpliedFlags = IncrementNSSW;

  if (ScalarEvolution::setFlags(StaticFlags, SCEV::FlagNUW) == StaticFlags) {
    // If the increment is a non-negative constant, the SCEV NUW flag also
    // implies the WrapPredicate NUSW flag.
    if (const auto *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(SE)))
      if (Step->getValue()->getValue().isNonNegative())
        ImpliedFlags = setFlags(ImpliedFlags, IncrementNUSW);
  }

  return ImpliedFlags;
}

/// Union predicates don't get cached so create a dummy set ID for it.
SCEVUnionPredicate::SCEVUnionPredicate()
    : SCEVPredicate(FoldingSetNodeIDRef(nullptr, 0), P_Union) {}

bool SCEVUnionPredicate::isAlwaysTrue() const {
  return all_of(Preds,
                [](const SCEVPredicate *I) { return I->isAlwaysTrue(); });
}

ArrayRef<const SCEVPredicate *>
SCEVUnionPredicate::getPredicatesForExpr(const SCEV *Expr) {
  auto I = SCEVToPreds.find(Expr);
  if (I == SCEVToPreds.end())
    return ArrayRef<const SCEVPredicate *>();
  return I->second;
}

bool SCEVUnionPredicate::implies(const SCEVPredicate *N) const {
  if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N))
    return all_of(Set->Preds,
                  [this](const SCEVPredicate *I) { return this->implies(I); });

  auto ScevPredsIt = SCEVToPreds.find(N->getExpr());
  if (ScevPredsIt == SCEVToPreds.end())
    return false;
  auto &SCEVPreds = ScevPredsIt->second;

  return any_of(SCEVPreds,
                [N](const SCEVPredicate *I) { return I->implies(N); });
}

const SCEV *SCEVUnionPredicate::getExpr() const { return nullptr; }

void SCEVUnionPredicate::print(raw_ostream &OS, unsigned Depth) const {
  for (auto Pred : Preds)
    Pred->print(OS, Depth);
}

void SCEVUnionPredicate::add(const SCEVPredicate *N) {
  if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N)) {
    for (auto Pred : Set->Preds)
      add(Pred);
    return;
  }

  if (implies(N))
    return;

  const SCEV *Key = N->getExpr();
  assert(Key && "Only SCEVUnionPredicate doesn't have an "
                "associated expression!");

  SCEVToPreds[Key].push_back(N);
  Preds.push_back(N);
}

PredicatedScalarEvolution::PredicatedScalarEvolution(ScalarEvolution &SE,
                                                     Loop &L)
    : SE(SE), L(L) {}

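// Rewritten SCEVs are cached together with the generation of the predicate
// set that produced them. Adding a predicate bumps the generation, so a stale
// entry is rewritten against the current predicates on the next query instead
// of being returned as-is.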
const SCEV *PredicatedScalarEvolution::getSCEV(Value *V) {
  const SCEV *Expr = SE.getSCEV(V);
  RewriteEntry &Entry = RewriteMap[Expr];

  // If we already have an entry and the version matches, return it.
  if (Entry.second && Generation == Entry.first)
    return Entry.second;

  // We found an entry but it's stale. Rewrite the stale entry
  // according to the current predicate.
  if (Entry.second)
    Expr = Entry.second;

  const SCEV *NewSCEV = SE.rewriteUsingPredicate(Expr, &L, Preds);
  Entry = {Generation, NewSCEV};

  return NewSCEV;
}

const SCEV *PredicatedScalarEvolution::getBackedgeTakenCount() {
  if (!BackedgeCount) {
    SCEVUnionPredicate BackedgePred;
    BackedgeCount = SE.getPredicatedBackedgeTakenCount(&L, BackedgePred);
    addPredicate(BackedgePred);
  }
  return BackedgeCount;
}

void PredicatedScalarEvolution::addPredicate(const SCEVPredicate &Pred) {
  if (Preds.implies(&Pred))
    return;
  Preds.add(&Pred);
  updateGeneration();
}

const SCEVUnionPredicate &PredicatedScalarEvolution::getUnionPredicate() const {
  return Preds;
}

void PredicatedScalarEvolution::updateGeneration() {
  // If the generation number wrapped, recompute everything.
  if (++Generation == 0) {
    for (auto &II : RewriteMap) {
      const SCEV *Rewritten = II.second.second;
      II.second = {Generation, SE.rewriteUsingPredicate(Rewritten, &L, Preds)};
    }
  }
}

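// Record the requested no-overflow flags for V, but only emit a wrap
// predicate for flags that are not already implied by the AddRec's static
// no-wrap flags.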
void PredicatedScalarEvolution::setNoOverflow(
    Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) {
  const SCEV *Expr = getSCEV(V);
  const auto *AR = cast<SCEVAddRecExpr>(Expr);

  auto ImpliedFlags = SCEVWrapPredicate::getImpliedFlags(AR, SE);

  // Clear the statically implied flags.
  Flags = SCEVWrapPredicate::clearFlags(Flags, ImpliedFlags);
  addPredicate(*SE.getWrapPredicate(AR, Flags));

  auto II = FlagsMap.insert({V, Flags});
  if (!II.second)
    II.first->second = SCEVWrapPredicate::setFlags(Flags, II.first->second);
}

bool PredicatedScalarEvolution::hasNoOverflow(
    Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags) {
  const SCEV *Expr = getSCEV(V);
  const auto *AR = cast<SCEVAddRecExpr>(Expr);

  Flags = SCEVWrapPredicate::clearFlags(
      Flags, SCEVWrapPredicate::getImpliedFlags(AR, SE));

  auto II = FlagsMap.find(V);

  if (II != FlagsMap.end())
    Flags = SCEVWrapPredicate::clearFlags(Flags, II->second);

  return Flags == SCEVWrapPredicate::IncrementAnyWrap;
}

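// The predicated counterpart of getSCEV: try to prove that V evolves as an
// AddRec in L, registering any runtime predicates that make this valid, and
// cache the rewritten expression under the new generation.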
const SCEVAddRecExpr *PredicatedScalarEvolution::getAsAddRec(Value *V) {
  const SCEV *Expr = this->getSCEV(V);
  SmallPtrSet<const SCEVPredicate *, 4> NewPreds;
  auto *New = SE.convertSCEVToAddRecWithPredicates(Expr, &L, NewPreds);

  if (!New)
    return nullptr;

  for (auto *P : NewPreds)
    Preds.add(P);

  updateGeneration();
  RewriteMap[SE.getSCEV(V)] = {Generation, New};
  return New;
}

PredicatedScalarEvolution::PredicatedScalarEvolution(
    const PredicatedScalarEvolution &Init)
    : RewriteMap(Init.RewriteMap), SE(Init.SE), L(Init.L), Preds(Init.Preds),
      Generation(Init.Generation), BackedgeCount(Init.BackedgeCount) {
  for (auto I : Init.FlagsMap)
    FlagsMap.insert(I);
}

void PredicatedScalarEvolution::print(raw_ostream &OS, unsigned Depth) const {
  // For each block.
  for (auto *BB : L.getBlocks())
    for (auto &I : *BB) {
      if (!SE.isSCEVable(I.getType()))
        continue;

      auto *Expr = SE.getSCEV(&I);
      auto II = RewriteMap.find(Expr);

      if (II == RewriteMap.end())
        continue;

      // Don't print things that are not interesting.
      if (II->second.second == Expr)
        continue;

      OS.indent(Depth) << "[PSE]" << I << ":\n";
      OS.indent(Depth + 2) << *Expr << "\n";
      OS.indent(Depth + 2) << "--> " << *II->second.second << "\n";
    }
}

// Match the mathematical pattern A - (A / B) * B, where A and B can be
// arbitrary expressions. Also match zext (trunc A to iB) to iY, which is used
// for URem with constant power-of-2 second operands.
// This is not always easy, as A and B can be folded (e.g., if A is X / 2 and
// B is 4, A / B becomes X / 8).
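// For example, with A = %a and B = 8, '%a urem 8' may appear either as
// ((-1 * (%a /u 8)) * 8) + %a or, since 8 is a power of 2, as the low-bit
// mask (zext i3 (trunc i32 %a to i3) to i32).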
bool ScalarEvolution::matchURem(const SCEV *Expr, const SCEV *&LHS,
                                const SCEV *&RHS) {
  // Try to match 'zext (trunc A to iB) to iY', which is used
  // for URem with constant power-of-2 second operands. Make sure the size of
  // the operand A matches the size of the whole expression.
  if (const auto *ZExt = dyn_cast<SCEVZeroExtendExpr>(Expr))
    if (const auto *Trunc = dyn_cast<SCEVTruncateExpr>(ZExt->getOperand(0))) {
      LHS = Trunc->getOperand();
      // Bail out if the type of the LHS is larger than the type of the
      // expression for now.
      if (getTypeSizeInBits(LHS->getType()) >
          getTypeSizeInBits(Expr->getType()))
        return false;
      if (LHS->getType() != Expr->getType())
        LHS = getZeroExtendExpr(LHS, Expr->getType());
      RHS = getConstant(APInt(getTypeSizeInBits(Expr->getType()), 1)
                        << getTypeSizeInBits(Trunc->getType()));
      return true;
    }
  const auto *Add = dyn_cast<SCEVAddExpr>(Expr);
  if (Add == nullptr || Add->getNumOperands() != 2)
    return false;

  const SCEV *A = Add->getOperand(1);
  const auto *Mul = dyn_cast<SCEVMulExpr>(Add->getOperand(0));

  if (Mul == nullptr)
    return false;

  const auto MatchURemWithDivisor = [&](const SCEV *B) {
    // (SomeExpr + (-(SomeExpr / B) * B)).
    if (Expr == getURemExpr(A, B)) {
      LHS = A;
      RHS = B;
      return true;
    }
    return false;
  };

  // (SomeExpr + (-1 * (SomeExpr / B) * B)).
  if (Mul->getNumOperands() == 3 && isa<SCEVConstant>(Mul->getOperand(0)))
    return MatchURemWithDivisor(Mul->getOperand(1)) ||
           MatchURemWithDivisor(Mul->getOperand(2));

  // (SomeExpr + ((-SomeExpr / B) * B)) or (SomeExpr + ((SomeExpr / B) * -B)).
  if (Mul->getNumOperands() == 2)
    return MatchURemWithDivisor(Mul->getOperand(1)) ||
           MatchURemWithDivisor(Mul->getOperand(0)) ||
           MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(1))) ||
           MatchURemWithDivisor(getNegativeSCEV(Mul->getOperand(0)));
  return false;
}

const SCEV *
ScalarEvolution::computeSymbolicMaxBackedgeTakenCount(const Loop *L) {
  SmallVector<BasicBlock *, 16> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  // Form an expression for the maximum exit count possible for this loop. We
  // merge the max and exact information to approximate a version of
  // getConstantMaxBackedgeTakenCount which isn't restricted to just constants.
  SmallVector<const SCEV *, 4> ExitCounts;
  for (BasicBlock *ExitingBB : ExitingBlocks) {
    const SCEV *ExitCount = getExitCount(L, ExitingBB);
    if (isa<SCEVCouldNotCompute>(ExitCount))
      ExitCount = getExitCount(L, ExitingBB,
                               ScalarEvolution::ConstantMaximum);
    if (!isa<SCEVCouldNotCompute>(ExitCount)) {
      assert(DT.dominates(ExitingBB, L->getLoopLatch()) &&
             "We should only have known counts for exiting blocks that "
             "dominate latch!");
      ExitCounts.push_back(ExitCount);
    }
  }
  if (ExitCounts.empty())
    return getCouldNotCompute();
  return getUMinFromMismatchedTypes(ExitCounts);
}

/// This rewriter is similar to SCEVParameterRewriter (it replaces SCEVUnknown
/// components following the Map (Value -> SCEV)), but skips AddRecExpr because
/// we cannot guarantee that the replacement is loop invariant in the loop of
/// the AddRec.
class SCEVLoopGuardRewriter : public SCEVRewriteVisitor<SCEVLoopGuardRewriter> {
  ValueToSCEVMapTy &Map;

public:
  SCEVLoopGuardRewriter(ScalarEvolution &SE, ValueToSCEVMapTy &M)
      : SCEVRewriteVisitor(SE), Map(M) {}

  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *Expr) { return Expr; }

  const SCEV *visitUnknown(const SCEVUnknown *Expr) {
    auto I = Map.find(Expr->getValue());
    if (I == Map.end())
      return Expr;
    return I->second;
  }
};

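// Walk the conditions known to hold on entry to L (dominating branch guards
// and llvm.assume calls) and use them to tighten Expr. For instance, a guard
// 'x u< n' lets %x be rewritten to (umin %x, (-1 + n)) inside the loop.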
const SCEV *ScalarEvolution::applyLoopGuards(const SCEV *Expr, const Loop *L) {
  auto CollectCondition = [&](ICmpInst::Predicate Predicate, const SCEV *LHS,
                              const SCEV *RHS, ValueToSCEVMapTy &RewriteMap) {
    // If we have LHS == 0, check if LHS is computing a property of some
    // unknown SCEV %v, and rewrite %v to express that property explicitly.
    const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS);
    if (Predicate == CmpInst::ICMP_EQ && RHSC &&
        RHSC->getValue()->isNullValue()) {
      // If LHS is A % B, i.e. A % B == 0, rewrite A to (A /u B) * B to
      // explicitly express that.
      const SCEV *URemLHS = nullptr;
      const SCEV *URemRHS = nullptr;
      if (matchURem(LHS, URemLHS, URemRHS)) {
        if (const SCEVUnknown *LHSUnknown = dyn_cast<SCEVUnknown>(URemLHS)) {
          Value *V = LHSUnknown->getValue();
          auto Multiple =
              getMulExpr(getUDivExpr(URemLHS, URemRHS), URemRHS,
                         (SCEV::NoWrapFlags)(SCEV::FlagNUW | SCEV::FlagNSW));
          RewriteMap[V] = Multiple;
          return;
        }
      }
    }

    if (!isa<SCEVUnknown>(LHS) && isa<SCEVUnknown>(RHS)) {
      std::swap(LHS, RHS);
      Predicate = CmpInst::getSwappedPredicate(Predicate);
    }

    // Check for a condition of the form (-C1 + X < C2). InstCombine will
    // create this form when combining two checks of the form (X u< C2 + C1)
    // and (X >=u C1).
    auto MatchRangeCheckIdiom = [this, Predicate, LHS, RHS, &RewriteMap]() {
      auto *AddExpr = dyn_cast<SCEVAddExpr>(LHS);
      if (!AddExpr || AddExpr->getNumOperands() != 2)
        return false;

      auto *C1 = dyn_cast<SCEVConstant>(AddExpr->getOperand(0));
      auto *LHSUnknown = dyn_cast<SCEVUnknown>(AddExpr->getOperand(1));
      auto *C2 = dyn_cast<SCEVConstant>(RHS);
      if (!C1 || !C2 || !LHSUnknown)
        return false;

      auto ExactRegion =
          ConstantRange::makeExactICmpRegion(Predicate, C2->getAPInt())
              .sub(C1->getAPInt());

      // Bail out, unless we have a non-wrapping, monotonic range.
      if (ExactRegion.isWrappedSet() || ExactRegion.isFullSet())
        return false;
      auto I = RewriteMap.find(LHSUnknown->getValue());
      const SCEV *RewrittenLHS = I != RewriteMap.end() ? I->second : LHS;
      RewriteMap[LHSUnknown->getValue()] = getUMaxExpr(
          getConstant(ExactRegion.getUnsignedMin()),
          getUMinExpr(RewrittenLHS, getConstant(ExactRegion.getUnsignedMax())));
      return true;
    };
    if (MatchRangeCheckIdiom())
      return;

    // For now, limit to conditions that provide information about unknown
    // expressions. RHS also cannot contain add recurrences.
    auto *LHSUnknown = dyn_cast<SCEVUnknown>(LHS);
    if (!LHSUnknown || containsAddRecurrence(RHS))
      return;

    // Check whether LHS has already been rewritten. In that case we want to
    // chain further rewrites onto the already rewritten value.
    auto I = RewriteMap.find(LHSUnknown->getValue());
    const SCEV *RewrittenLHS = I != RewriteMap.end() ? I->second : LHS;
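    // Each handled predicate clamps the rewritten LHS from one side with a
    // umin/umax (or smin/smax), so information from multiple guards composes
    // by chaining through RewrittenLHS.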
    const SCEV *RewrittenRHS = nullptr;
    switch (Predicate) {
    case CmpInst::ICMP_ULT:
      RewrittenRHS =
          getUMinExpr(RewrittenLHS, getMinusSCEV(RHS, getOne(RHS->getType())));
      break;
    case CmpInst::ICMP_SLT:
      RewrittenRHS =
          getSMinExpr(RewrittenLHS, getMinusSCEV(RHS, getOne(RHS->getType())));
      break;
    case CmpInst::ICMP_ULE:
      RewrittenRHS = getUMinExpr(RewrittenLHS, RHS);
      break;
    case CmpInst::ICMP_SLE:
      RewrittenRHS = getSMinExpr(RewrittenLHS, RHS);
      break;
    case CmpInst::ICMP_UGT:
      RewrittenRHS =
          getUMaxExpr(RewrittenLHS, getAddExpr(RHS, getOne(RHS->getType())));
      break;
    case CmpInst::ICMP_SGT:
      RewrittenRHS =
          getSMaxExpr(RewrittenLHS, getAddExpr(RHS, getOne(RHS->getType())));
      break;
    case CmpInst::ICMP_UGE:
      RewrittenRHS = getUMaxExpr(RewrittenLHS, RHS);
      break;
    case CmpInst::ICMP_SGE:
      RewrittenRHS = getSMaxExpr(RewrittenLHS, RHS);
      break;
    case CmpInst::ICMP_EQ:
      if (isa<SCEVConstant>(RHS))
        RewrittenRHS = RHS;
      break;
    case CmpInst::ICMP_NE:
      if (isa<SCEVConstant>(RHS) &&
          cast<SCEVConstant>(RHS)->getValue()->isNullValue())
        RewrittenRHS = getUMaxExpr(RewrittenLHS, getOne(RHS->getType()));
      break;
    default:
      break;
    }

    if (RewrittenRHS)
      RewriteMap[LHSUnknown->getValue()] = RewrittenRHS;
  };

  // Starting at the loop predecessor, climb up the predecessor chain, as long
  // as there are predecessors that can be found that have unique successors
  // leading to the original header.
  // TODO: share this logic with isLoopEntryGuardedByCond.
  ValueToSCEVMapTy RewriteMap;
  for (std::pair<const BasicBlock *, const BasicBlock *> Pair(
           L->getLoopPredecessor(), L->getHeader());
       Pair.first; Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
    const BranchInst *LoopEntryPredicate =
        dyn_cast<BranchInst>(Pair.first->getTerminator());
    if (!LoopEntryPredicate || LoopEntryPredicate->isUnconditional())
      continue;

    bool EnterIfTrue = LoopEntryPredicate->getSuccessor(0) == Pair.second;
    SmallVector<Value *, 8> Worklist;
    SmallPtrSet<Value *, 8> Visited;
    Worklist.push_back(LoopEntryPredicate->getCondition());
    while (!Worklist.empty()) {
      Value *Cond = Worklist.pop_back_val();
      if (!Visited.insert(Cond).second)
        continue;

      if (auto *Cmp = dyn_cast<ICmpInst>(Cond)) {
        auto Predicate =
            EnterIfTrue ? Cmp->getPredicate() : Cmp->getInversePredicate();
        CollectCondition(Predicate, getSCEV(Cmp->getOperand(0)),
                         getSCEV(Cmp->getOperand(1)), RewriteMap);
        continue;
      }

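      // Otherwise decompose logical and/or: entering on the true edge means
      // both operands of an 'and' hold; entering on the false edge means both
      // operands of an 'or' fail, so their inverses hold.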
      Value *L, *R;
      if (EnterIfTrue ? match(Cond, m_LogicalAnd(m_Value(L), m_Value(R)))
                      : match(Cond, m_LogicalOr(m_Value(L), m_Value(R)))) {
        Worklist.push_back(L);
        Worklist.push_back(R);
      }
    }
  }

  // Also collect information from assumptions dominating the loop.
  for (auto &AssumeVH : AC.assumptions()) {
    if (!AssumeVH)
      continue;
    auto *AssumeI = cast<CallInst>(AssumeVH);
    auto *Cmp = dyn_cast<ICmpInst>(AssumeI->getOperand(0));
    if (!Cmp || !DT.dominates(AssumeI, L->getHeader()))
      continue;
    CollectCondition(Cmp->getPredicate(), getSCEV(Cmp->getOperand(0)),
                     getSCEV(Cmp->getOperand(1)), RewriteMap);
  }

  if (RewriteMap.empty())
    return Expr;
  SCEVLoopGuardRewriter Rewriter(*this, RewriteMap);
  return Rewriter.visit(Expr);
}