1 //===- LazyValueInfo.cpp - Value constraint analysis ------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the interface for lazy computation of value constraint
10 // information.
12 //===----------------------------------------------------------------------===//
14 #include "llvm/Analysis/LazyValueInfo.h"
15 #include "llvm/ADT/DenseSet.h"
16 #include "llvm/ADT/Optional.h"
17 #include "llvm/ADT/STLExtras.h"
18 #include "llvm/Analysis/AssumptionCache.h"
19 #include "llvm/Analysis/ConstantFolding.h"
20 #include "llvm/Analysis/InstructionSimplify.h"
21 #include "llvm/Analysis/TargetLibraryInfo.h"
22 #include "llvm/Analysis/ValueTracking.h"
23 #include "llvm/Analysis/ValueLattice.h"
24 #include "llvm/IR/AssemblyAnnotationWriter.h"
25 #include "llvm/IR/CFG.h"
26 #include "llvm/IR/ConstantRange.h"
27 #include "llvm/IR/Constants.h"
28 #include "llvm/IR/DataLayout.h"
29 #include "llvm/IR/Dominators.h"
30 #include "llvm/IR/Instructions.h"
31 #include "llvm/IR/IntrinsicInst.h"
32 #include "llvm/IR/Intrinsics.h"
33 #include "llvm/IR/LLVMContext.h"
34 #include "llvm/IR/PatternMatch.h"
35 #include "llvm/IR/ValueHandle.h"
36 #include "llvm/Support/Debug.h"
37 #include "llvm/Support/FormattedStream.h"
38 #include "llvm/Support/raw_ostream.h"
39 #include <map>
40 using namespace llvm;
41 using namespace PatternMatch;
43 #define DEBUG_TYPE "lazy-value-info"
45 // This is the number of worklist items we will process to try to discover an
46 // answer for a given value.
47 static const unsigned MaxProcessedPerValue = 500;
49 char LazyValueInfoWrapperPass::ID = 0;
50 INITIALIZE_PASS_BEGIN(LazyValueInfoWrapperPass, "lazy-value-info",
51 "Lazy Value Information Analysis", false, true)
52 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
53 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
54 INITIALIZE_PASS_END(LazyValueInfoWrapperPass, "lazy-value-info",
55 "Lazy Value Information Analysis", false, true)
57 namespace llvm {
58 FunctionPass *createLazyValueInfoPass() { return new LazyValueInfoWrapperPass(); }
61 AnalysisKey LazyValueAnalysis::Key;
63 /// Returns true if this lattice value represents at most one possible value.
64 /// This is as precise as any lattice value can get while still representing
65 /// reachable code.
66 static bool hasSingleValue(const ValueLatticeElement &Val) {
67 if (Val.isConstantRange() &&
68 Val.getConstantRange().isSingleElement())
69 // Integer constants are single element ranges
70 return true;
71 if (Val.isConstant())
72 // Non integer constants
73 return true;
74 return false;
77 /// Combine two sets of facts about the same value into a single set of
78 /// facts. Note that this method is not suitable for merging facts along
79 /// different paths in a CFG; that's what the mergeIn function is for. This
80 /// is for merging facts gathered about the same value at the same location
81 /// through two independent means.
82 /// Notes:
83 /// * This method does not promise to return the most precise possible lattice
84 ///   value implied by A and B. It is allowed to return any lattice element
85 ///   which is at least as strong as *either* A or B (unless our facts
86 ///   conflict, see below).
87 /// * Due to unreachable code, the intersection of two lattice values could be
88 ///   contradictory. If this happens, we return some valid lattice value so as
89 ///   not to confuse the rest of LVI. Ideally, we'd always return Undefined, but
90 ///   we do not make this guarantee. TODO: This would be a useful enhancement.
91 static ValueLatticeElement intersect(const ValueLatticeElement &A,
92 const ValueLatticeElement &B) {
93 // Undefined is the strongest state. It means the value is known to be along
94 // an unreachable path.
95 if (A.isUndefined())
96 return A;
97 if (B.isUndefined())
98 return B;
100 // If we gave up for one, but got a usable fact from the other, use it.
101 if (A.isOverdefined())
102 return B;
103 if (B.isOverdefined())
104 return A;
106 // Can't get any more precise than constants.
107 if (hasSingleValue(A))
108 return A;
109 if (hasSingleValue(B))
110 return B;
112 // Could be either constant range or not constant here.
113 if (!A.isConstantRange() || !B.isConstantRange()) {
114 // TODO: Arbitrary choice, could be improved
115 return A;
118 // Intersect two constant ranges
119 ConstantRange Range =
120 A.getConstantRange().intersectWith(B.getConstantRange());
121 // Note: An empty range is implicitly converted to overdefined internally.
122 // TODO: We could instead use Undefined here since we've proven a conflict
123 // and thus know this path must be unreachable.
124 return ValueLatticeElement::getRange(std::move(Range));
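// As a small illustrative sketch (the values are hypothetical, not taken from
// any particular query): intersecting the facts "%x is in [0, 10)" and
// "%x is in [5, 20)" yields constantrange<5, 10>:
//   ConstantRange A(APInt(32, 0), APInt(32, 10));  // [0, 10)
//   ConstantRange B(APInt(32, 5), APInt(32, 20));  // [5, 20)
//   ConstantRange C = A.intersectWith(B);          // [5, 10)
// If the two inputs were disjoint, the empty intersection would be turned
// into overdefined rather than Undefined, as noted above.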
127 //===----------------------------------------------------------------------===//
128 // LazyValueInfoCache Decl
129 //===----------------------------------------------------------------------===//
131 namespace {
132 /// A callback value handle updates the cache when values are erased.
133 class LazyValueInfoCache;
134 struct LVIValueHandle final : public CallbackVH {
135 // Needs to access getValPtr(), which is protected.
136 friend struct DenseMapInfo<LVIValueHandle>;
138 LazyValueInfoCache *Parent;
140 LVIValueHandle(Value *V, LazyValueInfoCache *P)
141 : CallbackVH(V), Parent(P) { }
143 void deleted() override;
144 void allUsesReplacedWith(Value *V) override {
145 deleted();
148 } // end anonymous namespace
150 namespace {
151 /// This is the cache kept by LazyValueInfo which
152 /// maintains cached query results across the clients' queries.
153 class LazyValueInfoCache {
154 /// This is all of the cached block information for exactly one Value*.
155 /// The entries are sorted by the BasicBlock* of the
156 /// entries, allowing us to do a lookup with a binary search.
157 /// Over-defined lattice values are recorded in OverDefinedCache to reduce
158 /// memory overhead.
159 struct ValueCacheEntryTy {
160 ValueCacheEntryTy(Value *V, LazyValueInfoCache *P) : Handle(V, P) {}
161 LVIValueHandle Handle;
162 SmallDenseMap<PoisoningVH<BasicBlock>, ValueLatticeElement, 4> BlockVals;
165 /// This tracks, on a per-block basis, the set of values that are
166 /// over-defined at the end of that block.
167 typedef DenseMap<PoisoningVH<BasicBlock>, SmallPtrSet<Value *, 4>>
168 OverDefinedCacheTy;
169 /// Keep track of all blocks that we have ever seen, so we
170 /// don't spend time removing unused blocks from our caches.
171 DenseSet<PoisoningVH<BasicBlock> > SeenBlocks;
173 /// This is all of the cached information for all values,
174 /// mapped from Value* to key information.
175 DenseMap<Value *, std::unique_ptr<ValueCacheEntryTy>> ValueCache;
176 OverDefinedCacheTy OverDefinedCache;
179 public:
180 void insertResult(Value *Val, BasicBlock *BB,
181 const ValueLatticeElement &Result) {
182 SeenBlocks.insert(BB);
184 // Insert over-defined values into their own cache to reduce memory
185 // overhead.
186 if (Result.isOverdefined())
187 OverDefinedCache[BB].insert(Val);
188 else {
189 auto It = ValueCache.find_as(Val);
190 if (It == ValueCache.end()) {
191 ValueCache[Val] = std::make_unique<ValueCacheEntryTy>(Val, this);
192 It = ValueCache.find_as(Val);
193 assert(It != ValueCache.end() && "Val was just added to the map!");
195 It->second->BlockVals[BB] = Result;
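// Illustrative sketch of where results land (hypothetical values): after
//   insertResult(%x, %bb, constantrange<0, 10>)
//   insertResult(%y, %bb, overdefined)
// the range for %x is stored in ValueCache[%x]->BlockVals[%bb], while %y is
// recorded only as a member of OverDefinedCache[%bb].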
199 bool isOverdefined(Value *V, BasicBlock *BB) const {
200 auto ODI = OverDefinedCache.find(BB);
202 if (ODI == OverDefinedCache.end())
203 return false;
205 return ODI->second.count(V);
208 bool hasCachedValueInfo(Value *V, BasicBlock *BB) const {
209 if (isOverdefined(V, BB))
210 return true;
212 auto I = ValueCache.find_as(V);
213 if (I == ValueCache.end())
214 return false;
216 return I->second->BlockVals.count(BB);
219 ValueLatticeElement getCachedValueInfo(Value *V, BasicBlock *BB) const {
220 if (isOverdefined(V, BB))
221 return ValueLatticeElement::getOverdefined();
223 auto I = ValueCache.find_as(V);
224 if (I == ValueCache.end())
225 return ValueLatticeElement();
226 auto BBI = I->second->BlockVals.find(BB);
227 if (BBI == I->second->BlockVals.end())
228 return ValueLatticeElement();
229 return BBI->second;
232 /// clear - Empty the cache.
233 void clear() {
234 SeenBlocks.clear();
235 ValueCache.clear();
236 OverDefinedCache.clear();
239 /// Inform the cache that a given value has been deleted.
240 void eraseValue(Value *V);
242 /// This is part of the update interface to inform the cache
243 /// that a block has been deleted.
244 void eraseBlock(BasicBlock *BB);
246 /// Updates the cache to remove any influence an overdefined value in
247 /// OldSucc might have (unless also overdefined in NewSucc). This just
248 /// flushes elements from the cache and does not add any.
249 void threadEdgeImpl(BasicBlock *OldSucc,BasicBlock *NewSucc);
251 friend struct LVIValueHandle;
255 void LazyValueInfoCache::eraseValue(Value *V) {
256 for (auto I = OverDefinedCache.begin(), E = OverDefinedCache.end(); I != E;) {
257 // Copy and increment the iterator immediately so we can erase behind
258 // ourselves.
259 auto Iter = I++;
260 SmallPtrSetImpl<Value *> &ValueSet = Iter->second;
261 ValueSet.erase(V);
262 if (ValueSet.empty())
263 OverDefinedCache.erase(Iter);
266 ValueCache.erase(V);
269 void LVIValueHandle::deleted() {
270 // This erasure deallocates *this, so it MUST happen after we're done
271 // using any and all members of *this.
272 Parent->eraseValue(*this);
275 void LazyValueInfoCache::eraseBlock(BasicBlock *BB) {
276 // Shortcut if we have never seen this block.
277 DenseSet<PoisoningVH<BasicBlock> >::iterator I = SeenBlocks.find(BB);
278 if (I == SeenBlocks.end())
279 return;
280 SeenBlocks.erase(I);
282 auto ODI = OverDefinedCache.find(BB);
283 if (ODI != OverDefinedCache.end())
284 OverDefinedCache.erase(ODI);
286 for (auto &I : ValueCache)
287 I.second->BlockVals.erase(BB);
290 void LazyValueInfoCache::threadEdgeImpl(BasicBlock *OldSucc,
291 BasicBlock *NewSucc) {
292 // When an edge in the graph has been threaded, values that we could not
293 // determine a value for before (i.e. were marked overdefined) may be
294 // possible to solve now. We do NOT try to proactively update these values.
295 // Instead, we clear their entries from the cache, and allow lazy updating to
296 // recompute them when needed.
298 // The updating process is fairly simple: we need to drop cached info
299 // for all values that were marked overdefined in OldSucc, and for those same
300 // values in any successor of OldSucc (except NewSucc) in which they were
301 // also marked overdefined.
302 std::vector<BasicBlock*> worklist;
303 worklist.push_back(OldSucc);
305 auto I = OverDefinedCache.find(OldSucc);
306 if (I == OverDefinedCache.end())
307 return; // Nothing to process here.
308 SmallVector<Value *, 4> ValsToClear(I->second.begin(), I->second.end());
310 // Use a worklist to perform a depth-first search of OldSucc's successors.
311 // NOTE: We do not need a visited list since any blocks we have already
312 // visited will have had their overdefined markers cleared already, and we
313 // thus won't loop to their successors.
314 while (!worklist.empty()) {
315 BasicBlock *ToUpdate = worklist.back();
316 worklist.pop_back();
318 // Skip blocks only accessible through NewSucc.
319 if (ToUpdate == NewSucc) continue;
321 // If a value was marked overdefined in OldSucc, and is here too...
322 auto OI = OverDefinedCache.find(ToUpdate);
323 if (OI == OverDefinedCache.end())
324 continue;
325 SmallPtrSetImpl<Value *> &ValueSet = OI->second;
327 bool changed = false;
328 for (Value *V : ValsToClear) {
329 if (!ValueSet.erase(V))
330 continue;
332 // If we removed anything, then we potentially need to update
333 // the block's successors too.
334 changed = true;
336 if (ValueSet.empty()) {
337 OverDefinedCache.erase(OI);
338 break;
342 if (!changed) continue;
344 worklist.insert(worklist.end(), succ_begin(ToUpdate), succ_end(ToUpdate));
349 namespace {
350 /// An assembly annotator class to print LazyValueCache information in
351 /// comments.
352 class LazyValueInfoImpl;
353 class LazyValueInfoAnnotatedWriter : public AssemblyAnnotationWriter {
354 LazyValueInfoImpl *LVIImpl;
355 // While analyzing which blocks we can solve values for, we need the dominator
356 // information. Since this is an optional parameter in LVI, we require this
357 // DomTreeAnalysis pass in the printer pass, and pass the dominator
358 // tree to the LazyValueInfoAnnotatedWriter.
359 DominatorTree &DT;
361 public:
362 LazyValueInfoAnnotatedWriter(LazyValueInfoImpl *L, DominatorTree &DTree)
363 : LVIImpl(L), DT(DTree) {}
365 virtual void emitBasicBlockStartAnnot(const BasicBlock *BB,
366 formatted_raw_ostream &OS);
368 virtual void emitInstructionAnnot(const Instruction *I,
369 formatted_raw_ostream &OS);
372 namespace {
373 // The actual implementation of the lazy analysis and update. Note that the
374 // cached results are kept in a separate LazyValueInfoCache object rather
375 // than inherited from it (a has-a relationship; see TheCache below).
376 class LazyValueInfoImpl {
378 /// Cached results from previous queries
379 LazyValueInfoCache TheCache;
381 /// This stack holds the state of the value solver during a query.
382 /// It basically emulates the callstack of the naive
383 /// recursive value lookup process.
384 SmallVector<std::pair<BasicBlock*, Value*>, 8> BlockValueStack;
386 /// Keeps track of which block-value pairs are in BlockValueStack.
387 DenseSet<std::pair<BasicBlock*, Value*> > BlockValueSet;
389 /// Push BV onto BlockValueStack unless it's already in there.
390 /// Returns true on success.
391 bool pushBlockValue(const std::pair<BasicBlock *, Value *> &BV) {
392 if (!BlockValueSet.insert(BV).second)
393 return false; // It's already in the stack.
395 LLVM_DEBUG(dbgs() << "PUSH: " << *BV.second << " in "
396 << BV.first->getName() << "\n");
397 BlockValueStack.push_back(BV);
398 return true;
401 AssumptionCache *AC; ///< A pointer to the cache of @llvm.assume calls.
402 const DataLayout &DL; ///< A mandatory DataLayout
403 DominatorTree *DT; ///< An optional DT pointer.
404 DominatorTree *DisabledDT; ///< Stores DT if it's disabled.
406 ValueLatticeElement getBlockValue(Value *Val, BasicBlock *BB);
407 bool getEdgeValue(Value *V, BasicBlock *F, BasicBlock *T,
408 ValueLatticeElement &Result, Instruction *CxtI = nullptr);
409 bool hasBlockValue(Value *Val, BasicBlock *BB);
411 // These methods process one work item and may add more. A false value
412 // returned means that the work item was not completely processed and must
413 // be revisited after going through the new items.
414 bool solveBlockValue(Value *Val, BasicBlock *BB);
415 bool solveBlockValueImpl(ValueLatticeElement &Res, Value *Val,
416 BasicBlock *BB);
417 bool solveBlockValueNonLocal(ValueLatticeElement &BBLV, Value *Val,
418 BasicBlock *BB);
419 bool solveBlockValuePHINode(ValueLatticeElement &BBLV, PHINode *PN,
420 BasicBlock *BB);
421 bool solveBlockValueSelect(ValueLatticeElement &BBLV, SelectInst *S,
422 BasicBlock *BB);
423 Optional<ConstantRange> getRangeForOperand(unsigned Op, Instruction *I,
424 BasicBlock *BB);
425 bool solveBlockValueBinaryOpImpl(
426 ValueLatticeElement &BBLV, Instruction *I, BasicBlock *BB,
427 std::function<ConstantRange(const ConstantRange &,
428 const ConstantRange &)> OpFn);
429 bool solveBlockValueBinaryOp(ValueLatticeElement &BBLV, BinaryOperator *BBI,
430 BasicBlock *BB);
431 bool solveBlockValueCast(ValueLatticeElement &BBLV, CastInst *CI,
432 BasicBlock *BB);
433 bool solveBlockValueOverflowIntrinsic(
434 ValueLatticeElement &BBLV, WithOverflowInst *WO, BasicBlock *BB);
435 bool solveBlockValueIntrinsic(ValueLatticeElement &BBLV, IntrinsicInst *II,
436 BasicBlock *BB);
437 bool solveBlockValueExtractValue(ValueLatticeElement &BBLV,
438 ExtractValueInst *EVI, BasicBlock *BB);
439 void intersectAssumeOrGuardBlockValueConstantRange(Value *Val,
440 ValueLatticeElement &BBLV,
441 Instruction *BBI);
443 void solve();
445 public:
446 /// This is the query interface to determine the lattice
447 /// value for the specified Value* at the end of the specified block.
448 ValueLatticeElement getValueInBlock(Value *V, BasicBlock *BB,
449 Instruction *CxtI = nullptr);
451 /// This is the query interface to determine the lattice
452 /// value for the specified Value* at the specified instruction (generally
453 /// from an assume intrinsic).
454 ValueLatticeElement getValueAt(Value *V, Instruction *CxtI);
456 /// This is the query interface to determine the lattice
457 /// value for the specified Value* that is true on the specified edge.
458 ValueLatticeElement getValueOnEdge(Value *V, BasicBlock *FromBB,
459 BasicBlock *ToBB,
460 Instruction *CxtI = nullptr);
462 /// Completely flush all previously computed values
463 void clear() {
464 TheCache.clear();
467 /// Printing the LazyValueInfo Analysis.
468 void printLVI(Function &F, DominatorTree &DTree, raw_ostream &OS) {
469 LazyValueInfoAnnotatedWriter Writer(this, DTree);
470 F.print(OS, &Writer);
473 /// This is part of the update interface to inform the cache
474 /// that a block has been deleted.
475 void eraseBlock(BasicBlock *BB) {
476 TheCache.eraseBlock(BB);
479 /// Disables use of the DominatorTree within LVI.
480 void disableDT() {
481 if (DT) {
482 assert(!DisabledDT && "Both DT and DisabledDT are not nullptr!");
483 std::swap(DT, DisabledDT);
487 /// Enables use of the DominatorTree within LVI. Does nothing if the class
488 /// instance was initialized without a DT pointer.
489 void enableDT() {
490 if (DisabledDT) {
491 assert(!DT && "Both DT and DisabledDT are not nullptr!");
492 std::swap(DT, DisabledDT);
496 /// This is the update interface to inform the cache that an edge from
497 /// PredBB to OldSucc has been threaded to be from PredBB to NewSucc.
498 void threadEdge(BasicBlock *PredBB,BasicBlock *OldSucc,BasicBlock *NewSucc);
500 LazyValueInfoImpl(AssumptionCache *AC, const DataLayout &DL,
501 DominatorTree *DT = nullptr)
502 : AC(AC), DL(DL), DT(DT), DisabledDT(nullptr) {}
504 } // end anonymous namespace
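// Client passes normally reach this implementation through the public
// LazyValueInfo wrapper rather than directly; a typical query (hypothetical
// client code) such as
//   ConstantRange CR = LVI->getConstantRange(V, BB, CxtI);
// bottoms out in getValueInBlock(), which pushes work items and runs solve()
// below until the requested lattice value has been cached.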
507 void LazyValueInfoImpl::solve() {
508 SmallVector<std::pair<BasicBlock *, Value *>, 8> StartingStack(
509 BlockValueStack.begin(), BlockValueStack.end());
511 unsigned processedCount = 0;
512 while (!BlockValueStack.empty()) {
513 processedCount++;
514 // Abort if we have to process too many values to get a result for this one.
515 // Because of the design of the overdefined cache currently being per-block
516 // to avoid naming-related issues (i.e. it wants to try to give different
517 // results for the same name in different blocks), overdefined results don't
518 // get cached globally, which in turn means we will often try to rediscover
519 // the same overdefined result again and again. Once something like
520 // PredicateInfo is used in LVI or CVP, we should be able to make the
521 // overdefined cache global, and remove this throttle.
522 if (processedCount > MaxProcessedPerValue) {
523 LLVM_DEBUG(
524 dbgs() << "Giving up on stack because we are getting too deep\n");
525 // Fill in the original values
526 while (!StartingStack.empty()) {
527 std::pair<BasicBlock *, Value *> &e = StartingStack.back();
528 TheCache.insertResult(e.second, e.first,
529 ValueLatticeElement::getOverdefined());
530 StartingStack.pop_back();
532 BlockValueSet.clear();
533 BlockValueStack.clear();
534 return;
536 std::pair<BasicBlock *, Value *> e = BlockValueStack.back();
537 assert(BlockValueSet.count(e) && "Stack value should be in BlockValueSet!");
539 if (solveBlockValue(e.second, e.first)) {
540 // The work item was completely processed.
541 assert(BlockValueStack.back() == e && "Nothing should have been pushed!");
542 assert(TheCache.hasCachedValueInfo(e.second, e.first) &&
543 "Result should be in cache!");
545 LLVM_DEBUG(
546 dbgs() << "POP " << *e.second << " in " << e.first->getName() << " = "
547 << TheCache.getCachedValueInfo(e.second, e.first) << "\n");
549 BlockValueStack.pop_back();
550 BlockValueSet.erase(e);
551 } else {
552 // More work needs to be done before revisiting.
553 assert(BlockValueStack.back() != e && "Stack should have been pushed!");
558 bool LazyValueInfoImpl::hasBlockValue(Value *Val, BasicBlock *BB) {
559 // If already a constant, there is nothing to compute.
560 if (isa<Constant>(Val))
561 return true;
563 return TheCache.hasCachedValueInfo(Val, BB);
566 ValueLatticeElement LazyValueInfoImpl::getBlockValue(Value *Val,
567 BasicBlock *BB) {
568 // If already a constant, there is nothing to compute.
569 if (Constant *VC = dyn_cast<Constant>(Val))
570 return ValueLatticeElement::get(VC);
572 return TheCache.getCachedValueInfo(Val, BB);
575 static ValueLatticeElement getFromRangeMetadata(Instruction *BBI) {
576 switch (BBI->getOpcode()) {
577 default: break;
578 case Instruction::Load:
579 case Instruction::Call:
580 case Instruction::Invoke:
581 if (MDNode *Ranges = BBI->getMetadata(LLVMContext::MD_range))
582 if (isa<IntegerType>(BBI->getType())) {
583 return ValueLatticeElement::getRange(
584 getConstantRangeFromMetadata(*Ranges));
586 break;
588 // Nothing known - will be intersected with other facts
589 return ValueLatticeElement::getOverdefined();
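// For example (hypothetical IR), a load annotated with range metadata such as
//   %v = load i32, i32* %p, align 4, !range !0
//   !0 = !{i32 0, i32 256}
// is mapped to the lattice value constantrange<0, 256>.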
592 bool LazyValueInfoImpl::solveBlockValue(Value *Val, BasicBlock *BB) {
593 if (isa<Constant>(Val))
594 return true;
596 if (TheCache.hasCachedValueInfo(Val, BB)) {
597 // If we have a cached value, use that.
598 LLVM_DEBUG(dbgs() << " reuse BB '" << BB->getName() << "' val="
599 << TheCache.getCachedValueInfo(Val, BB) << '\n');
601 // Since we're reusing a cached value, we don't need to update the
602 // OverDefinedCache. The cache will have been properly updated whenever the
603 // cached value was inserted.
604 return true;
607 // Hold off inserting this value into the Cache in case we have to return
608 // false and come back later.
609 ValueLatticeElement Res;
610 if (!solveBlockValueImpl(Res, Val, BB))
611 // Work pushed, will revisit
612 return false;
614 TheCache.insertResult(Val, BB, Res);
615 return true;
618 bool LazyValueInfoImpl::solveBlockValueImpl(ValueLatticeElement &Res,
619 Value *Val, BasicBlock *BB) {
621 Instruction *BBI = dyn_cast<Instruction>(Val);
622 if (!BBI || BBI->getParent() != BB)
623 return solveBlockValueNonLocal(Res, Val, BB);
625 if (PHINode *PN = dyn_cast<PHINode>(BBI))
626 return solveBlockValuePHINode(Res, PN, BB);
628 if (auto *SI = dyn_cast<SelectInst>(BBI))
629 return solveBlockValueSelect(Res, SI, BB);
631 // If this value is a nonnull pointer, record its range and bail out. Note
632 // that for all other pointer typed values, we terminate the search at the
633 // definition. We could easily extend this to look through geps, bitcasts,
634 // and the like to prove non-nullness, but it's not clear that's worth it
635 // compile time wise. The context-insensitive value walk done inside
636 // isKnownNonZero gets most of the profitable cases at much less expense.
637 // This does mean that we have a sensitivity to where the defining
638 // instruction is placed, even if it could legally be hoisted much higher.
639 // That is unfortunate.
640 PointerType *PT = dyn_cast<PointerType>(BBI->getType());
641 if (PT && isKnownNonZero(BBI, DL)) {
642 Res = ValueLatticeElement::getNot(ConstantPointerNull::get(PT));
643 return true;
645 if (BBI->getType()->isIntegerTy()) {
646 if (auto *CI = dyn_cast<CastInst>(BBI))
647 return solveBlockValueCast(Res, CI, BB);
649 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(BBI))
650 return solveBlockValueBinaryOp(Res, BO, BB);
652 if (auto *EVI = dyn_cast<ExtractValueInst>(BBI))
653 return solveBlockValueExtractValue(Res, EVI, BB);
655 if (auto *II = dyn_cast<IntrinsicInst>(BBI))
656 return solveBlockValueIntrinsic(Res, II, BB);
659 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
660 << "' - unknown inst def found.\n");
661 Res = getFromRangeMetadata(BBI);
662 return true;
665 static bool InstructionDereferencesPointer(Instruction *I, Value *Ptr) {
666 if (LoadInst *L = dyn_cast<LoadInst>(I)) {
667 return L->getPointerAddressSpace() == 0 &&
668 GetUnderlyingObject(L->getPointerOperand(),
669 L->getModule()->getDataLayout()) == Ptr;
671 if (StoreInst *S = dyn_cast<StoreInst>(I)) {
672 return S->getPointerAddressSpace() == 0 &&
673 GetUnderlyingObject(S->getPointerOperand(),
674 S->getModule()->getDataLayout()) == Ptr;
676 if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) {
677 if (MI->isVolatile()) return false;
679 // FIXME: check whether it has a value range that excludes zero?
680 ConstantInt *Len = dyn_cast<ConstantInt>(MI->getLength());
681 if (!Len || Len->isZero()) return false;
683 if (MI->getDestAddressSpace() == 0)
684 if (GetUnderlyingObject(MI->getRawDest(),
685 MI->getModule()->getDataLayout()) == Ptr)
686 return true;
687 if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI))
688 if (MTI->getSourceAddressSpace() == 0)
689 if (GetUnderlyingObject(MTI->getRawSource(),
690 MTI->getModule()->getDataLayout()) == Ptr)
691 return true;
693 return false;
696 /// Return true if the allocation associated with Val is ever dereferenced
697 /// within the given basic block. This establishes the fact Val is not null,
698 /// but does not imply that the memory at Val is dereferenceable. (Val may
699 /// point off the end of the dereferenceable part of the object.)
700 static bool isObjectDereferencedInBlock(Value *Val, BasicBlock *BB) {
701 assert(Val->getType()->isPointerTy());
703 const DataLayout &DL = BB->getModule()->getDataLayout();
704 Value *UnderlyingVal = GetUnderlyingObject(Val, DL);
705 // If 'GetUnderlyingObject' didn't converge, skip it. It won't converge
706 // inside InstructionDereferencesPointer either.
707 if (UnderlyingVal == GetUnderlyingObject(UnderlyingVal, DL, 1))
708 for (Instruction &I : *BB)
709 if (InstructionDereferencesPointer(&I, UnderlyingVal))
710 return true;
711 return false;
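// For example (hypothetical IR), a block containing
//   %v = load i32, i32* %p
// dereferences %p, so a query about %p in that block may conclude %p != null
// (in address space 0, where null is not a defined address for the function),
// even though nothing at %p's definition implied that.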
714 bool LazyValueInfoImpl::solveBlockValueNonLocal(ValueLatticeElement &BBLV,
715 Value *Val, BasicBlock *BB) {
716 ValueLatticeElement Result; // Start Undefined.
718 // If this is the entry block, we must be asking about an argument. The
719 // value is overdefined.
720 if (BB == &BB->getParent()->getEntryBlock()) {
721 assert(isa<Argument>(Val) && "Unknown live-in to the entry block");
722 // Before giving up, see if we can prove the pointer non-null local to
723 // this particular block.
724 PointerType *PTy = dyn_cast<PointerType>(Val->getType());
725 if (PTy &&
726 (isKnownNonZero(Val, DL) ||
727 (isObjectDereferencedInBlock(Val, BB) &&
728 !NullPointerIsDefined(BB->getParent(), PTy->getAddressSpace())))) {
729 Result = ValueLatticeElement::getNot(ConstantPointerNull::get(PTy));
730 } else {
731 Result = ValueLatticeElement::getOverdefined();
733 BBLV = Result;
734 return true;
737 // Loop over all of our predecessors, merging what we know from them into
738 // result. If we encounter an unexplored predecessor, we eagerly explore it
739 // in a depth first manner. In practice, this has the effect of discovering
740 // paths we can't analyze eagerly without spending compile times analyzing
741 // other paths. This heuristic benefits from the fact that predecessors are
742 // frequently arranged such that dominating ones come first and we quickly
743 // find a path to function entry. TODO: We should consider explicitly
744 // canonicalizing to make this true rather than relying on this happy
745 // accident.
746 for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
747 ValueLatticeElement EdgeResult;
748 if (!getEdgeValue(Val, *PI, BB, EdgeResult))
749 // Explore that input, then return here
750 return false;
752 Result.mergeIn(EdgeResult, DL);
754 // If we hit overdefined, exit early. The BlockVals entry is already set
755 // to overdefined.
756 if (Result.isOverdefined()) {
757 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
758 << "' - overdefined because of pred (non local).\n");
759 // Before giving up, see if we can prove the pointer non-null local to
760 // this particular block.
761 PointerType *PTy = dyn_cast<PointerType>(Val->getType());
762 if (PTy && isObjectDereferencedInBlock(Val, BB) &&
763 !NullPointerIsDefined(BB->getParent(), PTy->getAddressSpace())) {
764 Result = ValueLatticeElement::getNot(ConstantPointerNull::get(PTy));
767 BBLV = Result;
768 return true;
772 // Return the merged value, which is more precise than 'overdefined'.
773 assert(!Result.isOverdefined());
774 BBLV = Result;
775 return true;
778 bool LazyValueInfoImpl::solveBlockValuePHINode(ValueLatticeElement &BBLV,
779 PHINode *PN, BasicBlock *BB) {
780 ValueLatticeElement Result; // Start Undefined.
782 // Loop over all of our predecessors, merging what we know from them into
783 // result. See the comment about the chosen traversal order in
784 // solveBlockValueNonLocal; the same reasoning applies here.
785 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
786 BasicBlock *PhiBB = PN->getIncomingBlock(i);
787 Value *PhiVal = PN->getIncomingValue(i);
788 ValueLatticeElement EdgeResult;
789 // Note that we can provide PN as the context value to getEdgeValue, even
790 // though the results will be cached, because PN is the value being used as
791 // the cache key in the caller.
792 if (!getEdgeValue(PhiVal, PhiBB, BB, EdgeResult, PN))
793 // Explore that input, then return here
794 return false;
796 Result.mergeIn(EdgeResult, DL);
798 // If we hit overdefined, exit early. The BlockVals entry is already set
799 // to overdefined.
800 if (Result.isOverdefined()) {
801 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
802 << "' - overdefined because of pred (local).\n");
804 BBLV = Result;
805 return true;
809 // Return the merged value, which is more precise than 'overdefined'.
810 assert(!Result.isOverdefined() && "Possible PHI in entry block?");
811 BBLV = Result;
812 return true;
815 static ValueLatticeElement getValueFromCondition(Value *Val, Value *Cond,
816 bool isTrueDest = true);
818 // If we can determine a constraint on the value given conditions assumed by
819 // the program, intersect those constraints with BBLV
820 void LazyValueInfoImpl::intersectAssumeOrGuardBlockValueConstantRange(
821 Value *Val, ValueLatticeElement &BBLV, Instruction *BBI) {
822 BBI = BBI ? BBI : dyn_cast<Instruction>(Val);
823 if (!BBI)
824 return;
826 for (auto &AssumeVH : AC->assumptionsFor(Val)) {
827 if (!AssumeVH)
828 continue;
829 auto *I = cast<CallInst>(AssumeVH);
830 if (!isValidAssumeForContext(I, BBI, DT))
831 continue;
833 BBLV = intersect(BBLV, getValueFromCondition(Val, I->getArgOperand(0)));
836 // If guards are not used in the module, don't spend time looking for them
837 auto *GuardDecl = BBI->getModule()->getFunction(
838 Intrinsic::getName(Intrinsic::experimental_guard));
839 if (!GuardDecl || GuardDecl->use_empty())
840 return;
842 if (BBI->getIterator() == BBI->getParent()->begin())
843 return;
844 for (Instruction &I : make_range(std::next(BBI->getIterator().getReverse()),
845 BBI->getParent()->rend())) {
846 Value *Cond = nullptr;
847 if (match(&I, m_Intrinsic<Intrinsic::experimental_guard>(m_Value(Cond))))
848 BBLV = intersect(BBLV, getValueFromCondition(Val, Cond));
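// As a hypothetical example of the assume handling above, given
//   %c = icmp ult i32 %x, 100
//   call void @llvm.assume(i1 %c)
// any block value computed for %x at a context instruction for which the
// assume is valid is intersected with constantrange<0, 100>.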
852 bool LazyValueInfoImpl::solveBlockValueSelect(ValueLatticeElement &BBLV,
853 SelectInst *SI, BasicBlock *BB) {
855 // Recurse on our inputs if needed
856 if (!hasBlockValue(SI->getTrueValue(), BB)) {
857 if (pushBlockValue(std::make_pair(BB, SI->getTrueValue())))
858 return false;
859 BBLV = ValueLatticeElement::getOverdefined();
860 return true;
862 ValueLatticeElement TrueVal = getBlockValue(SI->getTrueValue(), BB);
863 // If we hit overdefined, don't ask more queries. We want to avoid poisoning
864 // extra slots in the table if we can.
865 if (TrueVal.isOverdefined()) {
866 BBLV = ValueLatticeElement::getOverdefined();
867 return true;
870 if (!hasBlockValue(SI->getFalseValue(), BB)) {
871 if (pushBlockValue(std::make_pair(BB, SI->getFalseValue())))
872 return false;
873 BBLV = ValueLatticeElement::getOverdefined();
874 return true;
876 ValueLatticeElement FalseVal = getBlockValue(SI->getFalseValue(), BB);
877 // If we hit overdefined, don't ask more queries. We want to avoid poisoning
878 // extra slots in the table if we can.
879 if (FalseVal.isOverdefined()) {
880 BBLV = ValueLatticeElement::getOverdefined();
881 return true;
884 if (TrueVal.isConstantRange() && FalseVal.isConstantRange()) {
885 const ConstantRange &TrueCR = TrueVal.getConstantRange();
886 const ConstantRange &FalseCR = FalseVal.getConstantRange();
887 Value *LHS = nullptr;
888 Value *RHS = nullptr;
889 SelectPatternResult SPR = matchSelectPattern(SI, LHS, RHS);
890 // Is this a min specifically of our two inputs? (Avoid the risk of
891 // ValueTracking getting smarter looking back past our immediate inputs.)
892 if (SelectPatternResult::isMinOrMax(SPR.Flavor) &&
893 LHS == SI->getTrueValue() && RHS == SI->getFalseValue()) {
894 ConstantRange ResultCR = [&]() {
895 switch (SPR.Flavor) {
896 default:
897 llvm_unreachable("unexpected minmax type!");
898 case SPF_SMIN: /// Signed minimum
899 return TrueCR.smin(FalseCR);
900 case SPF_UMIN: /// Unsigned minimum
901 return TrueCR.umin(FalseCR);
902 case SPF_SMAX: /// Signed maximum
903 return TrueCR.smax(FalseCR);
904 case SPF_UMAX: /// Unsigned maximum
905 return TrueCR.umax(FalseCR);
907 }();
908 BBLV = ValueLatticeElement::getRange(ResultCR);
909 return true;
912 if (SPR.Flavor == SPF_ABS) {
913 if (LHS == SI->getTrueValue()) {
914 BBLV = ValueLatticeElement::getRange(TrueCR.abs());
915 return true;
917 if (LHS == SI->getFalseValue()) {
918 BBLV = ValueLatticeElement::getRange(FalseCR.abs());
919 return true;
923 if (SPR.Flavor == SPF_NABS) {
924 ConstantRange Zero(APInt::getNullValue(TrueCR.getBitWidth()));
925 if (LHS == SI->getTrueValue()) {
926 BBLV = ValueLatticeElement::getRange(Zero.sub(TrueCR.abs()));
927 return true;
929 if (LHS == SI->getFalseValue()) {
930 BBLV = ValueLatticeElement::getRange(Zero.sub(FalseCR.abs()));
931 return true;
936 // Can we constrain the facts about the true and false values by using the
937 // condition itself? This shows up with idioms like e.g. select(a > 5, a, 5).
938 // TODO: We could potentially refine an overdefined true value above.
939 Value *Cond = SI->getCondition();
940 TrueVal = intersect(TrueVal,
941 getValueFromCondition(SI->getTrueValue(), Cond, true));
942 FalseVal = intersect(FalseVal,
943 getValueFromCondition(SI->getFalseValue(), Cond, false));
945 // Handle clamp idioms such as:
946 // %24 = constantrange<0, 17>
947 // %39 = icmp eq i32 %24, 0
948 // %40 = add i32 %24, -1
949 // %siv.next = select i1 %39, i32 16, i32 %40
950 // %siv.next = constantrange<0, 17> not <-1, 17>
951 // In general, this can handle any clamp idiom which tests the edge
952 // condition via an equality or inequality.
953 if (auto *ICI = dyn_cast<ICmpInst>(Cond)) {
954 ICmpInst::Predicate Pred = ICI->getPredicate();
955 Value *A = ICI->getOperand(0);
956 if (ConstantInt *CIBase = dyn_cast<ConstantInt>(ICI->getOperand(1))) {
957 auto addConstants = [](ConstantInt *A, ConstantInt *B) {
958 assert(A->getType() == B->getType());
959 return ConstantInt::get(A->getType(), A->getValue() + B->getValue());
961 // See if either input is A + C2, subject to the constraint from the
962 // condition that A != C when that input is used. We can assume that
963 // that input doesn't include C + C2.
964 ConstantInt *CIAdded;
965 switch (Pred) {
966 default: break;
967 case ICmpInst::ICMP_EQ:
968 if (match(SI->getFalseValue(), m_Add(m_Specific(A),
969 m_ConstantInt(CIAdded)))) {
970 auto ResNot = addConstants(CIBase, CIAdded);
971 FalseVal = intersect(FalseVal,
972 ValueLatticeElement::getNot(ResNot));
974 break;
975 case ICmpInst::ICMP_NE:
976 if (match(SI->getTrueValue(), m_Add(m_Specific(A),
977 m_ConstantInt(CIAdded)))) {
978 auto ResNot = addConstants(CIBase, CIAdded);
979 TrueVal = intersect(TrueVal,
980 ValueLatticeElement::getNot(ResNot));
982 break;
987 ValueLatticeElement Result; // Start Undefined.
988 Result.mergeIn(TrueVal, DL);
989 Result.mergeIn(FalseVal, DL);
990 BBLV = Result;
991 return true;
994 Optional<ConstantRange> LazyValueInfoImpl::getRangeForOperand(unsigned Op,
995 Instruction *I,
996 BasicBlock *BB) {
997 if (!hasBlockValue(I->getOperand(Op), BB))
998 if (pushBlockValue(std::make_pair(BB, I->getOperand(Op))))
999 return None;
1001 const unsigned OperandBitWidth =
1002 DL.getTypeSizeInBits(I->getOperand(Op)->getType());
1003 ConstantRange Range = ConstantRange::getFull(OperandBitWidth);
1004 if (hasBlockValue(I->getOperand(Op), BB)) {
1005 ValueLatticeElement Val = getBlockValue(I->getOperand(Op), BB);
1006 intersectAssumeOrGuardBlockValueConstantRange(I->getOperand(Op), Val, I);
1007 if (Val.isConstantRange())
1008 Range = Val.getConstantRange();
1010 return Range;
1013 bool LazyValueInfoImpl::solveBlockValueCast(ValueLatticeElement &BBLV,
1014 CastInst *CI,
1015 BasicBlock *BB) {
1016 if (!CI->getOperand(0)->getType()->isSized()) {
1017 // Without knowing how wide the input is, we can't analyze it in any useful
1018 // way.
1019 BBLV = ValueLatticeElement::getOverdefined();
1020 return true;
1023 // Filter out casts we don't know how to reason about before attempting to
1024 // recurse on our operand. This can cut a long search short if we know we're
1025 // not going to be able to get any useful information anyway.
1026 switch (CI->getOpcode()) {
1027 case Instruction::Trunc:
1028 case Instruction::SExt:
1029 case Instruction::ZExt:
1030 case Instruction::BitCast:
1031 break;
1032 default:
1033 // Unhandled instructions are overdefined.
1034 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
1035 << "' - overdefined (unknown cast).\n");
1036 BBLV = ValueLatticeElement::getOverdefined();
1037 return true;
1040 // Figure out the range of the LHS. If that fails, we still apply the
1041 // transfer rule on the full set since we may be able to locally infer
1042 // interesting facts.
1043 Optional<ConstantRange> LHSRes = getRangeForOperand(0, CI, BB);
1044 if (!LHSRes.hasValue())
1045 // More work to do before applying this transfer rule.
1046 return false;
1047 ConstantRange LHSRange = LHSRes.getValue();
1049 const unsigned ResultBitWidth = CI->getType()->getIntegerBitWidth();
1051 // NOTE: We're currently limited by the set of operations that ConstantRange
1052 // can evaluate symbolically. Enhancing that set will allow us to analyze
1053 // more definitions.
1054 BBLV = ValueLatticeElement::getRange(LHSRange.castOp(CI->getOpcode(),
1055 ResultBitWidth));
1056 return true;
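// For instance, if the operand of "%e = zext i8 %x to i32" is known to lie in
// the 8-bit range [0, 10), castOp(Instruction::ZExt, 32) produces the 32-bit
// range [0, 10) for %e, while a trunc of a range that does not fit may come
// back as the full set. (Hypothetical values, for illustration only.)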
1059 bool LazyValueInfoImpl::solveBlockValueBinaryOpImpl(
1060 ValueLatticeElement &BBLV, Instruction *I, BasicBlock *BB,
1061 std::function<ConstantRange(const ConstantRange &,
1062 const ConstantRange &)> OpFn) {
1063 // Figure out the ranges of the operands. If that fails, use a
1064 // conservative range, but apply the transfer rule anyways. This
1065 // lets us pick up facts from expressions like "and i32 (call i32
1066 // @foo()), 32"
1067 Optional<ConstantRange> LHSRes = getRangeForOperand(0, I, BB);
1068 Optional<ConstantRange> RHSRes = getRangeForOperand(1, I, BB);
1069 if (!LHSRes.hasValue() || !RHSRes.hasValue())
1070 // More work to do before applying this transfer rule.
1071 return false;
1073 ConstantRange LHSRange = LHSRes.getValue();
1074 ConstantRange RHSRange = RHSRes.getValue();
1075 BBLV = ValueLatticeElement::getRange(OpFn(LHSRange, RHSRange));
1076 return true;
1079 bool LazyValueInfoImpl::solveBlockValueBinaryOp(ValueLatticeElement &BBLV,
1080 BinaryOperator *BO,
1081 BasicBlock *BB) {
1083 assert(BO->getOperand(0)->getType()->isSized() &&
1084 "all operands to binary operators are sized");
1085 if (BO->getOpcode() == Instruction::Xor) {
1086 // Xor is the only operation not supported by ConstantRange::binaryOp().
1087 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
1088 << "' - overdefined (unknown binary operator).\n");
1089 BBLV = ValueLatticeElement::getOverdefined();
1090 return true;
1093 return solveBlockValueBinaryOpImpl(BBLV, BO, BB,
1094 [BO](const ConstantRange &CR1, const ConstantRange &CR2) {
1095 return CR1.binaryOp(BO->getOpcode(), CR2);
1099 bool LazyValueInfoImpl::solveBlockValueOverflowIntrinsic(
1100 ValueLatticeElement &BBLV, WithOverflowInst *WO, BasicBlock *BB) {
1101 return solveBlockValueBinaryOpImpl(BBLV, WO, BB,
1102 [WO](const ConstantRange &CR1, const ConstantRange &CR2) {
1103 return CR1.binaryOp(WO->getBinaryOp(), CR2);
1107 bool LazyValueInfoImpl::solveBlockValueIntrinsic(
1108 ValueLatticeElement &BBLV, IntrinsicInst *II, BasicBlock *BB) {
1109 switch (II->getIntrinsicID()) {
1110 case Intrinsic::uadd_sat:
1111 return solveBlockValueBinaryOpImpl(BBLV, II, BB,
1112 [](const ConstantRange &CR1, const ConstantRange &CR2) {
1113 return CR1.uadd_sat(CR2);
1115 case Intrinsic::usub_sat:
1116 return solveBlockValueBinaryOpImpl(BBLV, II, BB,
1117 [](const ConstantRange &CR1, const ConstantRange &CR2) {
1118 return CR1.usub_sat(CR2);
1120 case Intrinsic::sadd_sat:
1121 return solveBlockValueBinaryOpImpl(BBLV, II, BB,
1122 [](const ConstantRange &CR1, const ConstantRange &CR2) {
1123 return CR1.sadd_sat(CR2);
1125 case Intrinsic::ssub_sat:
1126 return solveBlockValueBinaryOpImpl(BBLV, II, BB,
1127 [](const ConstantRange &CR1, const ConstantRange &CR2) {
1128 return CR1.ssub_sat(CR2);
1130 default:
1131 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
1132 << "' - overdefined (unknown intrinsic).\n");
1133 BBLV = ValueLatticeElement::getOverdefined();
1134 return true;
1138 bool LazyValueInfoImpl::solveBlockValueExtractValue(
1139 ValueLatticeElement &BBLV, ExtractValueInst *EVI, BasicBlock *BB) {
1140 if (auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand()))
1141 if (EVI->getNumIndices() == 1 && *EVI->idx_begin() == 0)
1142 return solveBlockValueOverflowIntrinsic(BBLV, WO, BB);
1144 LLVM_DEBUG(dbgs() << " compute BB '" << BB->getName()
1145 << "' - overdefined (unknown extractvalue).\n");
1146 BBLV = ValueLatticeElement::getOverdefined();
1147 return true;
1150 static ValueLatticeElement getValueFromICmpCondition(Value *Val, ICmpInst *ICI,
1151 bool isTrueDest) {
1152 Value *LHS = ICI->getOperand(0);
1153 Value *RHS = ICI->getOperand(1);
1154 CmpInst::Predicate Predicate = ICI->getPredicate();
1156 if (isa<Constant>(RHS)) {
1157 if (ICI->isEquality() && LHS == Val) {
1158 // We know that V has the RHS constant if this is a true SETEQ or
1159 // false SETNE.
1160 if (isTrueDest == (Predicate == ICmpInst::ICMP_EQ))
1161 return ValueLatticeElement::get(cast<Constant>(RHS));
1162 else
1163 return ValueLatticeElement::getNot(cast<Constant>(RHS));
1167 if (!Val->getType()->isIntegerTy())
1168 return ValueLatticeElement::getOverdefined();
1170 // Use ConstantRange::makeAllowedICmpRegion in order to determine the possible
1171 // range of Val guaranteed by the condition. Recognize comparisons in the form
1172 // of:
1173 // icmp <pred> Val, ...
1174 // icmp <pred> (add Val, Offset), ...
1175 // The latter is the range checking idiom that InstCombine produces. Subtract
1176 // the offset from the allowed range for RHS in this case.
1178 // Val or (add Val, Offset) can be on either side of the comparison
1179 if (LHS != Val && !match(LHS, m_Add(m_Specific(Val), m_ConstantInt()))) {
1180 std::swap(LHS, RHS);
1181 Predicate = CmpInst::getSwappedPredicate(Predicate);
1184 ConstantInt *Offset = nullptr;
1185 if (LHS != Val)
1186 match(LHS, m_Add(m_Specific(Val), m_ConstantInt(Offset)));
1188 if (LHS == Val || Offset) {
1189 // Calculate the range of values that are allowed by the comparison
1190 ConstantRange RHSRange(RHS->getType()->getIntegerBitWidth(),
1191 /*isFullSet=*/true);
1192 if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS))
1193 RHSRange = ConstantRange(CI->getValue());
1194 else if (Instruction *I = dyn_cast<Instruction>(RHS))
1195 if (auto *Ranges = I->getMetadata(LLVMContext::MD_range))
1196 RHSRange = getConstantRangeFromMetadata(*Ranges);
1198 // If we're interested in the false dest, invert the condition
1199 CmpInst::Predicate Pred =
1200 isTrueDest ? Predicate : CmpInst::getInversePredicate(Predicate);
1201 ConstantRange TrueValues =
1202 ConstantRange::makeAllowedICmpRegion(Pred, RHSRange);
1204 if (Offset) // Apply the offset from above.
1205 TrueValues = TrueValues.subtract(Offset->getValue());
1207 return ValueLatticeElement::getRange(std::move(TrueValues));
1210 return ValueLatticeElement::getOverdefined();
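// Example (hypothetical IR) of the offset idiom handled above:
//   %a = add i32 %x, 5
//   %c = icmp ult i32 %a, 10
//   br i1 %c, label %then, label %else
// On the edge to %then, makeAllowedICmpRegion(ult, {10}) gives %a in [0, 10),
// and subtracting the offset 5 constrains %x to the wrapped range [-5, 5).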
1213 // Handle conditions of the form
1214 // extractvalue(op.with.overflow(%x, C), 1).
1215 static ValueLatticeElement getValueFromOverflowCondition(
1216 Value *Val, WithOverflowInst *WO, bool IsTrueDest) {
1217 // TODO: This only works with a constant RHS for now. We could also compute
1218 // the range of the RHS, but this doesn't fit into the current structure of
1219 // the edge value calculation.
1220 const APInt *C;
1221 if (WO->getLHS() != Val || !match(WO->getRHS(), m_APInt(C)))
1222 return ValueLatticeElement::getOverdefined();
1224 // Calculate the possible values of %x for which no overflow occurs.
1225 ConstantRange NWR = ConstantRange::makeExactNoWrapRegion(
1226 WO->getBinaryOp(), *C, WO->getNoWrapKind());
1228 // If overflow is false, %x is constrained to NWR. If overflow is true, %x is
1229 // constrained to its inverse (all values that might cause overflow).
1230 if (IsTrueDest)
1231 NWR = NWR.inverse();
1232 return ValueLatticeElement::getRange(NWR);
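// Worked example (hypothetical IR):
//   %s = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 %x, i8 10)
//   %ov = extractvalue { i8, i1 } %s, 1
// The exact no-wrap region for "add i8 %x, 10" with no unsigned wrap is
// [0, 246). Where %ov is known false, %x is constrained to that range; where
// %ov is known true, %x is constrained to its inverse, [246, 256).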
1235 static ValueLatticeElement
1236 getValueFromCondition(Value *Val, Value *Cond, bool isTrueDest,
1237 DenseMap<Value*, ValueLatticeElement> &Visited);
1239 static ValueLatticeElement
1240 getValueFromConditionImpl(Value *Val, Value *Cond, bool isTrueDest,
1241 DenseMap<Value*, ValueLatticeElement> &Visited) {
1242 if (ICmpInst *ICI = dyn_cast<ICmpInst>(Cond))
1243 return getValueFromICmpCondition(Val, ICI, isTrueDest);
1245 if (auto *EVI = dyn_cast<ExtractValueInst>(Cond))
1246 if (auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand()))
1247 if (EVI->getNumIndices() == 1 && *EVI->idx_begin() == 1)
1248 return getValueFromOverflowCondition(Val, WO, isTrueDest);
1250 // Handle conditions in the form of (cond1 && cond2); we know that on the
1251 // true dest path both of the conditions hold. Similarly for conditions of
1252 // the form (cond1 || cond2), we know that on the false dest path neither
1253 // condition holds.
1254 BinaryOperator *BO = dyn_cast<BinaryOperator>(Cond);
1255 if (!BO || (isTrueDest && BO->getOpcode() != BinaryOperator::And) ||
1256 (!isTrueDest && BO->getOpcode() != BinaryOperator::Or))
1257 return ValueLatticeElement::getOverdefined();
1259 // Prevent infinite recursion if Cond references itself as in this example:
1260 // Cond: "%tmp4 = and i1 %tmp4, undef"
1261 // BL: "%tmp4 = and i1 %tmp4, undef"
1262 // BR: "i1 undef"
1263 Value *BL = BO->getOperand(0);
1264 Value *BR = BO->getOperand(1);
1265 if (BL == Cond || BR == Cond)
1266 return ValueLatticeElement::getOverdefined();
1268 return intersect(getValueFromCondition(Val, BL, isTrueDest, Visited),
1269 getValueFromCondition(Val, BR, isTrueDest, Visited));
1272 static ValueLatticeElement
1273 getValueFromCondition(Value *Val, Value *Cond, bool isTrueDest,
1274 DenseMap<Value*, ValueLatticeElement> &Visited) {
1275 auto I = Visited.find(Cond);
1276 if (I != Visited.end())
1277 return I->second;
1279 auto Result = getValueFromConditionImpl(Val, Cond, isTrueDest, Visited);
1280 Visited[Cond] = Result;
1281 return Result;
1284 ValueLatticeElement getValueFromCondition(Value *Val, Value *Cond,
1285 bool isTrueDest) {
1286 assert(Cond && "precondition");
1287 DenseMap<Value*, ValueLatticeElement> Visited;
1288 return getValueFromCondition(Val, Cond, isTrueDest, Visited);
1291 // Return true if Usr has Op as an operand, otherwise false.
1292 static bool usesOperand(User *Usr, Value *Op) {
1293 return find(Usr->operands(), Op) != Usr->op_end();
1296 // Return true if the instruction type of Val is supported by
1297 // constantFoldUser(). Currently CastInst and BinaryOperator only. Call this
1298 // before calling constantFoldUser() to find out if it's even worth attempting
1299 // to call it.
1300 static bool isOperationFoldable(User *Usr) {
1301 return isa<CastInst>(Usr) || isa<BinaryOperator>(Usr);
1304 // Check if Usr can be simplified to an integer constant when the value of one
1305 // of its operands Op is an integer constant OpConstVal. If so, return it as a
1306 // lattice value range with a single element; otherwise return an overdefined
1307 // lattice value.
1308 static ValueLatticeElement constantFoldUser(User *Usr, Value *Op,
1309 const APInt &OpConstVal,
1310 const DataLayout &DL) {
1311 assert(isOperationFoldable(Usr) && "Precondition");
1312 Constant* OpConst = Constant::getIntegerValue(Op->getType(), OpConstVal);
1313 // Check if Usr can be simplified to a constant.
1314 if (auto *CI = dyn_cast<CastInst>(Usr)) {
1315 assert(CI->getOperand(0) == Op && "Operand 0 isn't Op");
1316 if (auto *C = dyn_cast_or_null<ConstantInt>(
1317 SimplifyCastInst(CI->getOpcode(), OpConst,
1318 CI->getDestTy(), DL))) {
1319 return ValueLatticeElement::getRange(ConstantRange(C->getValue()));
1321 } else if (auto *BO = dyn_cast<BinaryOperator>(Usr)) {
1322 bool Op0Match = BO->getOperand(0) == Op;
1323 bool Op1Match = BO->getOperand(1) == Op;
1324 assert((Op0Match || Op1Match) &&
1325 "Operand 0 nor Operand 1 isn't a match");
1326 Value *LHS = Op0Match ? OpConst : BO->getOperand(0);
1327 Value *RHS = Op1Match ? OpConst : BO->getOperand(1);
1328 if (auto *C = dyn_cast_or_null<ConstantInt>(
1329 SimplifyBinOp(BO->getOpcode(), LHS, RHS, DL))) {
1330 return ValueLatticeElement::getRange(ConstantRange(C->getValue()));
1333 return ValueLatticeElement::getOverdefined();
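// For instance (hypothetical IR), with Op = %x and OpConstVal = 93,
//   %v = add i8 %x, 1
// simplifies to the integer constant 94, which is returned as the
// single-element range constantrange<94, 95>.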
1336 /// Compute the value of Val on the edge BBFrom -> BBTo. Returns false if
1337 /// Val is not constrained on the edge. Result is unspecified if return value
1338 /// is false.
1339 static bool getEdgeValueLocal(Value *Val, BasicBlock *BBFrom,
1340 BasicBlock *BBTo, ValueLatticeElement &Result) {
1341 // TODO: Handle more complex conditionals. If (v == 0 || v2 < 1) is false, we
1342 // know that v != 0.
1343 if (BranchInst *BI = dyn_cast<BranchInst>(BBFrom->getTerminator())) {
1344 // If this is a conditional branch and only one successor goes to BBTo, then
1345 // we may be able to infer something from the condition.
1346 if (BI->isConditional() &&
1347 BI->getSuccessor(0) != BI->getSuccessor(1)) {
1348 bool isTrueDest = BI->getSuccessor(0) == BBTo;
1349 assert(BI->getSuccessor(!isTrueDest) == BBTo &&
1350 "BBTo isn't a successor of BBFrom");
1351 Value *Condition = BI->getCondition();
1353 // If V is the condition of the branch itself, then we know exactly what
1354 // it is.
1355 if (Condition == Val) {
1356 Result = ValueLatticeElement::get(ConstantInt::get(
1357 Type::getInt1Ty(Val->getContext()), isTrueDest));
1358 return true;
1361 // If the condition of the branch is an equality comparison, we may be
1362 // able to infer the value.
1363 Result = getValueFromCondition(Val, Condition, isTrueDest);
1364 if (!Result.isOverdefined())
1365 return true;
1367 if (User *Usr = dyn_cast<User>(Val)) {
1368 assert(Result.isOverdefined() && "Result isn't overdefined");
1369 // Check with isOperationFoldable() first to avoid linearly iterating
1370 // over the operands unnecessarily which can be expensive for
1371 // instructions with many operands.
1372 if (isa<IntegerType>(Usr->getType()) && isOperationFoldable(Usr)) {
1373 const DataLayout &DL = BBTo->getModule()->getDataLayout();
1374 if (usesOperand(Usr, Condition)) {
1375 // If Val has Condition as an operand and Val can be folded into a
1376 // constant with either Condition == true or Condition == false,
1377 // propagate the constant.
1378 // eg.
1379 // ; %Val is true on the edge to %then.
1380 // %Val = and i1 %Condition, true.
1381 // br %Condition, label %then, label %else
1382 APInt ConditionVal(1, isTrueDest ? 1 : 0);
1383 Result = constantFoldUser(Usr, Condition, ConditionVal, DL);
1384 } else {
1385 // If one of Val's operand has an inferred value, we may be able to
1386 // infer the value of Val.
1387 // eg.
1388 // ; %Val is 94 on the edge to %then.
1389 // %Val = add i8 %Op, 1
1390 // %Condition = icmp eq i8 %Op, 93
1391 // br i1 %Condition, label %then, label %else
1392 for (unsigned i = 0; i < Usr->getNumOperands(); ++i) {
1393 Value *Op = Usr->getOperand(i);
1394 ValueLatticeElement OpLatticeVal =
1395 getValueFromCondition(Op, Condition, isTrueDest);
1396 if (Optional<APInt> OpConst = OpLatticeVal.asConstantInteger()) {
1397 Result = constantFoldUser(Usr, Op, OpConst.getValue(), DL);
1398 break;
1404 if (!Result.isOverdefined())
1405 return true;
1409 // If the edge was formed by a switch on the value, then we may know exactly
1410 // what it is.
1411 if (SwitchInst *SI = dyn_cast<SwitchInst>(BBFrom->getTerminator())) {
1412 Value *Condition = SI->getCondition();
1413 if (!isa<IntegerType>(Val->getType()))
1414 return false;
1415 bool ValUsesConditionAndMayBeFoldable = false;
1416 if (Condition != Val) {
1417 // Check if Val has Condition as an operand.
1418 if (User *Usr = dyn_cast<User>(Val))
1419 ValUsesConditionAndMayBeFoldable = isOperationFoldable(Usr) &&
1420 usesOperand(Usr, Condition);
1421 if (!ValUsesConditionAndMayBeFoldable)
1422 return false;
1424 assert((Condition == Val || ValUsesConditionAndMayBeFoldable) &&
1425 "Condition != Val nor Val doesn't use Condition");
1427 bool DefaultCase = SI->getDefaultDest() == BBTo;
1428 unsigned BitWidth = Val->getType()->getIntegerBitWidth();
1429 ConstantRange EdgesVals(BitWidth, DefaultCase/*isFullSet*/);
1431 for (auto Case : SI->cases()) {
1432 APInt CaseValue = Case.getCaseValue()->getValue();
1433 ConstantRange EdgeVal(CaseValue);
1434 if (ValUsesConditionAndMayBeFoldable) {
1435 User *Usr = cast<User>(Val);
1436 const DataLayout &DL = BBTo->getModule()->getDataLayout();
1437 ValueLatticeElement EdgeLatticeVal =
1438 constantFoldUser(Usr, Condition, CaseValue, DL);
1439 if (EdgeLatticeVal.isOverdefined())
1440 return false;
1441 EdgeVal = EdgeLatticeVal.getConstantRange();
1443 if (DefaultCase) {
1444 // It is possible that the default destination is the destination of
1445 // some cases. We cannot perform difference for those cases.
1446 // We know Condition != CaseValue in BBTo. In some cases we can use
1447 // this to infer Val == f(Condition) is != f(CaseValue). For now, we
1448 // only do this when f is identity (i.e. Val == Condition), but we
1449 // should be able to do this for any injective f.
1450 if (Case.getCaseSuccessor() != BBTo && Condition == Val)
1451 EdgesVals = EdgesVals.difference(EdgeVal);
1452 } else if (Case.getCaseSuccessor() == BBTo)
1453 EdgesVals = EdgesVals.unionWith(EdgeVal);
1455 Result = ValueLatticeElement::getRange(std::move(EdgesVals));
1456 return true;
1458 return false;
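// A hypothetical switch example for the case handling above:
//   switch i32 %x, label %default [ i32 1, label %a
//                                   i32 2, label %a ]
// On the edge to %a, %x is constrained to the union of the case values,
// [1, 3); on the edge to %default both cases are subtracted from the full
// set, leaving the wrapped range [3, 1).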
1461 /// Compute the value of Val on the edge BBFrom -> BBTo or the value at
1462 /// the basic block if the edge does not constrain Val.
1463 bool LazyValueInfoImpl::getEdgeValue(Value *Val, BasicBlock *BBFrom,
1464 BasicBlock *BBTo,
1465 ValueLatticeElement &Result,
1466 Instruction *CxtI) {
1467 // If already a constant, there is nothing to compute.
1468 if (Constant *VC = dyn_cast<Constant>(Val)) {
1469 Result = ValueLatticeElement::get(VC);
1470 return true;
1473 ValueLatticeElement LocalResult;
1474 if (!getEdgeValueLocal(Val, BBFrom, BBTo, LocalResult))
1475 // If we couldn't constrain the value on the edge, LocalResult doesn't
1476 // provide any information.
1477 LocalResult = ValueLatticeElement::getOverdefined();
1479 if (hasSingleValue(LocalResult)) {
1480 // Can't get any more precise here
1481 Result = LocalResult;
1482 return true;
1485 if (!hasBlockValue(Val, BBFrom)) {
1486 if (pushBlockValue(std::make_pair(BBFrom, Val)))
1487 return false;
1488 // No new information.
1489 Result = LocalResult;
1490 return true;
1493 // Try to intersect ranges of the BB and the constraint on the edge.
1494 ValueLatticeElement InBlock = getBlockValue(Val, BBFrom);
1495 intersectAssumeOrGuardBlockValueConstantRange(Val, InBlock,
1496 BBFrom->getTerminator());
1497 // We can use the context instruction (generically the ultimate instruction
1498 // the calling pass is trying to simplify) here, even though the result of
1499 // this function is generally cached when called from the solve* functions
1500 // (and that cached result might be used with queries using a different
1501 // context instruction), because when this function is called from the solve*
1502 // functions, the context instruction is not provided. When called from
1503 // LazyValueInfoImpl::getValueOnEdge, the context instruction is provided,
1504 // but then the result is not cached.
1505 intersectAssumeOrGuardBlockValueConstantRange(Val, InBlock, CxtI);
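// Finally, combine what the edge itself implies with what we already know
// about Val at the end of BBFrom.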
1507 Result = intersect(LocalResult, InBlock);
1508 return true;
1509 }
1511 ValueLatticeElement LazyValueInfoImpl::getValueInBlock(Value *V, BasicBlock *BB,
1512 Instruction *CxtI) {
1513 LLVM_DEBUG(dbgs() << "LVI Getting block end value " << *V << " at '"
1514 << BB->getName() << "'\n");
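// This is an entry point, so no recursive query may be in flight; anything
// pushed below is drained by solve().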
1516 assert(BlockValueStack.empty() && BlockValueSet.empty());
1517 if (!hasBlockValue(V, BB)) {
1518 pushBlockValue(std::make_pair(BB, V));
1519 solve();
1520 }
1521 ValueLatticeElement Result = getBlockValue(V, BB);
1522 intersectAssumeOrGuardBlockValueConstantRange(V, Result, CxtI);
1524 LLVM_DEBUG(dbgs() << " Result = " << Result << "\n");
1525 return Result;
1526 }
1528 ValueLatticeElement LazyValueInfoImpl::getValueAt(Value *V, Instruction *CxtI) {
1529 LLVM_DEBUG(dbgs() << "LVI Getting value " << *V << " at '" << CxtI->getName()
1530 << "'\n");
1532 if (auto *C = dyn_cast<Constant>(V))
1533 return ValueLatticeElement::get(C);
1535 ValueLatticeElement Result = ValueLatticeElement::getOverdefined();
1536 if (auto *I = dyn_cast<Instruction>(V))
1537 Result = getFromRangeMetadata(I);
1538 intersectAssumeOrGuardBlockValueConstantRange(V, Result, CxtI);
1540 LLVM_DEBUG(dbgs() << " Result = " << Result << "\n");
1541 return Result;
1542 }
1544 ValueLatticeElement LazyValueInfoImpl::
1545 getValueOnEdge(Value *V, BasicBlock *FromBB, BasicBlock *ToBB,
1546 Instruction *CxtI) {
1547 LLVM_DEBUG(dbgs() << "LVI Getting edge value " << *V << " from '"
1548 << FromBB->getName() << "' to '" << ToBB->getName()
1549 << "'\n");
1551 ValueLatticeElement Result;
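// getEdgeValue returns false when it had to queue block values that are not
// computed yet; solve() computes them, after which the retry below must succeed.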
1552 if (!getEdgeValue(V, FromBB, ToBB, Result, CxtI)) {
1553 solve();
1554 bool WasFastQuery = getEdgeValue(V, FromBB, ToBB, Result, CxtI);
1555 (void)WasFastQuery;
1556 assert(WasFastQuery && "More work to do after problem solved?");
1557 }
1559 LLVM_DEBUG(dbgs() << " Result = " << Result << "\n");
1560 return Result;
1561 }
1563 void LazyValueInfoImpl::threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc,
1564 BasicBlock *NewSucc) {
1565 TheCache.threadEdgeImpl(OldSucc, NewSucc);
1566 }
1568 //===----------------------------------------------------------------------===//
1569 // LazyValueInfo Impl
1570 //===----------------------------------------------------------------------===//
1572 /// This lazily constructs the LazyValueInfoImpl.
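/// The impl object is stored behind a void* inside LazyValueInfo so that the
/// public header does not need to know about LazyValueInfoImpl; it is
/// allocated here on first use.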
1573 static LazyValueInfoImpl &getImpl(void *&PImpl, AssumptionCache *AC,
1574 const DataLayout *DL,
1575 DominatorTree *DT = nullptr) {
1576 if (!PImpl) {
1577 assert(DL && "getImpl() called with a null DataLayout");
1578 PImpl = new LazyValueInfoImpl(AC, *DL, DT);
1579 }
1580 return *static_cast<LazyValueInfoImpl*>(PImpl);
1581 }
1583 bool LazyValueInfoWrapperPass::runOnFunction(Function &F) {
1584 Info.AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
1585 const DataLayout &DL = F.getParent()->getDataLayout();
1587 DominatorTreeWrapperPass *DTWP =
1588 getAnalysisIfAvailable<DominatorTreeWrapperPass>();
1589 Info.DT = DTWP ? &DTWP->getDomTree() : nullptr;
1590 Info.TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
1592 if (Info.PImpl)
1593 getImpl(Info.PImpl, Info.AC, &DL, Info.DT).clear();
1595 // Fully lazy.
1596 return false;
1597 }
1599 void LazyValueInfoWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
1600 AU.setPreservesAll();
1601 AU.addRequired<AssumptionCacheTracker>();
1602 AU.addRequired<TargetLibraryInfoWrapperPass>();
1603 }
1605 LazyValueInfo &LazyValueInfoWrapperPass::getLVI() { return Info; }
1607 LazyValueInfo::~LazyValueInfo() { releaseMemory(); }
1609 void LazyValueInfo::releaseMemory() {
1610 // If the cache was allocated, free it.
1611 if (PImpl) {
1612 delete &getImpl(PImpl, AC, nullptr);
1613 PImpl = nullptr;
1614 }
1615 }
1617 bool LazyValueInfo::invalidate(Function &F, const PreservedAnalyses &PA,
1618 FunctionAnalysisManager::Invalidator &Inv) {
1619 // We need to invalidate if we have either failed to preserve this analysis's
1620 // result directly or if any of its dependencies have been invalidated.
1621 auto PAC = PA.getChecker<LazyValueAnalysis>();
1622 if (!(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) ||
1623 (DT && Inv.invalidate<DominatorTreeAnalysis>(F, PA)))
1624 return true;
1626 return false;
1627 }
1629 void LazyValueInfoWrapperPass::releaseMemory() { Info.releaseMemory(); }
1631 LazyValueInfo LazyValueAnalysis::run(Function &F,
1632 FunctionAnalysisManager &FAM) {
1633 auto &AC = FAM.getResult<AssumptionAnalysis>(F);
1634 auto &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
1635 auto *DT = FAM.getCachedResult<DominatorTreeAnalysis>(F);
1637 return LazyValueInfo(&AC, &F.getParent()->getDataLayout(), &TLI, DT);
1638 }
1640 /// Returns true if we can statically tell that this value will never be a
1641 /// "useful" constant. In practice, this means we've got something like an
1642 /// alloca or a malloc call for which a comparison against a constant can
1643 /// only be guarding dead code. Note that we are potentially giving up some
1644 /// precision in dead code (a constant result) in favour of avoiding an
1645 /// expensive search for an easily answered common query.
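/// For example, an icmp between the address of a fresh alloca and a constant
/// pointer can only evaluate to a "useful" constant along dead paths.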
1646 static bool isKnownNonConstant(Value *V) {
1647 V = V->stripPointerCasts();
1648 // The return value of an alloca cannot be a Constant.
1649 if (isa<AllocaInst>(V))
1650 return true;
1651 return false;
1652 }
1654 Constant *LazyValueInfo::getConstant(Value *V, BasicBlock *BB,
1655 Instruction *CxtI) {
1656 // Bail out early if V is known not to be a Constant.
1657 if (isKnownNonConstant(V))
1658 return nullptr;
1660 const DataLayout &DL = BB->getModule()->getDataLayout();
1661 ValueLatticeElement Result =
1662 getImpl(PImpl, AC, &DL, DT).getValueInBlock(V, BB, CxtI);
1664 if (Result.isConstant())
1665 return Result.getConstant();
1666 if (Result.isConstantRange()) {
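// A range that has collapsed to a single element is as good as a constant.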
1667 const ConstantRange &CR = Result.getConstantRange();
1668 if (const APInt *SingleVal = CR.getSingleElement())
1669 return ConstantInt::get(V->getContext(), *SingleVal);
1670 }
1671 return nullptr;
1672 }
1674 ConstantRange LazyValueInfo::getConstantRange(Value *V, BasicBlock *BB,
1675 Instruction *CxtI) {
1676 assert(V->getType()->isIntegerTy());
1677 unsigned Width = V->getType()->getIntegerBitWidth();
1678 const DataLayout &DL = BB->getModule()->getDataLayout();
1679 ValueLatticeElement Result =
1680 getImpl(PImpl, AC, &DL, DT).getValueInBlock(V, BB, CxtI);
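// An undefined lattice value means no feasible value has been seen for V here
// (e.g. the block is unreachable), so the empty range is a valid result.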
1681 if (Result.isUndefined())
1682 return ConstantRange::getEmpty(Width);
1683 if (Result.isConstantRange())
1684 return Result.getConstantRange();
1685 // We represent ConstantInt constants as constant ranges, but other kinds
1686 // of integer constants (e.g. ConstantExpr) will be tagged as constants.
1687 assert(!(Result.isConstant() && isa<ConstantInt>(Result.getConstant())) &&
1688 "ConstantInt value must be represented as constantrange");
1689 return ConstantRange::getFull(Width);
1690 }
1692 /// Determine whether the specified value is known to be a
1693 /// constant on the specified edge. Return null if not.
1694 Constant *LazyValueInfo::getConstantOnEdge(Value *V, BasicBlock *FromBB,
1695 BasicBlock *ToBB,
1696 Instruction *CxtI) {
1697 const DataLayout &DL = FromBB->getModule()->getDataLayout();
1698 ValueLatticeElement Result =
1699 getImpl(PImpl, AC, &DL, DT).getValueOnEdge(V, FromBB, ToBB, CxtI);
1701 if (Result.isConstant())
1702 return Result.getConstant();
1703 if (Result.isConstantRange()) {
1704 const ConstantRange &CR = Result.getConstantRange();
1705 if (const APInt *SingleVal = CR.getSingleElement())
1706 return ConstantInt::get(V->getContext(), *SingleVal);
1707 }
1708 return nullptr;
1709 }
1711 ConstantRange LazyValueInfo::getConstantRangeOnEdge(Value *V,
1712 BasicBlock *FromBB,
1713 BasicBlock *ToBB,
1714 Instruction *CxtI) {
1715 unsigned Width = V->getType()->getIntegerBitWidth();
1716 const DataLayout &DL = FromBB->getModule()->getDataLayout();
1717 ValueLatticeElement Result =
1718 getImpl(PImpl, AC, &DL, DT).getValueOnEdge(V, FromBB, ToBB, CxtI);
1720 if (Result.isUndefined())
1721 return ConstantRange::getEmpty(Width);
1722 if (Result.isConstantRange())
1723 return Result.getConstantRange();
1724 // We represent ConstantInt constants as constant ranges, but other kinds
1725 // of integer constants (e.g. ConstantExpr) will be tagged as constants.
1726 assert(!(Result.isConstant() && isa<ConstantInt>(Result.getConstant())) &&
1727 "ConstantInt value must be represented as constantrange");
1728 return ConstantRange::getFull(Width);
1729 }
1731 static LazyValueInfo::Tristate
1732 getPredicateResult(unsigned Pred, Constant *C, const ValueLatticeElement &Val,
1733 const DataLayout &DL, TargetLibraryInfo *TLI) {
1734 // If we know the value is a constant, evaluate the conditional.
1735 Constant *Res = nullptr;
1736 if (Val.isConstant()) {
1737 Res = ConstantFoldCompareInstOperands(Pred, Val.getConstant(), C, DL, TLI);
1738 if (ConstantInt *ResCI = dyn_cast<ConstantInt>(Res))
1739 return ResCI->isZero() ? LazyValueInfo::False : LazyValueInfo::True;
1740 return LazyValueInfo::Unknown;
1741 }
1743 if (Val.isConstantRange()) {
1744 ConstantInt *CI = dyn_cast<ConstantInt>(C);
1745 if (!CI) return LazyValueInfo::Unknown;
1747 const ConstantRange &CR = Val.getConstantRange();
1748 if (Pred == ICmpInst::ICMP_EQ) {
1749 if (!CR.contains(CI->getValue()))
1750 return LazyValueInfo::False;
1752 if (CR.isSingleElement())
1753 return LazyValueInfo::True;
1754 } else if (Pred == ICmpInst::ICMP_NE) {
1755 if (!CR.contains(CI->getValue()))
1756 return LazyValueInfo::True;
1758 if (CR.isSingleElement())
1759 return LazyValueInfo::False;
1760 } else {
1761 // Handle more complex predicates.
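// For example, for ICMP_ULT against C == 8, TrueValues is [0, 8); a CR of
// [1, 5) is then known true, while a CR of [10, 20) is known false.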
1762 ConstantRange TrueValues = ConstantRange::makeExactICmpRegion(
1763 (ICmpInst::Predicate)Pred, CI->getValue());
1764 if (TrueValues.contains(CR))
1765 return LazyValueInfo::True;
1766 if (TrueValues.inverse().contains(CR))
1767 return LazyValueInfo::False;
1768 }
1769 return LazyValueInfo::Unknown;
1770 }
1772 if (Val.isNotConstant()) {
1773 // If this is an equality comparison, we can try to fold it knowing that
1774 // "V != C1".
1775 if (Pred == ICmpInst::ICMP_EQ) {
1776 // !C1 == C -> false iff C1 == C.
1777 Res = ConstantFoldCompareInstOperands(ICmpInst::ICMP_NE,
1778 Val.getNotConstant(), C, DL,
1779 TLI);
1780 if (Res->isNullValue())
1781 return LazyValueInfo::False;
1782 } else if (Pred == ICmpInst::ICMP_NE) {
1783 // !C1 != C -> true iff C1 == C.
1784 Res = ConstantFoldCompareInstOperands(ICmpInst::ICMP_NE,
1785 Val.getNotConstant(), C, DL,
1786 TLI);
1787 if (Res->isNullValue())
1788 return LazyValueInfo::True;
1789 }
1790 return LazyValueInfo::Unknown;
1791 }
1793 return LazyValueInfo::Unknown;
1794 }
1796 /// Determine whether the specified value comparison with a constant is known to
1797 /// be true or false on the specified CFG edge. Pred is a CmpInst predicate.
1798 LazyValueInfo::Tristate
1799 LazyValueInfo::getPredicateOnEdge(unsigned Pred, Value *V, Constant *C,
1800 BasicBlock *FromBB, BasicBlock *ToBB,
1801 Instruction *CxtI) {
1802 const DataLayout &DL = FromBB->getModule()->getDataLayout();
1803 ValueLatticeElement Result =
1804 getImpl(PImpl, AC, &DL, DT).getValueOnEdge(V, FromBB, ToBB, CxtI);
1806 return getPredicateResult(Pred, C, Result, DL, TLI);
1807 }
1809 LazyValueInfo::Tristate
1810 LazyValueInfo::getPredicateAt(unsigned Pred, Value *V, Constant *C,
1811 Instruction *CxtI) {
1812 // Whether a pointer is or is not null is a commonly queried predicate. If
1813 // isKnownNonZero can tell us the result of the predicate, we can
1814 // return it quickly. But this is only a fast path, and falling
1815 // through would still be correct.
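// For example, an "icmp eq" of a nonnull argument against null can be answered
// as false here without consulting the lattice at all.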
1816 const DataLayout &DL = CxtI->getModule()->getDataLayout();
1817 if (V->getType()->isPointerTy() && C->isNullValue() &&
1818 isKnownNonZero(V->stripPointerCastsSameRepresentation(), DL)) {
1819 if (Pred == ICmpInst::ICMP_EQ)
1820 return LazyValueInfo::False;
1821 else if (Pred == ICmpInst::ICMP_NE)
1822 return LazyValueInfo::True;
1823 }
1824 ValueLatticeElement Result = getImpl(PImpl, AC, &DL, DT).getValueAt(V, CxtI);
1825 Tristate Ret = getPredicateResult(Pred, C, Result, DL, TLI);
1826 if (Ret != Unknown)
1827 return Ret;
1829 // Note: The following bit of code is somewhat distinct from the rest of LVI;
1830 // LVI as a whole tries to compute a lattice value which is conservatively
1831 // correct at a given location. In this case, we have a predicate which we
1832 // weren't able to prove about the merged result, and we're pushing that
1833 // predicate back along each incoming edge to see if we can prove it
1834 // separately for each input. As a motivating example, consider:
1835 // bb1:
1836 // %v1 = ... ; constantrange<1, 5>
1837 // br label %merge
1838 // bb2:
1839 // %v2 = ... ; constantrange<10, 20>
1840 // br label %merge
1841 // merge:
1842 // %phi = phi [%v1, %v2] ; constantrange<1,20>
1843 // %pred = icmp eq i32 %phi, 8
1844 // We can't tell from the lattice value for '%phi' that '%pred' is false
1845 // along each path, but by checking the predicate over each input separately,
1846 // we can.
1847 // We limit the search to one step backwards from the current BB and value.
1848 // We could consider extending this to search further backwards through the
1849 // CFG and/or value graph, but there are non-obvious compile time vs quality
1850 // tradeoffs.
1851 if (CxtI) {
1852 BasicBlock *BB = CxtI->getParent();
1854 // Function entry or an unreachable block. Bail to avoid confusing
1855 // analysis below.
1856 pred_iterator PI = pred_begin(BB), PE = pred_end(BB);
1857 if (PI == PE)
1858 return Unknown;
1860 // If V is a PHI node in the same block as the context, we need to ask
1861 // questions about the predicate as applied to the incoming value along
1862 // each edge. This is useful for eliminating cases where the predicate is
1863 // known along all incoming edges.
1864 if (auto *PHI = dyn_cast<PHINode>(V))
1865 if (PHI->getParent() == BB) {
1866 Tristate Baseline = Unknown;
1867 for (unsigned i = 0, e = PHI->getNumIncomingValues(); i < e; i++) {
1868 Value *Incoming = PHI->getIncomingValue(i);
1869 BasicBlock *PredBB = PHI->getIncomingBlock(i);
1870 // Note that PredBB may be BB itself.
1871 Tristate Result = getPredicateOnEdge(Pred, Incoming, C, PredBB, BB,
1872 CxtI);
1874 // Keep going as long as we've seen a consistent known result for
1875 // all inputs.
1876 Baseline = (i == 0) ? Result /* First iteration */
1877 : (Baseline == Result ? Baseline : Unknown); /* All others */
1878 if (Baseline == Unknown)
1879 break;
1880 }
1881 if (Baseline != Unknown)
1882 return Baseline;
1883 }
1885 // For a comparison where the V is outside this block, it's possible
1886 // that we've branched on it before. Look to see if the value is known
1887 // on all incoming edges.
1888 if (!isa<Instruction>(V) ||
1889 cast<Instruction>(V)->getParent() != BB) {
1890 // For each predecessor edge, determine if the comparison is true or false
1891 // on that edge. If they're all true or all false, we can conclude
1892 // the value of the comparison in this block.
1893 Tristate Baseline = getPredicateOnEdge(Pred, V, C, *PI, BB, CxtI);
1894 if (Baseline != Unknown) {
1895 // Check that all remaining incoming values match the first one.
1896 while (++PI != PE) {
1897 Tristate Ret = getPredicateOnEdge(Pred, V, C, *PI, BB, CxtI);
1898 if (Ret != Baseline) break;
1899 }
1900 // If we terminated early, then one of the values didn't match.
1901 if (PI == PE) {
1902 return Baseline;
1903 }
1904 }
1905 }
1906 }
1907 return Unknown;
1908 }
1910 void LazyValueInfo::threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc,
1911 BasicBlock *NewSucc) {
1912 if (PImpl) {
1913 const DataLayout &DL = PredBB->getModule()->getDataLayout();
1914 getImpl(PImpl, AC, &DL, DT).threadEdge(PredBB, OldSucc, NewSucc);
1915 }
1916 }
1918 void LazyValueInfo::eraseBlock(BasicBlock *BB) {
1919 if (PImpl) {
1920 const DataLayout &DL = BB->getModule()->getDataLayout();
1921 getImpl(PImpl, AC, &DL, DT).eraseBlock(BB);
1922 }
1923 }
1926 void LazyValueInfo::printLVI(Function &F, DominatorTree &DTree, raw_ostream &OS) {
1927 if (PImpl) {
1928 getImpl(PImpl, AC, DL, DT).printLVI(F, DTree, OS);
1929 }
1930 }
1932 void LazyValueInfo::disableDT() {
1933 if (PImpl)
1934 getImpl(PImpl, AC, DL, DT).disableDT();
1935 }
1937 void LazyValueInfo::enableDT() {
1938 if (PImpl)
1939 getImpl(PImpl, AC, DL, DT).enableDT();
1940 }
1942 // Print the LVI for the function arguments at the start of each basic block.
1943 void LazyValueInfoAnnotatedWriter::emitBasicBlockStartAnnot(
1944 const BasicBlock *BB, formatted_raw_ostream &OS) {
1945 // Check whether there are lattice values defined for the arguments of the function.
1946 auto *F = BB->getParent();
1947 for (auto &Arg : F->args()) {
1948 ValueLatticeElement Result = LVIImpl->getValueInBlock(
1949 const_cast<Argument *>(&Arg), const_cast<BasicBlock *>(BB));
1950 if (Result.isUndefined())
1951 continue;
1952 OS << "; LatticeVal for: '" << Arg << "' is: " << Result << "\n";
1953 }
1954 }
1956 // This function prints the LVI analysis for the instruction I at the beginning
1957 // of various basic blocks. It relies on calculated values that are stored in
1958 // the LazyValueInfoCache, and in the absence of cached values, recalculates the
1959 // LazyValueInfo for `I` and prints that info.
1960 void LazyValueInfoAnnotatedWriter::emitInstructionAnnot(
1961 const Instruction *I, formatted_raw_ostream &OS) {
1963 auto *ParentBB = I->getParent();
1964 SmallPtrSet<const BasicBlock*, 16> BlocksContainingLVI;
1965 // We can generate (solve) LVI values only for blocks that are dominated by
1966 // I's parent. However, to avoid generating LVI for all such dominated blocks,
1967 // which would contain redundant/uninteresting information, we only print LVI
1968 // for blocks that may use this information (such as immediate successor
1969 // blocks, and blocks that contain uses of `I`).
1970 auto printResult = [&](const BasicBlock *BB) {
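// Only print a given block once, even if it is reachable through several of
// the paths considered below.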
1971 if (!BlocksContainingLVI.insert(BB).second)
1972 return;
1973 ValueLatticeElement Result = LVIImpl->getValueInBlock(
1974 const_cast<Instruction *>(I), const_cast<BasicBlock *>(BB));
1975 OS << "; LatticeVal for: '" << *I << "' in BB: '";
1976 BB->printAsOperand(OS, false);
1977 OS << "' is: " << Result << "\n";
1978 };
1980 printResult(ParentBB);
1981 // Print the LVI analysis results for the immediate successor blocks that
1982 // are dominated by `ParentBB`.
1983 for (auto *BBSucc : successors(ParentBB))
1984 if (DT.dominates(ParentBB, BBSucc))
1985 printResult(BBSucc);
1987 // Print LVI in blocks where `I` is used.
1988 for (auto *U : I->users())
1989 if (auto *UseI = dyn_cast<Instruction>(U))
1990 if (!isa<PHINode>(UseI) || DT.dominates(ParentBB, UseI->getParent()))
1991 printResult(UseI->getParent());
1992 }
1995 namespace {
1996 // Printer class for LazyValueInfo results.
1997 class LazyValueInfoPrinter : public FunctionPass {
1998 public:
1999 static char ID; // Pass identification, replacement for typeid
2000 LazyValueInfoPrinter() : FunctionPass(ID) {
2001 initializeLazyValueInfoPrinterPass(*PassRegistry::getPassRegistry());
2002 }
2004 void getAnalysisUsage(AnalysisUsage &AU) const override {
2005 AU.setPreservesAll();
2006 AU.addRequired<LazyValueInfoWrapperPass>();
2007 AU.addRequired<DominatorTreeWrapperPass>();
2008 }
2010 // Get the mandatory dominator tree analysis and pass this in to the
2011 // LVIPrinter. We cannot rely on the LVI's DT, since it's optional.
2012 bool runOnFunction(Function &F) override {
2013 dbgs() << "LVI for function '" << F.getName() << "':\n";
2014 auto &LVI = getAnalysis<LazyValueInfoWrapperPass>().getLVI();
2015 auto &DTree = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2016 LVI.printLVI(F, DTree, dbgs());
2017 return false;
2018 }
2019 };
2020 }
2022 char LazyValueInfoPrinter::ID = 0;
2023 INITIALIZE_PASS_BEGIN(LazyValueInfoPrinter, "print-lazy-value-info",
2024 "Lazy Value Info Printer Pass", false, false)
2025 INITIALIZE_PASS_DEPENDENCY(LazyValueInfoWrapperPass)
2026 INITIALIZE_PASS_END(LazyValueInfoPrinter, "print-lazy-value-info",
2027 "Lazy Value Info Printer Pass", false, false)