//===- LazyValueInfo.cpp - Value constraint analysis ------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interface for lazy computation of value constraint
// information.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/LazyValueInfo.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueLattice.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/AssemblyAnnotationWriter.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/raw_ostream.h"
#include <map>
#include <stack>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "lazy-value-info"

// This is the number of worklist items we will process to try to discover an
// answer for a given value.
static const unsigned MaxProcessedPerValue = 500;

char LazyValueInfoWrapperPass::ID = 0;
INITIALIZE_PASS_BEGIN(LazyValueInfoWrapperPass, "lazy-value-info",
                "Lazy Value Information Analysis", false, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(LazyValueInfoWrapperPass, "lazy-value-info",
                "Lazy Value Information Analysis", false, true)

namespace llvm {
  FunctionPass *createLazyValueInfoPass() { return new LazyValueInfoWrapperPass(); }
}

AnalysisKey LazyValueAnalysis::Key;

/// Returns true if this lattice value represents at most one possible value.
/// This is as precise as any lattice value can get while still representing
/// reachable code.
static bool hasSingleValue(const ValueLatticeElement &Val) {
  if (Val.isConstantRange() &&
      Val.getConstantRange().isSingleElement())
    // Integer constants are single element ranges
    return true;
  if (Val.isConstant())
    // Non-integer constants
    return true;
  return false;
}

/// Combine two sets of facts about the same value into a single set of
/// facts.  Note that this method is not suitable for merging facts along
/// different paths in a CFG; that's what the mergeIn function is for.  This
/// is for merging facts gathered about the same value at the same location
/// through two independent means.
/// Notes:
/// * This method does not promise to return the most precise possible lattice
///   value implied by A and B.  It is allowed to return any lattice element
///   which is at least as strong as *either* A or B (unless our facts
///   conflict, see below).
/// * Due to unreachable code, the intersection of two lattice values could be
///   contradictory.  If this happens, we return some valid lattice value so as
///   not to confuse the rest of LVI.  Ideally, we'd always return Undefined,
///   but we do not make this guarantee.  TODO: This would be a useful
///   enhancement.
static ValueLatticeElement intersect(const ValueLatticeElement &A,
                                     const ValueLatticeElement &B) {
  // Undefined is the strongest state.  It means the value is known to be along
  // an unreachable path.
  if (A.isUndefined())
    return A;
  if (B.isUndefined())
    return B;

  // If we gave up for one, but got a usable fact from the other, use it.
  if (A.isOverdefined())
    return B;
  if (B.isOverdefined())
    return A;

  // Can't get any more precise than constants.
  if (hasSingleValue(A))
    return A;
  if (hasSingleValue(B))
    return B;

  // Could be either constant range or not constant here.
  if (!A.isConstantRange() || !B.isConstantRange()) {
    // TODO: Arbitrary choice, could be improved
    return A;
  }

  // Intersect two constant ranges
  ConstantRange Range =
      A.getConstantRange().intersectWith(B.getConstantRange());
  // Note: An empty range is implicitly converted to overdefined internally.
  // TODO: We could instead use Undefined here since we've proven a conflict
  // and thus know this path must be unreachable.
  return ValueLatticeElement::getRange(std::move(Range));
}
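
// A minimal usage sketch (hypothetical values, shown only to illustrate the
// intent of intersect()): two independently derived range facts about the same
// i32 value are combined by keeping only what both allow.
//
//   ValueLatticeElement A = ValueLatticeElement::getRange(
//       ConstantRange(APInt(32, 0), APInt(32, 10)));   // [0, 10)
//   ValueLatticeElement B = ValueLatticeElement::getRange(
//       ConstantRange(APInt(32, 5), APInt(32, 20)));   // [5, 20)
//   ValueLatticeElement C = intersect(A, B);           // range [5, 10)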

//===----------------------------------------------------------------------===//
//                          LazyValueInfoCache Decl
//===----------------------------------------------------------------------===//

namespace {
  /// A callback value handle updates the cache when values are erased.
  class LazyValueInfoCache;
  struct LVIValueHandle final : public CallbackVH {
    // Needs to access getValPtr(), which is protected.
    friend struct DenseMapInfo<LVIValueHandle>;

    LazyValueInfoCache *Parent;

    LVIValueHandle(Value *V, LazyValueInfoCache *P)
      : CallbackVH(V), Parent(P) { }

    void deleted() override;
    void allUsesReplacedWith(Value *V) override {
      deleted();
    }
  };
} // end anonymous namespace

namespace {
  /// This is the cache kept by LazyValueInfo which
  /// maintains information about queries across the clients' queries.
  class LazyValueInfoCache {
    /// This is all of the cached block information for exactly one Value*.
    /// The entries are sorted by the BasicBlock* of the
    /// entries, allowing us to do a lookup with a binary search.
    /// Over-defined lattice values are recorded in OverDefinedCache to reduce
    /// memory overhead.
    struct ValueCacheEntryTy {
      ValueCacheEntryTy(Value *V, LazyValueInfoCache *P) : Handle(V, P) {}
      LVIValueHandle Handle;
      SmallDenseMap<PoisoningVH<BasicBlock>, ValueLatticeElement, 4> BlockVals;
    };

    /// This tracks, on a per-block basis, the set of values that are
    /// over-defined at the end of that block.
    typedef DenseMap<PoisoningVH<BasicBlock>, SmallPtrSet<Value *, 4>>
        OverDefinedCacheTy;
    /// Keep track of all blocks that we have ever seen, so we
    /// don't spend time removing unused blocks from our caches.
    DenseSet<PoisoningVH<BasicBlock> > SeenBlocks;

    /// This is all of the cached information for all values,
    /// mapped from Value* to key information.
    DenseMap<Value *, std::unique_ptr<ValueCacheEntryTy>> ValueCache;
    OverDefinedCacheTy OverDefinedCache;

  public:
    void insertResult(Value *Val, BasicBlock *BB,
                      const ValueLatticeElement &Result) {
      SeenBlocks.insert(BB);

      // Insert over-defined values into their own cache to reduce memory
      // overhead.
      if (Result.isOverdefined())
        OverDefinedCache[BB].insert(Val);
      else {
        auto It = ValueCache.find_as(Val);
        if (It == ValueCache.end()) {
          ValueCache[Val] = make_unique<ValueCacheEntryTy>(Val, this);
          It = ValueCache.find_as(Val);
          assert(It != ValueCache.end() && "Val was just added to the map!");
        }
        It->second->BlockVals[BB] = Result;
      }
    }

    bool isOverdefined(Value *V, BasicBlock *BB) const {
      auto ODI = OverDefinedCache.find(BB);

      if (ODI == OverDefinedCache.end())
        return false;

      return ODI->second.count(V);
    }

    bool hasCachedValueInfo(Value *V, BasicBlock *BB) const {
      if (isOverdefined(V, BB))
        return true;

      auto I = ValueCache.find_as(V);
      if (I == ValueCache.end())
        return false;

      return I->second->BlockVals.count(BB);
    }

    ValueLatticeElement getCachedValueInfo(Value *V, BasicBlock *BB) const {
      if (isOverdefined(V, BB))
        return ValueLatticeElement::getOverdefined();

      auto I = ValueCache.find_as(V);
      if (I == ValueCache.end())
        return ValueLatticeElement();
      auto BBI = I->second->BlockVals.find(BB);
      if (BBI == I->second->BlockVals.end())
        return ValueLatticeElement();
      return BBI->second;
    }

    /// clear - Empty the cache.
    void clear() {
      SeenBlocks.clear();
      ValueCache.clear();
      OverDefinedCache.clear();
    }

    /// Inform the cache that a given value has been deleted.
    void eraseValue(Value *V);

    /// This is part of the update interface to inform the cache
    /// that a block has been deleted.
    void eraseBlock(BasicBlock *BB);

    /// Updates the cache to remove any influence an overdefined value in
    /// OldSucc might have (unless also overdefined in NewSucc).  This just
    /// flushes elements from the cache and does not add any.
    void threadEdgeImpl(BasicBlock *OldSucc, BasicBlock *NewSucc);

    friend struct LVIValueHandle;
  };
} // end anonymous namespace

void LazyValueInfoCache::eraseValue(Value *V) {
  for (auto I = OverDefinedCache.begin(), E = OverDefinedCache.end(); I != E;) {
    // Copy and increment the iterator immediately so we can erase behind
    // ourselves.
    auto Iter = I++;
    SmallPtrSetImpl<Value *> &ValueSet = Iter->second;
    ValueSet.erase(V);
    if (ValueSet.empty())
      OverDefinedCache.erase(Iter);
  }

  ValueCache.erase(V);
}

void LVIValueHandle::deleted() {
  // This erasure deallocates *this, so it MUST happen after we're done
  // using any and all members of *this.
  Parent->eraseValue(*this);
}

void LazyValueInfoCache::eraseBlock(BasicBlock *BB) {
  // Shortcut if we have never seen this block.
  DenseSet<PoisoningVH<BasicBlock> >::iterator I = SeenBlocks.find(BB);
  if (I == SeenBlocks.end())
    return;
  SeenBlocks.erase(I);

  auto ODI = OverDefinedCache.find(BB);
  if (ODI != OverDefinedCache.end())
    OverDefinedCache.erase(ODI);

  for (auto &I : ValueCache)
    I.second->BlockVals.erase(BB);
}

void LazyValueInfoCache::threadEdgeImpl(BasicBlock *OldSucc,
                                        BasicBlock *NewSucc) {
  // When an edge in the graph has been threaded, values that we could not
  // determine a value for before (i.e. were marked overdefined) may be
  // possible to solve now.  We do NOT try to proactively update these values.
  // Instead, we clear their entries from the cache, and allow lazy updating to
  // recompute them when needed.

  // The updating process is fairly simple: we need to drop cached info
  // for all values that were marked overdefined in OldSucc, and for those same
  // values in any successor of OldSucc (except NewSucc) in which they were
  // also marked overdefined.
  std::vector<BasicBlock*> worklist;
  worklist.push_back(OldSucc);

  auto I = OverDefinedCache.find(OldSucc);
  if (I == OverDefinedCache.end())
    return; // Nothing to process here.
  SmallVector<Value *, 4> ValsToClear(I->second.begin(), I->second.end());

  // Use a worklist to perform a depth-first search of OldSucc's successors.
  // NOTE: We do not need a visited list since any blocks we have already
  // visited will have had their overdefined markers cleared already, and we
  // thus won't loop to their successors.
  while (!worklist.empty()) {
    BasicBlock *ToUpdate = worklist.back();
    worklist.pop_back();

    // Skip blocks only accessible through NewSucc.
    if (ToUpdate == NewSucc) continue;

    // If a value was marked overdefined in OldSucc, and is here too...
    auto OI = OverDefinedCache.find(ToUpdate);
    if (OI == OverDefinedCache.end())
      continue;
    SmallPtrSetImpl<Value *> &ValueSet = OI->second;

    bool changed = false;
    for (Value *V : ValsToClear) {
      if (!ValueSet.erase(V))
        continue;

      // If we removed anything, then we potentially need to update the
      // block's successors too.
      changed = true;

      if (ValueSet.empty()) {
        OverDefinedCache.erase(OI);
        break;
      }
    }

    if (!changed) continue;

    worklist.insert(worklist.end(), succ_begin(ToUpdate), succ_end(ToUpdate));
  }
}
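
// Illustrative sketch (hypothetical IR, not taken from a real test case):
// suppose %v was cached as overdefined at the end of %oldsucc and of its
// successor %bb, and the edge %pred -> %oldsucc is threaded to go to a new
// block instead.  The walk above flushes %v's overdefined entries for %oldsucc
// and %bb, while the new successor itself is skipped:
//
//   pred:
//     br i1 %c, label %oldsucc, label %other
//   oldsucc:                      ; overdefined entry for %v dropped here
//     br label %bb
//   bb:                           ; ...and here, since %v was overdefined too
//     br label %exit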

namespace {
/// An assembly annotator class to print LazyValueCache information in
/// comments.
class LazyValueInfoImpl;
class LazyValueInfoAnnotatedWriter : public AssemblyAnnotationWriter {
  LazyValueInfoImpl *LVIImpl;
  // While analyzing which blocks we can solve values for, we need the dominator
  // information.  Since this is an optional parameter in LVI, we require this
  // DomTreeAnalysis pass in the printer pass, and pass the dominator
  // tree to the LazyValueInfoAnnotatedWriter.
  DominatorTree &DT;

public:
  LazyValueInfoAnnotatedWriter(LazyValueInfoImpl *L, DominatorTree &DTree)
      : LVIImpl(L), DT(DTree) {}

  virtual void emitBasicBlockStartAnnot(const BasicBlock *BB,
                                        formatted_raw_ostream &OS);

  virtual void emitInstructionAnnot(const Instruction *I,
                                    formatted_raw_ostream &OS);
};
}

namespace {
  // The actual implementation of the lazy analysis and update.  Note that the
  // inheritance from LazyValueInfoCache is intended to be temporary while
  // splitting the code and then transitioning to a has-a relationship.
  class LazyValueInfoImpl {

    /// Cached results from previous queries
    LazyValueInfoCache TheCache;

    /// This stack holds the state of the value solver during a query.
    /// It basically emulates the callstack of the naive
    /// recursive value lookup process.
    SmallVector<std::pair<BasicBlock*, Value*>, 8> BlockValueStack;

    /// Keeps track of which block-value pairs are in BlockValueStack.
    DenseSet<std::pair<BasicBlock*, Value*> > BlockValueSet;

    /// Push BV onto BlockValueStack unless it's already in there.
    /// Returns true on success.
    bool pushBlockValue(const std::pair<BasicBlock *, Value *> &BV) {
      if (!BlockValueSet.insert(BV).second)
        return false;  // It's already in the stack.

      DEBUG(dbgs() << "PUSH: " << *BV.second << " in " << BV.first->getName()
                   << "\n");
      BlockValueStack.push_back(BV);
      return true;
    }

    AssumptionCache *AC;  ///< A pointer to the cache of @llvm.assume calls.
    const DataLayout &DL; ///< A mandatory DataLayout
    DominatorTree *DT;    ///< An optional DT pointer.

    ValueLatticeElement getBlockValue(Value *Val, BasicBlock *BB);
    bool getEdgeValue(Value *V, BasicBlock *F, BasicBlock *T,
                      ValueLatticeElement &Result, Instruction *CxtI = nullptr);
    bool hasBlockValue(Value *Val, BasicBlock *BB);

    // These methods process one work item and may add more. A false value
    // returned means that the work item was not completely processed and must
    // be revisited after going through the new items.
    bool solveBlockValue(Value *Val, BasicBlock *BB);
    bool solveBlockValueImpl(ValueLatticeElement &Res, Value *Val,
                             BasicBlock *BB);
    bool solveBlockValueNonLocal(ValueLatticeElement &BBLV, Value *Val,
                                 BasicBlock *BB);
    bool solveBlockValuePHINode(ValueLatticeElement &BBLV, PHINode *PN,
                                BasicBlock *BB);
    bool solveBlockValueSelect(ValueLatticeElement &BBLV, SelectInst *S,
                               BasicBlock *BB);
    bool solveBlockValueBinaryOp(ValueLatticeElement &BBLV, BinaryOperator *BBI,
                                 BasicBlock *BB);
    bool solveBlockValueCast(ValueLatticeElement &BBLV, CastInst *CI,
                             BasicBlock *BB);
    void intersectAssumeOrGuardBlockValueConstantRange(Value *Val,
                                                       ValueLatticeElement &BBLV,
                                                       Instruction *BBI);

    void solve();

  public:
    /// This is the query interface to determine the lattice
    /// value for the specified Value* at the end of the specified block.
    ValueLatticeElement getValueInBlock(Value *V, BasicBlock *BB,
                                        Instruction *CxtI = nullptr);

    /// This is the query interface to determine the lattice
    /// value for the specified Value* at the specified instruction (generally
    /// from an assume intrinsic).
    ValueLatticeElement getValueAt(Value *V, Instruction *CxtI);

    /// This is the query interface to determine the lattice
    /// value for the specified Value* that is true on the specified edge.
    ValueLatticeElement getValueOnEdge(Value *V, BasicBlock *FromBB,
                                       BasicBlock *ToBB,
                                       Instruction *CxtI = nullptr);

    /// Completely flush all previously computed values
    void clear() {
      TheCache.clear();
    }

    /// Printing the LazyValueInfo Analysis.
    void printLVI(Function &F, DominatorTree &DTree, raw_ostream &OS) {
      LazyValueInfoAnnotatedWriter Writer(this, DTree);
      F.print(OS, &Writer);
    }

    /// This is part of the update interface to inform the cache
    /// that a block has been deleted.
    void eraseBlock(BasicBlock *BB) {
      TheCache.eraseBlock(BB);
    }

    /// This is the update interface to inform the cache that an edge from
    /// PredBB to OldSucc has been threaded to be from PredBB to NewSucc.
    void threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc,
                    BasicBlock *NewSucc);

    LazyValueInfoImpl(AssumptionCache *AC, const DataLayout &DL,
                      DominatorTree *DT = nullptr)
        : AC(AC), DL(DL), DT(DT) {}
  };
} // end anonymous namespace

void LazyValueInfoImpl::solve() {
  SmallVector<std::pair<BasicBlock *, Value *>, 8> StartingStack(
      BlockValueStack.begin(), BlockValueStack.end());

  unsigned processedCount = 0;
  while (!BlockValueStack.empty()) {
    processedCount++;
    // Abort if we have to process too many values to get a result for this
    // one.  Because the overdefined cache is currently per-block to avoid
    // naming-related issues (IE it wants to try to give different results for
    // the same name in different blocks), overdefined results don't get cached
    // globally, which in turn means we will often try to rediscover the same
    // overdefined result again and again.  Once something like PredicateInfo
    // is used in LVI or CVP, we should be able to make the overdefined cache
    // global, and remove this throttle.
    if (processedCount > MaxProcessedPerValue) {
      DEBUG(dbgs() << "Giving up on stack because we are getting too deep\n");
      // Fill in the original values
      while (!StartingStack.empty()) {
        std::pair<BasicBlock *, Value *> &e = StartingStack.back();
        TheCache.insertResult(e.second, e.first,
                              ValueLatticeElement::getOverdefined());
        StartingStack.pop_back();
      }
      BlockValueSet.clear();
      BlockValueStack.clear();
      return;
    }
    std::pair<BasicBlock *, Value *> e = BlockValueStack.back();
    assert(BlockValueSet.count(e) && "Stack value should be in BlockValueSet!");

    if (solveBlockValue(e.second, e.first)) {
      // The work item was completely processed.
      assert(BlockValueStack.back() == e && "Nothing should have been pushed!");
      assert(TheCache.hasCachedValueInfo(e.second, e.first) &&
             "Result should be in cache!");

      DEBUG(dbgs() << "POP " << *e.second << " in " << e.first->getName()
                   << " = " << TheCache.getCachedValueInfo(e.second, e.first)
                   << "\n");

      BlockValueStack.pop_back();
      BlockValueSet.erase(e);
    } else {
      // More work needs to be done before revisiting.
      assert(BlockValueStack.back() != e && "Stack should have been pushed!");
    }
  }
}

bool LazyValueInfoImpl::hasBlockValue(Value *Val, BasicBlock *BB) {
  // If already a constant, there is nothing to compute.
  if (isa<Constant>(Val))
    return true;

  return TheCache.hasCachedValueInfo(Val, BB);
}

ValueLatticeElement LazyValueInfoImpl::getBlockValue(Value *Val,
                                                     BasicBlock *BB) {
  // If already a constant, there is nothing to compute.
  if (Constant *VC = dyn_cast<Constant>(Val))
    return ValueLatticeElement::get(VC);

  return TheCache.getCachedValueInfo(Val, BB);
}

static ValueLatticeElement getFromRangeMetadata(Instruction *BBI) {
  switch (BBI->getOpcode()) {
  default: break;
  case Instruction::Load:
  case Instruction::Call:
  case Instruction::Invoke:
    if (MDNode *Ranges = BBI->getMetadata(LLVMContext::MD_range))
      if (isa<IntegerType>(BBI->getType())) {
        return ValueLatticeElement::getRange(
            getConstantRangeFromMetadata(*Ranges));
      }
    break;
  };

  // Nothing known - will be intersected with other facts
  return ValueLatticeElement::getOverdefined();
}
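
// Illustrative IR (hypothetical): a load annotated with !range metadata maps
// directly to a constant-range lattice value.
//
//   %x = load i8, i8* %p, !range !0     ; with !0 = !{i8 0, i8 10}
//   ; getFromRangeMetadata(%x) yields the range [0, 10)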

bool LazyValueInfoImpl::solveBlockValue(Value *Val, BasicBlock *BB) {
  if (isa<Constant>(Val))
    return true;

  if (TheCache.hasCachedValueInfo(Val, BB)) {
    // If we have a cached value, use that.
    DEBUG(dbgs() << "  reuse BB '" << BB->getName()
                 << "' val=" << TheCache.getCachedValueInfo(Val, BB) << '\n');

    // Since we're reusing a cached value, we don't need to update the
    // OverDefinedCache.  The cache will have been properly updated whenever
    // the cached value was inserted.
    return true;
  }

  // Hold off inserting this value into the Cache in case we have to return
  // false and come back later.
  ValueLatticeElement Res;
  if (!solveBlockValueImpl(Res, Val, BB))
    // Work pushed, will revisit
    return false;

  TheCache.insertResult(Val, BB, Res);
  return true;
}

bool LazyValueInfoImpl::solveBlockValueImpl(ValueLatticeElement &Res,
                                            Value *Val, BasicBlock *BB) {

  Instruction *BBI = dyn_cast<Instruction>(Val);
  if (!BBI || BBI->getParent() != BB)
    return solveBlockValueNonLocal(Res, Val, BB);

  if (PHINode *PN = dyn_cast<PHINode>(BBI))
    return solveBlockValuePHINode(Res, PN, BB);

  if (auto *SI = dyn_cast<SelectInst>(BBI))
    return solveBlockValueSelect(Res, SI, BB);

  // If this value is a nonnull pointer, record its range and bail out.  Note
  // that for all other pointer typed values, we terminate the search at the
  // definition.  We could easily extend this to look through geps, bitcasts,
  // and the like to prove non-nullness, but it's not clear that's worth it
  // compile time wise.  The context-insensitive value walk done inside
  // isKnownNonZero gets most of the profitable cases at much less expense.
  // This does mean that we have a sensitivity to where the defining
  // instruction is placed, even if it could legally be hoisted much higher.
  // That is unfortunate.
  PointerType *PT = dyn_cast<PointerType>(BBI->getType());
  if (PT && isKnownNonZero(BBI, DL)) {
    Res = ValueLatticeElement::getNot(ConstantPointerNull::get(PT));
    return true;
  }
  if (BBI->getType()->isIntegerTy()) {
    if (auto *CI = dyn_cast<CastInst>(BBI))
      return solveBlockValueCast(Res, CI, BB);

    BinaryOperator *BO = dyn_cast<BinaryOperator>(BBI);
    if (BO && isa<ConstantInt>(BO->getOperand(1)))
      return solveBlockValueBinaryOp(Res, BO, BB);
  }

  DEBUG(dbgs() << " compute BB '" << BB->getName()
               << "' - unknown inst def found.\n");
  Res = getFromRangeMetadata(BBI);
  return true;
}

static bool InstructionDereferencesPointer(Instruction *I, Value *Ptr) {
  if (LoadInst *L = dyn_cast<LoadInst>(I)) {
    return L->getPointerAddressSpace() == 0 &&
        GetUnderlyingObject(L->getPointerOperand(),
                            L->getModule()->getDataLayout()) == Ptr;
  }
  if (StoreInst *S = dyn_cast<StoreInst>(I)) {
    return S->getPointerAddressSpace() == 0 &&
        GetUnderlyingObject(S->getPointerOperand(),
                            S->getModule()->getDataLayout()) == Ptr;
  }
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) {
    if (MI->isVolatile()) return false;

    // FIXME: check whether it has a value range that excludes zero?
    ConstantInt *Len = dyn_cast<ConstantInt>(MI->getLength());
    if (!Len || Len->isZero()) return false;

    if (MI->getDestAddressSpace() == 0)
      if (GetUnderlyingObject(MI->getRawDest(),
                              MI->getModule()->getDataLayout()) == Ptr)
        return true;
    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI))
      if (MTI->getSourceAddressSpace() == 0)
        if (GetUnderlyingObject(MTI->getRawSource(),
                                MTI->getModule()->getDataLayout()) == Ptr)
          return true;
  }
  return false;
}

/// Return true if the allocation associated with Val is ever dereferenced
/// within the given basic block.  This establishes the fact that Val is not
/// null, but does not imply that the memory at Val is dereferenceable.  (Val
/// may point off the end of the dereferenceable part of the object.)
static bool isObjectDereferencedInBlock(Value *Val, BasicBlock *BB) {
  assert(Val->getType()->isPointerTy());

  const DataLayout &DL = BB->getModule()->getDataLayout();
  Value *UnderlyingVal = GetUnderlyingObject(Val, DL);
  // If 'GetUnderlyingObject' didn't converge, skip it.  It won't converge
  // inside InstructionDereferencesPointer either.
  if (UnderlyingVal == GetUnderlyingObject(UnderlyingVal, DL, 1))
    for (Instruction &I : *BB)
      if (InstructionDereferencesPointer(&I, UnderlyingVal))
        return true;
  return false;
}
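
// Illustrative IR (hypothetical): any load or store through the object inside
// the block is enough to establish non-nullness of the pointer.
//
//   bb:
//     store i32 0, i32* %p        ; dereference of %p in address space 0
//     ; isObjectDereferencedInBlock(%p, %bb) returns true, so LVI may record
//     ; %p as "not null" in %bb.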

bool LazyValueInfoImpl::solveBlockValueNonLocal(ValueLatticeElement &BBLV,
                                                Value *Val, BasicBlock *BB) {
  ValueLatticeElement Result;  // Start Undefined.

  // If this is the entry block, we must be asking about an argument.  The
  // value is overdefined.
  if (BB == &BB->getParent()->getEntryBlock()) {
    assert(isa<Argument>(Val) && "Unknown live-in to the entry block");
    // Before giving up, see if we can prove the pointer non-null local to
    // this particular block.
    if (Val->getType()->isPointerTy() &&
        (isKnownNonZero(Val, DL) || isObjectDereferencedInBlock(Val, BB))) {
      PointerType *PTy = cast<PointerType>(Val->getType());
      Result = ValueLatticeElement::getNot(ConstantPointerNull::get(PTy));
    } else {
      Result = ValueLatticeElement::getOverdefined();
    }
    BBLV = Result;
    return true;
  }

  // Loop over all of our predecessors, merging what we know from them into
  // result.  If we encounter an unexplored predecessor, we eagerly explore it
  // in a depth first manner.  In practice, this has the effect of discovering
  // paths we can't analyze eagerly without spending compile time analyzing
  // other paths.  This heuristic benefits from the fact that predecessors are
  // frequently arranged such that dominating ones come first and we quickly
  // find a path to function entry.  TODO: We should consider explicitly
  // canonicalizing to make this true rather than relying on this happy
  // accident.
  for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
    ValueLatticeElement EdgeResult;
    if (!getEdgeValue(Val, *PI, BB, EdgeResult))
      // Explore that input, then return here
      return false;

    Result.mergeIn(EdgeResult, DL);

    // If we hit overdefined, exit early.  The BlockVals entry is already set
    // to overdefined.
    if (Result.isOverdefined()) {
      DEBUG(dbgs() << " compute BB '" << BB->getName()
                   << "' - overdefined because of pred (non local).\n");
      // Before giving up, see if we can prove the pointer non-null local to
      // this particular block.
      if (Val->getType()->isPointerTy() &&
          isObjectDereferencedInBlock(Val, BB)) {
        PointerType *PTy = cast<PointerType>(Val->getType());
        Result = ValueLatticeElement::getNot(ConstantPointerNull::get(PTy));
      }

      BBLV = Result;
      return true;
    }
  }

  // Return the merged value, which is more precise than 'overdefined'.
  assert(!Result.isOverdefined());
  BBLV = Result;
  return true;
}

bool LazyValueInfoImpl::solveBlockValuePHINode(ValueLatticeElement &BBLV,
                                               PHINode *PN, BasicBlock *BB) {
  ValueLatticeElement Result;  // Start Undefined.

  // Loop over all of our predecessors, merging what we know from them into
  // result.  See the comment about the chosen traversal order in
  // solveBlockValueNonLocal; the same reasoning applies here.
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    BasicBlock *PhiBB = PN->getIncomingBlock(i);
    Value *PhiVal = PN->getIncomingValue(i);
    ValueLatticeElement EdgeResult;
    // Note that we can provide PN as the context value to getEdgeValue, even
    // though the results will be cached, because PN is the value being used as
    // the cache key in the caller.
    if (!getEdgeValue(PhiVal, PhiBB, BB, EdgeResult, PN))
      // Explore that input, then return here
      return false;

    Result.mergeIn(EdgeResult, DL);

    // If we hit overdefined, exit early.  The BlockVals entry is already set
    // to overdefined.
    if (Result.isOverdefined()) {
      DEBUG(dbgs() << " compute BB '" << BB->getName()
                   << "' - overdefined because of pred (local).\n");

      BBLV = Result;
      return true;
    }
  }

  // Return the merged value, which is more precise than 'overdefined'.
  assert(!Result.isOverdefined() && "Possible PHI in entry block?");
  BBLV = Result;
  return true;
}

static ValueLatticeElement getValueFromCondition(Value *Val, Value *Cond,
                                                 bool isTrueDest = true);

// If we can determine a constraint on the value given conditions assumed by
// the program, intersect those constraints with BBLV.
void LazyValueInfoImpl::intersectAssumeOrGuardBlockValueConstantRange(
        Value *Val, ValueLatticeElement &BBLV, Instruction *BBI) {
  BBI = BBI ? BBI : dyn_cast<Instruction>(Val);
  if (!BBI)
    return;

  for (auto &AssumeVH : AC->assumptionsFor(Val)) {
    if (!AssumeVH)
      continue;
    auto *I = cast<CallInst>(AssumeVH);
    if (!isValidAssumeForContext(I, BBI, DT))
      continue;

    BBLV = intersect(BBLV, getValueFromCondition(Val, I->getArgOperand(0)));
  }

  // If guards are not used in the module, don't spend time looking for them.
  auto *GuardDecl = BBI->getModule()->getFunction(
          Intrinsic::getName(Intrinsic::experimental_guard));
  if (!GuardDecl || GuardDecl->use_empty())
    return;

  for (Instruction &I : make_range(BBI->getIterator().getReverse(),
                                   BBI->getParent()->rend())) {
    Value *Cond = nullptr;
    if (match(&I, m_Intrinsic<Intrinsic::experimental_guard>(m_Value(Cond))))
      BBLV = intersect(BBLV, getValueFromCondition(Val, Cond));
  }
}

bool LazyValueInfoImpl::solveBlockValueSelect(ValueLatticeElement &BBLV,
                                              SelectInst *SI, BasicBlock *BB) {

  // Recurse on our inputs if needed
  if (!hasBlockValue(SI->getTrueValue(), BB)) {
    if (pushBlockValue(std::make_pair(BB, SI->getTrueValue())))
      return false;
    BBLV = ValueLatticeElement::getOverdefined();
    return true;
  }
  ValueLatticeElement TrueVal = getBlockValue(SI->getTrueValue(), BB);
  // If we hit overdefined, don't ask more queries.  We want to avoid poisoning
  // extra slots in the table if we can.
  if (TrueVal.isOverdefined()) {
    BBLV = ValueLatticeElement::getOverdefined();
    return true;
  }

  if (!hasBlockValue(SI->getFalseValue(), BB)) {
    if (pushBlockValue(std::make_pair(BB, SI->getFalseValue())))
      return false;
    BBLV = ValueLatticeElement::getOverdefined();
    return true;
  }
  ValueLatticeElement FalseVal = getBlockValue(SI->getFalseValue(), BB);
  // If we hit overdefined, don't ask more queries.  We want to avoid poisoning
  // extra slots in the table if we can.
  if (FalseVal.isOverdefined()) {
    BBLV = ValueLatticeElement::getOverdefined();
    return true;
  }

  if (TrueVal.isConstantRange() && FalseVal.isConstantRange()) {
    const ConstantRange &TrueCR = TrueVal.getConstantRange();
    const ConstantRange &FalseCR = FalseVal.getConstantRange();
    Value *LHS = nullptr;
    Value *RHS = nullptr;
    SelectPatternResult SPR = matchSelectPattern(SI, LHS, RHS);
    // Is this a min specifically of our two inputs?  (Avoid the risk of
    // ValueTracking getting smarter looking back past our immediate inputs.)
    if (SelectPatternResult::isMinOrMax(SPR.Flavor) &&
        LHS == SI->getTrueValue() && RHS == SI->getFalseValue()) {
      ConstantRange ResultCR = [&]() {
        switch (SPR.Flavor) {
        default:
          llvm_unreachable("unexpected minmax type!");
        case SPF_SMIN:                   /// Signed minimum
          return TrueCR.smin(FalseCR);
        case SPF_UMIN:                   /// Unsigned minimum
          return TrueCR.umin(FalseCR);
        case SPF_SMAX:                   /// Signed maximum
          return TrueCR.smax(FalseCR);
        case SPF_UMAX:                   /// Unsigned maximum
          return TrueCR.umax(FalseCR);
        };
      }();
      BBLV = ValueLatticeElement::getRange(ResultCR);
      return true;
    }

    // TODO: ABS, NABS from the SelectPatternResult
  }

  // Can we constrain the facts about the true and false values by using the
  // condition itself?  This shows up with idioms like e.g. select(a > 5, a, 5).
  // TODO: We could potentially refine an overdefined true value above.
  Value *Cond = SI->getCondition();
  TrueVal = intersect(TrueVal,
                      getValueFromCondition(SI->getTrueValue(), Cond, true));
  FalseVal = intersect(FalseVal,
                       getValueFromCondition(SI->getFalseValue(), Cond, false));

  // Handle clamp idioms such as:
  //   %24 = constantrange<0, 17>
  //   %39 = icmp eq i32 %24, 0
  //   %40 = add i32 %24, -1
  //   %siv.next = select i1 %39, i32 16, i32 %40
  //   %siv.next = constantrange<0, 17> not <-1, 17>
  // In general, this can handle any clamp idiom which tests the edge
  // condition via an equality or inequality.
  if (auto *ICI = dyn_cast<ICmpInst>(Cond)) {
    ICmpInst::Predicate Pred = ICI->getPredicate();
    Value *A = ICI->getOperand(0);
    if (ConstantInt *CIBase = dyn_cast<ConstantInt>(ICI->getOperand(1))) {
      auto addConstants = [](ConstantInt *A, ConstantInt *B) {
        assert(A->getType() == B->getType());
        return ConstantInt::get(A->getType(), A->getValue() + B->getValue());
      };
      // See if either input is A + C2, subject to the constraint from the
      // condition that A != C when that input is used.  We can assume that
      // that input doesn't include C + C2.
      ConstantInt *CIAdded;
      switch (Pred) {
      default: break;
      case ICmpInst::ICMP_EQ:
        if (match(SI->getFalseValue(), m_Add(m_Specific(A),
                                             m_ConstantInt(CIAdded)))) {
          auto ResNot = addConstants(CIBase, CIAdded);
          FalseVal = intersect(FalseVal,
                               ValueLatticeElement::getNot(ResNot));
        }
        break;
      case ICmpInst::ICMP_NE:
        if (match(SI->getTrueValue(), m_Add(m_Specific(A),
                                            m_ConstantInt(CIAdded)))) {
          auto ResNot = addConstants(CIBase, CIAdded);
          TrueVal = intersect(TrueVal,
                              ValueLatticeElement::getNot(ResNot));
        }
        break;
      }
    }
  }

  ValueLatticeElement Result;  // Start Undefined.
  Result.mergeIn(TrueVal, DL);
  Result.mergeIn(FalseVal, DL);
  BBLV = Result;
  return true;
}
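
// Illustrative IR (hypothetical) for the condition-based refinement above:
//
//   %c = icmp sgt i32 %a, 5
//   %m = select i1 %c, i32 %a, i32 5
//   ; On the true arm %a is known to be sgt 5 and on the false arm the value
//   ; is exactly 5, so the merged lattice value for %m is (roughly) the
//   ; signed range [5, INT32_MAX].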

bool LazyValueInfoImpl::solveBlockValueCast(ValueLatticeElement &BBLV,
                                            CastInst *CI,
                                            BasicBlock *BB) {
  if (!CI->getOperand(0)->getType()->isSized()) {
    // Without knowing how wide the input is, we can't analyze it in any useful
    // way.
    BBLV = ValueLatticeElement::getOverdefined();
    return true;
  }

  // Filter out casts we don't know how to reason about before attempting to
  // recurse on our operand.  This can cut a long search short if we know we're
  // not going to be able to get any useful information anyway.
  switch (CI->getOpcode()) {
  case Instruction::Trunc:
  case Instruction::SExt:
  case Instruction::ZExt:
  case Instruction::BitCast:
    break;
  default:
    // Unhandled instructions are overdefined.
    DEBUG(dbgs() << " compute BB '" << BB->getName()
                 << "' - overdefined (unknown cast).\n");
    BBLV = ValueLatticeElement::getOverdefined();
    return true;
  }

  // Figure out the range of the LHS.  If that fails, we still apply the
  // transfer rule on the full set since we may be able to locally infer
  // interesting facts.
  if (!hasBlockValue(CI->getOperand(0), BB))
    if (pushBlockValue(std::make_pair(BB, CI->getOperand(0))))
      // More work to do before applying this transfer rule.
      return false;

  const unsigned OperandBitWidth =
      DL.getTypeSizeInBits(CI->getOperand(0)->getType());
  ConstantRange LHSRange = ConstantRange(OperandBitWidth);
  if (hasBlockValue(CI->getOperand(0), BB)) {
    ValueLatticeElement LHSVal = getBlockValue(CI->getOperand(0), BB);
    intersectAssumeOrGuardBlockValueConstantRange(CI->getOperand(0), LHSVal,
                                                  CI);
    if (LHSVal.isConstantRange())
      LHSRange = LHSVal.getConstantRange();
  }

  const unsigned ResultBitWidth = CI->getType()->getIntegerBitWidth();

  // NOTE: We're currently limited by the set of operations that ConstantRange
  // can evaluate symbolically.  Enhancing that set will allow us to analyze
  // more definitions.
  BBLV = ValueLatticeElement::getRange(LHSRange.castOp(CI->getOpcode(),
                                                       ResultBitWidth));
  return true;
}
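
// Illustrative sketch (hypothetical operand range): if the i8 operand of a
// zext to i32 is known to lie in [0, 100), ConstantRange::castOp produces the
// i32 range [0, 100); the same operand under sext would also stay [0, 100)
// because every value in the input range is non-negative.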

bool LazyValueInfoImpl::solveBlockValueBinaryOp(ValueLatticeElement &BBLV,
                                                BinaryOperator *BO,
                                                BasicBlock *BB) {

  assert(BO->getOperand(0)->getType()->isSized() &&
         "all operands to binary operators are sized");

  // Filter out operators we don't know how to reason about before attempting
  // to recurse on our operand(s).  This can cut a long search short if we know
  // we're not going to be able to get any useful information anyway.
  switch (BO->getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::And:
  case Instruction::Or:
    // continue into the code below
    break;
  default:
    // Unhandled instructions are overdefined.
    DEBUG(dbgs() << " compute BB '" << BB->getName()
                 << "' - overdefined (unknown binary operator).\n");
    BBLV = ValueLatticeElement::getOverdefined();
    return true;
  }

  // Figure out the range of the LHS.  If that fails, use a conservative range,
  // but apply the transfer rule anyway.  This lets us pick up facts from
  // expressions like "and i32 (call i32 @foo()), 32"
  if (!hasBlockValue(BO->getOperand(0), BB))
    if (pushBlockValue(std::make_pair(BB, BO->getOperand(0))))
      // More work to do before applying this transfer rule.
      return false;

  const unsigned OperandBitWidth =
      DL.getTypeSizeInBits(BO->getOperand(0)->getType());
  ConstantRange LHSRange = ConstantRange(OperandBitWidth);
  if (hasBlockValue(BO->getOperand(0), BB)) {
    ValueLatticeElement LHSVal = getBlockValue(BO->getOperand(0), BB);
    intersectAssumeOrGuardBlockValueConstantRange(BO->getOperand(0), LHSVal,
                                                  BO);
    if (LHSVal.isConstantRange())
      LHSRange = LHSVal.getConstantRange();
  }

  ConstantInt *RHS = cast<ConstantInt>(BO->getOperand(1));
  ConstantRange RHSRange = ConstantRange(RHS->getValue());

  // NOTE: We're currently limited by the set of operations that ConstantRange
  // can evaluate symbolically.  Enhancing that set will allow us to analyze
  // more definitions.
  Instruction::BinaryOps BinOp = BO->getOpcode();
  BBLV = ValueLatticeElement::getRange(LHSRange.binaryOp(BinOp, RHSRange));
  return true;
}
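
// Illustrative IR (hypothetical), showing why the transfer rule is applied
// even when nothing is known about the LHS:
//
//   %x = call i32 @foo()          ; no range information for %x
//   %y = and i32 %x, 7
//   ; LHSRange is the full i32 set, yet binaryOp(And, {7}) can still prove
//   ; that %y lies in [0, 8).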

static ValueLatticeElement getValueFromICmpCondition(Value *Val, ICmpInst *ICI,
                                                     bool isTrueDest) {
  Value *LHS = ICI->getOperand(0);
  Value *RHS = ICI->getOperand(1);
  CmpInst::Predicate Predicate = ICI->getPredicate();

  if (isa<Constant>(RHS)) {
    if (ICI->isEquality() && LHS == Val) {
      // We know that V has the RHS constant if this is a true SETEQ or
      // false SETNE.
      if (isTrueDest == (Predicate == ICmpInst::ICMP_EQ))
        return ValueLatticeElement::get(cast<Constant>(RHS));
      else
        return ValueLatticeElement::getNot(cast<Constant>(RHS));
    }
  }

  if (!Val->getType()->isIntegerTy())
    return ValueLatticeElement::getOverdefined();

  // Use ConstantRange::makeAllowedICmpRegion in order to determine the
  // possible range of Val guaranteed by the condition.  Recognize comparisons
  // in the form of:
  //   icmp <pred> Val, ...
  //   icmp <pred> (add Val, Offset), ...
  // The latter is the range checking idiom that InstCombine produces.
  // Subtract the offset from the allowed range for RHS in this case.

  // Val or (add Val, Offset) can be on either hand of the comparison
  if (LHS != Val && !match(LHS, m_Add(m_Specific(Val), m_ConstantInt()))) {
    std::swap(LHS, RHS);
    Predicate = CmpInst::getSwappedPredicate(Predicate);
  }

  ConstantInt *Offset = nullptr;
  if (LHS != Val)
    match(LHS, m_Add(m_Specific(Val), m_ConstantInt(Offset)));

  if (LHS == Val || Offset) {
    // Calculate the range of values that are allowed by the comparison
    ConstantRange RHSRange(RHS->getType()->getIntegerBitWidth(),
                           /*isFullSet=*/true);
    if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS))
      RHSRange = ConstantRange(CI->getValue());
    else if (Instruction *I = dyn_cast<Instruction>(RHS))
      if (auto *Ranges = I->getMetadata(LLVMContext::MD_range))
        RHSRange = getConstantRangeFromMetadata(*Ranges);

    // If we're interested in the false dest, invert the condition
    CmpInst::Predicate Pred =
        isTrueDest ? Predicate : CmpInst::getInversePredicate(Predicate);
    ConstantRange TrueValues =
        ConstantRange::makeAllowedICmpRegion(Pred, RHSRange);

    if (Offset) // Apply the offset from above.
      TrueValues = TrueValues.subtract(Offset->getValue());

    return ValueLatticeElement::getRange(std::move(TrueValues));
  }

  return ValueLatticeElement::getOverdefined();
}
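
// Illustrative IR (hypothetical) for the offset idiom described above:
//
//   %add = add i32 %v, -5
//   %cmp = icmp ult i32 %add, 10
//   br i1 %cmp, label %in, label %out
//   ; On the edge to %in, %add is in [0, 10); subtracting the offset -5 shows
//   ; that %v itself is in [5, 15).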

static ValueLatticeElement
getValueFromCondition(Value *Val, Value *Cond, bool isTrueDest,
                      DenseMap<Value*, ValueLatticeElement> &Visited);

static ValueLatticeElement
getValueFromConditionImpl(Value *Val, Value *Cond, bool isTrueDest,
                          DenseMap<Value*, ValueLatticeElement> &Visited) {
  if (ICmpInst *ICI = dyn_cast<ICmpInst>(Cond))
    return getValueFromICmpCondition(Val, ICI, isTrueDest);

  // Handle conditions in the form of (cond1 && cond2): we know that on the
  // true dest path both of the conditions hold.  Similarly for conditions of
  // the form (cond1 || cond2), we know that on the false dest path neither
  // condition holds.
  BinaryOperator *BO = dyn_cast<BinaryOperator>(Cond);
  if (!BO || (isTrueDest && BO->getOpcode() != BinaryOperator::And) ||
             (!isTrueDest && BO->getOpcode() != BinaryOperator::Or))
    return ValueLatticeElement::getOverdefined();

  auto RHS = getValueFromCondition(Val, BO->getOperand(0), isTrueDest, Visited);
  auto LHS = getValueFromCondition(Val, BO->getOperand(1), isTrueDest, Visited);
  return intersect(RHS, LHS);
}
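
// Illustrative IR (hypothetical): on the true edge of a branch on %and, both
// conjuncts hold, so the facts they imply about %v are intersected.
//
//   %c1 = icmp sgt i32 %v, 0
//   %c2 = icmp slt i32 %v, 100
//   %and = and i1 %c1, %c2
//   br i1 %and, label %then, label %else
//   ; On the edge to %then, %v is in the signed range [1, 100).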

static ValueLatticeElement
getValueFromCondition(Value *Val, Value *Cond, bool isTrueDest,
                      DenseMap<Value*, ValueLatticeElement> &Visited) {
  auto I = Visited.find(Cond);
  if (I != Visited.end())
    return I->second;

  auto Result = getValueFromConditionImpl(Val, Cond, isTrueDest, Visited);
  Visited[Cond] = Result;
  return Result;
}

ValueLatticeElement getValueFromCondition(Value *Val, Value *Cond,
                                          bool isTrueDest) {
  assert(Cond && "precondition");
  DenseMap<Value*, ValueLatticeElement> Visited;
  return getValueFromCondition(Val, Cond, isTrueDest, Visited);
}

// Return true if Usr has Op as an operand, otherwise false.
static bool usesOperand(User *Usr, Value *Op) {
  return find(Usr->operands(), Op) != Usr->op_end();
}

// Return true if the instruction type of Val is supported by
// constantFoldUser().  Currently CastInst and BinaryOperator only.  Call this
// before calling constantFoldUser() to find out if it's even worth attempting
// to call it.
static bool isOperationFoldable(User *Usr) {
  return isa<CastInst>(Usr) || isa<BinaryOperator>(Usr);
}

// Check if Usr can be simplified to an integer constant when the value of one
// of its operands Op is an integer constant OpConstVal.  If so, return it as a
// lattice value range with a single element; otherwise return an overdefined
// lattice value.
static ValueLatticeElement constantFoldUser(User *Usr, Value *Op,
                                            const APInt &OpConstVal,
                                            const DataLayout &DL) {
  assert(isOperationFoldable(Usr) && "Precondition");
  Constant* OpConst = Constant::getIntegerValue(Op->getType(), OpConstVal);
  // Check if Usr can be simplified to a constant.
  if (auto *CI = dyn_cast<CastInst>(Usr)) {
    assert(CI->getOperand(0) == Op && "Operand 0 isn't Op");
    if (auto *C = dyn_cast_or_null<ConstantInt>(
            SimplifyCastInst(CI->getOpcode(), OpConst,
                             CI->getDestTy(), DL))) {
      return ValueLatticeElement::getRange(ConstantRange(C->getValue()));
    }
  } else if (auto *BO = dyn_cast<BinaryOperator>(Usr)) {
    bool Op0Match = BO->getOperand(0) == Op;
    bool Op1Match = BO->getOperand(1) == Op;
    assert((Op0Match || Op1Match) &&
           "Operand 0 nor Operand 1 isn't a match");
    Value *LHS = Op0Match ? OpConst : BO->getOperand(0);
    Value *RHS = Op1Match ? OpConst : BO->getOperand(1);
    if (auto *C = dyn_cast_or_null<ConstantInt>(
            SimplifyBinOp(BO->getOpcode(), LHS, RHS, DL))) {
      return ValueLatticeElement::getRange(ConstantRange(C->getValue()));
    }
  }
  return ValueLatticeElement::getOverdefined();
}
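
// Minimal usage sketch (hypothetical IR): folding a user of a branch/switch
// condition once the condition's value on an edge is known.
//
//   %val = add i8 %op, 1
//   ; constantFoldUser(%val, %op, APInt(8, 93), DL) simplifies "add i8 93, 1"
//   ; and returns the single-element range {94}.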

/// \brief Compute the value of Val on the edge BBFrom -> BBTo.  Returns false
/// if Val is not constrained on the edge.  Result is unspecified if return
/// value is false.
static bool getEdgeValueLocal(Value *Val, BasicBlock *BBFrom,
                              BasicBlock *BBTo, ValueLatticeElement &Result) {
  // TODO: Handle more complex conditionals. If (v == 0 || v2 < 1) is false, we
  // know that v != 0.
  if (BranchInst *BI = dyn_cast<BranchInst>(BBFrom->getTerminator())) {
    // If this is a conditional branch and only one successor goes to BBTo,
    // then we may be able to infer something from the condition.
    if (BI->isConditional() &&
        BI->getSuccessor(0) != BI->getSuccessor(1)) {
      bool isTrueDest = BI->getSuccessor(0) == BBTo;
      assert(BI->getSuccessor(!isTrueDest) == BBTo &&
             "BBTo isn't a successor of BBFrom");
      Value *Condition = BI->getCondition();

      // If V is the condition of the branch itself, then we know exactly what
      // it is.
      if (Condition == Val) {
        Result = ValueLatticeElement::get(ConstantInt::get(
                              Type::getInt1Ty(Val->getContext()), isTrueDest));
        return true;
      }

      // If the condition of the branch is an equality comparison, we may be
      // able to infer the value.
      Result = getValueFromCondition(Val, Condition, isTrueDest);
      if (!Result.isOverdefined())
        return true;

      if (User *Usr = dyn_cast<User>(Val)) {
        assert(Result.isOverdefined() && "Result isn't overdefined");
        // Check with isOperationFoldable() first to avoid linearly iterating
        // over the operands unnecessarily which can be expensive for
        // instructions with many operands.
        if (isa<IntegerType>(Usr->getType()) && isOperationFoldable(Usr)) {
          const DataLayout &DL = BBTo->getModule()->getDataLayout();
          if (usesOperand(Usr, Condition)) {
            // If Val has Condition as an operand and Val can be folded into a
            // constant with either Condition == true or Condition == false,
            // propagate the constant.
            // eg.
            //   ; %Val is true on the edge to %then.
            //   %Val = and i1 %Condition, true.
            //   br %Condition, label %then, label %else
            APInt ConditionVal(1, isTrueDest ? 1 : 0);
            Result = constantFoldUser(Usr, Condition, ConditionVal, DL);
          } else {
            // If one of Val's operands has an inferred value, we may be able
            // to infer the value of Val.
            // eg.
            //    ; %Val is 94 on the edge to %then.
            //    %Val = add i8 %Op, 1
            //    %Condition = icmp eq i8 %Op, 93
            //    br i1 %Condition, label %then, label %else
            for (unsigned i = 0; i < Usr->getNumOperands(); ++i) {
              Value *Op = Usr->getOperand(i);
              ValueLatticeElement OpLatticeVal =
                  getValueFromCondition(Op, Condition, isTrueDest);
              if (Optional<APInt> OpConst = OpLatticeVal.asConstantInteger()) {
                Result = constantFoldUser(Usr, Op, OpConst.getValue(), DL);
                break;
              }
            }
          }
        }
      }
      if (!Result.isOverdefined())
        return true;
    }
  }

  // If the edge was formed by a switch on the value, then we may know exactly
  // what it is.
  if (SwitchInst *SI = dyn_cast<SwitchInst>(BBFrom->getTerminator())) {
    Value *Condition = SI->getCondition();
    if (!isa<IntegerType>(Val->getType()))
      return false;
    bool ValUsesConditionAndMayBeFoldable = false;
    if (Condition != Val) {
      // Check if Val has Condition as an operand.
      if (User *Usr = dyn_cast<User>(Val))
        ValUsesConditionAndMayBeFoldable = isOperationFoldable(Usr) &&
            usesOperand(Usr, Condition);
      if (!ValUsesConditionAndMayBeFoldable)
        return false;
    }
    assert((Condition == Val || ValUsesConditionAndMayBeFoldable) &&
           "Condition != Val nor Val doesn't use Condition");

    bool DefaultCase = SI->getDefaultDest() == BBTo;
    unsigned BitWidth = Val->getType()->getIntegerBitWidth();
    ConstantRange EdgesVals(BitWidth, DefaultCase/*isFullSet*/);

    for (auto Case : SI->cases()) {
      APInt CaseValue = Case.getCaseValue()->getValue();
      ConstantRange EdgeVal(CaseValue);
      if (ValUsesConditionAndMayBeFoldable) {
        User *Usr = cast<User>(Val);
        const DataLayout &DL = BBTo->getModule()->getDataLayout();
        ValueLatticeElement EdgeLatticeVal =
            constantFoldUser(Usr, Condition, CaseValue, DL);
        if (EdgeLatticeVal.isOverdefined())
          return false;
        EdgeVal = EdgeLatticeVal.getConstantRange();
      }
      if (DefaultCase) {
        // It is possible that the default destination is the destination of
        // some cases. We cannot perform difference for those cases.
        // We know Condition != CaseValue in BBTo.  In some cases we can use
        // this to infer Val == f(Condition) is != f(CaseValue).  For now, we
        // only do this when f is identity (i.e. Val == Condition), but we
        // should be able to do this for any injective f.
        if (Case.getCaseSuccessor() != BBTo && Condition == Val)
          EdgesVals = EdgesVals.difference(EdgeVal);
      } else if (Case.getCaseSuccessor() == BBTo)
        EdgesVals = EdgesVals.unionWith(EdgeVal);
    }
    Result = ValueLatticeElement::getRange(std::move(EdgesVals));
    return true;
  }
  return false;
}
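
// Illustrative IR (hypothetical) for the switch handling above:
//
//   switch i32 %v, label %default [ i32 1, label %one
//                                   i32 3, label %one ]
//   ; On the edge to %one, %v is in {1} u {3}; on the edge to %default,
//   ; %v is the full i32 set with {1, 3} removed.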

/// \brief Compute the value of Val on the edge BBFrom -> BBTo, or the value at
/// the basic block if the edge does not constrain Val.
bool LazyValueInfoImpl::getEdgeValue(Value *Val, BasicBlock *BBFrom,
                                     BasicBlock *BBTo,
                                     ValueLatticeElement &Result,
                                     Instruction *CxtI) {
  // If already a constant, there is nothing to compute.
  if (Constant *VC = dyn_cast<Constant>(Val)) {
    Result = ValueLatticeElement::get(VC);
    return true;
  }

  ValueLatticeElement LocalResult;
  if (!getEdgeValueLocal(Val, BBFrom, BBTo, LocalResult))
    // If we couldn't constrain the value on the edge, LocalResult doesn't
    // provide any information.
    LocalResult = ValueLatticeElement::getOverdefined();

  if (hasSingleValue(LocalResult)) {
    // Can't get any more precise here
    Result = LocalResult;
    return true;
  }

  if (!hasBlockValue(Val, BBFrom)) {
    if (pushBlockValue(std::make_pair(BBFrom, Val)))
      return false;
    // No new information.
    Result = LocalResult;
    return true;
  }

  // Try to intersect ranges of the BB and the constraint on the edge.
  ValueLatticeElement InBlock = getBlockValue(Val, BBFrom);
  intersectAssumeOrGuardBlockValueConstantRange(Val, InBlock,
                                                BBFrom->getTerminator());
  // We can use the context instruction (generically the ultimate instruction
  // the calling pass is trying to simplify) here, even though the result of
  // this function is generally cached when called from the solve* functions
  // (and that cached result might be used with queries using a different
  // context instruction), because when this function is called from the solve*
  // functions, the context instruction is not provided.  When called from
  // LazyValueInfoImpl::getValueOnEdge, the context instruction is provided,
  // but then the result is not cached.
  intersectAssumeOrGuardBlockValueConstantRange(Val, InBlock, CxtI);

  Result = intersect(LocalResult, InBlock);
  return true;
}

ValueLatticeElement LazyValueInfoImpl::getValueInBlock(Value *V, BasicBlock *BB,
                                                       Instruction *CxtI) {
  DEBUG(dbgs() << "LVI Getting block end value " << *V << " at '"
               << BB->getName() << "'\n");

  assert(BlockValueStack.empty() && BlockValueSet.empty());
  if (!hasBlockValue(V, BB)) {
    pushBlockValue(std::make_pair(BB, V));
    solve();
  }
  ValueLatticeElement Result = getBlockValue(V, BB);
  intersectAssumeOrGuardBlockValueConstantRange(V, Result, CxtI);

  DEBUG(dbgs() << "  Result = " << Result << "\n");
  return Result;
}

ValueLatticeElement LazyValueInfoImpl::getValueAt(Value *V, Instruction *CxtI) {
  DEBUG(dbgs() << "LVI Getting value " << *V << " at '"
               << CxtI->getName() << "'\n");

  if (auto *C = dyn_cast<Constant>(V))
    return ValueLatticeElement::get(C);

  ValueLatticeElement Result = ValueLatticeElement::getOverdefined();
  if (auto *I = dyn_cast<Instruction>(V))
    Result = getFromRangeMetadata(I);
  intersectAssumeOrGuardBlockValueConstantRange(V, Result, CxtI);

  DEBUG(dbgs() << "  Result = " << Result << "\n");
  return Result;
}

ValueLatticeElement LazyValueInfoImpl::
getValueOnEdge(Value *V, BasicBlock *FromBB, BasicBlock *ToBB,
               Instruction *CxtI) {
  DEBUG(dbgs() << "LVI Getting edge value " << *V << " from '"
               << FromBB->getName() << "' to '" << ToBB->getName() << "'\n");

  ValueLatticeElement Result;
  if (!getEdgeValue(V, FromBB, ToBB, Result, CxtI)) {
    solve();
    bool WasFastQuery = getEdgeValue(V, FromBB, ToBB, Result, CxtI);
    (void)WasFastQuery;
    assert(WasFastQuery && "More work to do after problem solved?");
  }

  DEBUG(dbgs() << "  Result = " << Result << "\n");
  return Result;
}

void LazyValueInfoImpl::threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc,
                                   BasicBlock *NewSucc) {
  TheCache.threadEdgeImpl(OldSucc, NewSucc);
}

//===----------------------------------------------------------------------===//
//                            LazyValueInfo Impl
//===----------------------------------------------------------------------===//

/// This lazily constructs the LazyValueInfoImpl.
static LazyValueInfoImpl &getImpl(void *&PImpl, AssumptionCache *AC,
                                  const DataLayout *DL,
                                  DominatorTree *DT = nullptr) {
  if (!PImpl) {
    assert(DL && "getCache() called with a null DataLayout");
    PImpl = new LazyValueInfoImpl(AC, *DL, DT);
  }
  return *static_cast<LazyValueInfoImpl*>(PImpl);
}

bool LazyValueInfoWrapperPass::runOnFunction(Function &F) {
  Info.AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  const DataLayout &DL = F.getParent()->getDataLayout();

  DominatorTreeWrapperPass *DTWP =
      getAnalysisIfAvailable<DominatorTreeWrapperPass>();
  Info.DT = DTWP ? &DTWP->getDomTree() : nullptr;
  Info.TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();

  if (Info.PImpl)
    getImpl(Info.PImpl, Info.AC, &DL, Info.DT).clear();

  // Fully lazy.
  return false;
}

void LazyValueInfoWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequired<AssumptionCacheTracker>();
  AU.addRequired<TargetLibraryInfoWrapperPass>();
}

LazyValueInfo &LazyValueInfoWrapperPass::getLVI() { return Info; }

LazyValueInfo::~LazyValueInfo() { releaseMemory(); }

void LazyValueInfo::releaseMemory() {
  // If the cache was allocated, free it.
  if (PImpl) {
    delete &getImpl(PImpl, AC, nullptr);
    PImpl = nullptr;
  }
}

bool LazyValueInfo::invalidate(Function &F, const PreservedAnalyses &PA,
                               FunctionAnalysisManager::Invalidator &Inv) {
  // We need to invalidate if we have either failed to preserve this analysis's
  // result directly or if any of its dependencies have been invalidated.
  auto PAC = PA.getChecker<LazyValueAnalysis>();
  if (!(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) ||
      (DT && Inv.invalidate<DominatorTreeAnalysis>(F, PA)))
    return true;

  return false;
}

void LazyValueInfoWrapperPass::releaseMemory() { Info.releaseMemory(); }

LazyValueInfo LazyValueAnalysis::run(Function &F,
                                     FunctionAnalysisManager &FAM) {
  auto &AC = FAM.getResult<AssumptionAnalysis>(F);
  auto &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
  auto *DT = FAM.getCachedResult<DominatorTreeAnalysis>(F);

  return LazyValueInfo(&AC, &F.getParent()->getDataLayout(), &TLI, DT);
}
1502 /// Returns true if we can statically tell that this value will never be a
1503 /// "useful" constant. In practice, this means we've got something like an
1504 /// alloca or a malloc call for which a comparison against a constant can
1505 /// only be guarding dead code. Note that we are potentially giving up some
1506 /// precision in dead code (a constant result) in favor of avoiding an
1507 /// expensive search for an easily answered common query.
1508 static bool isKnownNonConstant(Value *V) {
1509 V = V->stripPointerCasts();
1510 // The return value of an alloca cannot be a Constant.
1511 if (isa<AllocaInst>(V))
1512 return true;
1513 return false;
1514 }
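// Illustrative example (not part of the original source): for IR such as
//   %p = alloca i32
//   %c = icmp eq i32* %p, null
// the address %p can never fold to a useful Constant, so getConstant() below
// uses this check to bail out before doing any lattice computation.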
1516 Constant *LazyValueInfo::getConstant(Value *V, BasicBlock *BB,
1517 Instruction *CxtI) {
1518 // Bail out early if V is known not to be a Constant.
1519 if (isKnownNonConstant(V))
1520 return nullptr;
1522 const DataLayout &DL = BB->getModule()->getDataLayout();
1523 ValueLatticeElement Result =
1524 getImpl(PImpl, AC, &DL, DT).getValueInBlock(V, BB, CxtI);
1526 if (Result.isConstant())
1527 return Result.getConstant();
1528 if (Result.isConstantRange()) {
1529 const ConstantRange &CR = Result.getConstantRange();
1530 if (const APInt *SingleVal = CR.getSingleElement())
1531 return ConstantInt::get(V->getContext(), *SingleVal);
1532 }
1533 return nullptr;
1534 }
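// Usage sketch (illustrative; `LVI`, `V` and `I` are assumed client variables):
//   if (Constant *C = LVI->getConstant(V, I->getParent(), I)) {
//     // V is known to be exactly C at I; a client may fold this use of V.
//   }
// The single-element constant-range case above is what makes this succeed for
// integers proven to a single value, e.g. constantrange<7, 8> yields i32 7.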
1536 ConstantRange LazyValueInfo::getConstantRange(Value *V, BasicBlock *BB,
1537 Instruction *CxtI) {
1538 assert(V->getType()->isIntegerTy());
1539 unsigned Width = V->getType()->getIntegerBitWidth();
1540 const DataLayout &DL = BB->getModule()->getDataLayout();
1541 ValueLatticeElement Result =
1542 getImpl(PImpl, AC, &DL, DT).getValueInBlock(V, BB, CxtI);
1543 if (Result.isUndefined())
1544 return ConstantRange(Width, /*isFullSet=*/false);
1545 if (Result.isConstantRange())
1546 return Result.getConstantRange();
1547 // We represent ConstantInt constants as constant ranges, but other kinds
1548 // of integer constants, i.e. ConstantExpr, will be tagged as constants.
1549 assert(!(Result.isConstant() && isa<ConstantInt>(Result.getConstant())) &&
1550 "ConstantInt value must be represented as constantrange");
1551 return ConstantRange(Width, /*isFullSet=*/true);
1552 }
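// Worked example (illustrative): if LVI has proven an i8 value to be exactly 7,
// the result is the single-element range [7, 8); if nothing useful is known,
// the full 8-bit range is returned. The empty range only arises for values in
// unreachable code, where the lattice element is still undefined.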
1554 /// Determine whether the specified value is known to be a
1555 /// constant on the specified edge. Return null if not.
1556 Constant *LazyValueInfo::getConstantOnEdge(Value *V, BasicBlock *FromBB,
1557 BasicBlock *ToBB,
1558 Instruction *CxtI) {
1559 const DataLayout &DL = FromBB->getModule()->getDataLayout();
1560 ValueLatticeElement Result =
1561 getImpl(PImpl, AC, &DL, DT).getValueOnEdge(V, FromBB, ToBB, CxtI);
1563 if (Result.isConstant())
1564 return Result.getConstant();
1565 if (Result.isConstantRange()) {
1566 const ConstantRange &CR = Result.getConstantRange();
1567 if (const APInt *SingleVal = CR.getSingleElement())
1568 return ConstantInt::get(V->getContext(), *SingleVal);
1569 }
1570 return nullptr;
1571 }
1573 ConstantRange LazyValueInfo::getConstantRangeOnEdge(Value *V,
1574 BasicBlock *FromBB,
1575 BasicBlock *ToBB,
1576 Instruction *CxtI) {
1577 unsigned Width = V->getType()->getIntegerBitWidth();
1578 const DataLayout &DL = FromBB->getModule()->getDataLayout();
1579 ValueLatticeElement Result =
1580 getImpl(PImpl, AC, &DL, DT).getValueOnEdge(V, FromBB, ToBB, CxtI);
1582 if (Result.isUndefined())
1583 return ConstantRange(Width, /*isFullSet=*/false);
1584 if (Result.isConstantRange())
1585 return Result.getConstantRange();
1586 // We represent ConstantInt constants as constant ranges, but other kinds
1587 // of integer constants, i.e. ConstantExpr, will be tagged as constants.
1588 assert(!(Result.isConstant() && isa<ConstantInt>(Result.getConstant())) &&
1589 "ConstantInt value must be represented as constantrange");
1590 return ConstantRange(Width, /*isFullSet=*/true);
1591 }
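// Worked example (illustrative): given
//   entry:
//     %cmp = icmp ult i32 %x, 10
//     br i1 %cmp, label %then, label %else
// querying getConstantRangeOnEdge(%x, entry, then) yields [0, 10), while the
// edge to %else yields the wrapped range [10, 0), i.e. %x u>= 10, because the
// branch condition constrains %x differently along each outgoing edge.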
1593 static LazyValueInfo::Tristate
1594 getPredicateResult(unsigned Pred, Constant *C, const ValueLatticeElement &Val,
1595 const DataLayout &DL, TargetLibraryInfo *TLI) {
1596 // If we know the value is a constant, evaluate the conditional.
1597 Constant *Res = nullptr;
1598 if (Val.isConstant()) {
1599 Res = ConstantFoldCompareInstOperands(Pred, Val.getConstant(), C, DL, TLI);
1600 if (ConstantInt *ResCI = dyn_cast<ConstantInt>(Res))
1601 return ResCI->isZero() ? LazyValueInfo::False : LazyValueInfo::True;
1602 return LazyValueInfo::Unknown;
1603 }
1605 if (Val.isConstantRange()) {
1606 ConstantInt *CI = dyn_cast<ConstantInt>(C);
1607 if (!CI) return LazyValueInfo::Unknown;
1609 const ConstantRange &CR = Val.getConstantRange();
1610 if (Pred == ICmpInst::ICMP_EQ) {
1611 if (!CR.contains(CI->getValue()))
1612 return LazyValueInfo::False;
1614 if (CR.isSingleElement())
1615 return LazyValueInfo::True;
1616 } else if (Pred == ICmpInst::ICMP_NE) {
1617 if (!CR.contains(CI->getValue()))
1618 return LazyValueInfo::True;
1620 if (CR.isSingleElement())
1621 return LazyValueInfo::False;
1622 } else {
1623 // Handle more complex predicates.
1624 ConstantRange TrueValues = ConstantRange::makeExactICmpRegion(
1625 (ICmpInst::Predicate)Pred, CI->getValue());
1626 if (TrueValues.contains(CR))
1627 return LazyValueInfo::True;
1628 if (TrueValues.inverse().contains(CR))
1629 return LazyValueInfo::False;
1630 }
1631 return LazyValueInfo::Unknown;
1632 }
1634 if (Val.isNotConstant()) {
1635 // If this is an equality comparison, we can try to fold it knowing that
1636 // "V != C1".
1637 if (Pred == ICmpInst::ICMP_EQ) {
1638 // !C1 == C -> false iff C1 == C.
1639 Res = ConstantFoldCompareInstOperands(ICmpInst::ICMP_NE,
1640 Val.getNotConstant(), C, DL,
1641 TLI);
1642 if (Res->isNullValue())
1643 return LazyValueInfo::False;
1644 } else if (Pred == ICmpInst::ICMP_NE) {
1645 // !C1 != C -> true iff C1 == C.
1646 Res = ConstantFoldCompareInstOperands(ICmpInst::ICMP_NE,
1647 Val.getNotConstant(), C, DL,
1648 TLI);
1649 if (Res->isNullValue())
1650 return LazyValueInfo::True;
1651 }
1652 return LazyValueInfo::Unknown;
1653 }
1655 return LazyValueInfo::Unknown;
1656 }
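// Worked example (illustrative): with Val = constantrange<1, 5>, Pred = ult and
// C = i32 10, makeExactICmpRegion(ult, 10) is [0, 10), which contains all of
// [1, 5), so the result is True. With C = i32 3 the region [0, 3) does not
// contain [1, 5) and neither does its inverse [3, 0), so the result is Unknown.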
1658 /// Determine whether the specified value comparison with a constant is known to
1659 /// be true or false on the specified CFG edge. Pred is a CmpInst predicate.
1660 LazyValueInfo::Tristate
1661 LazyValueInfo::getPredicateOnEdge(unsigned Pred, Value *V, Constant *C,
1662 BasicBlock *FromBB, BasicBlock *ToBB,
1663 Instruction *CxtI) {
1664 const DataLayout &DL = FromBB->getModule()->getDataLayout();
1665 ValueLatticeElement Result =
1666 getImpl(PImpl, AC, &DL, DT).getValueOnEdge(V, FromBB, ToBB, CxtI);
1668 return getPredicateResult(Pred, C, Result, DL, TLI);
1669 }
1671 LazyValueInfo::Tristate
1672 LazyValueInfo::getPredicateAt(unsigned Pred, Value *V, Constant *C,
1673 Instruction *CxtI) {
1674 // Whether a pointer is or is not null is a commonly queried predicate. If
1675 // isKnownNonZero can tell us the result of the predicate, we can
1676 // return it quickly. But this is only a fast path, and falling
1677 // through would still be correct.
1678 const DataLayout &DL = CxtI->getModule()->getDataLayout();
1679 if (V->getType()->isPointerTy() && C->isNullValue() &&
1680 isKnownNonZero(V->stripPointerCasts(), DL)) {
1681 if (Pred == ICmpInst::ICMP_EQ)
1682 return LazyValueInfo::False;
1683 else if (Pred == ICmpInst::ICMP_NE)
1684 return LazyValueInfo::True;
1685 }
1686 ValueLatticeElement Result = getImpl(PImpl, AC, &DL, DT).getValueAt(V, CxtI);
1687 Tristate Ret = getPredicateResult(Pred, C, Result, DL, TLI);
1688 if (Ret != Unknown)
1689 return Ret;
1691 // Note: The following bit of code is somewhat distinct from the rest of LVI;
1692 // LVI as a whole tries to compute a lattice value which is conservatively
1693 // correct at a given location. In this case, we have a predicate which we
1694 // weren't able to prove about the merged result, and we're pushing that
1695 // predicate back along each incoming edge to see if we can prove it
1696 // separately for each input. As a motivating example, consider:
1697 // bb1:
1698 // %v1 = ... ; constantrange<1, 5>
1699 // br label %merge
1700 // bb2:
1701 // %v2 = ... ; constantrange<10, 20>
1702 // br label %merge
1703 // merge:
1704 // %phi = phi [%v1, %v2] ; constantrange<1,20>
1705 // %pred = icmp eq i32 %phi, 8
1706 // We can't tell from the lattice value for '%phi' that '%pred' is false
1707 // along each path, but by checking the predicate over each input separately,
1708 // we can.
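// Concretely, getPredicateOnEdge folds 'icmp eq %v1, 8' to false on the edge
// from bb1 (8 lies outside [1, 5)) and 'icmp eq %v2, 8' to false on the edge
// from bb2 (8 lies outside [10, 20)); both inputs agree, so we return False.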
1709 // We limit the search to one step backwards from the current BB and value.
1710 // We could consider extending this to search further backwards through the
1711 // CFG and/or value graph, but there are non-obvious compile time vs quality
1712 // tradeoffs.
1713 if (CxtI) {
1714 BasicBlock *BB = CxtI->getParent();
1716 // Function entry or an unreachable block. Bail to avoid confusing
1717 // analysis below.
1718 pred_iterator PI = pred_begin(BB), PE = pred_end(BB);
1719 if (PI == PE)
1720 return Unknown;
1722 // If V is a PHI node in the same block as the context, we need to ask
1723 // questions about the predicate as applied to the incoming value along
1724 // each edge. This is useful for eliminating cases where the predicate is
1725 // known along all incoming edges.
1726 if (auto *PHI = dyn_cast<PHINode>(V))
1727 if (PHI->getParent() == BB) {
1728 Tristate Baseline = Unknown;
1729 for (unsigned i = 0, e = PHI->getNumIncomingValues(); i < e; i++) {
1730 Value *Incoming = PHI->getIncomingValue(i);
1731 BasicBlock *PredBB = PHI->getIncomingBlock(i);
1732 // Note that PredBB may be BB itself.
1733 Tristate Result = getPredicateOnEdge(Pred, Incoming, C, PredBB, BB,
1734 CxtI);
1736 // Keep going as long as we've seen a consistent known result for
1737 // all inputs.
1738 Baseline = (i == 0) ? Result /* First iteration */
1739 : (Baseline == Result ? Baseline : Unknown); /* All others */
1740 if (Baseline == Unknown)
1741 break;
1742 }
1743 if (Baseline != Unknown)
1744 return Baseline;
1745 }
1747 // For a comparison where the V is outside this block, it's possible
1748 // that we've branched on it before. Look to see if the value is known
1749 // on all incoming edges.
1750 if (!isa<Instruction>(V) ||
1751 cast<Instruction>(V)->getParent() != BB) {
1752 // For predecessor edge, determine if the comparison is true or false
1753 // on that edge. If they're all true or all false, we can conclude
1754 // the value of the comparison in this block.
1755 Tristate Baseline = getPredicateOnEdge(Pred, V, C, *PI, BB, CxtI);
1756 if (Baseline != Unknown) {
1757 // Check that all remaining incoming values match the first one.
1758 while (++PI != PE) {
1759 Tristate Ret = getPredicateOnEdge(Pred, V, C, *PI, BB, CxtI);
1760 if (Ret != Baseline) break;
1761 }
1762 // If we terminated early, then one of the values didn't match.
1763 if (PI == PE) {
1764 return Baseline;
1765 }
1766 }
1767 }
1768 }
1769 return Unknown;
1770 }
1772 void LazyValueInfo::threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc,
1773 BasicBlock *NewSucc) {
1774 if (PImpl) {
1775 const DataLayout &DL = PredBB->getModule()->getDataLayout();
1776 getImpl(PImpl, AC, &DL, DT).threadEdge(PredBB, OldSucc, NewSucc);
1777 }
1778 }
1780 void LazyValueInfo::eraseBlock(BasicBlock *BB) {
1781 if (PImpl) {
1782 const DataLayout &DL = BB->getModule()->getDataLayout();
1783 getImpl(PImpl, AC, &DL, DT).eraseBlock(BB);
1784 }
1785 }
1788 void LazyValueInfo::printLVI(Function &F, DominatorTree &DTree, raw_ostream &OS) {
1789 if (PImpl) {
1790 getImpl(PImpl, AC, DL, DT).printLVI(F, DTree, OS);
1791 }
1792 }
1794 // Print the LVI for the function arguments at the start of each basic block.
1795 void LazyValueInfoAnnotatedWriter::emitBasicBlockStartAnnot(
1796 const BasicBlock *BB, formatted_raw_ostream &OS) {
1797 // Find out if there are lattice values defined for the arguments of the function.
1798 auto *F = BB->getParent();
1799 for (auto &Arg : F->args()) {
1800 ValueLatticeElement Result = LVIImpl->getValueInBlock(
1801 const_cast<Argument *>(&Arg), const_cast<BasicBlock *>(BB));
1802 if (Result.isUndefined())
1803 continue;
1804 OS << "; LatticeVal for: '" << Arg << "' is: " << Result << "\n";
1808 // This function prints the LVI analysis for the instruction I at the beginning
1809 // of various basic blocks. It relies on calculated values that are stored in
1810 // the LazyValueInfoCache, and in the absence of cached values, recalculates the
1811 // LazyValueInfo for `I` and prints that info.
1812 void LazyValueInfoAnnotatedWriter::emitInstructionAnnot(
1813 const Instruction *I, formatted_raw_ostream &OS) {
1815 auto *ParentBB = I->getParent();
1816 SmallPtrSet<const BasicBlock*, 16> BlocksContainingLVI;
1817 // We can generate (solve) LVI values only for blocks that are dominated by
1818 // `I`'s parent. However, to avoid generating LVI for all dominated blocks,
1819 // which contain redundant/uninteresting information, we only print LVI for
1820 // blocks that may use this LVI information (such as immediate successor
1821 // blocks, and blocks that contain uses of `I`).
1822 auto printResult = [&](const BasicBlock *BB) {
1823 if (!BlocksContainingLVI.insert(BB).second)
1824 return;
1825 ValueLatticeElement Result = LVIImpl->getValueInBlock(
1826 const_cast<Instruction *>(I), const_cast<BasicBlock *>(BB));
1827 OS << "; LatticeVal for: '" << *I << "' in BB: '";
1828 BB->printAsOperand(OS, false);
1829 OS << "' is: " << Result << "\n";
1832 printResult(ParentBB);
1833 // Print the LVI analysis results for the immediate successor blocks that
1834 // are dominated by `ParentBB`.
1835 for (auto *BBSucc : successors(ParentBB))
1836 if (DT.dominates(ParentBB, BBSucc))
1837 printResult(BBSucc);
1839 // Print LVI in blocks where `I` is used.
1840 for (auto *U : I->users())
1841 if (auto *UseI = dyn_cast<Instruction>(U))
1842 if (!isa<PHINode>(UseI) || DT.dominates(ParentBB, UseI->getParent()))
1843 printResult(UseI->getParent());
1844 }
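// Example of the emitted annotations (illustrative output only; the actual
// lattice values depend on what the cache has already computed):
//   ; LatticeVal for: '  %add = add i32 %a, 1' in BB: '%entry' is: overdefined
//   ; LatticeVal for: '  %add = add i32 %a, 1' in BB: '%then' is: constantrange<2, 11>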
1847 namespace {
1848 // Printer class for LazyValueInfo results.
1849 class LazyValueInfoPrinter : public FunctionPass {
1850 public:
1851 static char ID; // Pass identification, replacement for typeid
1852 LazyValueInfoPrinter() : FunctionPass(ID) {
1853 initializeLazyValueInfoPrinterPass(*PassRegistry::getPassRegistry());
1854 }
1856 void getAnalysisUsage(AnalysisUsage &AU) const override {
1857 AU.setPreservesAll();
1858 AU.addRequired<LazyValueInfoWrapperPass>();
1859 AU.addRequired<DominatorTreeWrapperPass>();
1860 }
1862 // Get the mandatory dominator tree analysis and pass this in to the
1863 // LVIPrinter. We cannot rely on the LVI's DT, since it's optional.
1864 bool runOnFunction(Function &F) override {
1865 dbgs() << "LVI for function '" << F.getName() << "':\n";
1866 auto &LVI = getAnalysis<LazyValueInfoWrapperPass>().getLVI();
1867 auto &DTree = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
1868 LVI.printLVI(F, DTree, dbgs());
1869 return false;
1870 }
1871 };
1872 }
1874 char LazyValueInfoPrinter::ID = 0;
1875 INITIALIZE_PASS_BEGIN(LazyValueInfoPrinter, "print-lazy-value-info",
1876 "Lazy Value Info Printer Pass", false, false)
1877 INITIALIZE_PASS_DEPENDENCY(LazyValueInfoWrapperPass)
1878 INITIALIZE_PASS_END(LazyValueInfoPrinter, "print-lazy-value-info",
1879 "Lazy Value Info Printer Pass", false, false)