//===- MemoryDependenceAnalysis.cpp - Mem Deps Implementation --*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements an analysis that determines, for a given memory
// operation, what preceding memory operations it depends on.  It builds on
// alias analysis information, and tries to provide a lazy, caching interface
// to a common kind of alias information query.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "memdep"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Function.h"
#include "llvm/LLVMContext.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/PredIteratorCache.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetData.h"
using namespace llvm;

STATISTIC(NumCacheNonLocal, "Number of fully cached non-local responses");
STATISTIC(NumCacheDirtyNonLocal, "Number of dirty cached non-local responses");
STATISTIC(NumUncacheNonLocal, "Number of uncached non-local responses");

STATISTIC(NumCacheNonLocalPtr,
          "Number of fully cached non-local ptr responses");
STATISTIC(NumCacheDirtyNonLocalPtr,
          "Number of cached, but dirty, non-local ptr responses");
STATISTIC(NumUncacheNonLocalPtr,
          "Number of uncached non-local ptr responses");
STATISTIC(NumCacheCompleteNonLocalPtr,
          "Number of block queries that were completely cached");

char MemoryDependenceAnalysis::ID = 0;

// Register this pass...
INITIALIZE_PASS_BEGIN(MemoryDependenceAnalysis, "memdep",
                      "Memory Dependence Analysis", false, true)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(MemoryDependenceAnalysis, "memdep",
                    "Memory Dependence Analysis", false, true)

MemoryDependenceAnalysis::MemoryDependenceAnalysis()
  : FunctionPass(ID), PredCache(0) {
  initializeMemoryDependenceAnalysisPass(*PassRegistry::getPassRegistry());
}
MemoryDependenceAnalysis::~MemoryDependenceAnalysis() {
}

/// Clean up memory in between runs
void MemoryDependenceAnalysis::releaseMemory() {
  LocalDeps.clear();
  NonLocalDeps.clear();
  NonLocalPointerDeps.clear();
  ReverseLocalDeps.clear();
  ReverseNonLocalDeps.clear();
  ReverseNonLocalPtrDeps.clear();
  PredCache->clear();
}

/// getAnalysisUsage - Does not modify anything.  It uses Alias Analysis.
///
void MemoryDependenceAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<AliasAnalysis>();
}

bool MemoryDependenceAnalysis::runOnFunction(Function &) {
  AA = &getAnalysis<AliasAnalysis>();
  TD = getAnalysisIfAvailable<TargetData>();
  if (PredCache == 0)
    PredCache.reset(new PredIteratorCache());
  return false;
}

/// RemoveFromReverseMap - This is a helper function that removes Val from
/// 'Inst's set in ReverseMap.  If the set becomes empty, remove Inst's entry.
template <typename KeyTy>
static void RemoveFromReverseMap(DenseMap<Instruction*,
                                 SmallPtrSet<KeyTy, 4> > &ReverseMap,
                                 Instruction *Inst, KeyTy Val) {
  typename DenseMap<Instruction*, SmallPtrSet<KeyTy, 4> >::iterator
  InstIt = ReverseMap.find(Inst);
  assert(InstIt != ReverseMap.end() && "Reverse map out of sync?");
  bool Found = InstIt->second.erase(Val);
  assert(Found && "Invalid reverse map!"); (void)Found;
  if (InstIt->second.empty())
    ReverseMap.erase(InstIt);
}
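
// For example, if ReverseLocalDeps maps a store S to the set {L1, L2} (two
// loads whose cached results point at S), then calling
// RemoveFromReverseMap(ReverseLocalDeps, S, L1) shrinks the set to {L2}, and
// a second call for L2 erases S's entry from the map entirely.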

/// GetLocation - If the given instruction references a specific memory
/// location, fill in Loc with the details, otherwise set Loc.Ptr to null.
/// Return a ModRefInfo value describing the general behavior of the
/// instruction.
static
AliasAnalysis::ModRefResult GetLocation(const Instruction *Inst,
                                        AliasAnalysis::Location &Loc,
                                        AliasAnalysis *AA) {
  if (const LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
    if (LI->isVolatile()) {
      Loc = AliasAnalysis::Location();
      return AliasAnalysis::ModRef;
    }
    Loc = AA->getLocation(LI);
    return AliasAnalysis::Ref;
  }

  if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    if (SI->isVolatile()) {
      Loc = AliasAnalysis::Location();
      return AliasAnalysis::ModRef;
    }
    Loc = AA->getLocation(SI);
    return AliasAnalysis::Mod;
  }

  if (const VAArgInst *V = dyn_cast<VAArgInst>(Inst)) {
    Loc = AA->getLocation(V);
    return AliasAnalysis::ModRef;
  }

  if (const CallInst *CI = isFreeCall(Inst)) {
    // calls to free() deallocate the entire structure
    Loc = AliasAnalysis::Location(CI->getArgOperand(0));
    return AliasAnalysis::Mod;
  }

  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
    switch (II->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
    case Intrinsic::invariant_start:
      Loc = AliasAnalysis::Location(II->getArgOperand(1),
                                    cast<ConstantInt>(II->getArgOperand(0))
                                      ->getZExtValue(),
                                    II->getMetadata(LLVMContext::MD_tbaa));
      // These intrinsics don't really modify the memory, but returning Mod
      // will allow them to be handled conservatively.
      return AliasAnalysis::Mod;
    case Intrinsic::invariant_end:
      Loc = AliasAnalysis::Location(II->getArgOperand(2),
                                    cast<ConstantInt>(II->getArgOperand(1))
                                      ->getZExtValue(),
                                    II->getMetadata(LLVMContext::MD_tbaa));
      // These intrinsics don't really modify the memory, but returning Mod
      // will allow them to be handled conservatively.
      return AliasAnalysis::Mod;
    default:
      break;
    }

  // Otherwise, just do the coarse-grained thing that always works.
  if (Inst->mayWriteToMemory())
    return AliasAnalysis::ModRef;
  if (Inst->mayReadFromMemory())
    return AliasAnalysis::Ref;
  return AliasAnalysis::NoModRef;
}
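
// For example, for "%v = load i32* %P" this returns Ref with Loc describing
// the four bytes at %P (plus any TBAA tag), while a volatile load returns
// ModRef with a null Loc.Ptr so that callers treat it conservatively.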

/// getCallSiteDependencyFrom - Private helper for finding the local
/// dependencies of a call site.
MemDepResult MemoryDependenceAnalysis::
getCallSiteDependencyFrom(CallSite CS, bool isReadOnlyCall,
                          BasicBlock::iterator ScanIt, BasicBlock *BB) {
  // Walk backwards through the block, looking for dependencies
  while (ScanIt != BB->begin()) {
    Instruction *Inst = --ScanIt;

    // If this inst is a memory op, get the pointer it accessed
    AliasAnalysis::Location Loc;
    AliasAnalysis::ModRefResult MR = GetLocation(Inst, Loc, AA);
    if (Loc.Ptr) {
      // A simple instruction.
      if (AA->getModRefInfo(CS, Loc) != AliasAnalysis::NoModRef)
        return MemDepResult::getClobber(Inst);
      continue;
    }

    if (CallSite InstCS = cast<Value>(Inst)) {
      // Debug intrinsics don't cause dependences.
      if (isa<DbgInfoIntrinsic>(Inst)) continue;
      // If these two calls do not interfere, look past it.
      switch (AA->getModRefInfo(CS, InstCS)) {
      case AliasAnalysis::NoModRef:
        // If the two calls are the same, return InstCS as a Def, so that
        // CS can be found redundant and eliminated.
        if (isReadOnlyCall && !(MR & AliasAnalysis::Mod) &&
            CS.getInstruction()->isIdenticalToWhenDefined(Inst))
          return MemDepResult::getDef(Inst);

        // Otherwise if the two calls don't interact (e.g. InstCS is readnone)
        // keep scanning.
        break;
      default:
        return MemDepResult::getClobber(Inst);
      }
    }
  }

  // No dependence found.  If this is the entry block of the function, it is a
  // clobber, otherwise it is non-local.
  if (BB != &BB->getParent()->getEntryBlock())
    return MemDepResult::getNonLocal();
  return MemDepResult::getClobber(ScanIt);
}
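
// For example, scanning upward from the second of two identical calls to a
// readonly function, the NoModRef case above returns the first call as a Def,
// which lets a client such as GVN eliminate the redundant call; an interfering
// store between the two calls is reported as a Clobber instead.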

/// isLoadLoadClobberIfExtendedToFullWidth - Return true if LI is a load that
/// would fully overlap MemLoc if done as a wider legal integer load.
///
/// MemLocBase, MemLocOffset are lazily computed here the first time the
/// base/offs of memloc is needed.
static bool
isLoadLoadClobberIfExtendedToFullWidth(const AliasAnalysis::Location &MemLoc,
                                       const Value *&MemLocBase,
                                       int64_t &MemLocOffs,
                                       const LoadInst *LI, TargetData *TD) {
  // If we have no target data, we can't do this.
  if (TD == 0) return false;

  // If we haven't already computed the base/offset of MemLoc, do so now.
  if (MemLocBase == 0)
    MemLocBase = GetPointerBaseWithConstantOffset(MemLoc.Ptr, MemLocOffs, *TD);

  // Get the base of this load.
  int64_t LIOffs = 0;
  const Value *LIBase =
    GetPointerBaseWithConstantOffset(LI->getPointerOperand(), LIOffs, *TD);

  // If the two pointers are not based on the same pointer, we can't tell that
  // they are related.
  if (LIBase != MemLocBase) return false;

  // Okay, the two values are based on the same pointer, but returned as
  // no-alias.  This happens when we have things like two byte loads at "P+1"
  // and "P+3".  Check to see if increasing the size of the "LI" load up to its
  // alignment (or the largest native integer type) will allow us to load all
  // the bits required by MemLoc.

  // If MemLoc is before LI, then no widening of LI will help us out.
  if (MemLocOffs < LIOffs) return false;

  // Get the alignment of the load in bytes.  We assume that it is safe to load
  // any legal integer up to this size without a problem.  For example, if we're
  // looking at an i8 load on x86-32 that is known 1024 byte aligned, we can
  // widen it up to an i32 load.  If it is known 2-byte aligned, we can widen it
  // to i16.
  unsigned LoadAlign = LI->getAlignment();

  int64_t MemLocEnd = MemLocOffs+MemLoc.Size;

  // If no amount of rounding up will let MemLoc fit into LI, then bail out.
  if (LIOffs+LoadAlign < MemLocEnd) return false;

  // This is the size of the load to try.  Start with the next larger power of
  // two.
  unsigned NewLoadByteSize = LI->getType()->getPrimitiveSizeInBits()/8U;
  NewLoadByteSize = NextPowerOf2(NewLoadByteSize);

  while (1) {
    // If this load size is bigger than our known alignment or would not fit
    // into a native integer register, then we fail.
    if (NewLoadByteSize > LoadAlign ||
        !TD->fitsInLegalInteger(NewLoadByteSize*8))
      return false;

    // If a load of this width would include all of MemLoc, then we succeed.
    if (LIOffs+NewLoadByteSize >= MemLocEnd)
      return true;

    NewLoadByteSize <<= 1;
  }
}
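
// Worked example: querying one byte at P+3 against "load i8* %P, align 4".
// Both pointers share base P, with LIOffs == 0, MemLocOffs == 3, and
// MemLocEnd == 4.  NewLoadByteSize starts at 2 (the next power of two above
// the i8's one byte) and doubles to 4; a 4-byte load is within the alignment
// and, assuming i32 is a legal integer type on the target, covers offsets
// 0-3, so we return true.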

/// getPointerDependencyFrom - Return the instruction on which a memory
/// location depends.  If isLoad is true, this routine ignores may-aliases with
/// read-only operations.  If isLoad is false, this routine ignores may-aliases
/// with reads from read-only locations.
MemDepResult MemoryDependenceAnalysis::
getPointerDependencyFrom(const AliasAnalysis::Location &MemLoc, bool isLoad,
                         BasicBlock::iterator ScanIt, BasicBlock *BB) {

  const Value *MemLocBase = 0;
  int64_t MemLocOffset = 0;

  // Walk backwards through the basic block, looking for dependencies.
  while (ScanIt != BB->begin()) {
    Instruction *Inst = --ScanIt;

    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
      // Debug intrinsics don't (and can't) cause dependences.
      if (isa<DbgInfoIntrinsic>(II)) continue;

      // If we reach a lifetime begin or end marker, then the query ends here
      // because the value is undefined.
      if (II->getIntrinsicID() == Intrinsic::lifetime_start) {
        // FIXME: This only considers queries directly on the invariant-tagged
        // pointer, not on query pointers that are indexed off of them.  It'd
        // be nice to handle that at some point (the right approach is to use
        // GetPointerBaseWithConstantOffset).
        if (AA->isMustAlias(AliasAnalysis::Location(II->getArgOperand(1)),
                            MemLoc))
          return MemDepResult::getDef(II);
      }
    }

    // Values depend on loads if the pointers are must aliased.  This means that
    // a load depends on another must aliased load from the same value.
    if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
      AliasAnalysis::Location LoadLoc = AA->getLocation(LI);

      // If we found a pointer, check if it could be the same as our pointer.
      AliasAnalysis::AliasResult R = AA->alias(LoadLoc, MemLoc);

      if (isLoad) {
        if (R == AliasAnalysis::NoAlias) {
          // If this is an over-aligned integer load (for example,
          // "load i8* %P, align 4") see if it would obviously overlap with the
          // queried location if widened to a larger load (e.g. if the queried
          // location is 1 byte at P+1).  If so, return it as a load/load
          // clobber result, allowing the client to decide to widen the load if
          // it wants to.
          if (const IntegerType *ITy = dyn_cast<IntegerType>(LI->getType()))
            if (LI->getAlignment()*8 > ITy->getPrimitiveSizeInBits() &&
                isLoadLoadClobberIfExtendedToFullWidth(MemLoc, MemLocBase,
                                                       MemLocOffset, LI, TD))
              return MemDepResult::getClobber(Inst);

          continue;
        }

        // Must aliased loads are defs of each other.
        if (R == AliasAnalysis::MustAlias)
          return MemDepResult::getDef(Inst);

        // If we have a partial alias, then return this as a clobber for the
        // client to handle.
        if (R == AliasAnalysis::PartialAlias)
          return MemDepResult::getClobber(Inst);

        // Random may-alias loads don't depend on each other without a
        // dependence.
        continue;
      }

      // Stores don't depend on other no-aliased accesses.
      if (R == AliasAnalysis::NoAlias)
        continue;

      // Stores don't alias loads from read-only memory.
      if (AA->pointsToConstantMemory(LoadLoc))
        continue;

      // Stores depend on may/must aliased loads.
      return MemDepResult::getDef(Inst);
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      // If alias analysis can tell that this store is guaranteed to not modify
      // the query pointer, ignore it.  Use getModRefInfo to handle cases where
      // the query pointer points to constant memory etc.
      if (AA->getModRefInfo(SI, MemLoc) == AliasAnalysis::NoModRef)
        continue;

      // Ok, this store might clobber the query pointer.  Check to see if it is
      // a must alias: in this case, we want to return this as a def.
      AliasAnalysis::Location StoreLoc = AA->getLocation(SI);

      // If we found a pointer, check if it could be the same as our pointer.
      AliasAnalysis::AliasResult R = AA->alias(StoreLoc, MemLoc);

      if (R == AliasAnalysis::NoAlias)
        continue;
      if (R == AliasAnalysis::MustAlias)
        return MemDepResult::getDef(Inst);
      return MemDepResult::getClobber(Inst);
    }

    // If this is an allocation, and if we know that the accessed pointer is to
    // the allocation, return Def.  This means that there is no dependence and
    // the access can be optimized based on that.  For example, a load could
    // turn into undef.
    // Note: Only determine this to be a malloc if Inst is the malloc call, not
    // a subsequent bitcast of the malloc call result.  There can be stores to
    // the malloced memory between the malloc call and its bitcast uses, and we
    // need to continue scanning until the malloc call.
    if (isa<AllocaInst>(Inst) ||
        (isa<CallInst>(Inst) && extractMallocCall(Inst))) {
      const Value *AccessPtr = GetUnderlyingObject(MemLoc.Ptr, TD);

      if (AccessPtr == Inst || AA->isMustAlias(Inst, AccessPtr))
        return MemDepResult::getDef(Inst);
      continue;
    }

    // See if this instruction (e.g. a call or vaarg) mod/ref's the pointer.
    switch (AA->getModRefInfo(Inst, MemLoc)) {
    case AliasAnalysis::NoModRef:
      // If the call has no effect on the queried pointer, just ignore it.
      continue;
    case AliasAnalysis::Mod:
      return MemDepResult::getClobber(Inst);
    case AliasAnalysis::Ref:
      // If the call is known to never store to the pointer, and if this is a
      // load query, we can safely ignore it (scan past it).
      if (isLoad)
        continue;
    default:
      // Otherwise, there is a potential dependence.  Return a clobber.
      return MemDepResult::getClobber(Inst);
    }
  }

  // No dependence found.  If this is the entry block of the function, it is a
  // clobber, otherwise it is non-local.
  if (BB != &BB->getParent()->getEntryBlock())
    return MemDepResult::getNonLocal();
  return MemDepResult::getClobber(ScanIt);
}
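
// For example, scanning backwards from "%v = load i32* %P" onto
// "store i32 0, i32* %Q": if %P and %Q must-alias, the store is returned as a
// Def (the load is known to see the stored zero); if they may-alias it is
// returned as a Clobber; and if AA proves NoAlias the scan simply continues
// to older instructions.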

/// getDependency - Return the instruction on which a memory operation
/// depends.
MemDepResult MemoryDependenceAnalysis::getDependency(Instruction *QueryInst) {
  Instruction *ScanPos = QueryInst;

  // Check for a cached result
  MemDepResult &LocalCache = LocalDeps[QueryInst];

  // If the cached entry is non-dirty, just return it.  Note that this depends
  // on MemDepResult's default constructing to 'dirty'.
  if (!LocalCache.isDirty())
    return LocalCache;

  // Otherwise, if we have a dirty entry, we know we can start the scan at that
  // instruction, which may save us some work.
  if (Instruction *Inst = LocalCache.getInst()) {
    ScanPos = Inst;

    RemoveFromReverseMap(ReverseLocalDeps, Inst, QueryInst);
  }

  BasicBlock *QueryParent = QueryInst->getParent();

  // Do the scan.
  if (BasicBlock::iterator(QueryInst) == QueryParent->begin()) {
    // No dependence found.  If this is the entry block of the function, it is
    // a clobber, otherwise it is non-local.
    if (QueryParent != &QueryParent->getParent()->getEntryBlock())
      LocalCache = MemDepResult::getNonLocal();
    else
      LocalCache = MemDepResult::getClobber(QueryInst);
  } else {
    AliasAnalysis::Location MemLoc;
    AliasAnalysis::ModRefResult MR = GetLocation(QueryInst, MemLoc, AA);
    if (MemLoc.Ptr) {
      // If we can do a pointer scan, make it happen.
      bool isLoad = !(MR & AliasAnalysis::Mod);
      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(QueryInst))
        isLoad |= II->getIntrinsicID() == Intrinsic::lifetime_end;

      LocalCache = getPointerDependencyFrom(MemLoc, isLoad, ScanPos,
                                            QueryParent);
    } else if (isa<CallInst>(QueryInst) || isa<InvokeInst>(QueryInst)) {
      CallSite QueryCS(QueryInst);
      bool isReadOnly = AA->onlyReadsMemory(QueryCS);
      LocalCache = getCallSiteDependencyFrom(QueryCS, isReadOnly, ScanPos,
                                             QueryParent);
    } else
      // Non-memory instruction.
      LocalCache = MemDepResult::getClobber(--BasicBlock::iterator(ScanPos));
  }

  // Remember the result!
  if (Instruction *I = LocalCache.getInst())
    ReverseLocalDeps[I].insert(QueryInst);

  return LocalCache;
}
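
// Illustrative client usage (a sketch; MD stands for a pointer to this pass
// and L for some load instruction being optimized):
//   MemDepResult Res = MD->getDependency(L);
//   if (Res.isDef())            // e.g. a must-aliased store or load
//     ... forward the stored/loaded value to L ...
//   else if (Res.isNonLocal())  // dependency lives outside L's block
//     ... fall back to the non-local queries below ...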

#ifndef NDEBUG
/// AssertSorted - This method is used when -debug is specified to verify that
/// cache arrays are properly kept sorted.
static void AssertSorted(MemoryDependenceAnalysis::NonLocalDepInfo &Cache,
                         int Count = -1) {
  if (Count == -1) Count = Cache.size();
  if (Count == 0) return;

  for (unsigned i = 1; i != unsigned(Count); ++i)
    assert(!(Cache[i] < Cache[i-1]) && "Cache isn't sorted!");
}
#endif

/// getNonLocalCallDependency - Perform a full dependency query for the
/// specified call, returning the set of blocks that the value is
/// potentially live across.  The returned set of results will include a
/// "NonLocal" result for all blocks where the value is live across.
///
/// This method assumes the instruction returns a "NonLocal" dependency
/// within its own block.
///
/// This returns a reference to an internal data structure that may be
/// invalidated on the next non-local query or when an instruction is
/// removed.  Clients must copy this data if they want it around longer than
/// that.
const MemoryDependenceAnalysis::NonLocalDepInfo &
MemoryDependenceAnalysis::getNonLocalCallDependency(CallSite QueryCS) {
  assert(getDependency(QueryCS.getInstruction()).isNonLocal() &&
 "getNonLocalCallDependency should only be used on calls with non-local deps!");
  PerInstNLInfo &CacheP = NonLocalDeps[QueryCS.getInstruction()];
  NonLocalDepInfo &Cache = CacheP.first;

  /// DirtyBlocks - This is the set of blocks that need to be recomputed.  In
  /// the cached case, this can happen due to instructions being deleted etc. In
  /// the uncached case, this starts out as the set of predecessors we care
  /// about.
  SmallVector<BasicBlock*, 32> DirtyBlocks;

  if (!Cache.empty()) {
    // Okay, we have a cache entry.  If we know it is not dirty, just return it
    // with no computation.
    if (!CacheP.second) {
      ++NumCacheNonLocal;
      return Cache;
    }

    // If we already have a partially computed set of results, scan them to
    // determine what is dirty, seeding our initial DirtyBlocks worklist.
    for (NonLocalDepInfo::iterator I = Cache.begin(), E = Cache.end();
         I != E; ++I)
      if (I->getResult().isDirty())
        DirtyBlocks.push_back(I->getBB());

    // Sort the cache so that we can do fast binary search lookups below.
    std::sort(Cache.begin(), Cache.end());

    ++NumCacheDirtyNonLocal;
    //cerr << "CACHED CASE: " << DirtyBlocks.size() << " dirty: "
    //     << Cache.size() << " cached: " << *QueryInst;
  } else {
    // Seed DirtyBlocks with each of the preds of QueryInst's block.
    BasicBlock *QueryBB = QueryCS.getInstruction()->getParent();
    for (BasicBlock **PI = PredCache->GetPreds(QueryBB); *PI; ++PI)
      DirtyBlocks.push_back(*PI);
    ++NumUncacheNonLocal;
  }

  // isReadonlyCall - If this is a read-only call, we can be more aggressive.
  bool isReadonlyCall = AA->onlyReadsMemory(QueryCS);

  SmallPtrSet<BasicBlock*, 64> Visited;

  unsigned NumSortedEntries = Cache.size();
  DEBUG(AssertSorted(Cache));

  // Iterate while we still have blocks to update.
  while (!DirtyBlocks.empty()) {
    BasicBlock *DirtyBB = DirtyBlocks.back();
    DirtyBlocks.pop_back();

    // Already processed this block?
    if (!Visited.insert(DirtyBB))
      continue;

    // Do a binary search to see if we already have an entry for this block in
    // the cache set.  If so, find it.
    DEBUG(AssertSorted(Cache, NumSortedEntries));
    NonLocalDepInfo::iterator Entry =
      std::upper_bound(Cache.begin(), Cache.begin()+NumSortedEntries,
                       NonLocalDepEntry(DirtyBB));
    if (Entry != Cache.begin() && prior(Entry)->getBB() == DirtyBB)
      --Entry;

    NonLocalDepEntry *ExistingResult = 0;
    if (Entry != Cache.begin()+NumSortedEntries &&
        Entry->getBB() == DirtyBB) {
      // If we already have an entry, and if it isn't already dirty, the block
      // is done.
      if (!Entry->getResult().isDirty())
        continue;

      // Otherwise, remember this slot so we can update the value.
      ExistingResult = &*Entry;
    }

    // If the dirty entry has a pointer, start scanning from it so we don't have
    // to rescan the entire block.
    BasicBlock::iterator ScanPos = DirtyBB->end();
    if (ExistingResult) {
      if (Instruction *Inst = ExistingResult->getResult().getInst()) {
        ScanPos = Inst;
        // We're removing QueryInst's use of Inst.
        RemoveFromReverseMap(ReverseNonLocalDeps, Inst,
                             QueryCS.getInstruction());
      }
    }

    // Find out if this block has a local dependency for QueryInst.
    MemDepResult Dep;

    if (ScanPos != DirtyBB->begin()) {
      Dep = getCallSiteDependencyFrom(QueryCS, isReadonlyCall,ScanPos, DirtyBB);
    } else if (DirtyBB != &DirtyBB->getParent()->getEntryBlock()) {
      // No dependence found.  If this is the entry block of the function, it is
      // a clobber, otherwise it is non-local.
      Dep = MemDepResult::getNonLocal();
    } else {
      Dep = MemDepResult::getClobber(ScanPos);
    }

    // If we had a dirty entry for the block, update it.  Otherwise, just add
    // a new entry.
    if (ExistingResult)
      ExistingResult->setResult(Dep);
    else
      Cache.push_back(NonLocalDepEntry(DirtyBB, Dep));

    // If the block has a dependency (i.e. it isn't completely transparent to
    // the value), remember the association!
    if (!Dep.isNonLocal()) {
      // Keep the ReverseNonLocalDeps map up to date so we can efficiently
      // update this when we remove instructions.
      if (Instruction *Inst = Dep.getInst())
        ReverseNonLocalDeps[Inst].insert(QueryCS.getInstruction());
    } else {

      // If the block *is* completely transparent to the load, we need to check
      // the predecessors of this block.  Add them to our worklist.
      for (BasicBlock **PI = PredCache->GetPreds(DirtyBB); *PI; ++PI)
        DirtyBlocks.push_back(*PI);
    }
  }

  return Cache;
}
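
// Illustrative usage (a sketch; MD stands for a pointer to this pass): a
// client walks the returned vector and must copy it before issuing another
// non-local query, since the reference points into MemDep's cache:
//   const MemoryDependenceAnalysis::NonLocalDepInfo &Deps =
//     MD->getNonLocalCallDependency(CS);
//   for (unsigned i = 0, e = Deps.size(); i != e; ++i)
//     if (Deps[i].getResult().isDef())
//       ... CS is redundant with the call in Deps[i].getBB() ...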

/// getNonLocalPointerDependency - Perform a full dependency query for an
/// access to the specified (non-volatile) memory location, returning the
/// set of instructions that either define or clobber the value.
///
/// This method assumes the pointer has a "NonLocal" dependency within its
/// own block.
///
void MemoryDependenceAnalysis::
getNonLocalPointerDependency(const AliasAnalysis::Location &Loc, bool isLoad,
                             BasicBlock *FromBB,
                             SmallVectorImpl<NonLocalDepResult> &Result) {
  assert(Loc.Ptr->getType()->isPointerTy() &&
         "Can't get pointer deps of a non-pointer!");
  Result.clear();

  PHITransAddr Address(const_cast<Value *>(Loc.Ptr), TD);

  // This is the set of blocks we've inspected, and the pointer we consider in
  // each block.  Because of critical edges, we currently bail out if querying
  // a block with multiple different pointers.  This can happen during PHI
  // translation.
  DenseMap<BasicBlock*, Value*> Visited;
  if (!getNonLocalPointerDepFromBB(Address, Loc, isLoad, FromBB,
                                   Result, Visited, true))
    return;
  Result.clear();
  Result.push_back(NonLocalDepResult(FromBB,
                                     MemDepResult::getClobber(FromBB->begin()),
                                     const_cast<Value *>(Loc.Ptr)));
}
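
// Illustrative usage (a sketch, for a load L whose local dependency came back
// NonLocal):
//   SmallVector<NonLocalDepResult, 8> Deps;
//   MD->getNonLocalPointerDependency(AA->getLocation(L), true,
//                                    L->getParent(), Deps);
// Each NonLocalDepResult pairs a queried block with the Def or Clobber found
// there and with the (possibly phi-translated) address queried in that block.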

/// GetNonLocalInfoForBlock - Compute the memdep value for BB with
/// Pointer/PointeeSize using either cached information in Cache or by doing a
/// lookup (which may use dirty cache info if available).  If we do a lookup,
/// add the result to the cache.
MemDepResult MemoryDependenceAnalysis::
GetNonLocalInfoForBlock(const AliasAnalysis::Location &Loc,
                        bool isLoad, BasicBlock *BB,
                        NonLocalDepInfo *Cache, unsigned NumSortedEntries) {

  // Do a binary search to see if we already have an entry for this block in
  // the cache set.  If so, find it.
  NonLocalDepInfo::iterator Entry =
    std::upper_bound(Cache->begin(), Cache->begin()+NumSortedEntries,
                     NonLocalDepEntry(BB));
  if (Entry != Cache->begin() && (Entry-1)->getBB() == BB)
    --Entry;

  NonLocalDepEntry *ExistingResult = 0;
  if (Entry != Cache->begin()+NumSortedEntries && Entry->getBB() == BB)
    ExistingResult = &*Entry;

  // If we have a cached entry, and it is non-dirty, use it as the value for
  // this dependency.
  if (ExistingResult && !ExistingResult->getResult().isDirty()) {
    ++NumCacheNonLocalPtr;
    return ExistingResult->getResult();
  }

  // Otherwise, we have to scan for the value.  If we have a dirty cache
  // entry, start scanning from its position, otherwise we scan from the end
  // of the block.
  BasicBlock::iterator ScanPos = BB->end();
  if (ExistingResult && ExistingResult->getResult().getInst()) {
    assert(ExistingResult->getResult().getInst()->getParent() == BB &&
           "Instruction invalidated?");
    ++NumCacheDirtyNonLocalPtr;
    ScanPos = ExistingResult->getResult().getInst();

    // Eliminating the dirty entry from 'Cache', so update the reverse info.
    ValueIsLoadPair CacheKey(Loc.Ptr, isLoad);
    RemoveFromReverseMap(ReverseNonLocalPtrDeps, ScanPos, CacheKey);
  } else {
    ++NumUncacheNonLocalPtr;
  }

  // Scan the block for the dependency.
  MemDepResult Dep = getPointerDependencyFrom(Loc, isLoad, ScanPos, BB);

  // If we had a dirty entry for the block, update it.  Otherwise, just add
  // a new entry.
  if (ExistingResult)
    ExistingResult->setResult(Dep);
  else
    Cache->push_back(NonLocalDepEntry(BB, Dep));

  // If the block has a dependency (i.e. it isn't completely transparent to
  // the value), remember the reverse association because we just added it
  // to Cache!
  if (Dep.isNonLocal())
    return Dep;

  // Keep the ReverseNonLocalPtrDeps map up to date so we can efficiently
  // update MemDep when we remove instructions.
  Instruction *Inst = Dep.getInst();
  assert(Inst && "Didn't depend on anything?");
  ValueIsLoadPair CacheKey(Loc.Ptr, isLoad);
  ReverseNonLocalPtrDeps[Inst].insert(CacheKey);
  return Dep;
}
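
// For example, if a previous query cached "clobbered by store S" for this
// block and S was later removed, removeInstruction leaves a dirty entry
// pointing at the instruction after S; the code above then resumes the scan
// from that position instead of rescanning from the end of the block.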

/// SortNonLocalDepInfoCache - Sort the NonLocalDepInfo cache, given a certain
/// number of elements in the array that are already properly ordered.  This is
/// optimized for the case when only a few entries are added.
static void
SortNonLocalDepInfoCache(MemoryDependenceAnalysis::NonLocalDepInfo &Cache,
                         unsigned NumSortedEntries) {
  switch (Cache.size() - NumSortedEntries) {
  case 0:
    // done, no new entries.
    break;
  case 2: {
    // Two new entries, insert the last one into place.
    NonLocalDepEntry Val = Cache.back();
    Cache.pop_back();
    MemoryDependenceAnalysis::NonLocalDepInfo::iterator Entry =
      std::upper_bound(Cache.begin(), Cache.end()-1, Val);
    Cache.insert(Entry, Val);
    // FALL THROUGH.
  }
  case 1:
    // One new entry, just insert the new value at the appropriate position.
    if (Cache.size() != 1) {
      NonLocalDepEntry Val = Cache.back();
      Cache.pop_back();
      MemoryDependenceAnalysis::NonLocalDepInfo::iterator Entry =
        std::upper_bound(Cache.begin(), Cache.end(), Val);
      Cache.insert(Entry, Val);
    }
    break;
  default:
    // Added many values, do a full scale sort.
    std::sort(Cache.begin(), Cache.end());
    break;
  }
}
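
// For example, a query that revisits a mostly-cached region typically appends
// only one or two new blocks to a cache that may already hold entries for
// many blocks; inserting those few entries with upper_bound keeps the array
// sorted without paying for a full std::sort on every query.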

/// getNonLocalPointerDepFromBB - Perform a dependency query based on
/// pointer/pointeesize starting at the end of StartBB.  Add any clobber/def
/// results to the results vector and keep track of which blocks are visited in
/// 'Visited'.
///
/// This has special behavior for the first block queries (when SkipFirstBlock
/// is true).  In this special case, it ignores the contents of the specified
/// block and starts returning dependence info for its predecessors.
///
/// This function returns false on success, or true to indicate that it could
/// not compute dependence information for some reason.  This should be treated
/// as a clobber dependence on the first instruction in the predecessor block.
bool MemoryDependenceAnalysis::
getNonLocalPointerDepFromBB(const PHITransAddr &Pointer,
                            const AliasAnalysis::Location &Loc,
                            bool isLoad, BasicBlock *StartBB,
                            SmallVectorImpl<NonLocalDepResult> &Result,
                            DenseMap<BasicBlock*, Value*> &Visited,
                            bool SkipFirstBlock) {

  // Look up the cached info for Pointer.
  ValueIsLoadPair CacheKey(Pointer.getAddr(), isLoad);

  // Set up a temporary NLPI value.  If the map doesn't yet have an entry for
  // CacheKey, this value will be inserted as the associated value.  Otherwise,
  // it'll be ignored, and we'll have to check to see if the cached size and
  // tbaa tag are consistent with the current query.
  NonLocalPointerInfo InitialNLPI;
  InitialNLPI.Size = Loc.Size;
  InitialNLPI.TBAATag = Loc.TBAATag;

  // Get the NLPI for CacheKey, inserting one into the map if it doesn't
  // already exist.
  std::pair<CachedNonLocalPointerInfo::iterator, bool> Pair =
    NonLocalPointerDeps.insert(std::make_pair(CacheKey, InitialNLPI));
  NonLocalPointerInfo *CacheInfo = &Pair.first->second;

  // If we already have a cache entry for this CacheKey, we may need to do some
  // work to reconcile the cache entry and the current query.
  if (!Pair.second) {
    if (CacheInfo->Size < Loc.Size) {
      // The query's Size is greater than the cached one.  Throw out the
      // cached data and proceed with the query at the greater size.
      CacheInfo->Pair = BBSkipFirstBlockPair();
      CacheInfo->Size = Loc.Size;
      for (NonLocalDepInfo::iterator DI = CacheInfo->NonLocalDeps.begin(),
           DE = CacheInfo->NonLocalDeps.end(); DI != DE; ++DI)
        if (Instruction *Inst = DI->getResult().getInst())
          RemoveFromReverseMap(ReverseNonLocalPtrDeps, Inst, CacheKey);
      CacheInfo->NonLocalDeps.clear();
    } else if (CacheInfo->Size > Loc.Size) {
      // This query's Size is less than the cached one.  Conservatively restart
      // the query using the greater size.
      return getNonLocalPointerDepFromBB(Pointer,
                                         Loc.getWithNewSize(CacheInfo->Size),
                                         isLoad, StartBB, Result, Visited,
                                         SkipFirstBlock);
    }

    // If the query's TBAATag is inconsistent with the cached one,
    // conservatively throw out the cached data and restart the query with
    // no tag if needed.
    if (CacheInfo->TBAATag != Loc.TBAATag) {
      if (CacheInfo->TBAATag) {
        CacheInfo->Pair = BBSkipFirstBlockPair();
        CacheInfo->TBAATag = 0;
        for (NonLocalDepInfo::iterator DI = CacheInfo->NonLocalDeps.begin(),
             DE = CacheInfo->NonLocalDeps.end(); DI != DE; ++DI)
          if (Instruction *Inst = DI->getResult().getInst())
            RemoveFromReverseMap(ReverseNonLocalPtrDeps, Inst, CacheKey);
        CacheInfo->NonLocalDeps.clear();
      }
      if (Loc.TBAATag)
        return getNonLocalPointerDepFromBB(Pointer, Loc.getWithoutTBAATag(),
                                           isLoad, StartBB, Result, Visited,
                                           SkipFirstBlock);
    }
  }

  NonLocalDepInfo *Cache = &CacheInfo->NonLocalDeps;

  // If we have valid cached information for exactly the block we are
  // investigating, just return it with no recomputation.
  if (CacheInfo->Pair == BBSkipFirstBlockPair(StartBB, SkipFirstBlock)) {
    // We have a fully cached result for this query, so we can just return the
    // cached results and populate the visited set.  However, we have to verify
    // that we don't already have conflicting results for these blocks.  Check
    // to ensure that if a block in the results set is in the visited set that
    // it was for the same pointer query.
    if (!Visited.empty()) {
      for (NonLocalDepInfo::iterator I = Cache->begin(), E = Cache->end();
           I != E; ++I) {
        DenseMap<BasicBlock*, Value*>::iterator VI = Visited.find(I->getBB());
        if (VI == Visited.end() || VI->second == Pointer.getAddr())
          continue;

        // We have a pointer mismatch in a block.  Just return clobber, saying
        // that something was clobbered in this result.  We could also do a
        // non-fully cached query, but there is little point in doing this.
        return true;
      }
    }

    Value *Addr = Pointer.getAddr();
    for (NonLocalDepInfo::iterator I = Cache->begin(), E = Cache->end();
         I != E; ++I) {
      Visited.insert(std::make_pair(I->getBB(), Addr));
      if (!I->getResult().isNonLocal())
        Result.push_back(NonLocalDepResult(I->getBB(), I->getResult(), Addr));
    }
    ++NumCacheCompleteNonLocalPtr;
    return false;
  }

  // Otherwise, either this is a new block, a block with an invalid cache
  // pointer or one that we're about to invalidate by putting more info into it
  // than its valid cache info.  If empty, the result will be valid cache info,
  // otherwise it isn't.
  if (Cache->empty())
    CacheInfo->Pair = BBSkipFirstBlockPair(StartBB, SkipFirstBlock);
  else
    CacheInfo->Pair = BBSkipFirstBlockPair();

  SmallVector<BasicBlock*, 32> Worklist;
  Worklist.push_back(StartBB);

  // Keep track of the entries that we know are sorted.  Previously cached
  // entries will all be sorted.  The entries we add we only sort on demand (we
  // don't insert every element into its sorted position).  We know that we
  // won't get any reuse from currently inserted values, because we don't
  // revisit blocks after we insert info for them.
  unsigned NumSortedEntries = Cache->size();
  DEBUG(AssertSorted(*Cache));

  while (!Worklist.empty()) {
    BasicBlock *BB = Worklist.pop_back_val();

    // Skip the first block if we have it.
    if (!SkipFirstBlock) {
      // Analyze the dependency of *Pointer in FromBB.  See if we already have
      // been here.
      assert(Visited.count(BB) && "Should check 'visited' before adding to WL");

      // Get the dependency info for Pointer in BB.  If we have cached
      // information, we will use it, otherwise we compute it.
      DEBUG(AssertSorted(*Cache, NumSortedEntries));
      MemDepResult Dep = GetNonLocalInfoForBlock(Loc, isLoad, BB, Cache,
                                                 NumSortedEntries);

      // If we got a Def or Clobber, add this to the list of results.
      if (!Dep.isNonLocal()) {
        Result.push_back(NonLocalDepResult(BB, Dep, Pointer.getAddr()));
        continue;
      }
    }

    // If 'Pointer' is an instruction defined in this block, then we need to do
    // phi translation to change it into a value live in the predecessor block.
    // If not, we just add the predecessors to the worklist and scan them with
    // the same Pointer.
    if (!Pointer.NeedsPHITranslationFromBlock(BB)) {
      SkipFirstBlock = false;
      for (BasicBlock **PI = PredCache->GetPreds(BB); *PI; ++PI) {
        // Verify that we haven't looked at this block yet.
        std::pair<DenseMap<BasicBlock*,Value*>::iterator, bool>
          InsertRes = Visited.insert(std::make_pair(*PI, Pointer.getAddr()));
        if (InsertRes.second) {
          // First time we've looked at *PI.
          Worklist.push_back(*PI);
          continue;
        }

        // If we have seen this block before, but it was with a different
        // pointer then we have a phi translation failure and we have to treat
        // this as a clobber.
        if (InsertRes.first->second != Pointer.getAddr())
          goto PredTranslationFailure;
      }
      continue;
    }

    // We do need to do phi translation; if we know ahead of time that we can't
    // phi translate this value, don't even try.
    if (!Pointer.IsPotentiallyPHITranslatable())
      goto PredTranslationFailure;

    // We may have added values to the cache list before this PHI translation.
    // If so, we haven't done anything to ensure that the cache remains sorted.
    // Sort it now (if needed) so that recursive invocations of
    // getNonLocalPointerDepFromBB and other routines that could reuse the cache
    // value will only see properly sorted cache arrays.
    if (Cache && NumSortedEntries != Cache->size()) {
      SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
      NumSortedEntries = Cache->size();
    }
    Cache = 0;

    for (BasicBlock **PI = PredCache->GetPreds(BB); *PI; ++PI) {
      BasicBlock *Pred = *PI;

      // Get the PHI translated pointer in this predecessor.  This can fail if
      // not translatable, in which case the getAddr() returns null.
      PHITransAddr PredPointer(Pointer);
      PredPointer.PHITranslateValue(BB, Pred, 0);

      Value *PredPtrVal = PredPointer.getAddr();

      // Check to see if we have already visited this pred block with another
      // pointer.  If so, we can't do this lookup.  This failure can occur
      // with PHI translation when a critical edge exists and the PHI node in
      // the successor translates to a pointer value different than the
      // pointer the block was first analyzed with.
      std::pair<DenseMap<BasicBlock*,Value*>::iterator, bool>
        InsertRes = Visited.insert(std::make_pair(Pred, PredPtrVal));

      if (!InsertRes.second) {
        // If the predecessor was visited with PredPtr, then we already did
        // the analysis and can ignore it.
        if (InsertRes.first->second == PredPtrVal)
          continue;

        // Otherwise, the block was previously analyzed with a different
        // pointer.  We can't represent the result of this case, so we just
        // treat this as a phi translation failure.
        goto PredTranslationFailure;
      }

      // If PHI translation was unable to find an available pointer in this
      // predecessor, then we have to assume that the pointer is clobbered in
      // that predecessor.  We can still do PRE of the load, which would insert
      // a computation of the pointer in this predecessor.
      if (PredPtrVal == 0) {
        // Add the entry to the Result list.
        NonLocalDepResult Entry(Pred,
                                MemDepResult::getClobber(Pred->getTerminator()),
                                PredPtrVal);
        Result.push_back(Entry);

        // Since we had a phi translation failure, the cache for CacheKey won't
        // include all of the entries that we need to immediately satisfy future
        // queries.  Mark this in NonLocalPointerDeps by setting the
        // BBSkipFirstBlockPair pointer to null.  This requires reuse of the
        // cached value to do more work but not miss the phi trans failure.
        NonLocalPointerInfo &NLPI = NonLocalPointerDeps[CacheKey];
        NLPI.Pair = BBSkipFirstBlockPair();
        continue;
      }

      // FIXME: it is entirely possible that PHI translating will end up with
      // the same value.  Consider PHI translating something like:
      // X = phi [x, bb1], [y, bb2].  PHI translating for bb1 doesn't *need*
      // to recurse here, pedantically speaking.

      // If we have a problem phi translating, fall through to the code below
      // to handle the failure condition.
      if (getNonLocalPointerDepFromBB(PredPointer,
                                      Loc.getWithNewPtr(PredPointer.getAddr()),
                                      isLoad, Pred,
                                      Result, Visited))
        goto PredTranslationFailure;
    }

    // Refresh the CacheInfo/Cache pointer so that it isn't invalidated.
    CacheInfo = &NonLocalPointerDeps[CacheKey];
    Cache = &CacheInfo->NonLocalDeps;
    NumSortedEntries = Cache->size();

    // Since we did phi translation, the "Cache" set won't contain all of the
    // results for the query.  This is ok (we can still use it to accelerate
    // specific block queries) but we can't do the fastpath "return all
    // results from the set".  Clear out the indicator for this.
    CacheInfo->Pair = BBSkipFirstBlockPair();
    SkipFirstBlock = false;
    continue;

  PredTranslationFailure:

    if (Cache == 0) {
      // Refresh the CacheInfo/Cache pointer if it got invalidated.
      CacheInfo = &NonLocalPointerDeps[CacheKey];
      Cache = &CacheInfo->NonLocalDeps;
      NumSortedEntries = Cache->size();
    }

    // Since we failed phi translation, the "Cache" set won't contain all of the
    // results for the query.  This is ok (we can still use it to accelerate
    // specific block queries) but we can't do the fastpath "return all
    // results from the set".  Clear out the indicator for this.
    CacheInfo->Pair = BBSkipFirstBlockPair();

    // If *nothing* works, mark the pointer as being clobbered by the first
    // instruction in this block.
    //
    // If this is the magic first block, return this as a clobber of the whole
    // incoming value.  Since we can't phi translate to one of the predecessors,
    // we have to bail out.
    if (SkipFirstBlock)
      return true;

    for (NonLocalDepInfo::reverse_iterator I = Cache->rbegin(); ; ++I) {
      assert(I != Cache->rend() && "Didn't find current block??");
      if (I->getBB() != BB)
        continue;

      assert(I->getResult().isNonLocal() &&
             "Should only be here with transparent block");
      I->setResult(MemDepResult::getClobber(BB->begin()));
      ReverseNonLocalPtrDeps[BB->begin()].insert(CacheKey);
      Result.push_back(NonLocalDepResult(I->getBB(), I->getResult(),
                                         Pointer.getAddr()));
      break;
    }
  }

  // Okay, we're done now.  If we added new values to the cache, re-sort it.
  SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
  DEBUG(AssertSorted(*Cache));
  return false;
}
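
// Illustrative phi-translation example: querying %p in a block containing
// "%p = phi [%a, %pred1], [%b, %pred2]" re-queries %a in %pred1 and %b in
// %pred2.  If some predecessor was already visited with a different pointer,
// the PredTranslationFailure path above gives up and records a clobber at
// the start of the block instead.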

/// RemoveCachedNonLocalPointerDependencies - If P exists in
/// CachedNonLocalPointerInfo, remove it.
void MemoryDependenceAnalysis::
RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair P) {
  CachedNonLocalPointerInfo::iterator It =
    NonLocalPointerDeps.find(P);
  if (It == NonLocalPointerDeps.end()) return;

  // Remove all of the entries in the BB->val map.  This involves removing
  // instructions from the reverse map.
  NonLocalDepInfo &PInfo = It->second.NonLocalDeps;

  for (unsigned i = 0, e = PInfo.size(); i != e; ++i) {
    Instruction *Target = PInfo[i].getResult().getInst();
    if (Target == 0) continue;  // Ignore non-local dep results.
    assert(Target->getParent() == PInfo[i].getBB());

    // Eliminating the dirty entry from 'Cache', so update the reverse info.
    RemoveFromReverseMap(ReverseNonLocalPtrDeps, Target, P);
  }

  // Remove P from NonLocalPointerDeps (which deletes NonLocalDepInfo).
  NonLocalPointerDeps.erase(It);
}

/// invalidateCachedPointerInfo - This method is used to invalidate cached
/// information about the specified pointer, because it may be too
/// conservative in memdep.  This is an optional call that can be used when
/// the client detects an equivalence between the pointer and some other
/// value and replaces the other value with ptr.  This can make Ptr available
/// in more places than the cached info would otherwise allow.
void MemoryDependenceAnalysis::invalidateCachedPointerInfo(Value *Ptr) {
  // If Ptr isn't really a pointer, just ignore it.
  if (!Ptr->getType()->isPointerTy()) return;
  // Flush store info for the pointer.
  RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, false));
  // Flush load info for the pointer.
  RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, true));
}
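
// For example, after a client proves that %q equals %p and replaces all uses
// of %q with %p, queries previously cached for %p may now be too
// conservative; calling invalidateCachedPointerInfo(%p) drops both the load
// and the store entries so they are recomputed on the next query.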

/// invalidateCachedPredecessors - Clear the PredIteratorCache info.
/// This needs to be done when the CFG changes, e.g., due to splitting
/// critical edges.
void MemoryDependenceAnalysis::invalidateCachedPredecessors() {
  PredCache->clear();
}

/// removeInstruction - Remove an instruction from the dependence analysis,
/// updating the dependence of instructions that previously depended on it.
/// This method attempts to keep the cache coherent using the reverse map.
void MemoryDependenceAnalysis::removeInstruction(Instruction *RemInst) {
  // Walk through the Non-local dependencies, removing this one as the value
  // for any cached queries.
  NonLocalDepMapType::iterator NLDI = NonLocalDeps.find(RemInst);
  if (NLDI != NonLocalDeps.end()) {
    NonLocalDepInfo &BlockMap = NLDI->second.first;
    for (NonLocalDepInfo::iterator DI = BlockMap.begin(), DE = BlockMap.end();
         DI != DE; ++DI)
      if (Instruction *Inst = DI->getResult().getInst())
        RemoveFromReverseMap(ReverseNonLocalDeps, Inst, RemInst);
    NonLocalDeps.erase(NLDI);
  }

  // If we have a cached local dependence query for this instruction, remove it.
  //
  LocalDepMapType::iterator LocalDepEntry = LocalDeps.find(RemInst);
  if (LocalDepEntry != LocalDeps.end()) {
    // Remove us from DepInst's reverse set now that the local dep info is gone.
    if (Instruction *Inst = LocalDepEntry->second.getInst())
      RemoveFromReverseMap(ReverseLocalDeps, Inst, RemInst);

    // Remove this local dependency info.
    LocalDeps.erase(LocalDepEntry);
  }

  // If we have any cached pointer dependencies on this instruction, remove
  // them.  If the instruction has non-pointer type, then it can't be a pointer
  // base.

  // Remove it from both the load info and the store info.  The instruction
  // can't be in either of these maps if it is non-pointer.
  if (RemInst->getType()->isPointerTy()) {
    RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, false));
    RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, true));
  }

  // Loop over all of the things that depend on the instruction we're removing.
  //
  SmallVector<std::pair<Instruction*, Instruction*>, 8> ReverseDepsToAdd;

  // If we find RemInst as a clobber or Def in any of the maps for other values,
  // we need to replace its entry with a dirty version of the instruction after
  // it.  If RemInst is a terminator, we use a null dirty value.
  //
  // Using a dirty version of the instruction after RemInst saves having to scan
  // the entire block to get to this point.
  MemDepResult NewDirtyVal;
  if (!RemInst->isTerminator())
    NewDirtyVal = MemDepResult::getDirty(++BasicBlock::iterator(RemInst));

  ReverseDepMapType::iterator ReverseDepIt = ReverseLocalDeps.find(RemInst);
  if (ReverseDepIt != ReverseLocalDeps.end()) {
    SmallPtrSet<Instruction*, 4> &ReverseDeps = ReverseDepIt->second;
    // RemInst can't be the terminator if it has local stuff depending on it.
    assert(!ReverseDeps.empty() && !isa<TerminatorInst>(RemInst) &&
           "Nothing can locally depend on a terminator");

    for (SmallPtrSet<Instruction*, 4>::iterator I = ReverseDeps.begin(),
         E = ReverseDeps.end(); I != E; ++I) {
      Instruction *InstDependingOnRemInst = *I;
      assert(InstDependingOnRemInst != RemInst &&
             "Already removed our local dep info");

      LocalDeps[InstDependingOnRemInst] = NewDirtyVal;

      // Make sure to remember that new things depend on NewDepInst.
      assert(NewDirtyVal.getInst() && "There is no way something else can have "
             "a local dep on this if it is a terminator!");
      ReverseDepsToAdd.push_back(std::make_pair(NewDirtyVal.getInst(),
                                                InstDependingOnRemInst));
    }

    ReverseLocalDeps.erase(ReverseDepIt);

    // Add new reverse deps after scanning the set, to avoid invalidating the
    // 'ReverseDeps' reference.
    while (!ReverseDepsToAdd.empty()) {
      ReverseLocalDeps[ReverseDepsToAdd.back().first]
        .insert(ReverseDepsToAdd.back().second);
      ReverseDepsToAdd.pop_back();
    }
  }

  ReverseDepIt = ReverseNonLocalDeps.find(RemInst);
  if (ReverseDepIt != ReverseNonLocalDeps.end()) {
    SmallPtrSet<Instruction*, 4> &Set = ReverseDepIt->second;
    for (SmallPtrSet<Instruction*, 4>::iterator I = Set.begin(), E = Set.end();
         I != E; ++I) {
      assert(*I != RemInst && "Already removed NonLocalDep info for RemInst");

      PerInstNLInfo &INLD = NonLocalDeps[*I];
      // The information is now dirty!
      INLD.second = true;

      for (NonLocalDepInfo::iterator DI = INLD.first.begin(),
           DE = INLD.first.end(); DI != DE; ++DI) {
        if (DI->getResult().getInst() != RemInst) continue;

        // Convert to a dirty entry for the subsequent instruction.
        DI->setResult(NewDirtyVal);

        if (Instruction *NextI = NewDirtyVal.getInst())
          ReverseDepsToAdd.push_back(std::make_pair(NextI, *I));
      }
    }

    ReverseNonLocalDeps.erase(ReverseDepIt);

    // Add new reverse deps after scanning the set, to avoid invalidating 'Set'
    while (!ReverseDepsToAdd.empty()) {
      ReverseNonLocalDeps[ReverseDepsToAdd.back().first]
        .insert(ReverseDepsToAdd.back().second);
      ReverseDepsToAdd.pop_back();
    }
  }

  // If the instruction is in ReverseNonLocalPtrDeps then it appears as a
  // value in the NonLocalPointerDeps info.
  ReverseNonLocalPtrDepTy::iterator ReversePtrDepIt =
    ReverseNonLocalPtrDeps.find(RemInst);
  if (ReversePtrDepIt != ReverseNonLocalPtrDeps.end()) {
    SmallPtrSet<ValueIsLoadPair, 4> &Set = ReversePtrDepIt->second;
    SmallVector<std::pair<Instruction*, ValueIsLoadPair>,8> ReversePtrDepsToAdd;

    for (SmallPtrSet<ValueIsLoadPair, 4>::iterator I = Set.begin(),
         E = Set.end(); I != E; ++I) {
      ValueIsLoadPair P = *I;
      assert(P.getPointer() != RemInst &&
             "Already removed NonLocalPointerDeps info for RemInst");

      NonLocalDepInfo &NLPDI = NonLocalPointerDeps[P].NonLocalDeps;

      // The cache is not valid for any specific block anymore.
      NonLocalPointerDeps[P].Pair = BBSkipFirstBlockPair();

      // Update any entries for RemInst to use the instruction after it.
      for (NonLocalDepInfo::iterator DI = NLPDI.begin(), DE = NLPDI.end();
           DI != DE; ++DI) {
        if (DI->getResult().getInst() != RemInst) continue;

        // Convert to a dirty entry for the subsequent instruction.
        DI->setResult(NewDirtyVal);

        if (Instruction *NewDirtyInst = NewDirtyVal.getInst())
          ReversePtrDepsToAdd.push_back(std::make_pair(NewDirtyInst, P));
      }

      // Re-sort the NonLocalDepInfo.  Changing the dirty entry to its
      // subsequent value may invalidate the sortedness.
      std::sort(NLPDI.begin(), NLPDI.end());
    }

    ReverseNonLocalPtrDeps.erase(ReversePtrDepIt);

    while (!ReversePtrDepsToAdd.empty()) {
      ReverseNonLocalPtrDeps[ReversePtrDepsToAdd.back().first]
        .insert(ReversePtrDepsToAdd.back().second);
      ReversePtrDepsToAdd.pop_back();
    }
  }

  assert(!NonLocalDeps.count(RemInst) && "RemInst got reinserted?");
  AA->deleteValue(RemInst);
  DEBUG(verifyRemoved(RemInst));
}
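
// The reverse maps make this precise: only the queries whose cached answer
// actually named RemInst are marked dirty (pointing at the instruction after
// it), so unrelated cache entries survive the removal untouched.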

/// verifyRemoved - Verify that the specified instruction does not occur
/// in our internal data structures.
void MemoryDependenceAnalysis::verifyRemoved(Instruction *D) const {
  for (LocalDepMapType::const_iterator I = LocalDeps.begin(),
       E = LocalDeps.end(); I != E; ++I) {
    assert(I->first != D && "Inst occurs in data structures");
    assert(I->second.getInst() != D &&
           "Inst occurs in data structures");
  }

  for (CachedNonLocalPointerInfo::const_iterator I =NonLocalPointerDeps.begin(),
       E = NonLocalPointerDeps.end(); I != E; ++I) {
    assert(I->first.getPointer() != D && "Inst occurs in NLPD map key");
    const NonLocalDepInfo &Val = I->second.NonLocalDeps;
    for (NonLocalDepInfo::const_iterator II = Val.begin(), E = Val.end();
         II != E; ++II)
      assert(II->getResult().getInst() != D && "Inst occurs as NLPD value");
  }

  for (NonLocalDepMapType::const_iterator I = NonLocalDeps.begin(),
       E = NonLocalDeps.end(); I != E; ++I) {
    assert(I->first != D && "Inst occurs in data structures");
    const PerInstNLInfo &INLD = I->second;
    for (NonLocalDepInfo::const_iterator II = INLD.first.begin(),
         EE = INLD.first.end(); II != EE; ++II)
      assert(II->getResult().getInst() != D && "Inst occurs in data structures");
  }

  for (ReverseDepMapType::const_iterator I = ReverseLocalDeps.begin(),
       E = ReverseLocalDeps.end(); I != E; ++I) {
    assert(I->first != D && "Inst occurs in data structures");
    for (SmallPtrSet<Instruction*, 4>::const_iterator II = I->second.begin(),
         EE = I->second.end(); II != EE; ++II)
      assert(*II != D && "Inst occurs in data structures");
  }

  for (ReverseDepMapType::const_iterator I = ReverseNonLocalDeps.begin(),
       E = ReverseNonLocalDeps.end();
       I != E; ++I) {
    assert(I->first != D && "Inst occurs in data structures");
    for (SmallPtrSet<Instruction*, 4>::const_iterator II = I->second.begin(),
         EE = I->second.end(); II != EE; ++II)
      assert(*II != D && "Inst occurs in data structures");
  }

  for (ReverseNonLocalPtrDepTy::const_iterator
       I = ReverseNonLocalPtrDeps.begin(),
       E = ReverseNonLocalPtrDeps.end(); I != E; ++I) {
    assert(I->first != D && "Inst occurs in rev NLPD map");

    for (SmallPtrSet<ValueIsLoadPair, 4>::const_iterator II = I->second.begin(),
         E = I->second.end(); II != E; ++II)
      assert(*II != ValueIsLoadPair(D, false) &&
             *II != ValueIsLoadPair(D, true) &&
             "Inst occurs in ReverseNonLocalPtrDeps map");
  }
}