//===- MemoryDependenceAnalysis.cpp - Mem Deps Implementation -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements an analysis that determines, for a given memory
// operation, what preceding memory operations it depends on. It builds on
// alias analysis information, and tries to provide a lazy, caching interface to
// a common kind of alias information query.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/Analysis/PhiValues.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PredIteratorCache.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "memdep"

STATISTIC(NumCacheNonLocal, "Number of fully cached non-local responses");
STATISTIC(NumCacheDirtyNonLocal, "Number of dirty cached non-local responses");
STATISTIC(NumUncacheNonLocal, "Number of uncached non-local responses");

STATISTIC(NumCacheNonLocalPtr,
          "Number of fully cached non-local ptr responses");
STATISTIC(NumCacheDirtyNonLocalPtr,
          "Number of cached, but dirty, non-local ptr responses");
STATISTIC(NumUncacheNonLocalPtr, "Number of uncached non-local ptr responses");
STATISTIC(NumCacheCompleteNonLocalPtr,
          "Number of block queries that were completely cached");

// Limit for the number of instructions to scan in a block.
static cl::opt<unsigned> BlockScanLimit(
    "memdep-block-scan-limit", cl::Hidden, cl::init(100),
    cl::desc("The number of instructions to scan in a block in memory "
             "dependency analysis (default = 100)"));

static cl::opt<unsigned>
    BlockNumberLimit("memdep-block-number-limit", cl::Hidden, cl::init(1000),
                     cl::desc("The number of blocks to scan during memory "
                              "dependency analysis (default = 1000)"));

// Limit on the number of memdep results to process.
static const unsigned int NumResultsLimit = 100;
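
// Both limits are ordinary cl::opt flags, so they can be tuned on the opt
// command line, e.g.:
//   opt -memdep-block-scan-limit=200 -memdep-block-number-limit=500 ...
// Raising them trades compile time for more precise dependence results;
// lowering them does the opposite.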

/// This is a helper function that removes Val from 'Inst's set in ReverseMap.
///
/// If the set becomes empty, remove Inst's entry.
template <typename KeyTy>
static void
RemoveFromReverseMap(DenseMap<Instruction *, SmallPtrSet<KeyTy, 4>> &ReverseMap,
                     Instruction *Inst, KeyTy Val) {
  typename DenseMap<Instruction *, SmallPtrSet<KeyTy, 4>>::iterator InstIt =
      ReverseMap.find(Inst);
  assert(InstIt != ReverseMap.end() && "Reverse map out of sync?");
  bool Found = InstIt->second.erase(Val);
  assert(Found && "Invalid reverse map!");
  (void)Found;
  if (InstIt->second.empty())
    ReverseMap.erase(InstIt);
}
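
// Typical use, as seen throughout this file: when a cached result naming
// 'Inst' is invalidated, the querying instruction is dropped from Inst's
// reverse set, e.g.:
//   RemoveFromReverseMap(ReverseLocalDeps, Inst, QueryInst);
// where ReverseLocalDeps maps an instruction to all queries whose cached
// answer points at it.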

/// If the given instruction references a specific memory location, fill in Loc
/// with the details, otherwise set Loc.Ptr to null.
///
/// Returns a ModRefInfo value describing the general behavior of the
/// instruction.
static ModRefInfo GetLocation(const Instruction *Inst, MemoryLocation &Loc,
                              const TargetLibraryInfo &TLI) {
  if (const LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
    if (LI->isUnordered()) {
      Loc = MemoryLocation::get(LI);
      return ModRefInfo::Ref;
    }
    if (LI->getOrdering() == AtomicOrdering::Monotonic) {
      Loc = MemoryLocation::get(LI);
      return ModRefInfo::ModRef;
    }
    Loc = MemoryLocation();
    return ModRefInfo::ModRef;
  }

  if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    if (SI->isUnordered()) {
      Loc = MemoryLocation::get(SI);
      return ModRefInfo::Mod;
    }
    if (SI->getOrdering() == AtomicOrdering::Monotonic) {
      Loc = MemoryLocation::get(SI);
      return ModRefInfo::ModRef;
    }
    Loc = MemoryLocation();
    return ModRefInfo::ModRef;
  }

  if (const VAArgInst *V = dyn_cast<VAArgInst>(Inst)) {
    Loc = MemoryLocation::get(V);
    return ModRefInfo::ModRef;
  }

  if (const CallInst *CI = isFreeCall(Inst, &TLI)) {
    // Calls to free() deallocate the entire structure.
    Loc = MemoryLocation::getAfter(CI->getArgOperand(0));
    return ModRefInfo::Mod;
  }

  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    switch (II->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
    case Intrinsic::invariant_start:
      Loc = MemoryLocation::getForArgument(II, 1, TLI);
      // These intrinsics don't really modify the memory, but returning Mod
      // will allow them to be handled conservatively.
      return ModRefInfo::Mod;
    case Intrinsic::invariant_end:
      Loc = MemoryLocation::getForArgument(II, 2, TLI);
      // These intrinsics don't really modify the memory, but returning Mod
      // will allow them to be handled conservatively.
      return ModRefInfo::Mod;
    case Intrinsic::masked_load:
      Loc = MemoryLocation::getForArgument(II, 0, TLI);
      return ModRefInfo::Ref;
    case Intrinsic::masked_store:
      Loc = MemoryLocation::getForArgument(II, 1, TLI);
      return ModRefInfo::Mod;
    default:
      break;
    }
  }

  // Otherwise, just do the coarse-grained thing that always works.
  if (Inst->mayWriteToMemory())
    return ModRefInfo::ModRef;
  if (Inst->mayReadFromMemory())
    return ModRefInfo::Ref;
  return ModRefInfo::NoModRef;
}
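
// For intuition: given a simple load "%v = load i32, i32* %p", GetLocation
// fills Loc with %p and the size of i32 and returns ModRefInfo::Ref. A
// monotonic atomic load instead reports ModRef, and a volatile or more
// strongly ordered access gets an empty location with ModRef, so callers
// treat both conservatively.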

/// Private helper for finding the local dependencies of a call site.
MemDepResult MemoryDependenceResults::getCallDependencyFrom(
    CallBase *Call, bool isReadOnlyCall, BasicBlock::iterator ScanIt,
    BasicBlock *BB) {
  unsigned Limit = getDefaultBlockScanLimit();

  // Walk backwards through the block, looking for dependencies.
  while (ScanIt != BB->begin()) {
    Instruction *Inst = &*--ScanIt;
    // Debug intrinsics don't cause dependences and should not affect Limit.
    if (isa<DbgInfoIntrinsic>(Inst))
      continue;

    // Limit the amount of scanning we do so we don't end up with quadratic
    // running time on extreme testcases.
    --Limit;
    if (!Limit)
      return MemDepResult::getUnknown();

    // If this inst is a memory op, get the pointer it accessed.
    MemoryLocation Loc;
    ModRefInfo MR = GetLocation(Inst, Loc, TLI);
    if (Loc.Ptr) {
      // A simple instruction.
      if (isModOrRefSet(AA.getModRefInfo(Call, Loc)))
        return MemDepResult::getClobber(Inst);
      continue;
    }

    if (auto *CallB = dyn_cast<CallBase>(Inst)) {
      // If these two calls do not interfere, look past it.
      if (isNoModRef(AA.getModRefInfo(Call, CallB))) {
        // If the two calls are the same, return Inst as a Def, so that
        // Call can be found redundant and eliminated.
        if (isReadOnlyCall && !isModSet(MR) &&
            Call->isIdenticalToWhenDefined(CallB))
          return MemDepResult::getDef(Inst);

        // Otherwise if the two calls don't interact (e.g. CallB is readnone)
        // keep scanning.
        continue;
      } else
        return MemDepResult::getClobber(Inst);
    }

    // If we could not obtain a pointer for the instruction and the instruction
    // touches memory then assume that this is a dependency.
    if (isModOrRefSet(MR))
      return MemDepResult::getClobber(Inst);
  }

  // No dependence found. If this is the entry block of the function, it is
  // unknown, otherwise it is non-local.
  if (BB != &BB->getParent()->getEntryBlock())
    return MemDepResult::getNonLocal();
  return MemDepResult::getNonFuncLocal();
}
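
// For intuition (sketch, hypothetical IR): two identical calls to a read-only
// function, e.g.
//   %a = call i64 @strlen(i8* %s)
//   %b = call i64 @strlen(i8* %s)
// with no interfering write between them make the first call a Def for the
// second, which lets clients fold %b into %a.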

MemDepResult MemoryDependenceResults::getPointerDependencyFrom(
    const MemoryLocation &MemLoc, bool isLoad, BasicBlock::iterator ScanIt,
    BasicBlock *BB, Instruction *QueryInst, unsigned *Limit,
    BatchAAResults &BatchAA) {
  MemDepResult InvariantGroupDependency = MemDepResult::getUnknown();
  if (QueryInst != nullptr) {
    if (auto *LI = dyn_cast<LoadInst>(QueryInst)) {
      InvariantGroupDependency = getInvariantGroupPointerDependency(LI, BB);

      if (InvariantGroupDependency.isDef())
        return InvariantGroupDependency;
    }
  }
  MemDepResult SimpleDep = getSimplePointerDependencyFrom(
      MemLoc, isLoad, ScanIt, BB, QueryInst, Limit, BatchAA);
  if (SimpleDep.isDef())
    return SimpleDep;

  // A non-local invariant group dependency indicates there is a non-local Def
  // (it only returns nonLocal if it finds a nonLocal def), which is better
  // than a local clobber and everything else.
  if (InvariantGroupDependency.isNonLocal())
    return InvariantGroupDependency;

  assert(InvariantGroupDependency.isUnknown() &&
         "InvariantGroupDependency should be only unknown at this point");
  return SimpleDep;
}

MemDepResult MemoryDependenceResults::getPointerDependencyFrom(
    const MemoryLocation &MemLoc, bool isLoad, BasicBlock::iterator ScanIt,
    BasicBlock *BB, Instruction *QueryInst, unsigned *Limit) {
  BatchAAResults BatchAA(AA);
  return getPointerDependencyFrom(MemLoc, isLoad, ScanIt, BB, QueryInst, Limit,
                                  BatchAA);
}

MemDepResult
MemoryDependenceResults::getInvariantGroupPointerDependency(LoadInst *LI,
                                                            BasicBlock *BB) {

  if (!LI->hasMetadata(LLVMContext::MD_invariant_group))
    return MemDepResult::getUnknown();

  // Take the ptr operand after all casts and geps 0. This way we can search
  // only down the cast graph.
  Value *LoadOperand = LI->getPointerOperand()->stripPointerCasts();

  // It is not safe to walk the use list of a global value, because function
  // passes aren't allowed to look outside their functions.
  // FIXME: this could be fixed by filtering instructions from outside
  // of the current function.
  if (isa<GlobalValue>(LoadOperand))
    return MemDepResult::getUnknown();

  // Queue to process all pointers that are equivalent to load operand.
  SmallVector<const Value *, 8> LoadOperandsQueue;
  LoadOperandsQueue.push_back(LoadOperand);

  Instruction *ClosestDependency = nullptr;
  // The order of instructions in a use list is unpredictable. In order to
  // always get the same result, we will look for the closest dominance.
  auto GetClosestDependency = [this](Instruction *Best, Instruction *Other) {
    assert(Other && "Must call it with not null instruction");
    if (Best == nullptr || DT.dominates(Best, Other))
      return Other;
    return Best;
  };

  // FIXME: This loop is O(N^2) because dominates can be O(n) and in worst case
  // we will see all the instructions. This should be fixed in MSSA.
  while (!LoadOperandsQueue.empty()) {
    const Value *Ptr = LoadOperandsQueue.pop_back_val();
    assert(Ptr && !isa<GlobalValue>(Ptr) &&
           "Null or GlobalValue should not be inserted");

    for (const Use &Us : Ptr->uses()) {
      auto *U = dyn_cast<Instruction>(Us.getUser());
      if (!U || U == LI || !DT.dominates(U, LI))
        continue;

      // Bitcast or gep with zeros are using Ptr. Add to queue to check its
      // users.    U = bitcast Ptr
      if (isa<BitCastInst>(U)) {
        LoadOperandsQueue.push_back(U);
        continue;
      }
      // Gep with zeros is equivalent to bitcast.
      // FIXME: we are not sure if some bitcast should be canonicalized to gep 0
      // or gep 0 to bitcast because of SROA, so there are 2 forms. When
      // typeless pointers are ready then both cases will be gone
      // (and this BFS also won't be needed).
      if (auto *GEP = dyn_cast<GetElementPtrInst>(U))
        if (GEP->hasAllZeroIndices()) {
          LoadOperandsQueue.push_back(U);
          continue;
        }

      // If we hit a load/store with the same invariant.group metadata (and the
      // same pointer operand) we can assume that the value pointed to by the
      // pointer operand didn't change.
      if ((isa<LoadInst>(U) ||
           (isa<StoreInst>(U) &&
            cast<StoreInst>(U)->getPointerOperand() == Ptr)) &&
          U->hasMetadata(LLVMContext::MD_invariant_group))
        ClosestDependency = GetClosestDependency(ClosestDependency, U);
    }
  }

  if (!ClosestDependency)
    return MemDepResult::getUnknown();
  if (ClosestDependency->getParent() == BB)
    return MemDepResult::getDef(ClosestDependency);
  // Def(U) can't be returned here because it is non-local. If a local
  // dependency won't be found then return nonLocal, counting on the user to
  // call getNonLocalPointerDependency, which will return the cached result.
  NonLocalDefsCache.try_emplace(
      LI, NonLocalDepResult(ClosestDependency->getParent(),
                            MemDepResult::getDef(ClosestDependency), nullptr));
  ReverseNonLocalDefsCache[ClosestDependency].insert(LI);
  return MemDepResult::getNonLocal();
}
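
// For intuition, the pattern this walk recognizes (sketch, hypothetical IR):
//   store i32 42, i32* %p, !invariant.group !0
//   ...
//   %v = load i32, i32* %p, !invariant.group !0
// Both accesses carry the same invariant.group metadata on the same pointer,
// so the store is a Def for the load (even across bitcasts and all-zero GEPs
// of %p), and %v is known to be 42.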

MemDepResult MemoryDependenceResults::getSimplePointerDependencyFrom(
    const MemoryLocation &MemLoc, bool isLoad, BasicBlock::iterator ScanIt,
    BasicBlock *BB, Instruction *QueryInst, unsigned *Limit,
    BatchAAResults &BatchAA) {
  bool isInvariantLoad = false;

  unsigned DefaultLimit = getDefaultBlockScanLimit();
  if (!Limit)
    Limit = &DefaultLimit;

  // We must be careful with atomic accesses, as they may allow another thread
  //   to touch this location, clobbering it. We are conservative: if the
  //   QueryInst is not a simple (non-atomic) memory access, we automatically
  //   return getClobber.
  // If it is simple, we know based on the results of
  // "Compiler testing via a theory of sound optimisations in the C11/C++11
  //   memory model" in PLDI 2013, that a non-atomic location can only be
  //   clobbered between a pair of a release and an acquire action, with no
  //   access to the location in between.
  // Here is an example for giving the general intuition behind this rule.
  // In the following code:
  //   store x 0;
  //   release action; [1]
  //   acquire action; [4]
  //   %val = load x;
  // It is unsafe to replace %val by 0 because another thread may be running:
  //   acquire action; [2]
  //   store x 42;
  //   release action; [3]
  // with synchronization from 1 to 2 and from 3 to 4, resulting in %val
  // being 42. A key property of this program however is that if either
  // 1 or 4 were missing, there would be a race between the store of 42 and
  // either the store of 0 or the load (making the whole program racy).
  // The paper mentioned above shows that the same property is respected
  // by every program that can detect any optimization of that kind: either
  // it is racy (undefined) or there is a release followed by an acquire
  // between the pair of accesses under consideration.

  // If the load is invariant, we "know" that it doesn't alias *any* write. We
  // do want to respect mustalias results since defs are useful for value
  // forwarding, but any mayalias write can be assumed to be noalias.
  // Arguably, this logic should be pushed inside AliasAnalysis itself.
  if (isLoad && QueryInst) {
    LoadInst *LI = dyn_cast<LoadInst>(QueryInst);
    if (LI && LI->hasMetadata(LLVMContext::MD_invariant_load))
      isInvariantLoad = true;
  }

  // Return "true" if and only if the instruction I is either a non-simple
  // load or a non-simple store.
  auto isNonSimpleLoadOrStore = [](Instruction *I) -> bool {
    if (auto *LI = dyn_cast<LoadInst>(I))
      return !LI->isSimple();
    if (auto *SI = dyn_cast<StoreInst>(I))
      return !SI->isSimple();
    return false;
  };

  // Return "true" if I is not a load and not a store, but it does access
  // memory.
  auto isOtherMemAccess = [](Instruction *I) -> bool {
    return !isa<LoadInst>(I) && !isa<StoreInst>(I) && I->mayReadOrWriteMemory();
  };

  // Walk backwards through the basic block, looking for dependencies.
  while (ScanIt != BB->begin()) {
    Instruction *Inst = &*--ScanIt;

    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
      // Debug intrinsics don't (and can't) cause dependencies.
      if (isa<DbgInfoIntrinsic>(II))
        continue;

    // Limit the amount of scanning we do so we don't end up with quadratic
    // running time on extreme testcases.
    --*Limit;
    if (!*Limit)
      return MemDepResult::getUnknown();

    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
      // If we reach a lifetime begin or end marker, then the query ends here
      // because the value is undefined.
      Intrinsic::ID ID = II->getIntrinsicID();
      switch (ID) {
      case Intrinsic::lifetime_start: {
        // FIXME: This only considers queries directly on the invariant-tagged
        // pointer, not on query pointers that are indexed off of them. It'd
        // be nice to handle that at some point (the right approach is to use
        // GetPointerBaseWithConstantOffset).
        MemoryLocation ArgLoc = MemoryLocation::getAfter(II->getArgOperand(1));
        if (BatchAA.isMustAlias(ArgLoc, MemLoc))
          return MemDepResult::getDef(II);
        continue;
      }
      case Intrinsic::masked_load:
      case Intrinsic::masked_store: {
        MemoryLocation Loc;
        /*ModRefInfo MR =*/ GetLocation(II, Loc, TLI);
        AliasResult R = BatchAA.alias(Loc, MemLoc);
        if (R == AliasResult::NoAlias)
          continue;
        if (R == AliasResult::MustAlias)
          return MemDepResult::getDef(II);
        if (ID == Intrinsic::masked_load)
          continue;
        return MemDepResult::getClobber(II);
      }
      }
    }

    // Values depend on loads if the pointers are must aliased. This means
    // that a load depends on another must aliased load from the same value.
    // One exception is atomic loads: a value can depend on an atomic load that
    // it does not alias with when this atomic load indicates that another
    // thread may be accessing the location.
    if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
      // While volatile accesses cannot be eliminated, they do not have to
      // clobber non-aliasing locations, as normal accesses, for example, can
      // be safely reordered with volatile accesses.
      if (LI->isVolatile()) {
        if (!QueryInst)
          // Original QueryInst *may* be volatile.
          return MemDepResult::getClobber(LI);
        if (QueryInst->isVolatile())
          // Ordering required if QueryInst is itself volatile.
          return MemDepResult::getClobber(LI);
        // Otherwise, volatile doesn't imply any special ordering.
      }

      // Atomic loads have complications involved.
      // A Monotonic (or higher) load is OK if the query inst is itself not
      // atomic.
      // FIXME: This is overly conservative.
      if (LI->isAtomic() && isStrongerThanUnordered(LI->getOrdering())) {
        if (!QueryInst || isNonSimpleLoadOrStore(QueryInst) ||
            isOtherMemAccess(QueryInst))
          return MemDepResult::getClobber(LI);
        if (LI->getOrdering() != AtomicOrdering::Monotonic)
          return MemDepResult::getClobber(LI);
      }

      MemoryLocation LoadLoc = MemoryLocation::get(LI);

      // If we found a pointer, check if it could be the same as our pointer.
      AliasResult R = BatchAA.alias(LoadLoc, MemLoc);

      if (isLoad) {
        if (R == AliasResult::NoAlias)
          continue;

        // Must aliased loads are defs of each other.
        if (R == AliasResult::MustAlias)
          return MemDepResult::getDef(Inst);

        // If we have a partial alias, then return this as a clobber for the
        // client to handle.
        if (R == AliasResult::PartialAlias && R.hasOffset()) {
          ClobberOffsets[LI] = R.getOffset();
          return MemDepResult::getClobber(Inst);
        }

        // Random may-alias loads don't depend on each other without a
        // dependence.
        continue;
      }

      // Stores don't depend on other no-aliased accesses.
      if (R == AliasResult::NoAlias)
        continue;

      // Stores don't alias loads from read-only memory.
      if (BatchAA.pointsToConstantMemory(LoadLoc))
        continue;

      // Stores depend on may/must aliased loads.
      return MemDepResult::getDef(Inst);
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      // Atomic stores have complications involved.
      // A Monotonic store is OK if the query inst is itself not atomic.
      // FIXME: This is overly conservative.
      if (!SI->isUnordered() && SI->isAtomic()) {
        if (!QueryInst || isNonSimpleLoadOrStore(QueryInst) ||
            isOtherMemAccess(QueryInst))
          return MemDepResult::getClobber(SI);
        if (SI->getOrdering() != AtomicOrdering::Monotonic)
          return MemDepResult::getClobber(SI);
      }

      // FIXME: this is overly conservative.
      // While volatile accesses cannot be eliminated, they do not have to
      // clobber non-aliasing locations, as normal accesses can for example be
      // reordered with volatile accesses.
      if (SI->isVolatile())
        if (!QueryInst || isNonSimpleLoadOrStore(QueryInst) ||
            isOtherMemAccess(QueryInst))
          return MemDepResult::getClobber(SI);

      // If alias analysis can tell that this store is guaranteed to not modify
      // the query pointer, ignore it. Use getModRefInfo to handle cases where
      // the query pointer points to constant memory etc.
      if (!isModOrRefSet(BatchAA.getModRefInfo(SI, MemLoc)))
        continue;

      // Ok, this store might clobber the query pointer. Check to see if it is
      // a must alias: in this case, we want to return this as a def.
      // FIXME: Use ModRefInfo::Must bit from getModRefInfo call above.
      MemoryLocation StoreLoc = MemoryLocation::get(SI);

      // If we found a pointer, check if it could be the same as our pointer.
      AliasResult R = BatchAA.alias(StoreLoc, MemLoc);

      if (R == AliasResult::NoAlias)
        continue;
      if (R == AliasResult::MustAlias)
        return MemDepResult::getDef(Inst);
      if (isInvariantLoad)
        continue;
      return MemDepResult::getClobber(Inst);
    }

    // If this is an allocation, and if we know that the accessed pointer is to
    // the allocation, return Def. This means that there is no dependence and
    // the access can be optimized based on that. For example, a load could
    // turn into undef. Note that we can bypass the allocation itself when
    // looking for a clobber in many cases; that's an alias property and is
    // handled by BasicAA.
    if (isa<AllocaInst>(Inst) || isNoAliasFn(Inst, &TLI)) {
      const Value *AccessPtr = getUnderlyingObject(MemLoc.Ptr);
      if (AccessPtr == Inst || BatchAA.isMustAlias(Inst, AccessPtr))
        return MemDepResult::getDef(Inst);
    }

    if (isInvariantLoad)
      continue;

    // A release fence requires that all stores complete before it, but does
    // not prevent the reordering of following loads or stores 'before' the
    // fence. As a result, we look past it when finding a dependency for
    // loads. DSE uses this to find preceding stores to delete and thus we
    // can't bypass the fence if the query instruction is a store.
    if (FenceInst *FI = dyn_cast<FenceInst>(Inst))
      if (isLoad && FI->getOrdering() == AtomicOrdering::Release)
        continue;

    // See if this instruction (e.g. a call or vaarg) mod/ref's the pointer.
    ModRefInfo MR = BatchAA.getModRefInfo(Inst, MemLoc);
    // If necessary, perform additional analysis.
    if (isModAndRefSet(MR))
      MR = BatchAA.callCapturesBefore(Inst, MemLoc, &DT);
    switch (clearMust(MR)) {
    case ModRefInfo::NoModRef:
      // If the call has no effect on the queried pointer, just ignore it.
      continue;
    case ModRefInfo::Mod:
      return MemDepResult::getClobber(Inst);
    case ModRefInfo::Ref:
      // If the call is known to never store to the pointer, and if this is a
      // load query, we can safely ignore it (scan past it).
      if (isLoad)
        continue;
      LLVM_FALLTHROUGH;
    default:
      // Otherwise, there is a potential dependence. Return a clobber.
      return MemDepResult::getClobber(Inst);
    }
  }

  // No dependence found. If this is the entry block of the function, it is
  // unknown, otherwise it is non-local.
  if (BB != &BB->getParent()->getEntryBlock())
    return MemDepResult::getNonLocal();
  return MemDepResult::getNonFuncLocal();
}
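
// For intuition (sketch): scanning backwards from "%v = load i32, i32* %p",
// a preceding "store i32 1, i32* %p" to a must-aliased pointer is returned
// as a Def (the stored value can be forwarded to %v), while a store to a
// merely may-aliased pointer is returned as a Clobber (the load cannot be
// removed, and the scan stops there).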

MemDepResult MemoryDependenceResults::getDependency(Instruction *QueryInst) {
  ClobberOffsets.clear();
  Instruction *ScanPos = QueryInst;

  // Check for a cached result.
  MemDepResult &LocalCache = LocalDeps[QueryInst];

  // If the cached entry is non-dirty, just return it. Note that this depends
  // on MemDepResult's default constructing to 'dirty'.
  if (!LocalCache.isDirty())
    return LocalCache;

  // Otherwise, if we have a dirty entry, we know we can start the scan at that
  // instruction, which may save us some work.
  if (Instruction *Inst = LocalCache.getInst()) {
    ScanPos = Inst;

    RemoveFromReverseMap(ReverseLocalDeps, Inst, QueryInst);
  }

  BasicBlock *QueryParent = QueryInst->getParent();

  // Do the scan.
  if (BasicBlock::iterator(QueryInst) == QueryParent->begin()) {
    // No dependence found. If this is the entry block of the function, it is
    // unknown, otherwise it is non-local.
    if (QueryParent != &QueryParent->getParent()->getEntryBlock())
      LocalCache = MemDepResult::getNonLocal();
    else
      LocalCache = MemDepResult::getNonFuncLocal();
  } else {
    MemoryLocation MemLoc;
    ModRefInfo MR = GetLocation(QueryInst, MemLoc, TLI);
    if (MemLoc.Ptr) {
      // If we can do a pointer scan, make it happen.
      bool isLoad = !isModSet(MR);
      if (auto *II = dyn_cast<IntrinsicInst>(QueryInst))
        isLoad |= II->getIntrinsicID() == Intrinsic::lifetime_start;

      LocalCache =
          getPointerDependencyFrom(MemLoc, isLoad, ScanPos->getIterator(),
                                   QueryParent, QueryInst, nullptr);
    } else if (auto *QueryCall = dyn_cast<CallBase>(QueryInst)) {
      bool isReadOnly = AA.onlyReadsMemory(QueryCall);
      LocalCache = getCallDependencyFrom(QueryCall, isReadOnly,
                                         ScanPos->getIterator(), QueryParent);
    } else
      // Non-memory instruction.
      LocalCache = MemDepResult::getUnknown();
  }

  // Remember the result!
  if (Instruction *I = LocalCache.getInst())
    ReverseLocalDeps[I].insert(QueryInst);

  return LocalCache;
}
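
// A minimal caller sketch (hypothetical client code, not part of this file):
//   MemDepResult Dep = MD.getDependency(LI); // LI is a simple LoadInst.
//   if (Dep.isDef())
//     if (auto *SI = dyn_cast<StoreInst>(Dep.getInst()))
//       ; // SI->getValueOperand() can be forwarded to LI, GVN-style.
// Non-local results instead require getNonLocalPointerDependency (below).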

#ifndef NDEBUG
/// This method is used when -debug is specified to verify that cache arrays
/// are properly kept sorted.
static void AssertSorted(MemoryDependenceResults::NonLocalDepInfo &Cache,
                         int Count = -1) {
  if (Count == -1)
    Count = Cache.size();
  assert(std::is_sorted(Cache.begin(), Cache.begin() + Count) &&
         "Cache isn't sorted!");
}
#endif

const MemoryDependenceResults::NonLocalDepInfo &
MemoryDependenceResults::getNonLocalCallDependency(CallBase *QueryCall) {
  assert(getDependency(QueryCall).isNonLocal() &&
         "getNonLocalCallDependency should only be used on calls with "
         "non-local deps!");
  PerInstNLInfo &CacheP = NonLocalDepsMap[QueryCall];
  NonLocalDepInfo &Cache = CacheP.first;

  // This is the set of blocks that need to be recomputed. In the cached case,
  // this can happen due to instructions being deleted etc. In the uncached
  // case, this starts out as the set of predecessors we care about.
  SmallVector<BasicBlock *, 32> DirtyBlocks;

  if (!Cache.empty()) {
    // Okay, we have a cache entry. If we know it is not dirty, just return it
    // with no computation.
    if (!CacheP.second) {
      ++NumCacheNonLocal;
      return Cache;
    }

    // If we already have a partially computed set of results, scan them to
    // determine what is dirty, seeding our initial DirtyBlocks worklist.
    for (auto &Entry : Cache)
      if (Entry.getResult().isDirty())
        DirtyBlocks.push_back(Entry.getBB());

    // Sort the cache so that we can do fast binary search lookups below.
    llvm::sort(Cache);

    ++NumCacheDirtyNonLocal;
    // cerr << "CACHED CASE: " << DirtyBlocks.size() << " dirty: "
    //     << Cache.size() << " cached: " << *QueryInst;
  } else {
    // Seed DirtyBlocks with each of the preds of QueryInst's block.
    BasicBlock *QueryBB = QueryCall->getParent();
    append_range(DirtyBlocks, PredCache.get(QueryBB));
    ++NumUncacheNonLocal;
  }

  // isReadonlyCall - If this is a read-only call, we can be more aggressive.
  bool isReadonlyCall = AA.onlyReadsMemory(QueryCall);

  SmallPtrSet<BasicBlock *, 32> Visited;

  unsigned NumSortedEntries = Cache.size();
  LLVM_DEBUG(AssertSorted(Cache));

  // Iterate while we still have blocks to update.
  while (!DirtyBlocks.empty()) {
    BasicBlock *DirtyBB = DirtyBlocks.pop_back_val();

    // Already processed this block?
    if (!Visited.insert(DirtyBB).second)
      continue;

    // Do a binary search to see if we already have an entry for this block in
    // the cache set. If so, find it.
    LLVM_DEBUG(AssertSorted(Cache, NumSortedEntries));
    NonLocalDepInfo::iterator Entry =
        std::upper_bound(Cache.begin(), Cache.begin() + NumSortedEntries,
                         NonLocalDepEntry(DirtyBB));
    if (Entry != Cache.begin() && std::prev(Entry)->getBB() == DirtyBB)
      --Entry;

    NonLocalDepEntry *ExistingResult = nullptr;
    if (Entry != Cache.begin() + NumSortedEntries &&
        Entry->getBB() == DirtyBB) {
      // If we already have an entry, and if it isn't already dirty, the block
      // is done.
      if (!Entry->getResult().isDirty())
        continue;

      // Otherwise, remember this slot so we can update the value.
      ExistingResult = &*Entry;
    }

    // If the dirty entry has a pointer, start scanning from it so we don't
    // have to rescan the entire block.
    BasicBlock::iterator ScanPos = DirtyBB->end();
    if (ExistingResult) {
      if (Instruction *Inst = ExistingResult->getResult().getInst()) {
        ScanPos = Inst->getIterator();
        // We're removing QueryInst's use of Inst.
        RemoveFromReverseMap<Instruction *>(ReverseNonLocalDeps, Inst,
                                            QueryCall);
      }
    }

    // Find out if this block has a local dependency for QueryInst.
    MemDepResult Dep;

    if (ScanPos != DirtyBB->begin()) {
      Dep = getCallDependencyFrom(QueryCall, isReadonlyCall, ScanPos, DirtyBB);
    } else if (DirtyBB != &DirtyBB->getParent()->getEntryBlock()) {
      // No dependence found. If this is the entry block of the function, it is
      // a clobber, otherwise it is unknown.
      Dep = MemDepResult::getNonLocal();
    } else {
      Dep = MemDepResult::getNonFuncLocal();
    }

    // If we had a dirty entry for the block, update it. Otherwise, just add
    // a new entry.
    if (ExistingResult)
      ExistingResult->setResult(Dep);
    else
      Cache.push_back(NonLocalDepEntry(DirtyBB, Dep));

    // If the block has a dependency (i.e. it isn't completely transparent to
    // the value), remember the association!
    if (!Dep.isNonLocal()) {
      // Keep the ReverseNonLocalDeps map up to date so we can efficiently
      // update this when we remove instructions.
      if (Instruction *Inst = Dep.getInst())
        ReverseNonLocalDeps[Inst].insert(QueryCall);
    } else {
      // If the block *is* completely transparent to the load, we need to check
      // the predecessors of this block. Add them to our worklist.
      append_range(DirtyBlocks, PredCache.get(DirtyBB));
    }
  }

  return Cache;
}
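
// Design note: the dirty-block worklist makes recomputation incremental; only
// blocks whose cached entry was marked dirty (for example because an
// instruction was removed) are rescanned, and each rescan can resume from the
// old dependency's position instead of the end of the block.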

void MemoryDependenceResults::getNonLocalPointerDependency(
    Instruction *QueryInst, SmallVectorImpl<NonLocalDepResult> &Result) {
  const MemoryLocation Loc = MemoryLocation::get(QueryInst);
  bool isLoad = isa<LoadInst>(QueryInst);
  BasicBlock *FromBB = QueryInst->getParent();
  assert(FromBB);

  assert(Loc.Ptr->getType()->isPointerTy() &&
         "Can't get pointer deps of a non-pointer!");
  Result.clear();
  {
    // Check if there is a cached Def with invariant.group.
    auto NonLocalDefIt = NonLocalDefsCache.find(QueryInst);
    if (NonLocalDefIt != NonLocalDefsCache.end()) {
      Result.push_back(NonLocalDefIt->second);
      ReverseNonLocalDefsCache[NonLocalDefIt->second.getResult().getInst()]
          .erase(QueryInst);
      NonLocalDefsCache.erase(NonLocalDefIt);
      return;
    }
  }
  // This routine does not expect to deal with volatile instructions.
  // Doing so would require piping through the QueryInst all the way through.
  // TODO: volatiles can't be elided, but they can be reordered with other
  // non-volatile accesses.
  //
  // We currently give up on any instruction which is ordered, but we do handle
  // atomic instructions which are unordered.
  // TODO: Handle ordered instructions.
  auto isOrdered = [](Instruction *Inst) {
    if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
      return !LI->isUnordered();
    } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      return !SI->isUnordered();
    }
    return false;
  };
  if (QueryInst->isVolatile() || isOrdered(QueryInst)) {
    Result.push_back(NonLocalDepResult(FromBB, MemDepResult::getUnknown(),
                                       const_cast<Value *>(Loc.Ptr)));
    return;
  }
  const DataLayout &DL = FromBB->getModule()->getDataLayout();
  PHITransAddr Address(const_cast<Value *>(Loc.Ptr), DL, &AC);

  // This is the set of blocks we've inspected, and the pointer we consider in
  // each block. Because of critical edges, we currently bail out if querying
  // a block with multiple different pointers. This can happen during PHI
  // translation.
  DenseMap<BasicBlock *, Value *> Visited;
  if (getNonLocalPointerDepFromBB(QueryInst, Address, Loc, isLoad, FromBB,
                                  Result, Visited, true))
    return;
  Result.clear();
  Result.push_back(NonLocalDepResult(FromBB, MemDepResult::getUnknown(),
                                     const_cast<Value *>(Loc.Ptr)));
}

/// Compute the memdep value for BB with Pointer/PointeeSize using either
/// cached information in Cache or by doing a lookup (which may use dirty cache
/// info if available).
///
/// If we do a lookup, add the result to the cache.
MemDepResult MemoryDependenceResults::getNonLocalInfoForBlock(
    Instruction *QueryInst, const MemoryLocation &Loc, bool isLoad,
    BasicBlock *BB, NonLocalDepInfo *Cache, unsigned NumSortedEntries,
    BatchAAResults &BatchAA) {

  bool isInvariantLoad = false;

  if (LoadInst *LI = dyn_cast_or_null<LoadInst>(QueryInst))
    isInvariantLoad = LI->getMetadata(LLVMContext::MD_invariant_load);

  // Do a binary search to see if we already have an entry for this block in
  // the cache set. If so, find it.
  NonLocalDepInfo::iterator Entry = std::upper_bound(
      Cache->begin(), Cache->begin() + NumSortedEntries, NonLocalDepEntry(BB));
  if (Entry != Cache->begin() && (Entry - 1)->getBB() == BB)
    --Entry;

  NonLocalDepEntry *ExistingResult = nullptr;
  if (Entry != Cache->begin() + NumSortedEntries && Entry->getBB() == BB)
    ExistingResult = &*Entry;

  // Use the cached result for an invariant load only if there is no dependency
  // for the non-invariant load. In that case the invariant load can not have
  // any dependency either.
  if (ExistingResult && isInvariantLoad &&
      !ExistingResult->getResult().isNonFuncLocal())
    ExistingResult = nullptr;

  // If we have a cached entry, and it is non-dirty, use it as the value for
  // this dependency.
  if (ExistingResult && !ExistingResult->getResult().isDirty()) {
    ++NumCacheNonLocalPtr;
    return ExistingResult->getResult();
  }

  // Otherwise, we have to scan for the value. If we have a dirty cache
  // entry, start scanning from its position, otherwise we scan from the end
  // of the block.
  BasicBlock::iterator ScanPos = BB->end();
  if (ExistingResult && ExistingResult->getResult().getInst()) {
    assert(ExistingResult->getResult().getInst()->getParent() == BB &&
           "Instruction invalidated?");
    ++NumCacheDirtyNonLocalPtr;
    ScanPos = ExistingResult->getResult().getInst()->getIterator();

    // Eliminating the dirty entry from 'Cache', so update the reverse info.
    ValueIsLoadPair CacheKey(Loc.Ptr, isLoad);
    RemoveFromReverseMap(ReverseNonLocalPtrDeps, &*ScanPos, CacheKey);
  } else {
    ++NumUncacheNonLocalPtr;
  }

  // Scan the block for the dependency.
  MemDepResult Dep = getPointerDependencyFrom(Loc, isLoad, ScanPos, BB,
                                              QueryInst, nullptr, BatchAA);

  // Don't cache results for invariant load.
  if (isInvariantLoad)
    return Dep;

  // If we had a dirty entry for the block, update it. Otherwise, just add
  // a new entry.
  if (ExistingResult)
    ExistingResult->setResult(Dep);
  else
    Cache->push_back(NonLocalDepEntry(BB, Dep));

  // If the block has a dependency (i.e. it isn't completely transparent to
  // the value), remember the reverse association because we just added it
  // to Cache!
  if (!Dep.isDef() && !Dep.isClobber())
    return Dep;

  // Keep the ReverseNonLocalPtrDeps map up to date so we can efficiently
  // update MemDep when we remove instructions.
  Instruction *Inst = Dep.getInst();
  assert(Inst && "Didn't depend on anything?");
  ValueIsLoadPair CacheKey(Loc.Ptr, isLoad);
  ReverseNonLocalPtrDeps[Inst].insert(CacheKey);
  return Dep;
}

/// Sort the NonLocalDepInfo cache, given a certain number of elements in the
/// array that are already properly ordered.
///
/// This is optimized for the case when only a few entries are added.
static void
SortNonLocalDepInfoCache(MemoryDependenceResults::NonLocalDepInfo &Cache,
                         unsigned NumSortedEntries) {
  switch (Cache.size() - NumSortedEntries) {
  case 0:
    // Done, no new entries.
    break;
  case 2: {
    // Two new entries, insert the last one into place.
    NonLocalDepEntry Val = Cache.back();
    Cache.pop_back();
    MemoryDependenceResults::NonLocalDepInfo::iterator Entry =
        std::upper_bound(Cache.begin(), Cache.end() - 1, Val);
    Cache.insert(Entry, Val);
    LLVM_FALLTHROUGH;
  }
  case 1:
    // One new entry, just insert the new value at the appropriate position.
    if (Cache.size() != 1) {
      NonLocalDepEntry Val = Cache.back();
      Cache.pop_back();
      MemoryDependenceResults::NonLocalDepInfo::iterator Entry =
          llvm::upper_bound(Cache, Val);
      Cache.insert(Entry, Val);
    }
    break;
  default:
    // Added many values, do a full scale sort.
    llvm::sort(Cache);
    break;
  }
}
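
// Design note: the single-insertion cases above cost one binary search plus
// one vector insert per new entry instead of a full O(n log n) sort, which
// pays off because most queries add only one or two new blocks before the
// cache is re-sorted.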

/// Perform a dependency query based on pointer/pointeesize starting at the end
/// of StartBB.
///
/// Add any clobber/def results to the results vector and keep track of which
/// blocks are visited in 'Visited'.
///
/// This has special behavior for the first block queries (when SkipFirstBlock
/// is true). In this special case, it ignores the contents of the specified
/// block and starts returning dependence info for its predecessors.
///
/// This function returns true on success, or false to indicate that it could
/// not compute dependence information for some reason. This should be treated
/// as a clobber dependence on the first instruction in the predecessor block.
bool MemoryDependenceResults::getNonLocalPointerDepFromBB(
    Instruction *QueryInst, const PHITransAddr &Pointer,
    const MemoryLocation &Loc, bool isLoad, BasicBlock *StartBB,
    SmallVectorImpl<NonLocalDepResult> &Result,
    DenseMap<BasicBlock *, Value *> &Visited, bool SkipFirstBlock,
    bool IsIncomplete) {
  // Look up the cached info for Pointer.
  ValueIsLoadPair CacheKey(Pointer.getAddr(), isLoad);

  // Set up a temporary NLPI value. If the map doesn't yet have an entry for
  // CacheKey, this value will be inserted as the associated value. Otherwise,
  // it'll be ignored, and we'll have to check to see if the cached size and
  // aa tags are consistent with the current query.
  NonLocalPointerInfo InitialNLPI;
  InitialNLPI.Size = Loc.Size;
  InitialNLPI.AATags = Loc.AATags;

  bool isInvariantLoad = false;
  if (LoadInst *LI = dyn_cast_or_null<LoadInst>(QueryInst))
    isInvariantLoad = LI->getMetadata(LLVMContext::MD_invariant_load);

  // Get the NLPI for CacheKey, inserting one into the map if it doesn't
  // already have one.
  std::pair<CachedNonLocalPointerInfo::iterator, bool> Pair =
      NonLocalPointerDeps.insert(std::make_pair(CacheKey, InitialNLPI));
  NonLocalPointerInfo *CacheInfo = &Pair.first->second;

  // If we already have a cache entry for this CacheKey, we may need to do some
  // work to reconcile the cache entry and the current query.
  // Invariant loads don't participate in caching. Thus no need to reconcile.
  if (!isInvariantLoad && !Pair.second) {
    if (CacheInfo->Size != Loc.Size) {
      bool ThrowOutEverything;
      if (CacheInfo->Size.hasValue() && Loc.Size.hasValue()) {
        // FIXME: We may be able to do better in the face of results with mixed
        // precision. We don't appear to get them in practice, though, so just
        // be conservative.
        ThrowOutEverything =
            CacheInfo->Size.isPrecise() != Loc.Size.isPrecise() ||
            CacheInfo->Size.getValue() < Loc.Size.getValue();
      } else {
        // For our purposes, unknown size > all others.
        ThrowOutEverything = !Loc.Size.hasValue();
      }

      if (ThrowOutEverything) {
        // The query's Size is greater than the cached one. Throw out the
        // cached data and proceed with the query at the greater size.
        CacheInfo->Pair = BBSkipFirstBlockPair();
        CacheInfo->Size = Loc.Size;
        for (auto &Entry : CacheInfo->NonLocalDeps)
          if (Instruction *Inst = Entry.getResult().getInst())
            RemoveFromReverseMap(ReverseNonLocalPtrDeps, Inst, CacheKey);
        CacheInfo->NonLocalDeps.clear();
        // The cache is cleared (in the above line) so we will have lost
        // information about blocks we have already visited. We therefore must
        // assume that the cache information is incomplete.
        IsIncomplete = true;
      } else {
        // This query's Size is less than the cached one. Conservatively
        // restart the query using the greater size.
        return getNonLocalPointerDepFromBB(
            QueryInst, Pointer, Loc.getWithNewSize(CacheInfo->Size), isLoad,
            StartBB, Result, Visited, SkipFirstBlock, IsIncomplete);
      }
    }

    // If the query's AATags are inconsistent with the cached one,
    // conservatively throw out the cached data and restart the query with
    // no tag if needed.
    if (CacheInfo->AATags != Loc.AATags) {
      if (CacheInfo->AATags) {
        CacheInfo->Pair = BBSkipFirstBlockPair();
        CacheInfo->AATags = AAMDNodes();
        for (auto &Entry : CacheInfo->NonLocalDeps)
          if (Instruction *Inst = Entry.getResult().getInst())
            RemoveFromReverseMap(ReverseNonLocalPtrDeps, Inst, CacheKey);
        CacheInfo->NonLocalDeps.clear();
        // The cache is cleared (in the above line) so we will have lost
        // information about blocks we have already visited. We therefore must
        // assume that the cache information is incomplete.
        IsIncomplete = true;
      }
      if (Loc.AATags)
        return getNonLocalPointerDepFromBB(
            QueryInst, Pointer, Loc.getWithoutAATags(), isLoad, StartBB, Result,
            Visited, SkipFirstBlock, IsIncomplete);
    }
  }
*Cache
= &CacheInfo
->NonLocalDeps
;
1129 // If we have valid cached information for exactly the block we are
1130 // investigating, just return it with no recomputation.
1131 // Don't use cached information for invariant loads since it is valid for
1132 // non-invariant loads only.
1133 if (!IsIncomplete
&& !isInvariantLoad
&&
1134 CacheInfo
->Pair
== BBSkipFirstBlockPair(StartBB
, SkipFirstBlock
)) {
1135 // We have a fully cached result for this query then we can just return the
1136 // cached results and populate the visited set. However, we have to verify
1137 // that we don't already have conflicting results for these blocks. Check
1138 // to ensure that if a block in the results set is in the visited set that
1139 // it was for the same pointer query.
1140 if (!Visited
.empty()) {
1141 for (auto &Entry
: *Cache
) {
1142 DenseMap
<BasicBlock
*, Value
*>::iterator VI
=
1143 Visited
.find(Entry
.getBB());
1144 if (VI
== Visited
.end() || VI
->second
== Pointer
.getAddr())
1147 // We have a pointer mismatch in a block. Just return false, saying
1148 // that something was clobbered in this result. We could also do a
1149 // non-fully cached query, but there is little point in doing this.
1154 Value
*Addr
= Pointer
.getAddr();
1155 for (auto &Entry
: *Cache
) {
1156 Visited
.insert(std::make_pair(Entry
.getBB(), Addr
));
1157 if (Entry
.getResult().isNonLocal()) {
1161 if (DT
.isReachableFromEntry(Entry
.getBB())) {
1163 NonLocalDepResult(Entry
.getBB(), Entry
.getResult(), Addr
));
1166 ++NumCacheCompleteNonLocalPtr
;
1170 // Otherwise, either this is a new block, a block with an invalid cache
1171 // pointer or one that we're about to invalidate by putting more info into
1172 // it than its valid cache info. If empty and not explicitly indicated as
1173 // incomplete, the result will be valid cache info, otherwise it isn't.
1175 // Invariant loads don't affect cache in any way thus no need to update
1176 // CacheInfo as well.
1177 if (!isInvariantLoad
) {
1178 if (!IsIncomplete
&& Cache
->empty())
1179 CacheInfo
->Pair
= BBSkipFirstBlockPair(StartBB
, SkipFirstBlock
);
1181 CacheInfo
->Pair
= BBSkipFirstBlockPair();
1184 SmallVector
<BasicBlock
*, 32> Worklist
;
1185 Worklist
.push_back(StartBB
);
1187 // PredList used inside loop.
1188 SmallVector
<std::pair
<BasicBlock
*, PHITransAddr
>, 16> PredList
;
1190 // Keep track of the entries that we know are sorted. Previously cached
1191 // entries will all be sorted. The entries we add we only sort on demand (we
1192 // don't insert every element into its sorted position). We know that we
1193 // won't get any reuse from currently inserted values, because we don't
1194 // revisit blocks after we insert info for them.
1195 unsigned NumSortedEntries
= Cache
->size();
1196 unsigned WorklistEntries
= BlockNumberLimit
;
1197 bool GotWorklistLimit
= false;
1198 LLVM_DEBUG(AssertSorted(*Cache
));
1200 BatchAAResults
BatchAA(AA
);
1201 while (!Worklist
.empty()) {
1202 BasicBlock
*BB
= Worklist
.pop_back_val();
1204 // If we do process a large number of blocks it becomes very expensive and
1205 // likely it isn't worth worrying about
1206 if (Result
.size() > NumResultsLimit
) {
1208 // Sort it now (if needed) so that recursive invocations of
1209 // getNonLocalPointerDepFromBB and other routines that could reuse the
1210 // cache value will only see properly sorted cache arrays.
1211 if (Cache
&& NumSortedEntries
!= Cache
->size()) {
1212 SortNonLocalDepInfoCache(*Cache
, NumSortedEntries
);
1214 // Since we bail out, the "Cache" set won't contain all of the
1215 // results for the query. This is ok (we can still use it to accelerate
1216 // specific block queries) but we can't do the fastpath "return all
1217 // results from the set". Clear out the indicator for this.
1218 CacheInfo
->Pair
= BBSkipFirstBlockPair();

    // Skip the first block if we have it.
    if (!SkipFirstBlock) {
      // Analyze the dependency of *Pointer in FromBB. See if we already have
      // been here.
      assert(Visited.count(BB) && "Should check 'visited' before adding to WL");

      // Get the dependency info for Pointer in BB. If we have cached
      // information, we will use it, otherwise we compute it.
      LLVM_DEBUG(AssertSorted(*Cache, NumSortedEntries));
      MemDepResult Dep = getNonLocalInfoForBlock(
          QueryInst, Loc, isLoad, BB, Cache, NumSortedEntries, BatchAA);

      // If we got a Def or Clobber, add this to the list of results.
      if (!Dep.isNonLocal()) {
        if (DT.isReachableFromEntry(BB)) {
          Result.push_back(NonLocalDepResult(BB, Dep, Pointer.getAddr()));
          continue;
        }
      }
    }

    // If 'Pointer' is an instruction defined in this block, then we need to do
    // phi translation to change it into a value live in the predecessor block.
    // If not, we just add the predecessors to the worklist and scan them with
    // the same Pointer.
    if (!Pointer.NeedsPHITranslationFromBlock(BB)) {
      SkipFirstBlock = false;
      SmallVector<BasicBlock *, 16> NewBlocks;
      for (BasicBlock *Pred : PredCache.get(BB)) {
        // Verify that we haven't looked at this block yet.
        std::pair<DenseMap<BasicBlock *, Value *>::iterator, bool> InsertRes =
            Visited.insert(std::make_pair(Pred, Pointer.getAddr()));
        if (InsertRes.second) {
          // First time we've looked at *PI.
          NewBlocks.push_back(Pred);
          continue;
        }

        // If we have seen this block before, but it was with a different
        // pointer then we have a phi translation failure and we have to treat
        // this as a clobber.
        if (InsertRes.first->second != Pointer.getAddr()) {
          // Make sure to clean up the Visited map before continuing on to
          // PredTranslationFailure.
          for (unsigned i = 0; i < NewBlocks.size(); i++)
            Visited.erase(NewBlocks[i]);
          goto PredTranslationFailure;
        }
      }
      if (NewBlocks.size() > WorklistEntries) {
        // Make sure to clean up the Visited map before continuing on to
        // PredTranslationFailure.
        for (unsigned i = 0; i < NewBlocks.size(); i++)
          Visited.erase(NewBlocks[i]);
        GotWorklistLimit = true;
        goto PredTranslationFailure;
      }
      WorklistEntries -= NewBlocks.size();
      Worklist.append(NewBlocks.begin(), NewBlocks.end());
      continue;
    }

    // We do need to do phi translation; if we know ahead of time we can't phi
    // translate this value, don't even try.
    if (!Pointer.IsPotentiallyPHITranslatable())
      goto PredTranslationFailure;

    // We may have added values to the cache list before this PHI translation.
    // If so, we haven't done anything to ensure that the cache remains sorted.
    // Sort it now (if needed) so that recursive invocations of
    // getNonLocalPointerDepFromBB and other routines that could reuse the cache
    // value will only see properly sorted cache arrays.
    if (Cache && NumSortedEntries != Cache->size()) {
      SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
      NumSortedEntries = Cache->size();
    }
    Cache = nullptr;

    PredList.clear();
    for (BasicBlock *Pred : PredCache.get(BB)) {
      PredList.push_back(std::make_pair(Pred, Pointer));

      // Get the PHI translated pointer in this predecessor. This can fail if
      // not translatable, in which case the getAddr() returns null.
      PHITransAddr &PredPointer = PredList.back().second;
      PredPointer.PHITranslateValue(BB, Pred, &DT, /*MustDominate=*/false);
      Value *PredPtrVal = PredPointer.getAddr();

      // Check to see if we have already visited this pred block with another
      // pointer. If so, we can't do this lookup. This failure can occur
      // with PHI translation when a critical edge exists and the PHI node in
      // the successor translates to a pointer value different than the
      // pointer the block was first analyzed with.
      std::pair<DenseMap<BasicBlock *, Value *>::iterator, bool> InsertRes =
          Visited.insert(std::make_pair(Pred, PredPtrVal));

      if (!InsertRes.second) {
        // We found the pred; take it off the list of preds to visit.
        PredList.pop_back();

        // If the predecessor was visited with PredPtr, then we already did
        // the analysis and can ignore it.
        if (InsertRes.first->second == PredPtrVal)
          continue;

        // Otherwise, the block was previously analyzed with a different
        // pointer. We can't represent the result of this case, so we just
        // treat this as a phi translation failure.

        // Make sure to clean up the Visited map before continuing on to
        // PredTranslationFailure.
        for (unsigned i = 0, n = PredList.size(); i < n; ++i)
          Visited.erase(PredList[i].first);

        goto PredTranslationFailure;
      }
    }

    // Actually process results here; this needs to be a separate loop to avoid
    // calling getNonLocalPointerDepFromBB for blocks we don't want to return
    // any results for. (getNonLocalPointerDepFromBB will modify our
    // datastructures in ways the code after the PredTranslationFailure label
    // wouldn't like.)
    for (unsigned i = 0, n = PredList.size(); i < n; ++i) {
      BasicBlock *Pred = PredList[i].first;
      PHITransAddr &PredPointer = PredList[i].second;
      Value *PredPtrVal = PredPointer.getAddr();

      bool CanTranslate = true;
      // If PHI translation was unable to find an available pointer in this
      // predecessor, then we have to assume that the pointer is clobbered in
      // that predecessor. We can still do PRE of the load, which would insert
      // a computation of the pointer in this predecessor.
      if (!PredPtrVal)
        CanTranslate = false;

      // FIXME: it is entirely possible that PHI translating will end up with
      // the same value. Consider PHI translating something like:
      // X = phi [x, bb1], [y, bb2]. PHI translating for bb1 doesn't *need*
      // to recurse here, pedantically speaking.

      // If getNonLocalPointerDepFromBB fails here, that means the cached
      // result conflicted with the Visited list; we have to conservatively
      // assume it is unknown, but this also does not block PRE of the load.
      if (!CanTranslate ||
          !getNonLocalPointerDepFromBB(QueryInst, PredPointer,
                                       Loc.getWithNewPtr(PredPtrVal), isLoad,
                                       Pred, Result, Visited)) {
        // Add the entry to the Result list.
        NonLocalDepResult Entry(Pred, MemDepResult::getUnknown(), PredPtrVal);
        Result.push_back(Entry);

        // Since we had a phi translation failure, the cache for CacheKey won't
        // include all of the entries that we need to immediately satisfy future
        // queries. Mark this in NonLocalPointerDeps by setting the
        // BBSkipFirstBlockPair pointer to null. This requires reuse of the
        // cached value to do more work but not miss the phi trans failure.
        NonLocalPointerInfo &NLPI = NonLocalPointerDeps[CacheKey];
        NLPI.Pair = BBSkipFirstBlockPair();
        continue;
      }
    }

    // Refresh the CacheInfo/Cache pointer so that it isn't invalidated.
    CacheInfo = &NonLocalPointerDeps[CacheKey];
    Cache = &CacheInfo->NonLocalDeps;
    NumSortedEntries = Cache->size();

    // Since we did phi translation, the "Cache" set won't contain all of the
    // results for the query. This is ok (we can still use it to accelerate
    // specific block queries) but we can't do the fastpath "return all
    // results from the set". Clear out the indicator for this.
    CacheInfo->Pair = BBSkipFirstBlockPair();
    SkipFirstBlock = false;
    continue;

  PredTranslationFailure:
    // The following code is "failure"; we can't produce a sane translation
    // for the given block. It assumes that we haven't modified any of
    // our datastructures while processing the current block.

    if (!Cache) {
      // Refresh the CacheInfo/Cache pointer if it got invalidated.
      CacheInfo = &NonLocalPointerDeps[CacheKey];
      Cache = &CacheInfo->NonLocalDeps;
      NumSortedEntries = Cache->size();
    }

    // Since we failed phi translation, the "Cache" set won't contain all of the
    // results for the query. This is ok (we can still use it to accelerate
    // specific block queries) but we can't do the fastpath "return all
    // results from the set". Clear out the indicator for this.
    CacheInfo->Pair = BBSkipFirstBlockPair();

    // If *nothing* works, mark the pointer as unknown.
    //
    // If this is the magic first block, return this as a clobber of the whole
    // incoming value. Since we can't phi translate to one of the predecessors,
    // we have to bail out.
    if (SkipFirstBlock)
      return false;

    // Results of invariant loads are not cached thus no need to update cached
    // information.
    if (!isInvariantLoad) {
      for (NonLocalDepEntry &I : llvm::reverse(*Cache)) {
        if (I.getBB() != BB)
          continue;

        assert((GotWorklistLimit || I.getResult().isNonLocal() ||
                !DT.isReachableFromEntry(BB)) &&
               "Should only be here with transparent block");

        I.setResult(MemDepResult::getUnknown());

        break;
      }
    }
    (void)GotWorklistLimit;
    // Go ahead and report unknown dependence.
    Result.push_back(
        NonLocalDepResult(BB, MemDepResult::getUnknown(), Pointer.getAddr()));
  }

  // Okay, we're done now. If we added new values to the cache, re-sort it.
  SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
  LLVM_DEBUG(AssertSorted(*Cache));
  return true;
}
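
// Phi translation, for intuition (sketch, hypothetical IR): when the queried
// address in a block is "%p = phi i32* [ %a, %bb1 ], [ %b, %bb2 ]", the walk
// above continues into %bb1 with %a and into %bb2 with %b, so each
// predecessor is scanned for the pointer value it actually produces.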

/// If P exists in CachedNonLocalPointerInfo or NonLocalDefsCache, remove it.
void MemoryDependenceResults::removeCachedNonLocalPointerDependencies(
    ValueIsLoadPair P) {

  // Most of the time this cache is empty.
  if (!NonLocalDefsCache.empty()) {
    auto it = NonLocalDefsCache.find(P.getPointer());
    if (it != NonLocalDefsCache.end()) {
      RemoveFromReverseMap(ReverseNonLocalDefsCache,
                           it->second.getResult().getInst(), P.getPointer());
      NonLocalDefsCache.erase(it);
    }

    if (auto *I = dyn_cast<Instruction>(P.getPointer())) {
      auto toRemoveIt = ReverseNonLocalDefsCache.find(I);
      if (toRemoveIt != ReverseNonLocalDefsCache.end()) {
        for (const auto *entry : toRemoveIt->second)
          NonLocalDefsCache.erase(entry);
        ReverseNonLocalDefsCache.erase(toRemoveIt);
      }
    }
  }

  CachedNonLocalPointerInfo::iterator It = NonLocalPointerDeps.find(P);
  if (It == NonLocalPointerDeps.end())
    return;

  // Remove all of the entries in the BB->val map. This involves removing
  // instructions from the reverse map.
  NonLocalDepInfo &PInfo = It->second.NonLocalDeps;

  for (unsigned i = 0, e = PInfo.size(); i != e; ++i) {
    Instruction *Target = PInfo[i].getResult().getInst();
    if (!Target)
      continue; // Ignore non-local dep results.
    assert(Target->getParent() == PInfo[i].getBB());

    // Eliminating the dirty entry from 'Cache', so update the reverse info.
    RemoveFromReverseMap(ReverseNonLocalPtrDeps, Target, P);
  }

  // Remove P from NonLocalPointerDeps (which deletes NonLocalDepInfo).
  NonLocalPointerDeps.erase(It);
}
void MemoryDependenceResults::invalidateCachedPointerInfo(Value *Ptr) {
  // If Ptr isn't really a pointer, just ignore it.
  if (!Ptr->getType()->isPointerTy())
    return;
  // Flush store info for the pointer.
  removeCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, false));
  // Flush load info for the pointer.
  removeCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, true));
  // Invalidate phis that use the pointer.
  PV.invalidateValue(Ptr);
}
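// Illustrative sketch, not part of this file: a hypothetical transform that
// rewrites a pointer value must flush its cached info, e.g.:
//
//   OldPtr->replaceAllUsesWith(NewPtr);
//   MD.invalidateCachedPointerInfo(OldPtr); // drop stale load/store entries
//   MD.invalidateCachedPointerInfo(NewPtr); // queries will recompute lazily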
void MemoryDependenceResults::invalidateCachedPredecessors() {
  PredCache.clear();
}
void MemoryDependenceResults::removeInstruction(Instruction *RemInst) {
  // Walk through the Non-local dependencies, removing this one as the value
  // for any cached queries.
  NonLocalDepMapType::iterator NLDI = NonLocalDepsMap.find(RemInst);
  if (NLDI != NonLocalDepsMap.end()) {
    NonLocalDepInfo &BlockMap = NLDI->second.first;
    for (auto &Entry : BlockMap)
      if (Instruction *Inst = Entry.getResult().getInst())
        RemoveFromReverseMap(ReverseNonLocalDeps, Inst, RemInst);
    NonLocalDepsMap.erase(NLDI);
  }
  // If we have a cached local dependence query for this instruction, remove it.
  LocalDepMapType::iterator LocalDepEntry = LocalDeps.find(RemInst);
  if (LocalDepEntry != LocalDeps.end()) {
    // Remove us from DepInst's reverse set now that the local dep info is gone.
    if (Instruction *Inst = LocalDepEntry->second.getInst())
      RemoveFromReverseMap(ReverseLocalDeps, Inst, RemInst);

    // Remove this local dependency info.
    LocalDeps.erase(LocalDepEntry);
  }
  // If we have any cached dependencies on this instruction, remove
  // them.

  // If the instruction is a pointer, remove it from both the load info and the
  // store info.
  if (RemInst->getType()->isPointerTy()) {
    removeCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, false));
    removeCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, true));
  } else {
    // Otherwise, if the instruction is in the map directly, it must be a load.
    // Remove it.
    auto toRemoveIt = NonLocalDefsCache.find(RemInst);
    if (toRemoveIt != NonLocalDefsCache.end()) {
      assert(isa<LoadInst>(RemInst) &&
             "only load instructions should be added directly");
      const Instruction *DepV = toRemoveIt->second.getResult().getInst();
      ReverseNonLocalDefsCache.find(DepV)->second.erase(RemInst);
      NonLocalDefsCache.erase(toRemoveIt);
    }
  }
  // Loop over all of the things that depend on the instruction we're removing.
  SmallVector<std::pair<Instruction *, Instruction *>, 8> ReverseDepsToAdd;

  // If we find RemInst as a clobber or Def in any of the maps for other values,
  // we need to replace its entry with a dirty version of the instruction after
  // it. If RemInst is a terminator, we use a null dirty value.
  //
  // Using a dirty version of the instruction after RemInst saves having to scan
  // the entire block to get to this point.
  MemDepResult NewDirtyVal;
  if (!RemInst->isTerminator())
    NewDirtyVal = MemDepResult::getDirty(&*++RemInst->getIterator());
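  // Illustrative note (an assumption, not original text): given a block
  //   %a = load i32, ptr %p    ; RemInst being removed
  //   %b = add i32 %a, 1       ; the instruction after RemInst
  // a cached result pointing at %a becomes getDirty(%b), so a later re-query
  // resumes scanning at %b instead of re-walking the block from its end.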
  ReverseDepMapType::iterator ReverseDepIt = ReverseLocalDeps.find(RemInst);
  if (ReverseDepIt != ReverseLocalDeps.end()) {
    // RemInst can't be the terminator if it has local stuff depending on it.
    assert(!ReverseDepIt->second.empty() && !RemInst->isTerminator() &&
           "Nothing can locally depend on a terminator");

    for (Instruction *InstDependingOnRemInst : ReverseDepIt->second) {
      assert(InstDependingOnRemInst != RemInst &&
             "Already removed our local dep info");

      LocalDeps[InstDependingOnRemInst] = NewDirtyVal;

      // Make sure to remember that new things depend on NewDepInst.
      assert(NewDirtyVal.getInst() &&
             "There is no way something else can have "
             "a local dep on this if it is a terminator!");
      ReverseDepsToAdd.push_back(
          std::make_pair(NewDirtyVal.getInst(), InstDependingOnRemInst));
    }

    ReverseLocalDeps.erase(ReverseDepIt);
    // Add new reverse deps after scanning the set, to avoid invalidating the
    // 'ReverseDeps' reference.
    while (!ReverseDepsToAdd.empty()) {
      ReverseLocalDeps[ReverseDepsToAdd.back().first].insert(
          ReverseDepsToAdd.back().second);
      ReverseDepsToAdd.pop_back();
    }
  }
  ReverseDepIt = ReverseNonLocalDeps.find(RemInst);
  if (ReverseDepIt != ReverseNonLocalDeps.end()) {
    for (Instruction *I : ReverseDepIt->second) {
      assert(I != RemInst && "Already removed NonLocalDep info for RemInst");

      PerInstNLInfo &INLD = NonLocalDepsMap[I];
      // The information is now dirty!
      INLD.second = true;

      for (auto &Entry : INLD.first) {
        if (Entry.getResult().getInst() != RemInst)
          continue;

        // Convert to a dirty entry for the subsequent instruction.
        Entry.setResult(NewDirtyVal);

        if (Instruction *NextI = NewDirtyVal.getInst())
          ReverseDepsToAdd.push_back(std::make_pair(NextI, I));
      }
    }

    ReverseNonLocalDeps.erase(ReverseDepIt);
    // Add new reverse deps after scanning the set, to avoid invalidating 'Set'
    while (!ReverseDepsToAdd.empty()) {
      ReverseNonLocalDeps[ReverseDepsToAdd.back().first].insert(
          ReverseDepsToAdd.back().second);
      ReverseDepsToAdd.pop_back();
    }
  }
  // If the instruction is in ReverseNonLocalPtrDeps then it appears as a
  // value in the NonLocalPointerDeps info.
  ReverseNonLocalPtrDepTy::iterator ReversePtrDepIt =
      ReverseNonLocalPtrDeps.find(RemInst);
  if (ReversePtrDepIt != ReverseNonLocalPtrDeps.end()) {
    SmallVector<std::pair<Instruction *, ValueIsLoadPair>, 8>
        ReversePtrDepsToAdd;

    for (ValueIsLoadPair P : ReversePtrDepIt->second) {
      assert(P.getPointer() != RemInst &&
             "Already removed NonLocalPointerDeps info for RemInst");

      NonLocalDepInfo &NLPDI = NonLocalPointerDeps[P].NonLocalDeps;

      // The cache is not valid for any specific block anymore.
      NonLocalPointerDeps[P].Pair = BBSkipFirstBlockPair();

      // Update any entries for RemInst to use the instruction after it.
      for (auto &Entry : NLPDI) {
        if (Entry.getResult().getInst() != RemInst)
          continue;

        // Convert to a dirty entry for the subsequent instruction.
        Entry.setResult(NewDirtyVal);

        if (Instruction *NewDirtyInst = NewDirtyVal.getInst())
          ReversePtrDepsToAdd.push_back(std::make_pair(NewDirtyInst, P));
      }
      // Re-sort the NonLocalDepInfo. Changing the dirty entry to its
      // subsequent value may invalidate the sortedness.
      llvm::sort(NLPDI);
    }

    ReverseNonLocalPtrDeps.erase(ReversePtrDepIt);
    while (!ReversePtrDepsToAdd.empty()) {
      ReverseNonLocalPtrDeps[ReversePtrDepsToAdd.back().first].insert(
          ReversePtrDepsToAdd.back().second);
      ReversePtrDepsToAdd.pop_back();
    }
  }
  // Invalidate phis that use the removed instruction.
  PV.invalidateValue(RemInst);

  assert(!NonLocalDepsMap.count(RemInst) && "RemInst got reinserted?");
  LLVM_DEBUG(verifyRemoved(RemInst));
}
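// Illustrative sketch, not part of this file: a hypothetical client deleting
// an instruction must notify memdep while the instruction is still alive:
//
//   MD.removeInstruction(I); // purge cached results that mention I
//   I->eraseFromParent();    // only then destroy the instruction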
/// Verify that the specified instruction does not occur in our internal data
/// structures.
///
/// This function verifies by asserting in debug builds.
void MemoryDependenceResults::verifyRemoved(Instruction *D) const {
#ifndef NDEBUG
  for (const auto &DepKV : LocalDeps) {
    assert(DepKV.first != D && "Inst occurs in data structures");
    assert(DepKV.second.getInst() != D && "Inst occurs in data structures");
  }
  for (const auto &DepKV : NonLocalPointerDeps) {
    assert(DepKV.first.getPointer() != D && "Inst occurs in NLPD map key");
    for (const auto &Entry : DepKV.second.NonLocalDeps)
      assert(Entry.getResult().getInst() != D && "Inst occurs as NLPD value");
  }
  for (const auto &DepKV : NonLocalDepsMap) {
    assert(DepKV.first != D && "Inst occurs in data structures");
    const PerInstNLInfo &INLD = DepKV.second;
    for (const auto &Entry : INLD.first)
      assert(Entry.getResult().getInst() != D &&
             "Inst occurs in data structures");
  }
  for (const auto &DepKV : ReverseLocalDeps) {
    assert(DepKV.first != D && "Inst occurs in data structures");
    for (Instruction *Inst : DepKV.second)
      assert(Inst != D && "Inst occurs in data structures");
  }
  for (const auto &DepKV : ReverseNonLocalDeps) {
    assert(DepKV.first != D && "Inst occurs in data structures");
    for (Instruction *Inst : DepKV.second)
      assert(Inst != D && "Inst occurs in data structures");
  }
  for (const auto &DepKV : ReverseNonLocalPtrDeps) {
    assert(DepKV.first != D && "Inst occurs in rev NLPD map");

    for (ValueIsLoadPair P : DepKV.second)
      assert(P != ValueIsLoadPair(D, false) && P != ValueIsLoadPair(D, true) &&
             "Inst occurs in ReverseNonLocalPtrDeps map");
  }
#endif
}
AnalysisKey MemoryDependenceAnalysis::Key;

MemoryDependenceAnalysis::MemoryDependenceAnalysis()
    : DefaultBlockScanLimit(BlockScanLimit) {}
MemoryDependenceResults
MemoryDependenceAnalysis::run(Function &F, FunctionAnalysisManager &AM) {
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &PV = AM.getResult<PhiValuesAnalysis>(F);
  return MemoryDependenceResults(AA, AC, TLI, DT, PV, DefaultBlockScanLimit);
}
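// Illustrative sketch, not part of this file: under the new pass manager a
// hypothetical pass obtains the result through the analysis manager:
//
//   PreservedAnalyses MyPass::run(Function &F, FunctionAnalysisManager &AM) {
//     auto &MD = AM.getResult<MemoryDependenceAnalysis>(F);
//     MemDepResult Dep = MD.getDependency(SomeLoadOrStore); // hypothetical query
//     ...
//   }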
char MemoryDependenceWrapperPass::ID = 0;
INITIALIZE_PASS_BEGIN(MemoryDependenceWrapperPass, "memdep",
                      "Memory Dependence Analysis", false, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(PhiValuesWrapperPass)
INITIALIZE_PASS_END(MemoryDependenceWrapperPass, "memdep",
                    "Memory Dependence Analysis", false, true)
MemoryDependenceWrapperPass::MemoryDependenceWrapperPass() : FunctionPass(ID) {
  initializeMemoryDependenceWrapperPassPass(*PassRegistry::getPassRegistry());
}

MemoryDependenceWrapperPass::~MemoryDependenceWrapperPass() = default;
void MemoryDependenceWrapperPass::releaseMemory() {
  MemDep.reset();
}
void MemoryDependenceWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequired<AssumptionCacheTracker>();
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addRequired<PhiValuesWrapperPass>();
  AU.addRequiredTransitive<AAResultsWrapperPass>();
  AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
}
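// Illustrative sketch, not part of this file: a hypothetical legacy-PM client
// declares the dependency and fetches the computed results like so:
//
//   void MyLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const {
//     AU.addRequired<MemoryDependenceWrapperPass>();
//   }
//   bool MyLegacyPass::runOnFunction(Function &F) {
//     MemoryDependenceResults &MD =
//         getAnalysis<MemoryDependenceWrapperPass>().getMemDep();
//     ...
//   }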
bool MemoryDependenceResults::invalidate(Function &F, const PreservedAnalyses &PA,
                                         FunctionAnalysisManager::Invalidator &Inv) {
  // Check whether our analysis is preserved.
  auto PAC = PA.getChecker<MemoryDependenceAnalysis>();
  if (!PAC.preserved() && !PAC.preservedSet<AllAnalysesOn<Function>>())
    // If not, give up now.
    return true;

  // Check whether the analyses we depend on became invalid for any reason.
  if (Inv.invalidate<AAManager>(F, PA) ||
      Inv.invalidate<AssumptionAnalysis>(F, PA) ||
      Inv.invalidate<DominatorTreeAnalysis>(F, PA) ||
      Inv.invalidate<PhiValuesAnalysis>(F, PA))
    return true;

  // Otherwise this analysis result remains valid.
  return false;
}
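// Illustrative sketch, not part of this file: a hypothetical transform that
// wants to keep this result alive must preserve it (and the inputs checked
// above) in its returned PreservedAnalyses, e.g.:
//
//   PreservedAnalyses PA;
//   PA.preserve<MemoryDependenceAnalysis>();
//   PA.preserve<DominatorTreeAnalysis>();
//   return PA;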
unsigned MemoryDependenceResults::getDefaultBlockScanLimit() const {
  return DefaultBlockScanLimit;
}
bool MemoryDependenceWrapperPass::runOnFunction(Function &F) {
  auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
  auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto &PV = getAnalysis<PhiValuesWrapperPass>().getResult();
  MemDep.emplace(AA, AC, TLI, DT, PV, BlockScanLimit);
  return false;
}