//===- MemorySSA.cpp - Memory SSA Builder ---------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the MemorySSA class.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CFGPrinter.h"
#include "llvm/Analysis/IteratedDominanceFrontier.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/AssemblyAnnotationWriter.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Use.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/raw_ostream.h"
#define DEBUG_TYPE "memoryssa"

static cl::opt<std::string>
    DotCFGMSSA("dot-cfg-mssa",
               cl::value_desc("file name for generated dot file"),
               cl::desc("file name for generated dot file"), cl::init(""));
INITIALIZE_PASS_BEGIN(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
                      true)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
                    true)
static cl::opt<unsigned> MaxCheckLimit(
    "memssa-check-limit", cl::Hidden, cl::init(100),
    cl::desc("The maximum number of stores/phis MemorySSA "
             "will consider trying to walk past (default = 100)"));
// Always verify MemorySSA if expensive checking is enabled.
#ifdef EXPENSIVE_CHECKS
bool llvm::VerifyMemorySSA = true;
#else
bool llvm::VerifyMemorySSA = false;
#endif

static cl::opt<bool, true>
    VerifyMemorySSAX("verify-memoryssa", cl::location(VerifyMemorySSA),
                     cl::Hidden, cl::desc("Enable verification of MemorySSA."));

const static char LiveOnEntryStr[] = "liveOnEntry";
/// An assembly annotator class to print Memory SSA information in
/// comments.
class MemorySSAAnnotatedWriter : public AssemblyAnnotationWriter {
  const MemorySSA *MSSA;

public:
  MemorySSAAnnotatedWriter(const MemorySSA *M) : MSSA(M) {}

  void emitBasicBlockStartAnnot(const BasicBlock *BB,
                                formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(BB))
      OS << "; " << *MA << "\n";
  }

  void emitInstructionAnnot(const Instruction *I,
                            formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(I))
      OS << "; " << *MA << "\n";
  }
};
/// An assembly annotator class to print Memory SSA information in
/// comments, along with each access's clobbering access.
class MemorySSAWalkerAnnotatedWriter : public AssemblyAnnotationWriter {
  MemorySSA *MSSA;
  MemorySSAWalker *Walker;
  BatchAAResults BAA;

public:
  MemorySSAWalkerAnnotatedWriter(MemorySSA *M)
      : MSSA(M), Walker(M->getWalker()), BAA(M->getAA()) {}

  void emitBasicBlockStartAnnot(const BasicBlock *BB,
                                formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(BB))
      OS << "; " << *MA << "\n";
  }

  void emitInstructionAnnot(const Instruction *I,
                            formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(I)) {
      MemoryAccess *Clobber = Walker->getClobberingMemoryAccess(MA, BAA);
      OS << "; " << *MA;
      if (Clobber) {
        OS << " - clobbered by ";
        if (MSSA->isLiveOnEntryDef(Clobber))
          OS << LiveOnEntryStr;
        else
          OS << *Clobber;
      }
      OS << "\n";
    }
  }
};
namespace {

/// Our current alias analysis API differentiates heavily between calls and
/// non-calls, and functions called on one usually assert on the other.
/// This class encapsulates the distinction to simplify other code that wants
/// "Memory affecting instructions and related data" to use as a key.
/// For example, this class is used as a densemap key in the use optimizer.
class MemoryLocOrCall {
public:
  bool IsCall = false;

  MemoryLocOrCall(MemoryUseOrDef *MUD)
      : MemoryLocOrCall(MUD->getMemoryInst()) {}
  MemoryLocOrCall(const MemoryUseOrDef *MUD)
      : MemoryLocOrCall(MUD->getMemoryInst()) {}

  MemoryLocOrCall(Instruction *Inst) {
    if (auto *C = dyn_cast<CallBase>(Inst)) {
      IsCall = true;
      Call = C;
    } else {
      // There is no such thing as a memorylocation for a fence inst, and it is
      // unique in that regard.
      if (!isa<FenceInst>(Inst))
        Loc = MemoryLocation::get(Inst);
    }
  }

  explicit MemoryLocOrCall(const MemoryLocation &Loc) : Loc(Loc) {}

  const CallBase *getCall() const {
    assert(IsCall);
    return Call;
  }

  MemoryLocation getLoc() const {
    assert(!IsCall);
    return Loc;
  }

  bool operator==(const MemoryLocOrCall &Other) const {
    if (IsCall != Other.IsCall)
      return false;

    if (!IsCall)
      return Loc == Other.Loc;

    if (Call->getCalledOperand() != Other.Call->getCalledOperand())
      return false;

    return Call->arg_size() == Other.Call->arg_size() &&
           std::equal(Call->arg_begin(), Call->arg_end(),
                      Other.Call->arg_begin());
  }

private:
  union {
    const CallBase *Call;
    MemoryLocation Loc;
  };
};

} // end anonymous namespace
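
// DenseMapInfo specialization so MemoryLocOrCall can be used as a DenseMap
// key: non-calls hash and compare by their MemoryLocation, while calls hash
// and compare by callee and argument list.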
namespace llvm {

template <> struct DenseMapInfo<MemoryLocOrCall> {
  static inline MemoryLocOrCall getEmptyKey() {
    return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getEmptyKey());
  }

  static inline MemoryLocOrCall getTombstoneKey() {
    return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getTombstoneKey());
  }

  static unsigned getHashValue(const MemoryLocOrCall &MLOC) {
    if (!MLOC.IsCall)
      return hash_combine(
          MLOC.IsCall,
          DenseMapInfo<MemoryLocation>::getHashValue(MLOC.getLoc()));

    hash_code hash =
        hash_combine(MLOC.IsCall, DenseMapInfo<const Value *>::getHashValue(
                                      MLOC.getCall()->getCalledOperand()));

    for (const Value *Arg : MLOC.getCall()->args())
      hash = hash_combine(hash, DenseMapInfo<const Value *>::getHashValue(Arg));
    return hash;
  }

  static bool isEqual(const MemoryLocOrCall &LHS, const MemoryLocOrCall &RHS) {
    return LHS == RHS;
  }
};

} // end namespace llvm
/// This does one-way checks to see if Use could theoretically be hoisted above
/// MayClobber. This will not check the other way around.
///
/// This assumes that, for the purposes of MemorySSA, Use comes directly after
/// MayClobber, with no potentially clobbering operations in between them.
/// (Where potentially clobbering ops are memory barriers, aliased stores, etc.)
static bool areLoadsReorderable(const LoadInst *Use,
                                const LoadInst *MayClobber) {
  bool VolatileUse = Use->isVolatile();
  bool VolatileClobber = MayClobber->isVolatile();
  // Volatile operations may never be reordered with other volatile operations.
  if (VolatileUse && VolatileClobber)
    return false;
  // Otherwise, volatile doesn't matter here. From the language reference:
  // 'optimizers may change the order of volatile operations relative to
  // non-volatile operations.'

  // If a load is seq_cst, it cannot be moved above other loads. If its ordering
  // is weaker, it can be moved above other loads. We just need to be sure that
  // MayClobber isn't an acquire load, because loads can't be moved above
  // acquire loads.
  //
  // Note that this explicitly *does* allow the free reordering of monotonic (or
  // weaker) loads of the same address.
  bool SeqCstUse = Use->getOrdering() == AtomicOrdering::SequentiallyConsistent;
  bool MayClobberIsAcquire = isAtLeastOrStrongerThan(MayClobber->getOrdering(),
                                                     AtomicOrdering::Acquire);
  return !(SeqCstUse || MayClobberIsAcquire);
}
template <typename AliasAnalysisType>
static bool
instructionClobbersQuery(const MemoryDef *MD, const MemoryLocation &UseLoc,
                         const Instruction *UseInst, AliasAnalysisType &AA) {
  Instruction *DefInst = MD->getMemoryInst();
  assert(DefInst && "Defining instruction not actually an instruction");

  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(DefInst)) {
    // These intrinsics will show up as affecting memory, but they are just
    // markers, mostly.
    //
    // FIXME: We probably don't actually want MemorySSA to model these at all
    // (including creating MemoryAccesses for them): we just end up inventing
    // clobbers where they don't really exist at all. Please see D43269 for
    // context.
    switch (II->getIntrinsicID()) {
    case Intrinsic::allow_runtime_check:
    case Intrinsic::allow_ubsan_check:
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::assume:
    case Intrinsic::experimental_noalias_scope_decl:
    case Intrinsic::pseudoprobe:
      return false;
    case Intrinsic::dbg_declare:
    case Intrinsic::dbg_label:
    case Intrinsic::dbg_value:
      llvm_unreachable("debuginfo shouldn't have associated defs!");
    default:
      break;
    }
  }
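
  // For call uses we are conservative: if the defining instruction may read
  // or write the call's memory, treat it as a clobber. For ordinary uses only
  // a write to UseLoc matters (handled below).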
  if (auto *CB = dyn_cast_or_null<CallBase>(UseInst)) {
    ModRefInfo I = AA.getModRefInfo(DefInst, CB);
    return isModOrRefSet(I);
  }

  if (auto *DefLoad = dyn_cast<LoadInst>(DefInst))
    if (auto *UseLoad = dyn_cast_or_null<LoadInst>(UseInst))
      return !areLoadsReorderable(UseLoad, DefLoad);

  ModRefInfo I = AA.getModRefInfo(DefInst, UseLoc);
  return isModSet(I);
}
template <typename AliasAnalysisType>
static bool instructionClobbersQuery(MemoryDef *MD, const MemoryUseOrDef *MU,
                                     const MemoryLocOrCall &UseMLOC,
                                     AliasAnalysisType &AA) {
  // FIXME: This is a temporary hack to allow a single instructionClobbersQuery
  // to exist while MemoryLocOrCall is pushed through places.
  if (UseMLOC.IsCall)
    return instructionClobbersQuery(MD, MemoryLocation(), MU->getMemoryInst(),
                                    AA);
  return instructionClobbersQuery(MD, UseMLOC.getLoc(), MU->getMemoryInst(),
                                  AA);
}

// Return true when MD may alias MU, return false otherwise.
bool MemorySSAUtil::defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
                                        AliasAnalysis &AA) {
  return instructionClobbersQuery(MD, MU, MemoryLocOrCall(MU), AA);
}
namespace {

struct UpwardsMemoryQuery {
  // True if our original query started off as a call
  bool IsCall = false;
  // The pointer location we started the query with. This will be empty if
  // IsCall is true.
  MemoryLocation StartingLoc;
  // This is the instruction we were querying about.
  const Instruction *Inst = nullptr;
  // The MemoryAccess we actually got called with, used to test local domination
  const MemoryAccess *OriginalAccess = nullptr;
  bool SkipSelfAccess = false;

  UpwardsMemoryQuery() = default;

  UpwardsMemoryQuery(const Instruction *Inst, const MemoryAccess *Access)
      : IsCall(isa<CallBase>(Inst)), Inst(Inst), OriginalAccess(Access) {
    if (!IsCall)
      StartingLoc = MemoryLocation::get(Inst);
  }
};

} // end anonymous namespace
template <typename AliasAnalysisType>
static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysisType &AA,
                                                   const Instruction *I) {
  // If the memory can't be changed, then loads of the memory can't be
  // clobbered.
  if (auto *LI = dyn_cast<LoadInst>(I)) {
    return I->hasMetadata(LLVMContext::MD_invariant_load) ||
           !isModSet(AA.getModRefInfoMask(MemoryLocation::get(LI)));
  }
  return false;
}
/// Verifies that `Start` is clobbered by `ClobberAt`, and that nothing
/// inbetween `Start` and `ClobberAt` can clobber `Start`.
///
/// This is meant to be as simple and self-contained as possible. Because it
/// uses no cache, etc., it can be relatively expensive.
///
/// \param Start     The MemoryAccess that we want to walk from.
/// \param ClobberAt A clobber for Start.
/// \param StartLoc  The MemoryLocation for Start.
/// \param MSSA      The MemorySSA instance that Start and ClobberAt belong to.
/// \param Query     The UpwardsMemoryQuery we used for our search.
/// \param AA        The AliasAnalysis we used for our search.
/// \param AllowImpreciseClobber Always false, unless we do relaxed verify.
LLVM_ATTRIBUTE_UNUSED static void
checkClobberSanity(const MemoryAccess *Start, MemoryAccess *ClobberAt,
                   const MemoryLocation &StartLoc, const MemorySSA &MSSA,
                   const UpwardsMemoryQuery &Query, BatchAAResults &AA,
                   bool AllowImpreciseClobber = false) {
  assert(MSSA.dominates(ClobberAt, Start) && "Clobber doesn't dominate start?");

  if (MSSA.isLiveOnEntryDef(Start)) {
    assert(MSSA.isLiveOnEntryDef(ClobberAt) &&
           "liveOnEntry must clobber itself");
    return;
  }

  bool FoundClobber = false;
  DenseSet<ConstMemoryAccessPair> VisitedPhis;
  SmallVector<ConstMemoryAccessPair, 8> Worklist;
  Worklist.emplace_back(Start, StartLoc);
  // Walk all paths from Start to ClobberAt, while looking for clobbers. If one
  // is found, complain.
  while (!Worklist.empty()) {
    auto MAP = Worklist.pop_back_val();
    // All we care about is that nothing from Start to ClobberAt clobbers Start.
    // We learn nothing from revisiting nodes.
    if (!VisitedPhis.insert(MAP).second)
      continue;

    for (const auto *MA : def_chain(MAP.first)) {
      if (MA == ClobberAt) {
        if (const auto *MD = dyn_cast<MemoryDef>(MA)) {
          // instructionClobbersQuery isn't essentially free, so don't use `|=`,
          // since it won't let us short-circuit.
          //
          // Also, note that this can't be hoisted out of the `Worklist` loop,
          // since MD may only act as a clobber for 1 of N MemoryLocations.
          FoundClobber = FoundClobber || MSSA.isLiveOnEntryDef(MD);
          if (!FoundClobber) {
            if (instructionClobbersQuery(MD, MAP.second, Query.Inst, AA))
              FoundClobber = true;
          }
        }
        break;
      }

      // We should never hit liveOnEntry, unless it's the clobber.
      assert(!MSSA.isLiveOnEntryDef(MA) && "Hit liveOnEntry before clobber?");

      if (const auto *MD = dyn_cast<MemoryDef>(MA)) {
        // If Start is a Def, skip self.
        if (MD == Start)
          continue;

        assert(!instructionClobbersQuery(MD, MAP.second, Query.Inst, AA) &&
               "Found clobber before reaching ClobberAt!");
        continue;
      }

      if (const auto *MU = dyn_cast<MemoryUse>(MA)) {
        (void)MU;
        assert (MU == Start &&
                "Can only find use in def chain if Start is a use");
        continue;
      }

      assert(isa<MemoryPhi>(MA));

      // Add reachable phi predecessors
      for (auto ItB = upward_defs_begin(
                    {const_cast<MemoryAccess *>(MA), MAP.second},
                    MSSA.getDomTree()),
                ItE = upward_defs_end();
           ItB != ItE; ++ItB)
        if (MSSA.getDomTree().isReachableFromEntry(ItB.getPhiArgBlock()))
          Worklist.emplace_back(*ItB);
    }
  }

  // If the verify is done following an optimization, it's possible that
  // ClobberAt was a conservative clobbering, that we can now infer is not a
  // true clobbering access. Don't fail the verify if that's the case.
  // We do have accesses that claim they're optimized, but could be optimized
  // further. Updating all these can be expensive, so allow it for now (FIXME).
  if (AllowImpreciseClobber)
    return;

  // If ClobberAt is a MemoryPhi, we can assume something above it acted as a
  // clobber. Otherwise, `ClobberAt` should've acted as a clobber at some point.
  assert((isa<MemoryPhi>(ClobberAt) || FoundClobber) &&
         "ClobberAt never acted as a clobber");
}
namespace {

/// Our algorithm for walking (and trying to optimize) clobbers, all wrapped up
/// in one place.
class ClobberWalker {
  /// Save a few bytes by using unsigned instead of size_t.
  using ListIndex = unsigned;

  /// Represents a span of contiguous MemoryDefs, potentially ending in a
  /// MemoryPhi.
  struct DefPath {
    MemoryLocation Loc;
    // Note that, because we always walk in reverse, Last will always dominate
    // First. Also note that First and Last are inclusive.
    MemoryAccess *First;
    MemoryAccess *Last;
    std::optional<ListIndex> Previous;

    DefPath(const MemoryLocation &Loc, MemoryAccess *First, MemoryAccess *Last,
            std::optional<ListIndex> Previous)
        : Loc(Loc), First(First), Last(Last), Previous(Previous) {}

    DefPath(const MemoryLocation &Loc, MemoryAccess *Init,
            std::optional<ListIndex> Previous)
        : DefPath(Loc, Init, Init, Previous) {}
  };

  const MemorySSA &MSSA;
  DominatorTree &DT;
  BatchAAResults *AA;
  UpwardsMemoryQuery *Query;
  unsigned *UpwardWalkLimit;

  // Phi optimization bookkeeping:
  // List of DefPath to process during the current phi optimization walk.
  SmallVector<DefPath, 32> Paths;
  // List of visited <Access, Location> pairs; we can skip paths already
  // visited with the same memory location.
  DenseSet<ConstMemoryAccessPair> VisitedPhis;

  /// Find the nearest def or phi that `From` can legally be optimized to.
  const MemoryAccess *getWalkTarget(const MemoryPhi *From) const {
    assert(From->getNumOperands() && "Phi with no operands?");

    BasicBlock *BB = From->getBlock();
    MemoryAccess *Result = MSSA.getLiveOnEntryDef();
    DomTreeNode *Node = DT.getNode(BB);
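    // Walk up the immediate dominators of the phi's block; the last def in
    // the nearest dominating block that has any defs is the closest access we
    // could legally optimize to. If no such block exists, fall back to
    // liveOnEntry.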
    while ((Node = Node->getIDom())) {
      auto *Defs = MSSA.getBlockDefs(Node->getBlock());
      if (Defs)
        return &*Defs->rbegin();
    }
    return Result;
  }

  /// Result of calling walkToPhiOrClobber.
  struct UpwardsWalkResult {
    /// The "Result" of the walk. Either a clobber, the last thing we walked, or
    /// both. Include alias info when clobber found.
    MemoryAccess *Result;
    bool IsKnownClobber;
  };

  /// Walk to the next Phi or Clobber in the def chain starting at Desc.Last.
  /// This will update Desc.Last as it walks. It will (optionally) also stop at
  /// StopAt.
  ///
  /// This does not test for whether StopAt is a clobber
  UpwardsWalkResult
  walkToPhiOrClobber(DefPath &Desc, const MemoryAccess *StopAt = nullptr,
                     const MemoryAccess *SkipStopAt = nullptr) const {
    assert(!isa<MemoryUse>(Desc.Last) && "Uses don't exist in my world");
    assert(UpwardWalkLimit && "Need a valid walk limit");
    bool LimitAlreadyReached = false;
    // (*UpwardWalkLimit) may be 0 here, due to the loop in tryOptimizePhi. Set
    // it to 1. This will not do any alias() calls. It either returns in the
    // first iteration in the loop below, or is set back to 0 if all def chains
    // are free of MemoryDefs.
    if (!*UpwardWalkLimit) {
      *UpwardWalkLimit = 1;
      LimitAlreadyReached = true;
    }

    for (MemoryAccess *Current : def_chain(Desc.Last)) {
      Desc.Last = Current;
      if (Current == StopAt || Current == SkipStopAt)
        return {Current, false};

      if (auto *MD = dyn_cast<MemoryDef>(Current)) {
        if (MSSA.isLiveOnEntryDef(MD))
          return {MD, true};
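
        // If we run out of walk budget, conservatively report the current def
        // as a clobber rather than walking (and querying AA) any further.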
        if (!--*UpwardWalkLimit)
          return {Current, true};

        if (instructionClobbersQuery(MD, Desc.Loc, Query->Inst, *AA))
          return {MD, true};
      }
    }

    if (LimitAlreadyReached)
      *UpwardWalkLimit = 0;

    assert(isa<MemoryPhi>(Desc.Last) &&
           "Ended at a non-clobber that's not a phi?");
    return {Desc.Last, false};
  }
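
  /// Create one new DefPath per incoming value of \p Phi, pairing it with the
  /// MemoryLocation tracked on \p PriorNode, and record each new path index in
  /// \p PausedSearches so the search can be resumed later.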
  void addSearches(MemoryPhi *Phi, SmallVectorImpl<ListIndex> &PausedSearches,
                   ListIndex PriorNode) {
    auto UpwardDefsBegin = upward_defs_begin({Phi, Paths[PriorNode].Loc}, DT);
    auto UpwardDefs = make_range(UpwardDefsBegin, upward_defs_end());
    for (const MemoryAccessPair &P : UpwardDefs) {
      PausedSearches.push_back(Paths.size());
      Paths.emplace_back(P.second, P.first, PriorNode);
    }
  }

  /// Represents a search that terminated after finding a clobber. This clobber
  /// may or may not be present in the path of defs from LastNode..SearchStart,
  /// since it may have been retrieved from cache.
  struct TerminatedPath {
    MemoryAccess *Clobber;
    ListIndex LastNode;
  };

  /// Get an access that keeps us from optimizing to the given phi.
  ///
  /// PausedSearches is an array of indices into the Paths array. Its incoming
  /// value is the indices of searches that stopped at the last phi optimization
  /// target. It's left in an unspecified state.
  ///
  /// If this returns std::nullopt, NewPaused is a vector of searches that
  /// terminated at StopWhere. Otherwise, NewPaused is left in an unspecified
  /// state.
  std::optional<TerminatedPath>
  getBlockingAccess(const MemoryAccess *StopWhere,
                    SmallVectorImpl<ListIndex> &PausedSearches,
                    SmallVectorImpl<ListIndex> &NewPaused,
                    SmallVectorImpl<TerminatedPath> &Terminated) {
    assert(!PausedSearches.empty() && "No searches to continue?");

    // BFS vs DFS really doesn't make a difference here, so just do a DFS with
    // PausedSearches as our stack.
    while (!PausedSearches.empty()) {
      ListIndex PathIndex = PausedSearches.pop_back_val();
      DefPath &Node = Paths[PathIndex];

      // If we've already visited this path with this MemoryLocation, we don't
      // need to do so again.
      //
      // NOTE: That we just drop these paths on the ground makes caching
      // behavior sporadic. e.g. given a diamond:
      //  A
      // B C
      //  D
      //
      // ...If we walk D, B, A, C, we'll only cache the result of phi
      // optimization for A, B, and D; C will be skipped because it dies here.
      // This arguably isn't the worst thing ever, since:
      //   - We generally query things in a top-down order, so if we got below D
      //     without needing cache entries for {C, MemLoc}, then chances are
      //     that those cache entries would end up ultimately unused.
      //   - We still cache things for A, so C only needs to walk up a bit.
      // If this behavior becomes problematic, we can fix without a ton of extra
      // work.
      if (!VisitedPhis.insert({Node.Last, Node.Loc}).second)
        continue;

      const MemoryAccess *SkipStopWhere = nullptr;
      if (Query->SkipSelfAccess && Node.Loc == Query->StartingLoc) {
        assert(isa<MemoryDef>(Query->OriginalAccess));
        SkipStopWhere = Query->OriginalAccess;
      }

      UpwardsWalkResult Res = walkToPhiOrClobber(Node,
                                                 /*StopAt=*/StopWhere,
                                                 /*SkipStopAt=*/SkipStopWhere);
      if (Res.IsKnownClobber) {
        assert(Res.Result != StopWhere && Res.Result != SkipStopWhere);

        // If this wasn't a cache hit, we hit a clobber when walking. That's a
        // failure.
        TerminatedPath Term{Res.Result, PathIndex};
        if (!MSSA.dominates(Res.Result, StopWhere))
          return Term;

        // Otherwise, it's a valid thing to potentially optimize to.
        Terminated.push_back(Term);
        continue;
      }

      if (Res.Result == StopWhere || Res.Result == SkipStopWhere) {
        // We've hit our target. Save this path off for if we want to continue
        // walking. If we are in the mode of skipping the OriginalAccess, and
        // we've reached back to the OriginalAccess, do not save path, we've
        // just looped back to self.
        if (Res.Result != SkipStopWhere)
          NewPaused.push_back(PathIndex);
        continue;
      }

      assert(!MSSA.isLiveOnEntryDef(Res.Result) && "liveOnEntry is a clobber");
      addSearches(cast<MemoryPhi>(Res.Result), PausedSearches, PathIndex);
    }

    return std::nullopt;
  }
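
  /// Iterator over a chain of DefPaths, following each node's Previous index
  /// back toward the start of the search.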
  template <typename T, typename Walker>
  struct generic_def_path_iterator
      : public iterator_facade_base<generic_def_path_iterator<T, Walker>,
                                    std::forward_iterator_tag, T *> {
    generic_def_path_iterator() = default;
    generic_def_path_iterator(Walker *W, ListIndex N) : W(W), N(N) {}

    T &operator*() const { return curNode(); }

    generic_def_path_iterator &operator++() {
      N = curNode().Previous;
      return *this;
    }

    bool operator==(const generic_def_path_iterator &O) const {
      if (N.has_value() != O.N.has_value())
        return false;
      return !N || *N == *O.N;
    }

  private:
    T &curNode() const { return W->Paths[*N]; }

    Walker *W = nullptr;
    std::optional<ListIndex> N;
  };

  using def_path_iterator = generic_def_path_iterator<DefPath, ClobberWalker>;
  using const_def_path_iterator =
      generic_def_path_iterator<const DefPath, const ClobberWalker>;

  iterator_range<def_path_iterator> def_path(ListIndex From) {
    return make_range(def_path_iterator(this, From), def_path_iterator());
  }

  iterator_range<const_def_path_iterator> const_def_path(ListIndex From) const {
    return make_range(const_def_path_iterator(this, From),
                      const_def_path_iterator());
  }

  struct OptznResult {
    /// The path that contains our result.
    TerminatedPath PrimaryClobber;
    /// The paths that we can legally cache back from, but that aren't
    /// necessarily the result of the Phi optimization.
    SmallVector<TerminatedPath, 4> OtherClobbers;
  };

  ListIndex defPathIndex(const DefPath &N) const {
    // The assert looks nicer if we don't need to do &N
    const DefPath *NP = &N;
    assert(!Paths.empty() && NP >= &Paths.front() && NP <= &Paths.back() &&
           "Out of bounds DefPath!");
    return NP - &Paths.front();
  }

  /// Try to optimize a phi as best as we can. Returns a SmallVector of Paths
  /// that act as legal clobbers. Note that this won't return *all* clobbers.
  ///
  /// Phi optimization algorithm tl;dr:
  ///   - Find the earliest def/phi, A, we can optimize to
  ///   - Find if all paths from the starting memory access ultimately reach A
  ///     - If not, optimization isn't possible.
  ///     - Otherwise, walk from A to another clobber or phi, A'.
  ///       - If A' is a def, we're done.
  ///       - If A' is a phi, try to optimize it.
  ///
  /// A path is a series of {MemoryAccess, MemoryLocation} pairs. A path
  /// terminates when a MemoryAccess that clobbers said MemoryLocation is found.
  OptznResult tryOptimizePhi(MemoryPhi *Phi, MemoryAccess *Start,
                             const MemoryLocation &Loc) {
    assert(Paths.empty() && VisitedPhis.empty() &&
           "Reset the optimization state.");

    Paths.emplace_back(Loc, Start, Phi, std::nullopt);
    // Stores how many "valid" optimization nodes we had prior to calling
    // addSearches/getBlockingAccess. Necessary for caching if we had a blocker.
    auto PriorPathsSize = Paths.size();

    SmallVector<ListIndex, 16> PausedSearches;
    SmallVector<ListIndex, 8> NewPaused;
    SmallVector<TerminatedPath, 4> TerminatedPaths;

    addSearches(Phi, PausedSearches, 0);

    // Moves the TerminatedPath with the "most dominated" Clobber to the end of
    // Paths.
    auto MoveDominatedPathToEnd = [&](SmallVectorImpl<TerminatedPath> &Paths) {
      assert(!Paths.empty() && "Need a path to move");
      auto Dom = Paths.begin();
      for (auto I = std::next(Dom), E = Paths.end(); I != E; ++I)
        if (!MSSA.dominates(I->Clobber, Dom->Clobber))
          Dom = I;
      auto Last = Paths.end() - 1;
      if (Last != Dom)
        std::iter_swap(Last, Dom);
    };

    MemoryPhi *Current = Phi;
    while (true) {
      assert(!MSSA.isLiveOnEntryDef(Current) &&
             "liveOnEntry wasn't treated as a clobber?");

      const auto *Target = getWalkTarget(Current);
      // If a TerminatedPath doesn't dominate Target, then it wasn't a legal
      // optimization for the prior phi.
      assert(all_of(TerminatedPaths, [&](const TerminatedPath &P) {
        return MSSA.dominates(P.Clobber, Target);
      }));

      // FIXME: This is broken, because the Blocker may be reported to be
      // liveOnEntry, and we'll happily wait for that to disappear (read: never)
      // For the moment, this is fine, since we do nothing with blocker info.
      if (std::optional<TerminatedPath> Blocker = getBlockingAccess(
              Target, PausedSearches, NewPaused, TerminatedPaths)) {

        // Find the node we started at. We can't search based on N->Last, since
        // we may have gone around a loop with a different MemoryLocation.
        auto Iter = find_if(def_path(Blocker->LastNode), [&](const DefPath &N) {
          return defPathIndex(N) < PriorPathsSize;
        });
        assert(Iter != def_path_iterator());

        DefPath &CurNode = *Iter;
        assert(CurNode.Last == Current);

        // Two things:
        // A. We can't reliably cache all of NewPaused back. Consider a case
        //    where we have two paths in NewPaused; one of which can't optimize
        //    above this phi, whereas the other can. If we cache the second path
        //    back, we'll end up with suboptimal cache entries. We can handle
        //    cases like this a bit better when we either try to find all
        //    clobbers that block phi optimization, or when our cache starts
        //    supporting unfinished searches.
        // B. We can't reliably cache TerminatedPaths back here without doing
        //    extra checks; consider a case like:
        //       T
        //      / \
        //     D   C
        //      \ /
        //       S
        //    Where T is our target, C is a node with a clobber on it, D is a
        //    diamond (with a clobber *only* on the left or right node, N), and
        //    S is our start. Say we walk to D, through the node opposite N
        //    (read: ignoring the clobber), and see a cache entry in the top
        //    node of D. That cache entry gets put into TerminatedPaths. We then
        //    walk up to C (N is later in our worklist), find the clobber, and
        //    quit. If we append TerminatedPaths to OtherClobbers, we'll cache
        //    the bottom part of D to the cached clobber, ignoring the clobber
        //    in N. Again, this problem goes away if we start tracking all
        //    blockers for a given phi optimization.
        TerminatedPath Result{CurNode.Last, defPathIndex(CurNode)};
        return {Result, std::move(TerminatedPaths)};
      }

      // If there's nothing left to search, then all paths led to valid clobbers
      // that we got from our cache; pick the nearest to the start, and allow
      // the rest to be cached back.
      if (NewPaused.empty()) {
        MoveDominatedPathToEnd(TerminatedPaths);
        TerminatedPath Result = TerminatedPaths.pop_back_val();
        return {Result, std::move(TerminatedPaths)};
      }

      MemoryAccess *DefChainEnd = nullptr;
      SmallVector<TerminatedPath, 4> Clobbers;
      for (ListIndex Paused : NewPaused) {
        UpwardsWalkResult WR = walkToPhiOrClobber(Paths[Paused]);
        if (WR.IsKnownClobber)
          Clobbers.push_back({WR.Result, Paused});
        else
          // Micro-opt: If we hit the end of the chain, save it.
          DefChainEnd = WR.Result;
      }

      if (!TerminatedPaths.empty()) {
        // If we couldn't find the dominating phi/liveOnEntry in the above loop,
        // do it now.
        if (!DefChainEnd)
          for (auto *MA : def_chain(const_cast<MemoryAccess *>(Target)))
            DefChainEnd = MA;
        assert(DefChainEnd && "Failed to find dominating phi/liveOnEntry");

        // If any of the terminated paths don't dominate the phi we'll try to
        // optimize, we need to figure out what they are and quit.
        const BasicBlock *ChainBB = DefChainEnd->getBlock();
        for (const TerminatedPath &TP : TerminatedPaths) {
          // Because we know that DefChainEnd is as "high" as we can go, we
          // don't need local dominance checks; BB dominance is sufficient.
          if (DT.dominates(ChainBB, TP.Clobber->getBlock()))
            Clobbers.push_back(TP);
        }
      }

      // If we have clobbers in the def chain, find the one closest to Current
      // and quit.
      if (!Clobbers.empty()) {
        MoveDominatedPathToEnd(Clobbers);
        TerminatedPath Result = Clobbers.pop_back_val();
        return {Result, std::move(Clobbers)};
      }

      assert(all_of(NewPaused,
                    [&](ListIndex I) { return Paths[I].Last == DefChainEnd; }));

      // Because liveOnEntry is a clobber, this must be a phi.
      auto *DefChainPhi = cast<MemoryPhi>(DefChainEnd);

      PriorPathsSize = Paths.size();
      PausedSearches.clear();
      for (ListIndex I : NewPaused)
        addSearches(DefChainPhi, PausedSearches, I);
      NewPaused.clear();

      Current = DefChainPhi;
    }
  }

  void verifyOptResult(const OptznResult &R) const {
    assert(all_of(R.OtherClobbers, [&](const TerminatedPath &P) {
      return MSSA.dominates(P.Clobber, R.PrimaryClobber.Clobber);
    }));
  }

  void resetPhiOptznState() {
    Paths.clear();
    VisitedPhis.clear();
  }

public:
  ClobberWalker(const MemorySSA &MSSA, DominatorTree &DT)
      : MSSA(MSSA), DT(DT) {}

  /// Finds the nearest clobber for the given query, optimizing phis if
  /// possible.
  MemoryAccess *findClobber(BatchAAResults &BAA, MemoryAccess *Start,
                            UpwardsMemoryQuery &Q, unsigned &UpWalkLimit) {
    AA = &BAA;
    Query = &Q;
    UpwardWalkLimit = &UpWalkLimit;
    // Starting limit must be > 0.
    if (!UpWalkLimit)
      UpWalkLimit++;

    MemoryAccess *Current = Start;
    // This walker pretends uses don't exist. If we're handed one, silently grab
    // its def. (This has the nice side-effect of ensuring we never cache uses)
    if (auto *MU = dyn_cast<MemoryUse>(Start))
      Current = MU->getDefiningAccess();

    DefPath FirstDesc(Q.StartingLoc, Current, Current, std::nullopt);
    // Fast path for the overly-common case (no crazy phi optimization
    // necessary)
    UpwardsWalkResult WalkResult = walkToPhiOrClobber(FirstDesc);
    MemoryAccess *Result;
    if (WalkResult.IsKnownClobber) {
      Result = WalkResult.Result;
    } else {
      OptznResult OptRes = tryOptimizePhi(cast<MemoryPhi>(FirstDesc.Last),
                                          Current, Q.StartingLoc);
      verifyOptResult(OptRes);
      resetPhiOptznState();
      Result = OptRes.PrimaryClobber.Clobber;
    }

#ifdef EXPENSIVE_CHECKS
    if (!Q.SkipSelfAccess && *UpwardWalkLimit > 0)
      checkClobberSanity(Current, Result, Q.StartingLoc, MSSA, Q, BAA);
#endif
    return Result;
  }
};
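
// Carries the state for one dominator-tree node during the iterative renaming
// walk in renamePass: the node itself, the next child to visit, and the
// incoming MemoryAccess that reaches it.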
struct RenamePassData {
  DomTreeNode *DTN;
  DomTreeNode::const_iterator ChildIt;
  MemoryAccess *IncomingVal;

  RenamePassData(DomTreeNode *D, DomTreeNode::const_iterator It,
                 MemoryAccess *M)
      : DTN(D), ChildIt(It), IncomingVal(M) {}

  void swap(RenamePassData &RHS) {
    std::swap(DTN, RHS.DTN);
    std::swap(ChildIt, RHS.ChildIt);
    std::swap(IncomingVal, RHS.IncomingVal);
  }
};

} // end anonymous namespace

namespace llvm {

class MemorySSA::ClobberWalkerBase {
  ClobberWalker Walker;
  MemorySSA *MSSA;

public:
  ClobberWalkerBase(MemorySSA *M, DominatorTree *D) : Walker(*M, *D), MSSA(M) {}

  MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *,
                                              const MemoryLocation &,
                                              BatchAAResults &, unsigned &);
  // Third argument (bool), defines whether the clobber search should skip the
  // original queried access. If true, there will be a follow-up query searching
  // for a clobber access past "self". Note that the Optimized access is not
  // updated if a new clobber is found by this SkipSelf search. If this
  // additional query becomes heavily used we may decide to cache the result.
  // Walker instantiations will decide how to set the SkipSelf bool.
  MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *, BatchAAResults &,
                                              unsigned &, bool,
                                              bool UseInvariantGroup = true);
};

/// A MemorySSAWalker that does AA walks to disambiguate accesses. It no
/// longer does caching on its own, but the name has been retained for the
/// moment.
class MemorySSA::CachingWalker final : public MemorySSAWalker {
  ClobberWalkerBase *Walker;

public:
  CachingWalker(MemorySSA *M, ClobberWalkerBase *W)
      : MemorySSAWalker(M), Walker(W) {}
  ~CachingWalker() override = default;

  using MemorySSAWalker::getClobberingMemoryAccess;

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, BatchAAResults &BAA,
                                          unsigned &UWL) {
    return Walker->getClobberingMemoryAccessBase(MA, BAA, UWL, false);
  }
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          const MemoryLocation &Loc,
                                          BatchAAResults &BAA, unsigned &UWL) {
    return Walker->getClobberingMemoryAccessBase(MA, Loc, BAA, UWL);
  }
  // This method is not accessible outside of this file.
  MemoryAccess *getClobberingMemoryAccessWithoutInvariantGroup(
      MemoryAccess *MA, BatchAAResults &BAA, unsigned &UWL) {
    return Walker->getClobberingMemoryAccessBase(MA, BAA, UWL, false, false);
  }

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          BatchAAResults &BAA) override {
    unsigned UpwardWalkLimit = MaxCheckLimit;
    return getClobberingMemoryAccess(MA, BAA, UpwardWalkLimit);
  }
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          const MemoryLocation &Loc,
                                          BatchAAResults &BAA) override {
    unsigned UpwardWalkLimit = MaxCheckLimit;
    return getClobberingMemoryAccess(MA, Loc, BAA, UpwardWalkLimit);
  }

  void invalidateInfo(MemoryAccess *MA) override {
    if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
      MUD->resetOptimized();
  }
};
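
/// A MemorySSAWalker whose queries skip past the original access itself, so
/// that a MemoryDef's query returns a clobber above the def rather than the
/// def itself.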
class MemorySSA::SkipSelfWalker final : public MemorySSAWalker {
  ClobberWalkerBase *Walker;

public:
  SkipSelfWalker(MemorySSA *M, ClobberWalkerBase *W)
      : MemorySSAWalker(M), Walker(W) {}
  ~SkipSelfWalker() override = default;

  using MemorySSAWalker::getClobberingMemoryAccess;

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, BatchAAResults &BAA,
                                          unsigned &UWL) {
    return Walker->getClobberingMemoryAccessBase(MA, BAA, UWL, true);
  }
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          const MemoryLocation &Loc,
                                          BatchAAResults &BAA, unsigned &UWL) {
    return Walker->getClobberingMemoryAccessBase(MA, Loc, BAA, UWL);
  }

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          BatchAAResults &BAA) override {
    unsigned UpwardWalkLimit = MaxCheckLimit;
    return getClobberingMemoryAccess(MA, BAA, UpwardWalkLimit);
  }
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          const MemoryLocation &Loc,
                                          BatchAAResults &BAA) override {
    unsigned UpwardWalkLimit = MaxCheckLimit;
    return getClobberingMemoryAccess(MA, Loc, BAA, UpwardWalkLimit);
  }

  void invalidateInfo(MemoryAccess *MA) override {
    if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
      MUD->resetOptimized();
  }
};

} // end namespace llvm

void MemorySSA::renameSuccessorPhis(BasicBlock *BB, MemoryAccess *IncomingVal,
                                    bool RenameAllUses) {
  // Pass through values to our successors
  for (const BasicBlock *S : successors(BB)) {
    auto It = PerBlockAccesses.find(S);
    // Rename the phi nodes in our successor block
    if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
      continue;
    AccessList *Accesses = It->second.get();
    auto *Phi = cast<MemoryPhi>(&Accesses->front());
    if (RenameAllUses) {
      bool ReplacementDone = false;
      for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I)
        if (Phi->getIncomingBlock(I) == BB) {
          Phi->setIncomingValue(I, IncomingVal);
          ReplacementDone = true;
        }
      (void) ReplacementDone;
      assert(ReplacementDone && "Incomplete phi during partial rename");
    } else
      Phi->addIncoming(IncomingVal, BB);
  }
}

/// Rename a single basic block into MemorySSA form.
/// Uses the standard SSA renaming algorithm.
/// \returns The new incoming value.
MemoryAccess *MemorySSA::renameBlock(BasicBlock *BB, MemoryAccess *IncomingVal,
                                     bool RenameAllUses) {
  auto It = PerBlockAccesses.find(BB);
  // Skip most processing if the list is empty.
  if (It != PerBlockAccesses.end()) {
    AccessList *Accesses = It->second.get();
    for (MemoryAccess &L : *Accesses) {
      if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(&L)) {
        if (MUD->getDefiningAccess() == nullptr || RenameAllUses)
          MUD->setDefiningAccess(IncomingVal);
        if (isa<MemoryDef>(&L))
          IncomingVal = &L;
      } else {
        IncomingVal = &L;
      }
    }
  }
  return IncomingVal;
}

/// This is the standard SSA renaming algorithm.
///
/// We walk the dominator tree in preorder, renaming accesses, and then filling
/// in phi nodes in our successors.
void MemorySSA::renamePass(DomTreeNode *Root, MemoryAccess *IncomingVal,
                           SmallPtrSetImpl<BasicBlock *> &Visited,
                           bool SkipVisited, bool RenameAllUses) {
  assert(Root && "Trying to rename accesses in an unreachable block");

  SmallVector<RenamePassData, 32> WorkStack;
  // Skip everything if we already renamed this block and we are skipping.
  // Note: You can't sink this into the if, because we need it to occur
  // regardless of whether we skip blocks or not.
  bool AlreadyVisited = !Visited.insert(Root->getBlock()).second;
  if (SkipVisited && AlreadyVisited)
    return;

  IncomingVal = renameBlock(Root->getBlock(), IncomingVal, RenameAllUses);
  renameSuccessorPhis(Root->getBlock(), IncomingVal, RenameAllUses);
  WorkStack.push_back({Root, Root->begin(), IncomingVal});
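
  // Iterative preorder walk over the dominator tree; each stack entry
  // remembers which child to visit next so we never recurse.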
  while (!WorkStack.empty()) {
    DomTreeNode *Node = WorkStack.back().DTN;
    DomTreeNode::const_iterator ChildIt = WorkStack.back().ChildIt;
    IncomingVal = WorkStack.back().IncomingVal;

    if (ChildIt == Node->end()) {
      WorkStack.pop_back();
    } else {
      DomTreeNode *Child = *ChildIt;
      ++WorkStack.back().ChildIt;
      BasicBlock *BB = Child->getBlock();
      // Note: You can't sink this into the if, because we need it to occur
      // regardless of whether we skip blocks or not.
      AlreadyVisited = !Visited.insert(BB).second;
      if (SkipVisited && AlreadyVisited) {
        // We already visited this during our renaming, which can happen when
        // being asked to rename multiple blocks. Figure out the incoming val,
        // which is the last def.
        // Incoming value can only change if there is a block def, and in that
        // case, it's the last block def in the list.
        if (auto *BlockDefs = getWritableBlockDefs(BB))
          IncomingVal = &*BlockDefs->rbegin();
      } else
        IncomingVal = renameBlock(BB, IncomingVal, RenameAllUses);
      renameSuccessorPhis(BB, IncomingVal, RenameAllUses);
      WorkStack.push_back({Child, Child->begin(), IncomingVal});
    }
  }
}

/// This handles unreachable block accesses by deleting phi nodes in
/// unreachable blocks, and marking all other unreachable MemoryAccess's as
/// being uses of the live on entry definition.
void MemorySSA::markUnreachableAsLiveOnEntry(BasicBlock *BB) {
  assert(!DT->isReachableFromEntry(BB) &&
         "Reachable block found while handling unreachable blocks");

  // Make sure phi nodes in our reachable successors end up with a
  // LiveOnEntryDef for our incoming edge, even though our block is forward
  // unreachable. We could just disconnect these blocks from the CFG fully,
  // but we do not right now.
  for (const BasicBlock *S : successors(BB)) {
    if (!DT->isReachableFromEntry(S))
      continue;
    auto It = PerBlockAccesses.find(S);
    // Rename the phi nodes in our successor block
    if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
      continue;
    AccessList *Accesses = It->second.get();
    auto *Phi = cast<MemoryPhi>(&Accesses->front());
    Phi->addIncoming(LiveOnEntryDef.get(), BB);
  }

  auto It = PerBlockAccesses.find(BB);
  if (It == PerBlockAccesses.end())
    return;

  auto &Accesses = It->second;
  for (auto AI = Accesses->begin(), AE = Accesses->end(); AI != AE;) {
    auto Next = std::next(AI);
    // If we have a phi, just remove it. We are going to replace all
    // users with live on entry.
    if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(AI))
      UseOrDef->setDefiningAccess(LiveOnEntryDef.get());
    else
      Accesses->erase(AI);
    AI = Next;
  }
}

MemorySSA::MemorySSA(Function &Func, AliasAnalysis *AA, DominatorTree *DT)
    : DT(DT), F(&Func), LiveOnEntryDef(nullptr), Walker(nullptr),
      SkipWalker(nullptr) {
  // Build MemorySSA using a batch alias analysis. This reuses the internal
  // state that AA collects during an alias()/getModRefInfo() call. This is
  // safe because there are no CFG changes while building MemorySSA and can
  // significantly reduce the time spent by the compiler in AA, because we will
  // make queries about all the instructions in the Function.
  assert(AA && "No alias analysis?");
  BatchAAResults BatchAA(*AA);
  buildMemorySSA(BatchAA, iterator_range(F->begin(), F->end()));
  // Intentionally leave AA to nullptr while building so we don't accidentally
  // use non-batch AliasAnalysis.
  this->AA = AA;
  // Also create the walker here.
  getWalker();
}

MemorySSA::MemorySSA(Loop &L, AliasAnalysis *AA, DominatorTree *DT)
    : DT(DT), L(&L), LiveOnEntryDef(nullptr), Walker(nullptr),
      SkipWalker(nullptr) {
  // Build MemorySSA using a batch alias analysis. This reuses the internal
  // state that AA collects during an alias()/getModRefInfo() call. This is
  // safe because there are no CFG changes while building MemorySSA and can
  // significantly reduce the time spent by the compiler in AA, because we will
  // make queries about all the instructions in the Function.
  assert(AA && "No alias analysis?");
  BatchAAResults BatchAA(*AA);
  buildMemorySSA(
      BatchAA, map_range(L.blocks(), [](const BasicBlock *BB) -> BasicBlock & {
        return *const_cast<BasicBlock *>(BB);
      }));
  // Intentionally leave AA to nullptr while building so we don't accidentally
  // use non-batch AliasAnalysis.
  this->AA = AA;
  // Also create the walker here.
  getWalker();
}

MemorySSA::~MemorySSA() {
  // Drop all our references
  for (const auto &Pair : PerBlockAccesses)
    for (MemoryAccess &MA : *Pair.second)
      MA.dropAllReferences();
}

MemorySSA::AccessList *MemorySSA::getOrCreateAccessList(const BasicBlock *BB) {
  auto Res = PerBlockAccesses.insert(std::make_pair(BB, nullptr));

  if (Res.second)
    Res.first->second = std::make_unique<AccessList>();
  return Res.first->second.get();
}

MemorySSA::DefsList *MemorySSA::getOrCreateDefsList(const BasicBlock *BB) {
  auto Res = PerBlockDefs.insert(std::make_pair(BB, nullptr));

  if (Res.second)
    Res.first->second = std::make_unique<DefsList>();
  return Res.first->second.get();
}

namespace llvm {

/// This class is a batch walker of all MemoryUse's in the program, and points
/// their defining access at the thing that actually clobbers them. Because it
/// is a batch walker that touches everything, it does not operate like the
/// other walkers. This walker is basically performing a top-down SSA renaming
/// pass, where the version stack is used as the cache. This enables it to be
/// significantly more time and memory efficient than using the regular walker,
/// which is walking bottom-up.
class MemorySSA::OptimizeUses {
public:
  OptimizeUses(MemorySSA *MSSA, CachingWalker *Walker, BatchAAResults *BAA,
               DominatorTree *DT)
      : MSSA(MSSA), Walker(Walker), AA(BAA), DT(DT) {}

  void optimizeUses();

private:
  /// This represents where a given memorylocation is in the stack.
  struct MemlocStackInfo {
    // This essentially is keeping track of versions of the stack. Whenever
    // the stack changes due to pushes or pops, these versions increase.
    unsigned long StackEpoch;
    unsigned long PopEpoch;
    // This is the lower bound of places on the stack to check. It is equal to
    // the place the last stack walk ended.
    // Note: Correctness depends on this being initialized to 0, which densemap
    // does.
    unsigned long LowerBound;
    const BasicBlock *LowerBoundBlock;
    // This is where the last walk for this memory location ended.
    unsigned long LastKill;
    bool LastKillValid;
  };

  void optimizeUsesInBlock(const BasicBlock *, unsigned long &, unsigned long &,
                           SmallVectorImpl<MemoryAccess *> &,
                           DenseMap<MemoryLocOrCall, MemlocStackInfo> &);

  MemorySSA *MSSA;
  CachingWalker *Walker;
  BatchAAResults *AA;
  DominatorTree *DT;
};

} // end namespace llvm

/// Optimize the uses in a given block This is basically the SSA renaming
/// algorithm, with one caveat: We are able to use a single stack for all
/// MemoryUses. This is because the set of *possible* reaching MemoryDefs is
/// the same for every MemoryUse. The *actual* clobbering MemoryDef is just
/// going to be some position in that stack of possible ones.
///
/// We track the stack positions that each MemoryLocation needs
/// to check, and last ended at. This is because we only want to check the
/// things that changed since last time. The same MemoryLocation should
/// get clobbered by the same store (getModRefInfo does not use invariantness or
/// things like this, and if they start, we can modify MemoryLocOrCall to
/// include relevant data)
void MemorySSA::OptimizeUses::optimizeUsesInBlock(
    const BasicBlock *BB, unsigned long &StackEpoch, unsigned long &PopEpoch,
    SmallVectorImpl<MemoryAccess *> &VersionStack,
    DenseMap<MemoryLocOrCall, MemlocStackInfo> &LocStackInfo) {

  /// If no accesses, nothing to do.
  MemorySSA::AccessList *Accesses = MSSA->getWritableBlockAccesses(BB);
  if (Accesses == nullptr)
    return;

  // Pop everything that doesn't dominate the current block off the stack,
  // increment the PopEpoch to account for this.
  while (true) {
    assert(
        !VersionStack.empty() &&
        "Version stack should have liveOnEntry sentinel dominating everything");
    BasicBlock *BackBlock = VersionStack.back()->getBlock();
    if (DT->dominates(BackBlock, BB))
      break;
    while (VersionStack.back()->getBlock() == BackBlock)
      VersionStack.pop_back();
    ++PopEpoch;
  }
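
  // Defs and phis are only pushed onto the version stack; uses are what we
  // optimize against the defs currently on the stack.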
  for (MemoryAccess &MA : *Accesses) {
    auto *MU = dyn_cast<MemoryUse>(&MA);
    if (!MU) {
      VersionStack.push_back(&MA);
      ++StackEpoch;
      continue;
    }

    if (MU->isOptimized())
      continue;

    MemoryLocOrCall UseMLOC(MU);
    auto &LocInfo = LocStackInfo[UseMLOC];
    // If the pop epoch changed, it means we've removed stuff from top of
    // stack due to changing blocks. We may have to reset the lower bound or
    // last kill info.
    if (LocInfo.PopEpoch != PopEpoch) {
      LocInfo.PopEpoch = PopEpoch;
      LocInfo.StackEpoch = StackEpoch;
      // If the lower bound was in something that no longer dominates us, we
      // have to reset it.
      // We can't simply track stack size, because the stack may have had
      // pushes/pops in the meantime.
      // XXX: This is non-optimal, but only is slower cases with heavily
      // branching dominator trees. To get the optimal number of queries would
      // be to make lowerbound and lastkill a per-loc stack, and pop it until
      // the top of that stack dominates us. This does not seem worth it ATM.
      // A much cheaper optimization would be to always explore the deepest
      // branch of the dominator tree first. This will guarantee this resets on
      // the smallest set of blocks.
      if (LocInfo.LowerBoundBlock && LocInfo.LowerBoundBlock != BB &&
          !DT->dominates(LocInfo.LowerBoundBlock, BB)) {
        // Reset the lower bound of things to check.
        // TODO: Some day we should be able to reset to last kill, rather than
        // 0.
        LocInfo.LowerBound = 0;
        LocInfo.LowerBoundBlock = VersionStack[0]->getBlock();
        LocInfo.LastKillValid = false;
      }
    } else if (LocInfo.StackEpoch != StackEpoch) {
      // If all that has changed is the StackEpoch, we only have to check the
      // new things on the stack, because we've checked everything before. In
      // this case, the lower bound of things to check remains the same.
      LocInfo.PopEpoch = PopEpoch;
      LocInfo.StackEpoch = StackEpoch;
    }
    if (!LocInfo.LastKillValid) {
      LocInfo.LastKill = VersionStack.size() - 1;
      LocInfo.LastKillValid = true;
    }

    // At this point, we should have corrected last kill and LowerBound to be
    // in bounds.
    assert(LocInfo.LowerBound < VersionStack.size() &&
           "Lower bound out of range");
    assert(LocInfo.LastKill < VersionStack.size() &&
           "Last kill info out of range");
    // In any case, the new upper bound is the top of the stack.
    unsigned long UpperBound = VersionStack.size() - 1;

    if (UpperBound - LocInfo.LowerBound > MaxCheckLimit) {
      LLVM_DEBUG(dbgs() << "MemorySSA skipping optimization of " << *MU << " ("
                        << *(MU->getMemoryInst()) << ")"
                        << " because there are "
                        << UpperBound - LocInfo.LowerBound
                        << " stores to disambiguate\n");
      // Because we did not walk, LastKill is no longer valid, as this may
      // have been a kill.
      LocInfo.LastKillValid = false;
      continue;
    }

    bool FoundClobberResult = false;
    unsigned UpwardWalkLimit = MaxCheckLimit;
    while (UpperBound > LocInfo.LowerBound) {
      if (isa<MemoryPhi>(VersionStack[UpperBound])) {
        // For phis, use the walker, see where we ended up, go there.
        // The invariant.group handling in MemorySSA is ad-hoc and doesn't
        // support updates, so don't use it to optimize uses.
        MemoryAccess *Result =
            Walker->getClobberingMemoryAccessWithoutInvariantGroup(
                MU, *AA, UpwardWalkLimit);
        // We are guaranteed to find it or something is wrong.
        while (VersionStack[UpperBound] != Result) {
          assert(UpperBound != 0);
          --UpperBound;
        }
        FoundClobberResult = true;
        break;
      }

      MemoryDef *MD = cast<MemoryDef>(VersionStack[UpperBound]);
      if (instructionClobbersQuery(MD, MU, UseMLOC, *AA)) {
        FoundClobberResult = true;
        break;
      }
      --UpperBound;
    }

    // At the end of this loop, UpperBound is either a clobber, or lower bound
    // PHI walking may cause it to be < LowerBound, and in fact, < LastKill.
    if (FoundClobberResult || UpperBound < LocInfo.LastKill) {
      MU->setDefiningAccess(VersionStack[UpperBound], true);
      LocInfo.LastKill = UpperBound;
    } else {
      // Otherwise, we checked all the new ones, and now we know we can get to
      // LastKill.
      MU->setDefiningAccess(VersionStack[LocInfo.LastKill], true);
    }
    LocInfo.LowerBound = VersionStack.size() - 1;
    LocInfo.LowerBoundBlock = BB;
  }
}

/// Optimize uses to point to their actual clobbering definitions.
void MemorySSA::OptimizeUses::optimizeUses() {
  SmallVector<MemoryAccess *, 16> VersionStack;
  DenseMap<MemoryLocOrCall, MemlocStackInfo> LocStackInfo;
  VersionStack.push_back(MSSA->getLiveOnEntryDef());

  unsigned long StackEpoch = 1;
  unsigned long PopEpoch = 1;
  // We perform a non-recursive top-down dominator tree walk.
  for (const auto *DomNode : depth_first(DT->getRootNode()))
    optimizeUsesInBlock(DomNode->getBlock(), StackEpoch, PopEpoch, VersionStack,
                        LocStackInfo);
}

void MemorySSA::placePHINodes(
    const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks) {
  // Determine where our MemoryPhi's should go
  ForwardIDFCalculator IDFs(*DT);
  IDFs.setDefiningBlocks(DefiningBlocks);
  SmallVector<BasicBlock *, 32> IDFBlocks;
  IDFs.calculate(IDFBlocks);

  // Now place MemoryPhi nodes.
  for (auto &BB : IDFBlocks)
    createMemoryPhi(BB);
}

template <typename IterT>
void MemorySSA::buildMemorySSA(BatchAAResults &BAA, IterT Blocks) {
  // We create an access to represent "live on entry", for things like
  // arguments or users of globals, where the memory they use is defined before
  // the beginning of the function. We do not actually insert it into the IR.
  // We do not define a live on exit for the immediate uses, and thus our
  // semantics do *not* imply that something with no immediate uses can simply
  // be removed.
  BasicBlock &StartingPoint = *Blocks.begin();
  LiveOnEntryDef.reset(new MemoryDef(StartingPoint.getContext(), nullptr,
                                     nullptr, &StartingPoint, NextID++));

  // We maintain lists of memory accesses per-block, trading memory for time. We
  // could just look up the memory access for every possible instruction in the
  // stream.
  SmallPtrSet<BasicBlock *, 32> DefiningBlocks;
  // Go through each block, figure out where defs occur, and chain together all
  // the accesses.
  for (BasicBlock &B : Blocks) {
    bool InsertIntoDef = false;
    AccessList *Accesses = nullptr;
    DefsList *Defs = nullptr;
    for (Instruction &I : B) {
      MemoryUseOrDef *MUD = createNewAccess(&I, &BAA);
      if (!MUD)
        continue;

      if (!Accesses)
        Accesses = getOrCreateAccessList(&B);
      Accesses->push_back(MUD);
      if (isa<MemoryDef>(MUD)) {
        InsertIntoDef = true;
        if (!Defs)
          Defs = getOrCreateDefsList(&B);
        Defs->push_back(*MUD);
      }
    }
    if (InsertIntoDef)
      DefiningBlocks.insert(&B);
  }
  placePHINodes(DefiningBlocks);

  // Now do regular SSA renaming on the MemoryDef/MemoryUse. Visited will get
  // filled in with all blocks.
  SmallPtrSet<BasicBlock *, 16> Visited;
  if (L) {
    // Only building MemorySSA for a single loop. placePHINodes may have
    // inserted a MemoryPhi in the loop's preheader. As this is outside the
    // scope of the loop, set them to LiveOnEntry.
    if (auto *P = getMemoryAccess(L->getLoopPreheader())) {
      for (Use &U : make_early_inc_range(P->uses()))
        U.set(LiveOnEntryDef.get());
      removeFromLists(P);
    }
    // Now rename accesses in the loop. Populate Visited with the exit blocks of
    // the loop, to limit the scope of the renaming.
    SmallVector<BasicBlock *> ExitBlocks;
    L->getExitBlocks(ExitBlocks);
    Visited.insert(ExitBlocks.begin(), ExitBlocks.end());
    renamePass(DT->getNode(L->getLoopPreheader()), LiveOnEntryDef.get(),
               Visited);
  } else {
    renamePass(DT->getRootNode(), LiveOnEntryDef.get(), Visited);
  }

  // Mark the uses in unreachable blocks as live on entry, so that they go
  // somewhere.
  for (auto &BB : Blocks)
    if (!Visited.count(&BB))
      markUnreachableAsLiveOnEntry(&BB);
}

MemorySSAWalker *MemorySSA::getWalker() { return getWalkerImpl(); }
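
// The walker objects are created lazily on first request and share a single
// ClobberWalkerBase between the caching and skip-self variants.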
1592 MemorySSA::CachingWalker
*MemorySSA::getWalkerImpl() {
1594 return Walker
.get();
1597 WalkerBase
= std::make_unique
<ClobberWalkerBase
>(this, DT
);
1599 Walker
= std::make_unique
<CachingWalker
>(this, WalkerBase
.get());
1600 return Walker
.get();
1603 MemorySSAWalker
*MemorySSA::getSkipSelfWalker() {
1605 return SkipWalker
.get();
1608 WalkerBase
= std::make_unique
<ClobberWalkerBase
>(this, DT
);
1610 SkipWalker
= std::make_unique
<SkipSelfWalker
>(this, WalkerBase
.get());
1611 return SkipWalker
.get();

// This is a helper function used by the creation routines. It places NewAccess
// into the access and defs lists for a given basic block, at the given
// insertion point.
void MemorySSA::insertIntoListsForBlock(MemoryAccess *NewAccess,
                                        const BasicBlock *BB,
                                        InsertionPlace Point) {
  auto *Accesses = getOrCreateAccessList(BB);
  if (Point == Beginning) {
    // If it's a phi node, it goes first, otherwise, it goes after any phi
    // nodes.
    if (isa<MemoryPhi>(NewAccess)) {
      Accesses->push_front(NewAccess);
      auto *Defs = getOrCreateDefsList(BB);
      Defs->push_front(*NewAccess);
    } else {
      auto AI = find_if_not(
          *Accesses, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
      Accesses->insert(AI, NewAccess);
      if (!isa<MemoryUse>(NewAccess)) {
        auto *Defs = getOrCreateDefsList(BB);
        auto DI = find_if_not(
            *Defs, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
        Defs->insert(DI, *NewAccess);
      }
    }
  } else {
    Accesses->push_back(NewAccess);
    if (!isa<MemoryUse>(NewAccess)) {
      auto *Defs = getOrCreateDefsList(BB);
      Defs->push_back(*NewAccess);
    }
  }
  BlockNumberingValid.erase(BB);
}

void MemorySSA::insertIntoListsBefore(MemoryAccess *What, const BasicBlock *BB,
                                      AccessList::iterator InsertPt) {
  auto *Accesses = getWritableBlockAccesses(BB);
  bool WasEnd = InsertPt == Accesses->end();
  Accesses->insert(AccessList::iterator(InsertPt), What);
  if (!isa<MemoryUse>(What)) {
    auto *Defs = getOrCreateDefsList(BB);
    // If we got asked to insert at the end, we have an easy job, just shove it
    // at the end. If we got asked to insert before an existing def, we also get
    // an iterator. If we got asked to insert before a use, we have to hunt for
    // the next def.
    if (WasEnd) {
      Defs->push_back(*What);
    } else if (isa<MemoryDef>(InsertPt)) {
      Defs->insert(InsertPt->getDefsIterator(), *What);
    } else {
      while (InsertPt != Accesses->end() && !isa<MemoryDef>(InsertPt))
        ++InsertPt;
      // Either we found a def, or we are inserting at the end
      if (InsertPt == Accesses->end())
        Defs->push_back(*What);
      else
        Defs->insert(InsertPt->getDefsIterator(), *What);
    }
  }
  BlockNumberingValid.erase(BB);
}

void MemorySSA::prepareForMoveTo(MemoryAccess *What, BasicBlock *BB) {
  // Keep it in the lookup tables, remove from the lists
  removeFromLists(What, false);

  // Note that moving should implicitly invalidate the optimized state of a
  // MemoryUse (and Phis can't be optimized). However, it doesn't do so for a
  // MemoryDef.
  if (auto *MD = dyn_cast<MemoryDef>(What))
    MD->resetOptimized();
  What->setBlock(BB);
}

// Move What before Where in the IR. The end result is that What will belong to
// the right lists and have the right Block set, but will not otherwise be
// correct. It will not have the right defining access, and if it is a def,
// things below it will not properly be updated.
void MemorySSA::moveTo(MemoryUseOrDef *What, BasicBlock *BB,
                       AccessList::iterator Where) {
  prepareForMoveTo(What, BB);
  insertIntoListsBefore(What, BB, Where);
}

void MemorySSA::moveTo(MemoryAccess *What, BasicBlock *BB,
                       InsertionPlace Point) {
  if (isa<MemoryPhi>(What)) {
    assert(Point == Beginning &&
           "Can only move a Phi at the beginning of the block");
    // Update lookup table entry
    ValueToMemoryAccess.erase(What->getBlock());
    bool Inserted = ValueToMemoryAccess.insert({BB, What}).second;
    (void)Inserted;
    assert(Inserted && "Cannot move a Phi to a block that already has one");
  }

  prepareForMoveTo(What, BB);
  insertIntoListsForBlock(What, BB, Point);
}

MemoryPhi *MemorySSA::createMemoryPhi(BasicBlock *BB) {
  assert(!getMemoryAccess(BB) && "MemoryPhi already exists for this BB");
  MemoryPhi *Phi = new MemoryPhi(BB->getContext(), BB, NextID++);
  // Phi's always are placed at the front of the block.
  insertIntoListsForBlock(Phi, BB, Beginning);
  ValueToMemoryAccess[BB] = Phi;
  return Phi;
}

MemoryUseOrDef *MemorySSA::createDefinedAccess(Instruction *I,
                                               MemoryAccess *Definition,
                                               const MemoryUseOrDef *Template,
                                               bool CreationMustSucceed) {
  assert(!isa<PHINode>(I) && "Cannot create a defined access for a PHI");
  MemoryUseOrDef *NewAccess = createNewAccess(I, AA, Template);
  if (CreationMustSucceed)
    assert(NewAccess != nullptr && "Tried to create a memory access for a "
                                   "non-memory touching instruction");
  if (NewAccess) {
    assert((!Definition || !isa<MemoryUse>(Definition)) &&
           "A use cannot be a defining access");
    NewAccess->setDefiningAccess(Definition);
  }
  return NewAccess;
}

// Return true if the instruction has ordering constraints.
// Note specifically that this only considers stores and loads
// because others are still considered ModRef by getModRefInfo.
static inline bool isOrdered(const Instruction *I) {
  if (auto *SI = dyn_cast<StoreInst>(I)) {
    if (!SI->isUnordered())
      return true;
  } else if (auto *LI = dyn_cast<LoadInst>(I)) {
    if (!LI->isUnordered())
      return true;
  }
  return false;
}
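
// For example, "load atomic i32, ptr %p acquire" and volatile loads are not
// unordered, so isOrdered() returns true for them and createNewAccess below
// turns them into MemoryDefs even when alias analysis reports them as
// read-only.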

/// Helper function to create new memory accesses
template <typename AliasAnalysisType>
MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I,
                                           AliasAnalysisType *AAP,
                                           const MemoryUseOrDef *Template) {
  // The assume intrinsic has a control dependency which we model by claiming
  // that it writes arbitrarily. Debuginfo intrinsics may be considered
  // clobbers when we have a nonstandard AA pipeline. Ignore these fake memory
  // dependencies here.
  // FIXME: Replace this special casing with a more accurate modelling of
  // assume's control dependency.
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default:
      break;
    case Intrinsic::allow_runtime_check:
    case Intrinsic::allow_ubsan_check:
    case Intrinsic::assume:
    case Intrinsic::experimental_noalias_scope_decl:
    case Intrinsic::pseudoprobe:
      return nullptr;
    }
  }

  // Using a nonstandard AA pipeline might leave us with unexpected modref
  // results for I, so add a check to not model instructions that may not read
  // from or write to memory. This is necessary for correctness.
  if (!I->mayReadFromMemory() && !I->mayWriteToMemory())
    return nullptr;

  bool Def, Use;
  if (Template) {
    Def = isa<MemoryDef>(Template);
    Use = isa<MemoryUse>(Template);
#if !defined(NDEBUG)
    ModRefInfo ModRef = AAP->getModRefInfo(I, std::nullopt);
    bool DefCheck, UseCheck;
    DefCheck = isModSet(ModRef) || isOrdered(I);
    UseCheck = isRefSet(ModRef);
    // Memory accesses should only be reduced and cannot be increased since AA
    // just might return better results as a result of some transformations.
    assert((Def == DefCheck || !DefCheck) &&
           "Memory accesses should only be reduced");
    if (!Def && Use != UseCheck) {
      // New Access should not have more power than template access
      assert(!UseCheck && "Invalid template");
    }
#endif
  } else {
    // Find out what effect this instruction has on memory.
    ModRefInfo ModRef = AAP->getModRefInfo(I, std::nullopt);
    // The isOrdered check is used to ensure that volatiles end up as defs
    // (atomics end up as ModRef right now anyway). Until we separate the
    // ordering chain from the memory chain, this enables people to see at least
    // some relative ordering to volatiles. Note that getClobberingMemoryAccess
    // will still give an answer that bypasses other volatile loads. TODO:
    // Separate memory aliasing and ordering into two different chains so that
    // we can precisely represent both "what memory will this read/write/is
    // clobbered by" and "what instructions can I move this past".
    Def = isModSet(ModRef) || isOrdered(I);
    Use = isRefSet(ModRef);
  }

  // It's possible for an instruction to not modify memory at all. During
  // construction, we ignore them.
  if (!Def && !Use)
    return nullptr;

  MemoryUseOrDef *MUD;
  if (Def)
    MUD = new MemoryDef(I->getContext(), nullptr, I, I->getParent(), NextID++);
  else
    MUD = new MemoryUse(I->getContext(), nullptr, I, I->getParent());
  if (isUseTriviallyOptimizableToLiveOnEntry(*AAP, I)) {
    MemoryAccess *LiveOnEntry = getLiveOnEntryDef();
    MUD->setOptimized(LiveOnEntry);
  }
  ValueToMemoryAccess[I] = MUD;
  return MUD;
}

/// Properly remove \p MA from all of MemorySSA's lookup tables.
void MemorySSA::removeFromLookups(MemoryAccess *MA) {
  assert(MA->use_empty() &&
         "Trying to remove memory access that still has uses");
  BlockNumbering.erase(MA);
  if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
    MUD->setDefiningAccess(nullptr);
  // Invalidate our walker's cache if necessary
  if (!isa<MemoryUse>(MA))
    getWalker()->invalidateInfo(MA);

  Value *MemoryInst;
  if (const auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
    MemoryInst = MUD->getMemoryInst();
  else
    MemoryInst = MA->getBlock();

  auto VMA = ValueToMemoryAccess.find(MemoryInst);
  if (VMA->second == MA)
    ValueToMemoryAccess.erase(VMA);
}

/// Properly remove \p MA from all of MemorySSA's lists.
///
/// Because of the way the intrusive list and use lists work, it is important to
/// do removal in the right order.
/// ShouldDelete defaults to true, and will cause the memory access to also be
/// deleted, not just removed.
void MemorySSA::removeFromLists(MemoryAccess *MA, bool ShouldDelete) {
  BasicBlock *BB = MA->getBlock();
  // The access list owns the reference, so we erase it from the non-owning list
  // first.
  if (!isa<MemoryUse>(MA)) {
    auto DefsIt = PerBlockDefs.find(BB);
    std::unique_ptr<DefsList> &Defs = DefsIt->second;
    Defs->remove(*MA);
    if (Defs->empty())
      PerBlockDefs.erase(DefsIt);
  }

  // The erase call here will delete it. If we don't want it deleted, we call
  // remove instead.
  auto AccessIt = PerBlockAccesses.find(BB);
  std::unique_ptr<AccessList> &Accesses = AccessIt->second;
  if (ShouldDelete)
    Accesses->erase(MA);
  else
    Accesses->remove(MA);

  if (Accesses->empty()) {
    PerBlockAccesses.erase(AccessIt);
    BlockNumberingValid.erase(BB);
  }
}

void MemorySSA::print(raw_ostream &OS) const {
  MemorySSAAnnotatedWriter Writer(this);
  Function *F = this->F;
  if (L)
    F = L->getHeader()->getParent();
  F->print(OS, &Writer);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void MemorySSA::dump() const { print(dbgs()); }
#endif

void MemorySSA::verifyMemorySSA(VerificationLevel VL) const {
#if !defined(NDEBUG) && defined(EXPENSIVE_CHECKS)
  VL = VerificationLevel::Full;
#endif

#ifndef NDEBUG
  if (F) {
    auto Blocks = iterator_range(F->begin(), F->end());
    verifyOrderingDominationAndDefUses(Blocks, VL);
    verifyDominationNumbers(Blocks);
    if (VL == VerificationLevel::Full)
      verifyPrevDefInPhis(Blocks);
  } else {
    assert(L && "must either have loop or function");
    auto Blocks =
        map_range(L->blocks(), [](const BasicBlock *BB) -> BasicBlock & {
          return *const_cast<BasicBlock *>(BB);
        });
    verifyOrderingDominationAndDefUses(Blocks, VL);
    verifyDominationNumbers(Blocks);
    if (VL == VerificationLevel::Full)
      verifyPrevDefInPhis(Blocks);
  }
#endif
  // Previously, the verification used to also verify that the clobberingAccess
  // cached by MemorySSA is the same as the clobberingAccess found at a later
  // query to AA. This does not hold true in general due to the current fragility
  // of BasicAA which has arbitrary caps on the things it analyzes before giving
  // up. As a result, transformations that are correct will lead to BasicAA
  // returning different Alias answers before and after that transformation.
  // Invalidating MemorySSA is not an option, as the results in BasicAA can be so
  // random, in the worst case we'd need to rebuild MemorySSA from scratch after
  // every transformation, which defeats the purpose of using it. For such an
  // example, see test4 added in D51960.
}

template <typename IterT>
void MemorySSA::verifyPrevDefInPhis(IterT Blocks) const {
  for (const BasicBlock &BB : Blocks) {
    if (MemoryPhi *Phi = getMemoryAccess(&BB)) {
      for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
        auto *Pred = Phi->getIncomingBlock(I);
        auto *IncAcc = Phi->getIncomingValue(I);
        // If Pred has no unreachable predecessors, get last def looking at
        // IDoms. If, while walking IDoms, any of these has an unreachable
        // predecessor, then the incoming def can be any access.
        if (auto *DTNode = DT->getNode(Pred)) {
          while (DTNode) {
            if (auto *DefList = getBlockDefs(DTNode->getBlock())) {
              auto *LastAcc = &*(--DefList->end());
              assert(LastAcc == IncAcc &&
                     "Incorrect incoming access into phi.");
              (void)IncAcc;
              (void)LastAcc;
              break;
            }
            DTNode = DTNode->getIDom();
          }
        } else {
          // If Pred has unreachable predecessors, but has at least a Def, the
          // incoming access can be the last Def in Pred, or it could have been
          // optimized to LoE. After an update, though, the LoE may have been
          // replaced by another access, so IncAcc may be any access.
          // If Pred has unreachable predecessors and no Defs, incoming access
          // should be LoE; however, after an update, it may be any access.
        }
      }
    }
  }
}

/// Verify that all of the blocks we believe to have valid domination numbers
/// actually have valid domination numbers.
template <typename IterT>
void MemorySSA::verifyDominationNumbers(IterT Blocks) const {
  if (BlockNumberingValid.empty())
    return;

  SmallPtrSet<const BasicBlock *, 16> ValidBlocks = BlockNumberingValid;
  for (const BasicBlock &BB : Blocks) {
    if (!ValidBlocks.count(&BB))
      continue;

    ValidBlocks.erase(&BB);

    const AccessList *Accesses = getBlockAccesses(&BB);
    // It's correct to say an empty block has valid numbering.
    if (!Accesses)
      continue;

    // Block numbering starts at 1.
    unsigned long LastNumber = 0;
    for (const MemoryAccess &MA : *Accesses) {
      auto ThisNumberIter = BlockNumbering.find(&MA);
      assert(ThisNumberIter != BlockNumbering.end() &&
             "MemoryAccess has no domination number in a valid block!");

      unsigned long ThisNumber = ThisNumberIter->second;
      assert(ThisNumber > LastNumber &&
             "Domination numbers should be strictly increasing!");
      (void)LastNumber;
      LastNumber = ThisNumber;
    }
  }

  assert(ValidBlocks.empty() &&
         "All valid BasicBlocks should exist in F -- dangling pointers?");
}

/// Verify ordering: the order and existence of MemoryAccesses matches the
/// order and existence of memory-affecting instructions.
/// Verify domination: each definition dominates all of its uses.
/// Verify def-uses: the immediate use information - walk all the memory
/// accesses and verify that, for each use, it appears in the appropriate
/// def's use list.
template <typename IterT>
void MemorySSA::verifyOrderingDominationAndDefUses(IterT Blocks,
                                                   VerificationLevel VL) const {
  // Walk all the blocks, comparing what the lookups think and what the access
  // lists think, as well as the order in the blocks vs the order in the access
  // lists.
  SmallVector<MemoryAccess *, 32> ActualAccesses;
  SmallVector<MemoryAccess *, 32> ActualDefs;
  for (BasicBlock &B : Blocks) {
    const AccessList *AL = getBlockAccesses(&B);
    const auto *DL = getBlockDefs(&B);
    MemoryPhi *Phi = getMemoryAccess(&B);
    if (Phi) {
      // Verify ordering.
      ActualAccesses.push_back(Phi);
      ActualDefs.push_back(Phi);
      // Verify domination
      for (const Use &U : Phi->uses()) {
        assert(dominates(Phi, U) && "Memory PHI does not dominate its uses");
        (void)U;
      }
      // Verify def-uses for full verify.
      if (VL == VerificationLevel::Full) {
        assert(Phi->getNumOperands() == pred_size(&B) &&
               "Incomplete MemoryPhi Node");
        for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
          verifyUseInDefs(Phi->getIncomingValue(I), Phi);
          assert(is_contained(predecessors(&B), Phi->getIncomingBlock(I)) &&
                 "Incoming phi block not a block predecessor");
        }
      }
    }

    for (Instruction &I : B) {
      MemoryUseOrDef *MA = getMemoryAccess(&I);
      assert((!MA || (AL && (isa<MemoryUse>(MA) || DL))) &&
             "We have memory affecting instructions "
             "in this block but they are not in the "
             "access list or defs list");
      if (MA) {
        // Verify ordering.
        ActualAccesses.push_back(MA);
        if (MemoryAccess *MD = dyn_cast<MemoryDef>(MA)) {
          // Verify ordering.
          ActualDefs.push_back(MA);
          // Verify domination.
          for (const Use &U : MD->uses()) {
            assert(dominates(MD, U) &&
                   "Memory Def does not dominate its uses");
            (void)U;
          }
        }
        // Verify def-uses for full verify.
        if (VL == VerificationLevel::Full)
          verifyUseInDefs(MA->getDefiningAccess(), MA);
      }
    }
    // Either we hit the assert, really have no accesses, or we have both
    // accesses and an access list. Same with defs.
    if (!AL && !DL)
      continue;
    // Verify ordering.
    assert(AL->size() == ActualAccesses.size() &&
           "We don't have the same number of accesses in the block as on the "
           "access list");
    assert((DL || ActualDefs.size() == 0) &&
           "Either we should have a defs list, or we should have no defs");
    assert((!DL || DL->size() == ActualDefs.size()) &&
           "We don't have the same number of defs in the block as on the "
           "def list");
    auto ALI = AL->begin();
    auto AAI = ActualAccesses.begin();
    while (ALI != AL->end() && AAI != ActualAccesses.end()) {
      assert(&*ALI == *AAI && "Not the same accesses in the same order");
      ++ALI;
      ++AAI;
    }
    ActualAccesses.clear();
    if (DL) {
      auto DLI = DL->begin();
      auto ADI = ActualDefs.begin();
      while (DLI != DL->end() && ADI != ActualDefs.end()) {
        assert(&*DLI == *ADI && "Not the same defs in the same order");
        ++DLI;
        ++ADI;
      }
    }
    ActualDefs.clear();
  }
}

/// Verify the def-use lists in MemorySSA, by verifying that \p Use
/// appears in the use list of \p Def.
void MemorySSA::verifyUseInDefs(MemoryAccess *Def, MemoryAccess *Use) const {
  // The live on entry use may cause us to get a NULL def here
  if (!Def)
    assert(isLiveOnEntryDef(Use) &&
           "Null def but use does not point to live on entry def");
  else
    assert(is_contained(Def->users(), Use) &&
           "Did not find use in def's use list");
}

/// Perform a local numbering on blocks so that instruction ordering can be
/// determined in constant time.
/// TODO: We currently just number in order. If we numbered by N, we could
/// allow at least N-1 sequences of insertBefore or insertAfter (and at least
/// log2(N) sequences of mixed before and after) without needing to invalidate
/// the numbering.
void MemorySSA::renumberBlock(const BasicBlock *B) const {
  // The pre-increment ensures the numbers really start at 1.
  unsigned long CurrentNumber = 0;
  const AccessList *AL = getBlockAccesses(B);
  assert(AL != nullptr && "Asking to renumber an empty block");
  for (const auto &I : *AL)
    BlockNumbering[&I] = ++CurrentNumber;
  BlockNumberingValid.insert(B);
}

/// Determine, for two memory accesses in the same block,
/// whether \p Dominator dominates \p Dominatee.
/// \returns True if \p Dominator dominates \p Dominatee.
bool MemorySSA::locallyDominates(const MemoryAccess *Dominator,
                                 const MemoryAccess *Dominatee) const {
  const BasicBlock *DominatorBlock = Dominator->getBlock();

  assert((DominatorBlock == Dominatee->getBlock()) &&
         "Asking for local domination when accesses are in different blocks!");
  // A node dominates itself.
  if (Dominatee == Dominator)
    return true;

  // When Dominatee is defined on function entry, it is not dominated by another
  // memory access.
  if (isLiveOnEntryDef(Dominatee))
    return false;

  // When Dominator is defined on function entry, it dominates the other memory
  // access.
  if (isLiveOnEntryDef(Dominator))
    return true;

  if (!BlockNumberingValid.count(DominatorBlock))
    renumberBlock(DominatorBlock);

  unsigned long DominatorNum = BlockNumbering.lookup(Dominator);
  // All numbers start with 1
  assert(DominatorNum != 0 && "Block was not numbered properly");
  unsigned long DominateeNum = BlockNumbering.lookup(Dominatee);
  assert(DominateeNum != 0 && "Block was not numbered properly");
  return DominatorNum < DominateeNum;
}

bool MemorySSA::dominates(const MemoryAccess *Dominator,
                          const MemoryAccess *Dominatee) const {
  if (Dominator == Dominatee)
    return true;

  if (isLiveOnEntryDef(Dominatee))
    return false;

  if (Dominator->getBlock() != Dominatee->getBlock())
    return DT->dominates(Dominator->getBlock(), Dominatee->getBlock());
  return locallyDominates(Dominator, Dominatee);
}

bool MemorySSA::dominates(const MemoryAccess *Dominator,
                          const Use &Dominatee) const {
  if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Dominatee.getUser())) {
    BasicBlock *UseBB = MP->getIncomingBlock(Dominatee);
    // The def must dominate the incoming block of the phi.
    if (UseBB != Dominator->getBlock())
      return DT->dominates(Dominator->getBlock(), UseBB);
    // If the UseBB and the DefBB are the same, compare locally.
    return locallyDominates(Dominator, cast<MemoryAccess>(Dominatee));
  }
  // If it's not a PHI node use, the normal dominates can already handle it.
  return dominates(Dominator, cast<MemoryAccess>(Dominatee.getUser()));
}

void MemorySSA::ensureOptimizedUses() {
  if (IsOptimized)
    return;

  BatchAAResults BatchAA(*AA);
  ClobberWalkerBase WalkerBase(this, DT);
  CachingWalker WalkerLocal(this, &WalkerBase);
  OptimizeUses(this, &WalkerLocal, &BatchAA, DT).optimizeUses();
  IsOptimized = true;
}

void MemoryAccess::print(raw_ostream &OS) const {
  switch (getValueID()) {
  case MemoryPhiVal: return static_cast<const MemoryPhi *>(this)->print(OS);
  case MemoryDefVal: return static_cast<const MemoryDef *>(this)->print(OS);
  case MemoryUseVal: return static_cast<const MemoryUse *>(this)->print(OS);
  }
  llvm_unreachable("invalid value id");
}

void MemoryDef::print(raw_ostream &OS) const {
  MemoryAccess *UO = getDefiningAccess();

  auto printID = [&OS](MemoryAccess *A) {
    if (A && A->getID())
      OS << A->getID();
    else
      OS << LiveOnEntryStr;
  };

  OS << getID() << " = MemoryDef(";
  printID(UO);
  OS << ")";

  if (isOptimized()) {
    OS << "->";
    printID(getOptimized());
  }
}

void MemoryPhi::print(raw_ostream &OS) const {
  ListSeparator LS(",");
  OS << getID() << " = MemoryPhi(";
  for (const auto &Op : operands()) {
    BasicBlock *BB = getIncomingBlock(Op);
    MemoryAccess *MA = cast<MemoryAccess>(Op);

    OS << LS << '{';
    if (BB->hasName())
      OS << BB->getName();
    else
      BB->printAsOperand(OS, false);
    OS << ',';
    if (unsigned ID = MA->getID())
      OS << ID;
    else
      OS << LiveOnEntryStr;
    OS << '}';
  }
  OS << ')';
}

void MemoryUse::print(raw_ostream &OS) const {
  MemoryAccess *UO = getDefiningAccess();
  OS << "MemoryUse(";
  if (UO && UO->getID())
    OS << UO->getID();
  else
    OS << LiveOnEntryStr;
  OS << ')';
}

void MemoryAccess::dump() const {
// Cannot completely remove virtual function even in release mode.
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  print(dbgs());
  dbgs() << "\n";
#endif
}
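
// The print routines above produce output roughly like:
//   1 = MemoryDef(liveOnEntry)
//   MemoryUse(1)
//   3 = MemoryPhi({entry,1},{if.then,2})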

class DOTFuncMSSAInfo {
private:
  const Function &F;
  MemorySSAAnnotatedWriter MSSAWriter;

public:
  DOTFuncMSSAInfo(const Function &F, MemorySSA &MSSA)
      : F(F), MSSAWriter(&MSSA) {}

  const Function *getFunction() { return &F; }
  MemorySSAAnnotatedWriter &getWriter() { return MSSAWriter; }
};

namespace llvm {

template <>
struct GraphTraits<DOTFuncMSSAInfo *> : public GraphTraits<const BasicBlock *> {
  static NodeRef getEntryNode(DOTFuncMSSAInfo *CFGInfo) {
    return &(CFGInfo->getFunction()->getEntryBlock());
  }

  // nodes_iterator/begin/end - Allow iteration over all nodes in the graph
  using nodes_iterator = pointer_iterator<Function::const_iterator>;

  static nodes_iterator nodes_begin(DOTFuncMSSAInfo *CFGInfo) {
    return nodes_iterator(CFGInfo->getFunction()->begin());
  }

  static nodes_iterator nodes_end(DOTFuncMSSAInfo *CFGInfo) {
    return nodes_iterator(CFGInfo->getFunction()->end());
  }

  static size_t size(DOTFuncMSSAInfo *CFGInfo) {
    return CFGInfo->getFunction()->size();
  }
};

template <>
struct DOTGraphTraits<DOTFuncMSSAInfo *> : public DefaultDOTGraphTraits {

  DOTGraphTraits(bool IsSimple = false) : DefaultDOTGraphTraits(IsSimple) {}

  static std::string getGraphName(DOTFuncMSSAInfo *CFGInfo) {
    return "MSSA CFG for '" + CFGInfo->getFunction()->getName().str() +
           "' function";
  }

  std::string getNodeLabel(const BasicBlock *Node, DOTFuncMSSAInfo *CFGInfo) {
    return DOTGraphTraits<DOTFuncInfo *>::getCompleteNodeLabel(
        Node, nullptr,
        [CFGInfo](raw_string_ostream &OS, const BasicBlock &BB) -> void {
          BB.print(OS, &CFGInfo->getWriter(), true, true);
        },
        [](std::string &S, unsigned &I, unsigned Idx) -> void {
          std::string Str = S.substr(I, Idx - I);
          StringRef SR = Str;
          if (SR.count(" = MemoryDef(") || SR.count(" = MemoryPhi(") ||
              SR.count("MemoryUse("))
            return;
          DOTGraphTraits<DOTFuncInfo *>::eraseComment(S, I, Idx);
        });
  }

  static std::string getEdgeSourceLabel(const BasicBlock *Node,
                                        const_succ_iterator I) {
    return DOTGraphTraits<DOTFuncInfo *>::getEdgeSourceLabel(Node, I);
  }

  /// Display the raw branch weights from PGO.
  std::string getEdgeAttributes(const BasicBlock *Node, const_succ_iterator I,
                                DOTFuncMSSAInfo *CFGInfo) {
    return "";
  }

  std::string getNodeAttributes(const BasicBlock *Node,
                                DOTFuncMSSAInfo *CFGInfo) {
    return getNodeLabel(Node, CFGInfo).find(';') != std::string::npos
               ? "style=filled, fillcolor=lightpink"
               : "";
  }
};

} // namespace llvm

AnalysisKey MemorySSAAnalysis::Key;

MemorySSAAnalysis::Result MemorySSAAnalysis::run(Function &F,
                                                 FunctionAnalysisManager &AM) {
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  return MemorySSAAnalysis::Result(std::make_unique<MemorySSA>(F, &AA, &DT));
}

bool MemorySSAAnalysis::Result::invalidate(
    Function &F, const PreservedAnalyses &PA,
    FunctionAnalysisManager::Invalidator &Inv) {
  auto PAC = PA.getChecker<MemorySSAAnalysis>();
  return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) ||
         Inv.invalidate<AAManager>(F, PA) ||
         Inv.invalidate<DominatorTreeAnalysis>(F, PA);
}

PreservedAnalyses MemorySSAPrinterPass::run(Function &F,
                                            FunctionAnalysisManager &AM) {
  auto &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
  if (EnsureOptimizedUses)
    MSSA.ensureOptimizedUses();
  if (DotCFGMSSA != "") {
    DOTFuncMSSAInfo CFGInfo(F, MSSA);
    WriteGraph(&CFGInfo, "", false, "MSSA", DotCFGMSSA);
  } else {
    OS << "MemorySSA for function: " << F.getName() << "\n";
    MSSA.print(OS);
  }

  return PreservedAnalyses::all();
}
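
// Note: this printer is typically reached via the "print<memoryssa>" pass
// name; when -dot-cfg-mssa=<file> is set it emits an annotated CFG dot file
// instead of printing the annotated IR to the stream.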

PreservedAnalyses MemorySSAWalkerPrinterPass::run(Function &F,
                                                  FunctionAnalysisManager &AM) {
  auto &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
  OS << "MemorySSA (walker) for function: " << F.getName() << "\n";
  MemorySSAWalkerAnnotatedWriter Writer(&MSSA);
  F.print(OS, &Writer);

  return PreservedAnalyses::all();
}

PreservedAnalyses MemorySSAVerifierPass::run(Function &F,
                                             FunctionAnalysisManager &AM) {
  AM.getResult<MemorySSAAnalysis>(F).getMSSA().verifyMemorySSA();

  return PreservedAnalyses::all();
}

char MemorySSAWrapperPass::ID = 0;

MemorySSAWrapperPass::MemorySSAWrapperPass() : FunctionPass(ID) {
  initializeMemorySSAWrapperPassPass(*PassRegistry::getPassRegistry());
}

void MemorySSAWrapperPass::releaseMemory() { MSSA.reset(); }

void MemorySSAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<DominatorTreeWrapperPass>();
  AU.addRequiredTransitive<AAResultsWrapperPass>();
}

bool MemorySSAWrapperPass::runOnFunction(Function &F) {
  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
  MSSA.reset(new MemorySSA(F, &AA, &DT));
  return false;
}

void MemorySSAWrapperPass::verifyAnalysis() const {
  if (VerifyMemorySSA)
    MSSA->verifyMemorySSA();
}

void MemorySSAWrapperPass::print(raw_ostream &OS, const Module *M) const {
  MSSA->print(OS);
}

MemorySSAWalker::MemorySSAWalker(MemorySSA *M) : MSSA(M) {}

/// Walk the use-def chains starting at \p StartingAccess and find
/// the MemoryAccess that actually clobbers Loc.
///
/// \returns our clobbering memory access
MemoryAccess *MemorySSA::ClobberWalkerBase::getClobberingMemoryAccessBase(
    MemoryAccess *StartingAccess, const MemoryLocation &Loc,
    BatchAAResults &BAA, unsigned &UpwardWalkLimit) {
  assert(!isa<MemoryUse>(StartingAccess) && "Use cannot be defining access");

  // If location is undefined, conservatively return starting access.
  if (Loc.Ptr == nullptr)
    return StartingAccess;

  Instruction *I = nullptr;
  if (auto *StartingUseOrDef = dyn_cast<MemoryUseOrDef>(StartingAccess)) {
    if (MSSA->isLiveOnEntryDef(StartingUseOrDef))
      return StartingUseOrDef;

    I = StartingUseOrDef->getMemoryInst();

    // Conservatively, fences are always clobbers, so don't perform the walk if
    // we hit a fence.
    if (!isa<CallBase>(I) && I->isFenceLike())
      return StartingUseOrDef;
  }

  UpwardsMemoryQuery Q;
  Q.OriginalAccess = StartingAccess;
  Q.StartingLoc = Loc;
  Q.Inst = nullptr;

  // Unlike the other function, do not walk to the def of a def, because we are
  // handed something we already believe is the clobbering access.
  // We never set SkipSelf to true in Q in this method.
  MemoryAccess *Clobber =
      Walker.findClobber(BAA, StartingAccess, Q, UpwardWalkLimit);
  LLVM_DEBUG({
    dbgs() << "Clobber starting at access " << *StartingAccess << "\n";
    if (I)
      dbgs() << "  for instruction " << *I << "\n";
    dbgs() << "  is " << *Clobber << "\n";
  });
  return Clobber;
}

static const Instruction *
getInvariantGroupClobberingInstruction(Instruction &I, DominatorTree &DT) {
  if (!I.hasMetadata(LLVMContext::MD_invariant_group) || I.isVolatile())
    return nullptr;

  // We consider bitcasts and zero GEPs to be the same pointer value. Start by
  // stripping bitcasts and zero GEPs, then we will recursively look at loads
  // and stores through bitcasts and zero GEPs.
  Value *PointerOperand = getLoadStorePointerOperand(&I)->stripPointerCasts();

  // It's not safe to walk the use list of a global value because function
  // passes aren't allowed to look outside their functions.
  // FIXME: this could be fixed by filtering instructions from outside of
  // current function.
  if (isa<Constant>(PointerOperand))
    return nullptr;

  const Instruction *MostDominatingInstruction = &I;

  for (const User *Us : PointerOperand->users()) {
    auto *U = dyn_cast<Instruction>(Us);
    if (!U || U == &I || !DT.dominates(U, MostDominatingInstruction))
      continue;

    // If we hit a load/store with an invariant.group metadata and the same
    // pointer operand, we can assume that value pointed to by the pointer
    // operand didn't change.
    if (U->hasMetadata(LLVMContext::MD_invariant_group) &&
        getLoadStorePointerOperand(U) == PointerOperand && !U->isVolatile()) {
      MostDominatingInstruction = U;
    }
  }

  return MostDominatingInstruction == &I ? nullptr : MostDominatingInstruction;
}

MemoryAccess *MemorySSA::ClobberWalkerBase::getClobberingMemoryAccessBase(
    MemoryAccess *MA, BatchAAResults &BAA, unsigned &UpwardWalkLimit,
    bool SkipSelf, bool UseInvariantGroup) {
  auto *StartingAccess = dyn_cast<MemoryUseOrDef>(MA);
  // If this is a MemoryPhi, we can't do anything.
  if (!StartingAccess)
    return MA;

  if (UseInvariantGroup) {
    if (auto *I = getInvariantGroupClobberingInstruction(
            *StartingAccess->getMemoryInst(), MSSA->getDomTree())) {
      assert(isa<LoadInst>(I) || isa<StoreInst>(I));

      auto *ClobberMA = MSSA->getMemoryAccess(I);
      assert(ClobberMA);
      if (isa<MemoryUse>(ClobberMA))
        return ClobberMA->getDefiningAccess();
      return ClobberMA;
    }
  }

  bool IsOptimized = false;

  // If this is an already optimized use or def, return the optimized result.
  // Note: Currently, we store the optimized def result in a separate field,
  // since we can't use the defining access.
  if (StartingAccess->isOptimized()) {
    if (!SkipSelf || !isa<MemoryDef>(StartingAccess))
      return StartingAccess->getOptimized();
    IsOptimized = true;
  }

  const Instruction *I = StartingAccess->getMemoryInst();
  // We can't sanely do anything with a fence, since they conservatively clobber
  // all memory, and have no locations to get pointers from to try to
  // disambiguate.
  if (!isa<CallBase>(I) && I->isFenceLike())
    return StartingAccess;

  UpwardsMemoryQuery Q(I, StartingAccess);

  if (isUseTriviallyOptimizableToLiveOnEntry(BAA, I)) {
    MemoryAccess *LiveOnEntry = MSSA->getLiveOnEntryDef();
    StartingAccess->setOptimized(LiveOnEntry);
    return LiveOnEntry;
  }

  MemoryAccess *OptimizedAccess;
  if (!IsOptimized) {
    // Start with the thing we already think clobbers this location
    MemoryAccess *DefiningAccess = StartingAccess->getDefiningAccess();

    // At this point, DefiningAccess may be the live on entry def.
    // If it is, we will not get a better result.
    if (MSSA->isLiveOnEntryDef(DefiningAccess)) {
      StartingAccess->setOptimized(DefiningAccess);
      return DefiningAccess;
    }

    OptimizedAccess =
        Walker.findClobber(BAA, DefiningAccess, Q, UpwardWalkLimit);
    StartingAccess->setOptimized(OptimizedAccess);
  } else
    OptimizedAccess = StartingAccess->getOptimized();

  LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
  LLVM_DEBUG(dbgs() << *StartingAccess << "\n");
  LLVM_DEBUG(dbgs() << "Optimized Memory SSA clobber for " << *I << " is ");
  LLVM_DEBUG(dbgs() << *OptimizedAccess << "\n");

  MemoryAccess *Result;
  if (SkipSelf && isa<MemoryPhi>(OptimizedAccess) &&
      isa<MemoryDef>(StartingAccess) && UpwardWalkLimit) {
    assert(isa<MemoryDef>(Q.OriginalAccess));
    Q.SkipSelfAccess = true;
    Result = Walker.findClobber(BAA, OptimizedAccess, Q, UpwardWalkLimit);
  } else
    Result = OptimizedAccess;

  LLVM_DEBUG(dbgs() << "Result Memory SSA clobber [SkipSelf = " << SkipSelf);
  LLVM_DEBUG(dbgs() << "] for " << *I << " is " << *Result << "\n");

  return Result;
}

MemoryAccess *
DoNothingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *MA,
                                                    BatchAAResults &) {
  if (auto *Use = dyn_cast<MemoryUseOrDef>(MA))
    return Use->getDefiningAccess();
  return MA;
}

MemoryAccess *DoNothingMemorySSAWalker::getClobberingMemoryAccess(
    MemoryAccess *StartingAccess, const MemoryLocation &, BatchAAResults &) {
  if (auto *Use = dyn_cast<MemoryUseOrDef>(StartingAccess))
    return Use->getDefiningAccess();
  return StartingAccess;
}

void MemoryPhi::deleteMe(DerivedUser *Self) {
  delete static_cast<MemoryPhi *>(Self);
}

void MemoryDef::deleteMe(DerivedUser *Self) {
  delete static_cast<MemoryDef *>(Self);
}

void MemoryUse::deleteMe(DerivedUser *Self) {
  delete static_cast<MemoryUse *>(Self);
}

bool upward_defs_iterator::IsGuaranteedLoopInvariant(const Value *Ptr) const {
  auto IsGuaranteedLoopInvariantBase = [](const Value *Ptr) {
    Ptr = Ptr->stripPointerCasts();
    if (!isa<Instruction>(Ptr))
      return true;
    return isa<AllocaInst>(Ptr);
  };

  Ptr = Ptr->stripPointerCasts();
  if (auto *I = dyn_cast<Instruction>(Ptr)) {
    if (I->getParent()->isEntryBlock())
      return true;
  }
  if (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
    return IsGuaranteedLoopInvariantBase(GEP->getPointerOperand()) &&
           GEP->hasAllConstantIndices();
  }
  return IsGuaranteedLoopInvariantBase(Ptr);
}