//===- MemorySSA.h - Build Memory SSA ---------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file exposes an interface to building/using memory SSA to
/// walk memory instructions using a use/def graph.
///
/// Memory SSA class builds an SSA form that links together memory access
/// instructions such as loads, stores, atomics, and calls. Additionally, it
/// does a trivial form of "heap versioning". Every time the memory state
/// changes in the program, we generate a new heap version. It generates
/// MemoryDef/Uses/Phis that are overlaid on top of the existing instructions.
///
/// As a trivial example,
/// define i32 @main() #0 {
/// entry:
///   %call = call noalias i8* @_Znwm(i64 4) #2
///   %0 = bitcast i8* %call to i32*
///   %call1 = call noalias i8* @_Znwm(i64 4) #2
///   %1 = bitcast i8* %call1 to i32*
///   store i32 5, i32* %0, align 4
///   store i32 7, i32* %1, align 4
///   %2 = load i32* %0, align 4
///   %3 = load i32* %1, align 4
///   %add = add nsw i32 %2, %3
///   ret i32 %add
/// }
///
/// Will become
/// define i32 @main() #0 {
/// entry:
///   ; 1 = MemoryDef(0)
///   %call = call noalias i8* @_Znwm(i64 4) #3
///   %2 = bitcast i8* %call to i32*
///   ; 2 = MemoryDef(1)
///   %call1 = call noalias i8* @_Znwm(i64 4) #3
///   %4 = bitcast i8* %call1 to i32*
///   ; 3 = MemoryDef(2)
///   store i32 5, i32* %2, align 4
///   ; 4 = MemoryDef(3)
///   store i32 7, i32* %4, align 4
///   ; MemoryUse(3)
///   %7 = load i32* %2, align 4
///   ; MemoryUse(4)
///   %8 = load i32* %4, align 4
///   %add = add nsw i32 %7, %8
///   ret i32 %add
/// }
///
/// Given this form, all the stores that could ever affect the load at %8 can be
/// gotten by using the MemoryUse associated with it, and walking from use to
/// def until you hit the top of the function.
///
/// Each def also has a list of users associated with it, so you can walk from
/// both def to users, and users to defs. Note that we disambiguate MemoryUses,
/// but not the RHS of MemoryDefs. You can see this above at %7, which would
/// otherwise be a MemoryUse(4). Being disambiguated means that for a given
/// store, all the MemoryUses on its use lists are may-aliases of that store
/// (but the MemoryDefs on its use list may not be).
///
/// MemoryDefs are not disambiguated because it would require multiple reaching
/// definitions, which would require multiple phis, and multiple memoryaccesses
/// per instruction.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_MEMORYSSA_H
#define LLVM_ANALYSIS_MEMORYSSA_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/ilist.h"
#include "llvm/ADT/ilist_node.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/ADT/simple_ilist.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DerivedUser.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <iterator>
#include <memory>
#include <utility>

namespace llvm {

/// Enables memory ssa as a dependency for loop passes.
extern cl::opt<bool> EnableMSSALoopDependency;

class Function;
class Instruction;
class LLVMContext;
class MemoryAccess;
class MemorySSAWalker;
class raw_ostream;

namespace MSSAHelpers {

struct AllAccessTag {};
struct DefsOnlyTag {};

} // end namespace MSSAHelpers

enum : unsigned {
  // Used to signify what the default invalid ID is for MemoryAccess's
  // getID()
  INVALID_MEMORYACCESS_ID = -1U
};

template <class T> class memoryaccess_def_iterator_base;
using memoryaccess_def_iterator = memoryaccess_def_iterator_base<MemoryAccess>;
using const_memoryaccess_def_iterator =
    memoryaccess_def_iterator_base<const MemoryAccess>;

// The base for all memory accesses. All memory accesses in a block are
// linked together using an intrusive list.
class MemoryAccess
    : public DerivedUser,
      public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>,
      public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>> {
public:
  using AllAccessType =
      ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>;
  using DefsOnlyType =
      ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>;

  MemoryAccess(const MemoryAccess &) = delete;
  MemoryAccess &operator=(const MemoryAccess &) = delete;

  void *operator new(size_t) = delete;

  // Methods for support type inquiry through isa, cast, and
  // dyn_cast
  static bool classof(const Value *V) {
    unsigned ID = V->getValueID();
    return ID == MemoryUseVal || ID == MemoryPhiVal || ID == MemoryDefVal;
  }

  BasicBlock *getBlock() const { return Block; }

  void print(raw_ostream &OS) const;
  void dump() const;

  /// The user iterators for a memory access
  using iterator = user_iterator;
  using const_iterator = const_user_iterator;

  /// This iterator walks over all of the defs in a given
  /// MemoryAccess. For MemoryPhi nodes, this walks arguments. For
  /// MemoryUse/MemoryDef, this walks the defining access.
  memoryaccess_def_iterator defs_begin();
  const_memoryaccess_def_iterator defs_begin() const;
  memoryaccess_def_iterator defs_end();
  const_memoryaccess_def_iterator defs_end() const;

  /// Get the iterators for the all access list and the defs only list
  /// We default to the all access list.
  AllAccessType::self_iterator getIterator() {
    return this->AllAccessType::getIterator();
  }
  AllAccessType::const_self_iterator getIterator() const {
    return this->AllAccessType::getIterator();
  }
  AllAccessType::reverse_self_iterator getReverseIterator() {
    return this->AllAccessType::getReverseIterator();
  }
  AllAccessType::const_reverse_self_iterator getReverseIterator() const {
    return this->AllAccessType::getReverseIterator();
  }
  DefsOnlyType::self_iterator getDefsIterator() {
    return this->DefsOnlyType::getIterator();
  }
  DefsOnlyType::const_self_iterator getDefsIterator() const {
    return this->DefsOnlyType::getIterator();
  }
  DefsOnlyType::reverse_self_iterator getReverseDefsIterator() {
    return this->DefsOnlyType::getReverseIterator();
  }
  DefsOnlyType::const_reverse_self_iterator getReverseDefsIterator() const {
    return this->DefsOnlyType::getReverseIterator();
  }

protected:
  friend class MemoryDef;
  friend class MemoryPhi;
  friend class MemorySSA;
  friend class MemoryUse;
  friend class MemoryUseOrDef;

  /// Used by MemorySSA to change the block of a MemoryAccess when it is
  /// moved.
  void setBlock(BasicBlock *BB) { Block = BB; }

  /// Used for debugging and tracking things about MemoryAccesses.
  /// Guaranteed unique among MemoryAccesses, no guarantees otherwise.
  inline unsigned getID() const;

  MemoryAccess(LLVMContext &C, unsigned Vty, DeleteValueTy DeleteValue,
               BasicBlock *BB, unsigned NumOperands)
      : DerivedUser(Type::getVoidTy(C), Vty, nullptr, NumOperands, DeleteValue),
        Block(BB) {}

  // Use deleteValue() to delete a generic MemoryAccess.
  ~MemoryAccess() = default;

private:
  BasicBlock *Block;
};

template <>
struct ilist_alloc_traits<MemoryAccess> {
  static void deleteNode(MemoryAccess *MA) { MA->deleteValue(); }
};

inline raw_ostream &operator<<(raw_ostream &OS, const MemoryAccess &MA) {
  MA.print(OS);
  return OS;
}

/// Class that has the common methods + fields of memory uses/defs. It's
/// a little awkward to have, but there are many cases where we want either a
/// use or def, and there are many cases where uses are needed (defs aren't
/// acceptable), and vice-versa.
///
/// This class should never be instantiated directly; make a MemoryUse or
/// MemoryDef instead.
class MemoryUseOrDef : public MemoryAccess {
public:
  void *operator new(size_t) = delete;

  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);

  /// Get the instruction that this MemoryUse represents.
  Instruction *getMemoryInst() const { return MemoryInstruction; }

  /// Get the access that produces the memory state used by this Use.
  MemoryAccess *getDefiningAccess() const { return getOperand(0); }

  static bool classof(const Value *MA) {
    return MA->getValueID() == MemoryUseVal || MA->getValueID() == MemoryDefVal;
  }

  // Sadly, these have to be public because they are needed in some of the
  // iterators.
  inline bool isOptimized() const;
  inline MemoryAccess *getOptimized() const;
  inline void setOptimized(MemoryAccess *);

  // Retrieve AliasResult type of the optimized access. Ideally this would be
  // returned by the caching walker and may go away in the future.
  Optional<AliasResult> getOptimizedAccessType() const {
    return OptimizedAccessAlias;
  }

  /// Reset the ID of what this MemoryUse was optimized to, causing it to
  /// be rewalked by the walker if necessary.
  /// This really should only be called by tests.
  inline void resetOptimized();

protected:
  friend class MemorySSA;
  friend class MemorySSAUpdater;

  MemoryUseOrDef(LLVMContext &C, MemoryAccess *DMA, unsigned Vty,
                 DeleteValueTy DeleteValue, Instruction *MI, BasicBlock *BB,
                 unsigned NumOperands)
      : MemoryAccess(C, Vty, DeleteValue, BB, NumOperands),
        MemoryInstruction(MI), OptimizedAccessAlias(MayAlias) {
    setDefiningAccess(DMA);
  }

  // Use deleteValue() to delete a generic MemoryUseOrDef.
  ~MemoryUseOrDef() = default;

  void setOptimizedAccessType(Optional<AliasResult> AR) {
    OptimizedAccessAlias = AR;
  }

  void setDefiningAccess(MemoryAccess *DMA, bool Optimized = false,
                         Optional<AliasResult> AR = MayAlias) {
    if (!Optimized) {
      setOperand(0, DMA);
      return;
    }
    setOptimized(DMA);
    setOptimizedAccessType(AR);
  }

private:
  Instruction *MemoryInstruction;
  Optional<AliasResult> OptimizedAccessAlias;
};

/// Represents read-only accesses to memory
///
/// In particular, the set of Instructions that will be represented by
/// MemoryUse's is exactly the set of Instructions for which
/// AliasAnalysis::getModRefInfo returns "Ref".
class MemoryUse final : public MemoryUseOrDef {
public:
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);

  MemoryUse(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB)
      : MemoryUseOrDef(C, DMA, MemoryUseVal, deleteMe, MI, BB,
                       /*NumOperands=*/1) {}

  // allocate space for exactly one operand
  void *operator new(size_t s) { return User::operator new(s, 1); }

  static bool classof(const Value *MA) {
    return MA->getValueID() == MemoryUseVal;
  }

  void print(raw_ostream &OS) const;

  void setOptimized(MemoryAccess *DMA) {
    OptimizedID = DMA->getID();
    setOperand(0, DMA);
  }

  bool isOptimized() const {
    return getDefiningAccess() && OptimizedID == getDefiningAccess()->getID();
  }

  MemoryAccess *getOptimized() const {
    return getDefiningAccess();
  }

  void resetOptimized() {
    OptimizedID = INVALID_MEMORYACCESS_ID;
  }

protected:
  friend class MemorySSA;

private:
  static void deleteMe(DerivedUser *Self);

  unsigned OptimizedID = INVALID_MEMORYACCESS_ID;
};

template <>
struct OperandTraits<MemoryUse> : public FixedNumOperandTraits<MemoryUse, 1> {};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUse, MemoryAccess)

/// Represents a read-write access to memory, whether it is a must-alias,
/// or a may-alias.
///
/// In particular, the set of Instructions that will be represented by
/// MemoryDef's is exactly the set of Instructions for which
/// AliasAnalysis::getModRefInfo returns "Mod" or "ModRef".
/// Note that, in order to provide def-def chains, all defs also have a use
/// associated with them. This use points to the nearest reaching
/// MemoryDef/MemoryPhi.
class MemoryDef final : public MemoryUseOrDef {
public:
  friend class MemorySSA;

  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);

  MemoryDef(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB,
            unsigned Ver)
      : MemoryUseOrDef(C, DMA, MemoryDefVal, deleteMe, MI, BB,
                       /*NumOperands=*/2),
        ID(Ver) {}

  // allocate space for exactly two operands
  void *operator new(size_t s) { return User::operator new(s, 2); }

  static bool classof(const Value *MA) {
    return MA->getValueID() == MemoryDefVal;
  }

  void setOptimized(MemoryAccess *MA) {
    setOperand(1, MA);
    OptimizedID = MA->getID();
  }

  MemoryAccess *getOptimized() const {
    return cast_or_null<MemoryAccess>(getOperand(1));
  }

  bool isOptimized() const {
    return getOptimized() && OptimizedID == getOptimized()->getID();
  }

  void resetOptimized() {
    OptimizedID = INVALID_MEMORYACCESS_ID;
    setOperand(1, nullptr);
  }

  void print(raw_ostream &OS) const;

  unsigned getID() const { return ID; }

private:
  static void deleteMe(DerivedUser *Self);

  const unsigned ID;
  unsigned OptimizedID = INVALID_MEMORYACCESS_ID;
};

template <>
struct OperandTraits<MemoryDef> : public FixedNumOperandTraits<MemoryDef, 2> {};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryDef, MemoryAccess)

template <>
struct OperandTraits<MemoryUseOrDef> {
  static Use *op_begin(MemoryUseOrDef *MUD) {
    if (auto *MU = dyn_cast<MemoryUse>(MUD))
      return OperandTraits<MemoryUse>::op_begin(MU);
    return OperandTraits<MemoryDef>::op_begin(cast<MemoryDef>(MUD));
  }

  static Use *op_end(MemoryUseOrDef *MUD) {
    if (auto *MU = dyn_cast<MemoryUse>(MUD))
      return OperandTraits<MemoryUse>::op_end(MU);
    return OperandTraits<MemoryDef>::op_end(cast<MemoryDef>(MUD));
  }

  static unsigned operands(const MemoryUseOrDef *MUD) {
    if (const auto *MU = dyn_cast<MemoryUse>(MUD))
      return OperandTraits<MemoryUse>::operands(MU);
    return OperandTraits<MemoryDef>::operands(cast<MemoryDef>(MUD));
  }
};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUseOrDef, MemoryAccess)

/// Represents phi nodes for memory accesses.
///
/// These have the same semantic as regular phi nodes, with the exception that
/// only one phi will ever exist in a given basic block.
/// Guaranteeing one phi per block means guaranteeing there is only ever one
/// valid reaching MemoryDef/MemoryPHI along each path to the phi node.
/// This is ensured by not allowing disambiguation of the RHS of a MemoryDef or
/// a MemoryPhi's operands.
/// That is, given
/// if (a) {
///   store %a
///   store %b
/// }
/// it *must* be transformed into
/// if (a) {
///    1 = MemoryDef(liveOnEntry)
///    store %a
///    2 = MemoryDef(1)
///    store %b
/// }
/// and *not*
/// if (a) {
///    1 = MemoryDef(liveOnEntry)
///    store %a
///    2 = MemoryDef(liveOnEntry)
///    store %b
/// }
/// even if the two stores do not conflict. Otherwise, both 1 and 2 reach the
/// end of the branch, and if there are not two phi nodes, one will be
/// disconnected completely from the SSA graph below that point.
/// Because MemoryUse's do not generate new definitions, they do not have this
/// issue.
class MemoryPhi final : public MemoryAccess {
  // allocate space for exactly zero operands
  void *operator new(size_t s) { return User::operator new(s); }

public:
  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);

  MemoryPhi(LLVMContext &C, BasicBlock *BB, unsigned Ver, unsigned NumPreds = 0)
      : MemoryAccess(C, MemoryPhiVal, deleteMe, BB, 0), ID(Ver),
        ReservedSpace(NumPreds) {
    allocHungoffUses(ReservedSpace);
  }

  // Block iterator interface. This provides access to the list of incoming
  // basic blocks, which parallels the list of incoming values.
  using block_iterator = BasicBlock **;
  using const_block_iterator = BasicBlock *const *;

  block_iterator block_begin() {
    auto *Ref = reinterpret_cast<Use::UserRef *>(op_begin() + ReservedSpace);
    return reinterpret_cast<block_iterator>(Ref + 1);
  }

  const_block_iterator block_begin() const {
    const auto *Ref =
        reinterpret_cast<const Use::UserRef *>(op_begin() + ReservedSpace);
    return reinterpret_cast<const_block_iterator>(Ref + 1);
  }

  block_iterator block_end() { return block_begin() + getNumOperands(); }

  const_block_iterator block_end() const {
    return block_begin() + getNumOperands();
  }

  iterator_range<block_iterator> blocks() {
    return make_range(block_begin(), block_end());
  }

  iterator_range<const_block_iterator> blocks() const {
    return make_range(block_begin(), block_end());
  }

  op_range incoming_values() { return operands(); }

  const_op_range incoming_values() const { return operands(); }

  /// Return the number of incoming edges
  unsigned getNumIncomingValues() const { return getNumOperands(); }

  /// Return incoming value number x
  MemoryAccess *getIncomingValue(unsigned I) const { return getOperand(I); }
  void setIncomingValue(unsigned I, MemoryAccess *V) {
    assert(V && "PHI node got a null value!");
    setOperand(I, V);
  }

  static unsigned getOperandNumForIncomingValue(unsigned I) { return I; }
  static unsigned getIncomingValueNumForOperand(unsigned I) { return I; }

  /// Return incoming basic block number @p i.
  BasicBlock *getIncomingBlock(unsigned I) const { return block_begin()[I]; }

  /// Return incoming basic block corresponding
  /// to an operand of the PHI.
  BasicBlock *getIncomingBlock(const Use &U) const {
    assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
    return getIncomingBlock(unsigned(&U - op_begin()));
  }

  /// Return incoming basic block corresponding
  /// to value use iterator.
  BasicBlock *getIncomingBlock(MemoryAccess::const_user_iterator I) const {
    return getIncomingBlock(I.getUse());
  }

  void setIncomingBlock(unsigned I, BasicBlock *BB) {
    assert(BB && "PHI node got a null basic block!");
    block_begin()[I] = BB;
  }

  /// Add an incoming value to the end of the PHI list
  void addIncoming(MemoryAccess *V, BasicBlock *BB) {
    if (getNumOperands() == ReservedSpace)
      growOperands(); // Get more space!
    // Initialize some new operands.
    setNumHungOffUseOperands(getNumOperands() + 1);
    setIncomingValue(getNumOperands() - 1, V);
    setIncomingBlock(getNumOperands() - 1, BB);
  }

  /// Return the first index of the specified basic
  /// block in the value list for this PHI. Returns -1 if no instance.
  int getBasicBlockIndex(const BasicBlock *BB) const {
    for (unsigned I = 0, E = getNumOperands(); I != E; ++I)
      if (block_begin()[I] == BB)
        return I;
    return -1;
  }

  MemoryAccess *getIncomingValueForBlock(const BasicBlock *BB) const {
    int Idx = getBasicBlockIndex(BB);
    assert(Idx >= 0 && "Invalid basic block argument!");
    return getIncomingValue(Idx);
  }

  // After deleting incoming position I, the order of incoming may be changed.
  void unorderedDeleteIncoming(unsigned I) {
    unsigned E = getNumOperands();
    assert(I < E && "Cannot remove out of bounds Phi entry.");
    // MemoryPhi must have at least two incoming values, otherwise the MemoryPhi
    // itself should be deleted.
    assert(E >= 2 && "Cannot only remove incoming values in MemoryPhis with "
                     "at least 2 values.");
    setIncomingValue(I, getIncomingValue(E - 1));
    setIncomingBlock(I, block_begin()[E - 1]);
    setOperand(E - 1, nullptr);
    block_begin()[E - 1] = nullptr;
    setNumHungOffUseOperands(getNumOperands() - 1);
  }

  // After deleting entries that satisfy Pred, remaining entries may have
  // changed order.
  template <typename Fn> void unorderedDeleteIncomingIf(Fn &&Pred) {
    for (unsigned I = 0, E = getNumOperands(); I != E; ++I)
      if (Pred(getIncomingValue(I), getIncomingBlock(I))) {
        unorderedDeleteIncoming(I);
        E = getNumOperands();
        --I;
      }
    assert(getNumOperands() >= 1 &&
           "Cannot remove all incoming blocks in a MemoryPhi.");
  }

  // After deleting incoming block BB, the incoming blocks order may be changed.
  void unorderedDeleteIncomingBlock(const BasicBlock *BB) {
    unorderedDeleteIncomingIf(
        [&](const MemoryAccess *, const BasicBlock *B) { return BB == B; });
  }

  // After deleting incoming memory access MA, the incoming accesses order may
  // be changed.
  void unorderedDeleteIncomingValue(const MemoryAccess *MA) {
    unorderedDeleteIncomingIf(
        [&](const MemoryAccess *M, const BasicBlock *) { return MA == M; });
  }

  static bool classof(const Value *V) {
    return V->getValueID() == MemoryPhiVal;
  }

  void print(raw_ostream &OS) const;

  unsigned getID() const { return ID; }

protected:
  friend class MemorySSA;

  /// this is more complicated than the generic
  /// User::allocHungoffUses, because we have to allocate Uses for the incoming
  /// values and pointers to the incoming blocks, all in one allocation.
  void allocHungoffUses(unsigned N) {
    User::allocHungoffUses(N, /* IsPhi */ true);
  }

private:
  // For debugging only
  const unsigned ID;
  unsigned ReservedSpace;

  /// This grows the operand list in response to a push_back style of
  /// operation. This grows the number of ops by 1.5 times.
  void growOperands() {
    unsigned E = getNumOperands();
    // 2 op PHI nodes are VERY common, so reserve at least enough for that.
    ReservedSpace = std::max(E + E / 2, 2u);
    growHungoffUses(ReservedSpace, /* IsPhi */ true);
  }

  static void deleteMe(DerivedUser *Self);
};

inline unsigned MemoryAccess::getID() const {
  assert((isa<MemoryDef>(this) || isa<MemoryPhi>(this)) &&
         "only memory defs and phis have ids");
  if (const auto *MD = dyn_cast<MemoryDef>(this))
    return MD->getID();
  return cast<MemoryPhi>(this)->getID();
}

inline bool MemoryUseOrDef::isOptimized() const {
  if (const auto *MD = dyn_cast<MemoryDef>(this))
    return MD->isOptimized();
  return cast<MemoryUse>(this)->isOptimized();
}

inline MemoryAccess *MemoryUseOrDef::getOptimized() const {
  if (const auto *MD = dyn_cast<MemoryDef>(this))
    return MD->getOptimized();
  return cast<MemoryUse>(this)->getOptimized();
}

inline void MemoryUseOrDef::setOptimized(MemoryAccess *MA) {
  if (auto *MD = dyn_cast<MemoryDef>(this))
    MD->setOptimized(MA);
  else
    cast<MemoryUse>(this)->setOptimized(MA);
}

inline void MemoryUseOrDef::resetOptimized() {
  if (auto *MD = dyn_cast<MemoryDef>(this))
    MD->resetOptimized();
  else
    cast<MemoryUse>(this)->resetOptimized();
}

template <> struct OperandTraits<MemoryPhi> : public HungoffOperandTraits<2> {};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryPhi, MemoryAccess)

/// Encapsulates MemorySSA, including all data associated with memory
/// accesses.
class MemorySSA {
public:
  MemorySSA(Function &, AliasAnalysis *, DominatorTree *);

  // MemorySSA must remain where it's constructed; Walkers it creates store
  // pointers to it.
  MemorySSA(MemorySSA &&) = delete;

  ~MemorySSA();

  MemorySSAWalker *getWalker();
  MemorySSAWalker *getSkipSelfWalker();

  /// Given a memory Mod/Ref'ing instruction, get the MemorySSA
  /// access associated with it. If passed a basic block gets the memory phi
  /// node that exists for that block, if there is one. Otherwise, this will get
  /// a MemoryUseOrDef.
  MemoryUseOrDef *getMemoryAccess(const Instruction *I) const {
    return cast_or_null<MemoryUseOrDef>(ValueToMemoryAccess.lookup(I));
  }

  MemoryPhi *getMemoryAccess(const BasicBlock *BB) const {
    return cast_or_null<MemoryPhi>(ValueToMemoryAccess.lookup(cast<Value>(BB)));
  }

  void dump() const;
  void print(raw_ostream &) const;

  /// Return true if \p MA represents the live on entry value
  ///
  /// Loads and stores from pointer arguments and other global values may be
  /// defined by memory operations that do not occur in the current function, so
  /// they may be live on entry to the function. MemorySSA represents such
  /// memory state by the live on entry definition, which is guaranteed to occur
  /// before any other memory access in the function.
  inline bool isLiveOnEntryDef(const MemoryAccess *MA) const {
    return MA == LiveOnEntryDef.get();
  }

  inline MemoryAccess *getLiveOnEntryDef() const {
    return LiveOnEntryDef.get();
  }

  // Sadly, iplists, by default, owns and deletes pointers added to the
  // list. It's not currently possible to have two iplists for the same type,
  // where one owns the pointers, and one does not. This is because the traits
  // are per-type, not per-tag. If this ever changes, we should make the
  // DefList an iplist.
  using AccessList = iplist<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>;
  using DefsList =
      simple_ilist<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>;

  /// Return the list of MemoryAccess's for a given basic block.
  ///
  /// This list is not modifiable by the user.
  const AccessList *getBlockAccesses(const BasicBlock *BB) const {
    return getWritableBlockAccesses(BB);
  }

  /// Return the list of MemoryDef's and MemoryPhi's for a given basic
  /// block.
  ///
  /// This list is not modifiable by the user.
  const DefsList *getBlockDefs(const BasicBlock *BB) const {
    return getWritableBlockDefs(BB);
  }

  /// Given two memory accesses in the same basic block, determine
  /// whether MemoryAccess \p A dominates MemoryAccess \p B.
  bool locallyDominates(const MemoryAccess *A, const MemoryAccess *B) const;

  /// Given two memory accesses in potentially different blocks,
  /// determine whether MemoryAccess \p A dominates MemoryAccess \p B.
  bool dominates(const MemoryAccess *A, const MemoryAccess *B) const;

  /// Given a MemoryAccess and a Use, determine whether MemoryAccess \p A
  /// dominates Use \p B.
  bool dominates(const MemoryAccess *A, const Use &B) const;

  /// Verify that MemorySSA is self consistent (IE definitions dominate
  /// all uses, uses appear in the right places). This is used by unit tests.
  void verifyMemorySSA() const;

  /// Used in various insertion functions to specify whether we are talking
  /// about the beginning or end of a block.
  enum InsertionPlace { Beginning, End };

protected:
  // Used by Memory SSA annotator, dumpers, and wrapper pass
  friend class MemorySSAAnnotatedWriter;
  friend class MemorySSAPrinterLegacyPass;
  friend class MemorySSAUpdater;

  void verifyPrevDefInPhis(Function &F) const;
  void verifyDefUses(Function &F) const;
  void verifyDomination(Function &F) const;
  void verifyOrdering(Function &F) const;
  void verifyDominationNumbers(const Function &F) const;

  // This is used by the use optimizer and updater.
  AccessList *getWritableBlockAccesses(const BasicBlock *BB) const {
    auto It = PerBlockAccesses.find(BB);
    return It == PerBlockAccesses.end() ? nullptr : It->second.get();
  }

  // This is used by the use optimizer and updater.
  DefsList *getWritableBlockDefs(const BasicBlock *BB) const {
    auto It = PerBlockDefs.find(BB);
    return It == PerBlockDefs.end() ? nullptr : It->second.get();
  }

  // These are used by the updater to perform various internal MemorySSA
  // machinations. They do not always leave the IR in a correct state, and
  // rely on the updater to fix up what they break, so they are not public.

  void moveTo(MemoryUseOrDef *What, BasicBlock *BB, AccessList::iterator Where);
  void moveTo(MemoryAccess *What, BasicBlock *BB, InsertionPlace Point);

  // Rename the dominator tree branch rooted at BB.
  void renamePass(BasicBlock *BB, MemoryAccess *IncomingVal,
                  SmallPtrSetImpl<BasicBlock *> &Visited) {
    renamePass(DT->getNode(BB), IncomingVal, Visited, true, true);
  }

  void removeFromLookups(MemoryAccess *);
  void removeFromLists(MemoryAccess *, bool ShouldDelete = true);
  void insertIntoListsForBlock(MemoryAccess *, const BasicBlock *,
                               InsertionPlace);
  void insertIntoListsBefore(MemoryAccess *, const BasicBlock *,
                             AccessList::iterator);
  MemoryUseOrDef *createDefinedAccess(Instruction *, MemoryAccess *,
                                      const MemoryUseOrDef *Template = nullptr,
                                      bool CreationMustSucceed = true);

private:
  template <class AliasAnalysisType> class ClobberWalkerBase;
  template <class AliasAnalysisType> class CachingWalker;
  template <class AliasAnalysisType> class SkipSelfWalker;

  CachingWalker<AliasAnalysis> *getWalkerImpl();
  void buildMemorySSA(BatchAAResults &BAA);

  void prepareForMoveTo(MemoryAccess *, BasicBlock *);
  void verifyUseInDefs(MemoryAccess *, MemoryAccess *) const;

  using AccessMap = DenseMap<const BasicBlock *, std::unique_ptr<AccessList>>;
  using DefsMap = DenseMap<const BasicBlock *, std::unique_ptr<DefsList>>;

  void
  determineInsertionPoint(const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks);
  void markUnreachableAsLiveOnEntry(BasicBlock *BB);
  bool dominatesUse(const MemoryAccess *, const MemoryAccess *) const;
  MemoryPhi *createMemoryPhi(BasicBlock *BB);
  template <typename AliasAnalysisType>
  MemoryUseOrDef *createNewAccess(Instruction *, AliasAnalysisType *,
                                  const MemoryUseOrDef *Template = nullptr);
  MemoryAccess *findDominatingDef(BasicBlock *, enum InsertionPlace);
  void placePHINodes(const SmallPtrSetImpl<BasicBlock *> &);
  MemoryAccess *renameBlock(BasicBlock *, MemoryAccess *, bool);
  void renameSuccessorPhis(BasicBlock *, MemoryAccess *, bool);
  void renamePass(DomTreeNode *, MemoryAccess *IncomingVal,
                  SmallPtrSetImpl<BasicBlock *> &Visited,
                  bool SkipVisited = false, bool RenameAllUses = false);
  AccessList *getOrCreateAccessList(const BasicBlock *);
  DefsList *getOrCreateDefsList(const BasicBlock *);
  void renumberBlock(const BasicBlock *) const;

  AliasAnalysis *AA;
  DominatorTree *DT;
  Function &F;

  // Memory SSA mappings
  DenseMap<const Value *, MemoryAccess *> ValueToMemoryAccess;

  // These two mappings contain the main block to access/def mappings for
  // MemorySSA. The list contained in PerBlockAccesses really owns all the
  // MemoryAccesses.
  // Both maps maintain the invariant that if a block is found in them, the
  // corresponding list is not empty, and if a block is not found in them, the
  // corresponding list is empty.
  AccessMap PerBlockAccesses;
  DefsMap PerBlockDefs;
  std::unique_ptr<MemoryAccess, ValueDeleter> LiveOnEntryDef;

  // Domination mappings
  // Note that the numbering is local to a block, even though the map is
  // global.
  mutable SmallPtrSet<const BasicBlock *, 16> BlockNumberingValid;
  mutable DenseMap<const MemoryAccess *, unsigned long> BlockNumbering;

  // Memory SSA building info
  std::unique_ptr<ClobberWalkerBase<AliasAnalysis>> WalkerBase;
  std::unique_ptr<CachingWalker<AliasAnalysis>> Walker;
  std::unique_ptr<SkipSelfWalker<AliasAnalysis>> SkipWalker;
  unsigned NextID;
};

// Internal MemorySSA utils, for use by MemorySSA classes and walkers
class MemorySSAUtil {
protected:
  friend class GVNHoist;
  friend class MemorySSAWalker;

  // This function should not be used by new passes.
  static bool defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
                                  AliasAnalysis &AA);
};

// This pass does eager building and then printing of MemorySSA. It is used by
// the tests to be able to build, dump, and verify Memory SSA.
class MemorySSAPrinterLegacyPass : public FunctionPass {
public:
  MemorySSAPrinterLegacyPass();

  bool runOnFunction(Function &) override;
  void getAnalysisUsage(AnalysisUsage &AU) const override;

  static char ID;
};

/// An analysis that produces \c MemorySSA for a function.
///
class MemorySSAAnalysis : public AnalysisInfoMixin<MemorySSAAnalysis> {
  friend AnalysisInfoMixin<MemorySSAAnalysis>;

  static AnalysisKey Key;

public:
  // Wrap MemorySSA result to ensure address stability of internal MemorySSA
  // pointers after construction. Use a wrapper class instead of plain
  // unique_ptr<MemorySSA> to avoid build breakage on MSVC.
  struct Result {
    Result(std::unique_ptr<MemorySSA> &&MSSA) : MSSA(std::move(MSSA)) {}

    MemorySSA &getMSSA() { return *MSSA.get(); }

    std::unique_ptr<MemorySSA> MSSA;

    bool invalidate(Function &F, const PreservedAnalyses &PA,
                    FunctionAnalysisManager::Invalidator &Inv);
  };

  Result run(Function &F, FunctionAnalysisManager &AM);
};

/// Printer pass for \c MemorySSA.
class MemorySSAPrinterPass : public PassInfoMixin<MemorySSAPrinterPass> {
  raw_ostream &OS;

public:
  explicit MemorySSAPrinterPass(raw_ostream &OS) : OS(OS) {}

  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

/// Verifier pass for \c MemorySSA.
struct MemorySSAVerifierPass : PassInfoMixin<MemorySSAVerifierPass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};

/// Legacy analysis pass which computes \c MemorySSA.
class MemorySSAWrapperPass : public FunctionPass {
public:
  MemorySSAWrapperPass();

  static char ID;

  bool runOnFunction(Function &) override;
  void releaseMemory() override;
  MemorySSA &getMSSA() { return *MSSA; }
  const MemorySSA &getMSSA() const { return *MSSA; }

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  void verifyAnalysis() const override;
  void print(raw_ostream &OS, const Module *M = nullptr) const override;

private:
  std::unique_ptr<MemorySSA> MSSA;
};

/// This is the generic walker interface for walkers of MemorySSA.
/// Walkers are used to be able to further disambiguate the def-use chains
/// MemorySSA gives you, or otherwise produce better info than MemorySSA gives
/// you.
/// In particular, while the def-use chains provide basic information, and are
/// guaranteed to give, for example, the nearest may-aliasing MemoryDef for a
/// MemoryUse as AliasAnalysis considers it, a user may want better or other
/// information. In particular, they may want to use SCEV info to further
/// disambiguate memory accesses, or they may want the nearest dominating
/// may-aliasing MemoryDef for a call or a store. This API enables a
/// standardized interface to getting and using that info.
class MemorySSAWalker {
public:
  MemorySSAWalker(MemorySSA *);
  virtual ~MemorySSAWalker() = default;

  using MemoryAccessSet = SmallVector<MemoryAccess *, 8>;

  /// Given a memory Mod/Ref/ModRef'ing instruction, calling this
  /// will give you the nearest dominating MemoryAccess that Mod's the location
  /// the instruction accesses (by skipping any def which AA can prove does not
  /// alias the location(s) accessed by the instruction given).
  ///
  /// Note that this will return a single access, and it must dominate the
  /// Instruction, so if an operand of a MemoryPhi node Mod's the instruction,
  /// this will return the MemoryPhi, not the operand. This means that
  /// given:
  /// if (a) {
  ///   1 = MemoryDef(liveOnEntry)
  ///   store %a
  /// } else {
  ///   2 = MemoryDef(liveOnEntry)
  ///   store %b
  /// }
  /// 3 = MemoryPhi(2, 1)
  /// MemoryUse(3)
  /// load %a
  ///
  /// calling this API on load(%a) will return the MemoryPhi, not the MemoryDef
  /// in the if (a) branch.
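  ///
  /// As a usage sketch (LoadI and MSSA below are hypothetical: a load
  /// instruction and its owning MemorySSA object, respectively):
  ///
  ///   MemorySSAWalker *Walker = MSSA.getWalker();
  ///   MemoryAccess *Clobber = Walker->getClobberingMemoryAccess(&LoadI);
  ///   if (MSSA.isLiveOnEntryDef(Clobber))
  ///     ; // nothing in this function clobbers the loaded location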
  MemoryAccess *getClobberingMemoryAccess(const Instruction *I) {
    MemoryAccess *MA = MSSA->getMemoryAccess(I);
    assert(MA && "Handed an instruction that MemorySSA doesn't recognize?");
    return getClobberingMemoryAccess(MA);
  }

  /// Does the same thing as getClobberingMemoryAccess(const Instruction *I),
  /// but takes a MemoryAccess instead of an Instruction.
  virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) = 0;

  /// Given a potentially clobbering memory access and a new location,
  /// calling this will give you the nearest dominating clobbering MemoryAccess
  /// (by skipping non-aliasing def links).
  ///
  /// This version of the function is mainly used to disambiguate phi translated
  /// pointers, where the value of a pointer may have changed from the initial
  /// memory access. Note that this expects to be handed either a MemoryUse,
  /// or an already potentially clobbering access. Unlike the above API, if
  /// given a MemoryDef that clobbers the pointer as the starting access, it
  /// will return that MemoryDef, whereas the above would return the clobber
  /// starting from the use side of the memory def.
  virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
                                                  const MemoryLocation &) = 0;

  /// Given a memory access, invalidate anything this walker knows about
  /// that access.
  ///
  /// This API is used by walkers that store information to perform basic cache
  /// invalidation. This will be called by MemorySSA at appropriate times for
  /// the walker it uses or returns.
  virtual void invalidateInfo(MemoryAccess *) {}

protected:
  friend class MemorySSA; // For updating MSSA pointer in MemorySSA move
  // constructor.

  MemorySSA *MSSA;
};

/// A MemorySSAWalker that does no alias queries, or anything else. It
/// simply returns the links as they were constructed by the builder.
class DoNothingMemorySSAWalker final : public MemorySSAWalker {
public:
  // Keep the overrides below from hiding the Instruction overload of
  // getClobberingMemoryAccess.
  using MemorySSAWalker::getClobberingMemoryAccess;

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) override;
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
                                          const MemoryLocation &) override;
};

using MemoryAccessPair = std::pair<MemoryAccess *, MemoryLocation>;
using ConstMemoryAccessPair = std::pair<const MemoryAccess *, MemoryLocation>;

/// Iterator base class used to implement const and non-const iterators
/// over the defining accesses of a MemoryAccess.
template <class T>
class memoryaccess_def_iterator_base
    : public iterator_facade_base<memoryaccess_def_iterator_base<T>,
                                  std::forward_iterator_tag, T, ptrdiff_t, T *,
                                  T *> {
  using BaseT = typename memoryaccess_def_iterator_base::iterator_facade_base;

public:
  memoryaccess_def_iterator_base(T *Start) : Access(Start) {}
  memoryaccess_def_iterator_base() = default;

  bool operator==(const memoryaccess_def_iterator_base &Other) const {
    return Access == Other.Access && (!Access || ArgNo == Other.ArgNo);
  }

  // This is a bit ugly, but for MemoryPHI's, unlike PHINodes, you can't get the
  // block from the operand in constant time (In a PHINode, the uselist has
  // both, so it's just subtraction). We provide it as part of the
  // iterator to avoid callers having to linear walk to get the block.
  // If the operation becomes constant time on MemoryPHI's, this bit of
  // abstraction breaking should be removed.
  BasicBlock *getPhiArgBlock() const {
    MemoryPhi *MP = dyn_cast<MemoryPhi>(Access);
    assert(MP && "Tried to get phi arg block when not iterating over a PHI");
    return MP->getIncomingBlock(ArgNo);
  }

  typename BaseT::iterator::pointer operator*() const {
    assert(Access && "Tried to access past the end of our iterator");
    // Go to the first argument for phis, and the defining access for everything
    // else.
    if (const MemoryPhi *MP = dyn_cast<MemoryPhi>(Access))
      return MP->getIncomingValue(ArgNo);
    return cast<MemoryUseOrDef>(Access)->getDefiningAccess();
  }

  using BaseT::operator++;
  memoryaccess_def_iterator_base &operator++() {
    assert(Access && "Hit end of iterator");
    if (const MemoryPhi *MP = dyn_cast<MemoryPhi>(Access)) {
      if (++ArgNo >= MP->getNumIncomingValues()) {
        ArgNo = 0;
        Access = nullptr;
      }
    } else {
      Access = nullptr;
    }
    return *this;
  }

private:
  T *Access = nullptr;
  unsigned ArgNo = 0;
};

inline memoryaccess_def_iterator MemoryAccess::defs_begin() {
  return memoryaccess_def_iterator(this);
}

inline const_memoryaccess_def_iterator MemoryAccess::defs_begin() const {
  return const_memoryaccess_def_iterator(this);
}

inline memoryaccess_def_iterator MemoryAccess::defs_end() {
  return memoryaccess_def_iterator();
}

inline const_memoryaccess_def_iterator MemoryAccess::defs_end() const {
  return const_memoryaccess_def_iterator();
}

/// GraphTraits for a MemoryAccess, which walks defs in the normal case,
/// and uses in the inverse case.
template <> struct GraphTraits<MemoryAccess *> {
  using NodeRef = MemoryAccess *;
  using ChildIteratorType = memoryaccess_def_iterator;

  static NodeRef getEntryNode(NodeRef N) { return N; }
  static ChildIteratorType child_begin(NodeRef N) { return N->defs_begin(); }
  static ChildIteratorType child_end(NodeRef N) { return N->defs_end(); }
};

template <> struct GraphTraits<Inverse<MemoryAccess *>> {
  using NodeRef = MemoryAccess *;
  using ChildIteratorType = MemoryAccess::iterator;

  static NodeRef getEntryNode(NodeRef N) { return N; }
  static ChildIteratorType child_begin(NodeRef N) { return N->user_begin(); }
  static ChildIteratorType child_end(NodeRef N) { return N->user_end(); }
};

/// Provide an iterator that walks defs, giving both the memory access,
/// and the current pointer location, updating the pointer location as it
/// changes due to phi node translation.
///
/// This iterator, while somewhat specialized, is what most clients actually
/// want when walking upwards through MemorySSA def chains. It takes a pair of
/// <MemoryAccess,MemoryLocation>, and walks defs, properly translating the
/// memory location through phi nodes for the user.
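///
/// A minimal usage sketch (LoadI and MSSA are hypothetical: a load
/// instruction and its owning MemorySSA):
///
///   MemoryAccess *Start = MSSA.getMemoryAccess(&LoadI);
///   MemoryLocation Loc = MemoryLocation::get(&LoadI);
///   for (const MemoryAccessPair &Pair : upward_defs({Start, Loc})) {
///     MemoryAccess *Def = Pair.first;           // next def on the walk
///     const MemoryLocation &TranslatedLoc = Pair.second;
///     // ... use Def/TranslatedLoc ...
///   }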
class upward_defs_iterator
    : public iterator_facade_base<upward_defs_iterator,
                                  std::forward_iterator_tag,
                                  const MemoryAccessPair> {
  using BaseT = upward_defs_iterator::iterator_facade_base;

public:
  upward_defs_iterator(const MemoryAccessPair &Info)
      : DefIterator(Info.first), Location(Info.second),
        OriginalAccess(Info.first) {
    CurrentPair.first = nullptr;

    WalkingPhi = Info.first && isa<MemoryPhi>(Info.first);
    fillInCurrentPair();
  }

  upward_defs_iterator() { CurrentPair.first = nullptr; }

  bool operator==(const upward_defs_iterator &Other) const {
    return DefIterator == Other.DefIterator;
  }

  BaseT::iterator::reference operator*() const {
    assert(DefIterator != OriginalAccess->defs_end() &&
           "Tried to access past the end of our iterator");
    return CurrentPair;
  }

  using BaseT::operator++;
  upward_defs_iterator &operator++() {
    assert(DefIterator != OriginalAccess->defs_end() &&
           "Tried to access past the end of the iterator");
    ++DefIterator;
    if (DefIterator != OriginalAccess->defs_end())
      fillInCurrentPair();
    return *this;
  }

  BasicBlock *getPhiArgBlock() const { return DefIterator.getPhiArgBlock(); }

private:
  void fillInCurrentPair() {
    CurrentPair.first = *DefIterator;
    if (WalkingPhi && Location.Ptr) {
      PHITransAddr Translator(
          const_cast<Value *>(Location.Ptr),
          OriginalAccess->getBlock()->getModule()->getDataLayout(), nullptr);
      if (!Translator.PHITranslateValue(OriginalAccess->getBlock(),
                                        DefIterator.getPhiArgBlock(), nullptr,
                                        false))
        if (Translator.getAddr() != Location.Ptr) {
          CurrentPair.second = Location.getWithNewPtr(Translator.getAddr());
          return;
        }
    }
    CurrentPair.second = Location;
  }

  MemoryAccessPair CurrentPair;
  memoryaccess_def_iterator DefIterator;
  MemoryLocation Location;
  MemoryAccess *OriginalAccess = nullptr;
  bool WalkingPhi = false;
};

inline upward_defs_iterator upward_defs_begin(const MemoryAccessPair &Pair) {
  return upward_defs_iterator(Pair);
}

inline upward_defs_iterator upward_defs_end() { return upward_defs_iterator(); }

inline iterator_range<upward_defs_iterator>
upward_defs(const MemoryAccessPair &Pair) {
  return make_range(upward_defs_begin(Pair), upward_defs_end());
}

/// Walks the defining accesses of MemoryDefs. Stops after we hit something that
/// has no defining use (e.g. a MemoryPhi or liveOnEntry). Note that, when
/// comparing against a null def_chain_iterator, this will compare equal only
/// after walking said Phi/liveOnEntry.
///
/// The UseOptimizedChain flag specifies whether to walk the clobbering
/// access chain, or all the accesses.
///
/// Normally, MemoryDef are all just def/use linked together, so a def_chain on
/// a MemoryDef will walk all MemoryDefs above it in the program until it hits
/// a phi node. The optimized chain walks the clobbering access of a store.
/// So if you are just trying to find, given a store, what the next
/// thing that would clobber the same memory is, you want the optimized chain.
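///
/// For example (a sketch; StoreAccess is a hypothetical MemoryAccess the
/// caller already holds for some store of interest):
///
///   MemoryAccess *StoreAccess = ...;
///   for (MemoryAccess *MA : def_chain(StoreAccess))
///     ; // visits StoreAccess itself, then every def above it, stopping
///       // after a MemoryPhi or the liveOnEntry def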
template <class T, bool UseOptimizedChain = false>
struct def_chain_iterator
    : public iterator_facade_base<def_chain_iterator<T, UseOptimizedChain>,
                                  std::forward_iterator_tag, MemoryAccess *> {
  def_chain_iterator() : MA(nullptr) {}
  def_chain_iterator(T MA) : MA(MA) {}

  T operator*() const { return MA; }

  def_chain_iterator &operator++() {
    // N.B. liveOnEntry has a null defining access.
    if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) {
      if (UseOptimizedChain && MUD->isOptimized())
        MA = MUD->getOptimized();
      else
        MA = MUD->getDefiningAccess();
    } else {
      MA = nullptr;
    }

    return *this;
  }

  bool operator==(const def_chain_iterator &O) const { return MA == O.MA; }

private:
  T MA;
};

template <class T>
inline iterator_range<def_chain_iterator<T>>
def_chain(T MA, MemoryAccess *UpTo = nullptr) {
#ifdef EXPENSIVE_CHECKS
  assert((!UpTo || find(def_chain(MA), UpTo) != def_chain_iterator<T>()) &&
         "UpTo isn't in the def chain!");
#endif
  return make_range(def_chain_iterator<T>(MA), def_chain_iterator<T>(UpTo));
}

template <class T>
inline iterator_range<def_chain_iterator<T, true>> optimized_def_chain(T MA) {
  return make_range(def_chain_iterator<T, true>(MA),
                    def_chain_iterator<T, true>(nullptr));
}

} // end namespace llvm

#endif // LLVM_ANALYSIS_MEMORYSSA_H