//===-- MemorySSAUpdater.cpp - Memory SSA Updater--------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------===//
//
// This file implements the MemorySSAUpdater class.
//
//===----------------------------------------------------------------===//
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/IteratedDominanceFrontier.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/FormattedStream.h"
#include <algorithm>

#define DEBUG_TYPE "memoryssa"

using namespace llvm;

// This is the marker algorithm from "Simple and Efficient Construction of
// Static Single Assignment Form"
// The simple, non-marker algorithm places phi nodes at any join
// Here, we place markers, and only place phi nodes if they end up necessary.
// They are only necessary if they break a cycle (IE we recursively visit
// ourselves again), or we discover, while getting the value of the operands,
// that there are two or more definitions needing to be merged.
// This still will leave non-minimal form in the case of irreducible control
// flow, where phi nodes may be in cycles with themselves, but unnecessary.
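//
// For example (an illustrative sketch, not part of the upstream comment): in
// the diamond CFG below, the backwards walk from D visits both B and C,
// discovers two distinct incoming definitions, and only then places a
// MemoryPhi in D; a straight-line chain of blocks never merges definitions,
// so no phi is placed at all.
//
//        A
//       / \
//      B   C
//       \ /
//        D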
MemoryAccess *MemorySSAUpdater::getPreviousDefRecursive(
    BasicBlock *BB,
    DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> &CachedPreviousDef) {
  // First, do a cache lookup. Without this cache, certain CFG structures
  // (like a series of if statements) take exponential time to visit.
  auto Cached = CachedPreviousDef.find(BB);
  if (Cached != CachedPreviousDef.end())
    return Cached->second;

  // If this method is called from an unreachable block, return LoE.
  if (!MSSA->DT->isReachableFromEntry(BB))
    return MSSA->getLiveOnEntryDef();

  if (BasicBlock *Pred = BB->getUniquePredecessor()) {
    VisitedBlocks.insert(BB);
    // Single predecessor case, just recurse, we can only have one definition.
    MemoryAccess *Result = getPreviousDefFromEnd(Pred, CachedPreviousDef);
    CachedPreviousDef.insert({BB, Result});
    return Result;
  }

  if (VisitedBlocks.count(BB)) {
    // We hit our node again, meaning we had a cycle, we must insert a phi
    // node to break it so we have an operand. The only case this will
    // insert useless phis is if we have irreducible control flow.
    MemoryAccess *Result = MSSA->createMemoryPhi(BB);
    CachedPreviousDef.insert({BB, Result});
    return Result;
  }

  if (VisitedBlocks.insert(BB).second) {
    // Mark us visited so we can detect a cycle
    SmallVector<TrackingVH<MemoryAccess>, 8> PhiOps;

    // Recurse to get the values in our predecessors for placement of a
    // potential phi node. This will insert phi nodes if we cycle in order to
    // break the cycle and have an operand.
    bool UniqueIncomingAccess = true;
    MemoryAccess *SingleAccess = nullptr;
    for (auto *Pred : predecessors(BB)) {
      if (MSSA->DT->isReachableFromEntry(Pred)) {
        auto *IncomingAccess = getPreviousDefFromEnd(Pred, CachedPreviousDef);
        if (!SingleAccess)
          SingleAccess = IncomingAccess;
        else if (IncomingAccess != SingleAccess)
          UniqueIncomingAccess = false;
        PhiOps.push_back(IncomingAccess);
      } else
        PhiOps.push_back(MSSA->getLiveOnEntryDef());
    }

    // Now try to simplify the ops to avoid placing a phi.
    // This may return null if we never created a phi yet, that's okay.
    MemoryPhi *Phi = dyn_cast_or_null<MemoryPhi>(MSSA->getMemoryAccess(BB));

    // See if we can avoid the phi by simplifying it.
    auto *Result = tryRemoveTrivialPhi(Phi, PhiOps);
    // If we couldn't simplify, we may have to create a phi.
    if (Result == Phi && UniqueIncomingAccess && SingleAccess) {
      // A concrete Phi only exists if we created an empty one to break a
      // cycle.
      if (Phi) {
        assert(Phi->operands().empty() && "Expected empty Phi");
        Phi->replaceAllUsesWith(SingleAccess);
        removeMemoryAccess(Phi);
      }
      Result = SingleAccess;
    } else if (Result == Phi && !(UniqueIncomingAccess && SingleAccess)) {
      if (!Phi)
        Phi = MSSA->createMemoryPhi(BB);

      // See if the existing phi operands match what we need.
      // Unlike normal SSA, we only allow one phi node per block, so we can't
      // just create a new one.
      if (Phi->getNumOperands() != 0) {
        // FIXME: Figure out whether this is dead code and if so remove it.
        if (!std::equal(Phi->op_begin(), Phi->op_end(), PhiOps.begin())) {
          // These will have been filled in by the recursive read we did above.
          llvm::copy(PhiOps, Phi->op_begin());
          std::copy(pred_begin(BB), pred_end(BB), Phi->block_begin());
        }
      } else {
        unsigned i = 0;
        for (auto *Pred : predecessors(BB))
          Phi->addIncoming(&*PhiOps[i++], Pred);
        InsertedPHIs.push_back(Phi);
      }
      Result = Phi;
    }

    // Set ourselves up for the next variable by resetting visited state.
    VisitedBlocks.erase(BB);
    CachedPreviousDef.insert({BB, Result});
    return Result;
  }
  llvm_unreachable("Should have hit one of the three cases above");
}
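
// Illustrative sketch (hypothetical loop shape): for a natural loop with
// header H, preheader P and latch L, the walk from H recurses into L, which
// reaches H again via the backedge. That trips the VisitedBlocks.count(BB)
// case above and creates an empty MemoryPhi in H to break the cycle; once the
// recursion unwinds, the phi is either filled in with the predecessor values
// or removed as trivial by tryRemoveTrivialPhi.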

// This starts at the memory access, and goes backwards in the block to find
// the previous definition. If a definition is not found in the block of the
// access, it continues globally, creating phi nodes to ensure we have a single
// definition.
MemoryAccess *MemorySSAUpdater::getPreviousDef(MemoryAccess *MA) {
  if (auto *LocalResult = getPreviousDefInBlock(MA))
    return LocalResult;
  DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> CachedPreviousDef;
  return getPreviousDefRecursive(MA->getBlock(), CachedPreviousDef);
}

// This starts at the memory access, and goes backwards in the block to find
// the previous definition. If the definition is not found in the block of the
// access, it returns nullptr.
MemoryAccess *MemorySSAUpdater::getPreviousDefInBlock(MemoryAccess *MA) {
  auto *Defs = MSSA->getWritableBlockDefs(MA->getBlock());

  // It's possible there are no defs, or we got handed the first def to start.
  if (Defs) {
    // If this is a def, we can just use the def iterators.
    if (!isa<MemoryUse>(MA)) {
      auto Iter = MA->getReverseDefsIterator();
      ++Iter;
      if (Iter != Defs->rend())
        return &*Iter;
    } else {
      // Otherwise, have to walk the all access iterator.
      auto End = MSSA->getWritableBlockAccesses(MA->getBlock())->rend();
      for (auto &U : make_range(++MA->getReverseIterator(), End))
        if (!isa<MemoryUse>(U))
          return cast<MemoryAccess>(&U);
      // Note that if MA comes before Defs->begin(), we won't hit a def.
    }
  }
  return nullptr;
}

// This starts at the end of block
MemoryAccess *MemorySSAUpdater::getPreviousDefFromEnd(
    BasicBlock *BB,
    DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> &CachedPreviousDef) {
  auto *Defs = MSSA->getWritableBlockDefs(BB);

  if (Defs) {
    CachedPreviousDef.insert({BB, &*Defs->rbegin()});
    return &*Defs->rbegin();
  }

  return getPreviousDefRecursive(BB, CachedPreviousDef);
}

// Recurse over a set of phi uses to eliminate the trivial ones
MemoryAccess *MemorySSAUpdater::recursePhi(MemoryAccess *Phi) {
  if (!Phi)
    return nullptr;
  TrackingVH<MemoryAccess> Res(Phi);
  SmallVector<TrackingVH<Value>, 8> Uses;
  std::copy(Phi->user_begin(), Phi->user_end(), std::back_inserter(Uses));
  for (auto &U : Uses)
    if (MemoryPhi *UsePhi = dyn_cast<MemoryPhi>(&*U))
      tryRemoveTrivialPhi(UsePhi);
  return Res;
}

// Eliminate trivial phis
// Phis are trivial if they are defined either by themselves, or all the same
// thing.
// IE phi(a, a) or b = phi(a, b) or c = phi(a, a, c)
// We recursively try to remove them.
MemoryAccess *MemorySSAUpdater::tryRemoveTrivialPhi(MemoryPhi *Phi) {
  assert(Phi && "Can only remove concrete Phi.");
  auto OperRange = Phi->operands();
  return tryRemoveTrivialPhi(Phi, OperRange);
}
template <class RangeType>
MemoryAccess *MemorySSAUpdater::tryRemoveTrivialPhi(MemoryPhi *Phi,
                                                    RangeType &Operands) {
  // Bail out on non-opt Phis.
  if (NonOptPhis.count(Phi))
    return Phi;

  // Detect equal or self arguments
  MemoryAccess *Same = nullptr;
  for (auto &Op : Operands) {
    // If the same or self, good so far
    if (Op == Phi || Op == Same)
      continue;
    // not the same, return the phi since it's not eliminatable by us
    if (Same)
      return Phi;
    Same = cast<MemoryAccess>(&*Op);
  }

  // Never found a non-self reference, the phi is undef
  if (Same == nullptr)
    return MSSA->getLiveOnEntryDef();
  if (Phi) {
    Phi->replaceAllUsesWith(Same);
    removeMemoryAccess(Phi);
  }

  // We should only end up recursing in case we replaced something, in which
  // case, we may have made other Phis trivial.
  return recursePhi(Same);
}
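
// Illustrative example (hypothetical access numbering): removing the trivial
//   2 = MemoryPhi(1, 2)   ; every operand is itself or 1
// replaces all its uses with 1. If another phi used it, e.g.
//   3 = MemoryPhi(2, 1)
// it becomes MemoryPhi(1, 1) after the replacement and is removed in turn by
// the recursePhi call above.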

void MemorySSAUpdater::insertUse(MemoryUse *MU, bool RenameUses) {
  InsertedPHIs.clear();
  MU->setDefiningAccess(getPreviousDef(MU));

  // In cases without unreachable blocks, because uses do not create new
  // may-defs, there are only two cases:
  // 1. There was a def already below us, and therefore, we should not have
  // created a phi node because it was already needed for the def.
  //
  // 2. There is no def below us, and therefore, there is no extra renaming
  // work to be done.

  // In cases with unreachable blocks, where the unnecessary Phis were
  // optimized out, adding the Use may re-insert those Phis. Hence, when
  // inserting Uses outside of the MSSA creation process, and new Phis were
  // added, rename all uses if we are asked.

  if (!RenameUses && !InsertedPHIs.empty()) {
    auto *Defs = MSSA->getBlockDefs(MU->getBlock());
    (void)Defs;
    assert((!Defs || (++Defs->begin() == Defs->end())) &&
           "Block may have only a Phi or no defs");
  }

  if (RenameUses && InsertedPHIs.size()) {
    SmallPtrSet<BasicBlock *, 16> Visited;
    BasicBlock *StartBlock = MU->getBlock();

    if (auto *Defs = MSSA->getWritableBlockDefs(StartBlock)) {
      MemoryAccess *FirstDef = &*Defs->begin();
      // Convert to incoming value if it's a memorydef. A phi *is* already an
      // incoming value.
      if (auto *MD = dyn_cast<MemoryDef>(FirstDef))
        FirstDef = MD->getDefiningAccess();

      MSSA->renamePass(MU->getBlock(), FirstDef, Visited);
    }
    // We just inserted a phi into this block, so the incoming value will
    // become the phi anyway, so it does not matter what we pass.
    for (auto &MP : InsertedPHIs)
      if (MemoryPhi *Phi = cast_or_null<MemoryPhi>(MP))
        MSSA->renamePass(Phi->getBlock(), nullptr, Visited);
  }
}
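
// Illustrative usage sketch (client-side; `NewLoad` and `Updater` are
// hypothetical names): a pass that materializes a new load typically creates
// the access unlinked and lets insertUse compute the defining access:
//   MemorySSAUpdater Updater(MSSA);
//   MemoryAccess *NewMA = Updater.createMemoryAccessInBB(
//       NewLoad, /*Definition=*/nullptr, NewLoad->getParent(),
//       MemorySSA::Beginning);
//   Updater.insertUse(cast<MemoryUse>(NewMA), /*RenameUses=*/true);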

// Set every incoming edge {BB, MP->getBlock()} of MemoryPhi MP to NewDef.
static void setMemoryPhiValueForBlock(MemoryPhi *MP, const BasicBlock *BB,
                                      MemoryAccess *NewDef) {
  // Replace any operand whose incoming block is BB with the new defining
  // access.
  int i = MP->getBasicBlockIndex(BB);
  assert(i != -1 && "Should have found the basic block in the phi");
  // We can't just compare i against getNumOperands since one is signed and the
  // other not. So use it to index into the block iterator.
  for (auto BBIter = MP->block_begin() + i; BBIter != MP->block_end();
       ++BBIter) {
    if (*BBIter != BB)
      break;
    MP->setIncomingValue(i, NewDef);
    ++i;
  }
}
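
// Illustrative note: a switch in BB with several cases branching to the same
// successor S produces multiple identical {BB, S} incoming entries in S's
// MemoryPhi. The loop in setMemoryPhiValueForBlock above rewrites all of
// them, which is why it keeps scanning for as long as *BBIter == BB.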

// A brief description of the algorithm:
// First, we compute what should define the new def, using the SSA
// construction algorithm.
// Then, we update the defs below us (and any new phi nodes) in the graph to
// point to the correct new defs, to ensure we only have one variable, and no
// disconnected stores.
void MemorySSAUpdater::insertDef(MemoryDef *MD, bool RenameUses) {
  InsertedPHIs.clear();

  // See if we had a local def, and if not, go hunting.
  MemoryAccess *DefBefore = getPreviousDef(MD);
  bool DefBeforeSameBlock = false;
  if (DefBefore->getBlock() == MD->getBlock() &&
      !(isa<MemoryPhi>(DefBefore) &&
        std::find(InsertedPHIs.begin(), InsertedPHIs.end(), DefBefore) !=
            InsertedPHIs.end()))
    DefBeforeSameBlock = true;

  // There is a def before us, which means we can replace any store/phi uses
  // of that thing with us, since we are in the way of whatever was there
  // before.
  // We now define that def's memorydefs and memoryphis
  if (DefBeforeSameBlock) {
    DefBefore->replaceUsesWithIf(MD, [MD](Use &U) {
      // Leave the MemoryUses alone.
      // Also make sure we skip ourselves to avoid self references.
      User *Usr = U.getUser();
      return !isa<MemoryUse>(Usr) && Usr != MD;
      // Defs are automatically unoptimized when the user is set to MD below,
      // because the isOptimized() call will fail to find the same ID.
    });
  }

  // and that def is now our defining access.
  MD->setDefiningAccess(DefBefore);

  SmallVector<WeakVH, 8> FixupList(InsertedPHIs.begin(), InsertedPHIs.end());

  // Remember the index where we may insert new phis.
  unsigned NewPhiIndex = InsertedPHIs.size();
  if (!DefBeforeSameBlock) {
    // If there was a local def before us, we must have the same effect it
    // did. Because every may-def is the same, any phis/etc we would create,
    // it would also have created. If there was no local def before us, we
    // performed a global update, and have to search all successors and make
    // sure we update the first def in each of them (following all paths until
    // we hit the first def along each path). This may also insert phi nodes.
    // TODO: There are other cases we can skip this work, such as when we have
    // a single successor, and only used a straight line of single pred blocks
    // backwards to find the def. To make that work, we'd have to track whether
    // getDefRecursive only ever used the single predecessor case. These types
    // of paths also only exist in between CFG simplifications.

    // If this is the first def in the block and this insert is in an arbitrary
    // place, compute IDF and place phis.
    SmallPtrSet<BasicBlock *, 2> DefiningBlocks;

    // If this is the last Def in the block, also compute IDF based on MD,
    // since this may be a new Def added, and we may need additional Phis.
    auto Iter = MD->getDefsIterator();
    ++Iter;
    auto IterEnd = MSSA->getBlockDefs(MD->getBlock())->end();
    if (Iter == IterEnd)
      DefiningBlocks.insert(MD->getBlock());

    for (const auto &VH : InsertedPHIs)
      if (const auto *RealPHI = cast_or_null<MemoryPhi>(VH))
        DefiningBlocks.insert(RealPHI->getBlock());
    ForwardIDFCalculator IDFs(*MSSA->DT);
    SmallVector<BasicBlock *, 32> IDFBlocks;
    IDFs.setDefiningBlocks(DefiningBlocks);
    IDFs.calculate(IDFBlocks);
    SmallVector<AssertingVH<MemoryPhi>, 4> NewInsertedPHIs;
    for (auto *BBIDF : IDFBlocks) {
      auto *MPhi = MSSA->getMemoryAccess(BBIDF);
      if (!MPhi) {
        MPhi = MSSA->createMemoryPhi(BBIDF);
        NewInsertedPHIs.push_back(MPhi);
      }
      // Add the phis created into the IDF blocks to NonOptPhis, so they are
      // not optimized out as trivial by the call to getPreviousDefFromEnd
      // below. Once they are complete, all these Phis are added to the
      // FixupList, and removed from NonOptPhis inside fixupDefs(). Existing
      // Phis in IDF may need fixing as well, and potentially be trivial
      // before this insertion, hence add all IDF Phis. See PR43044.
      NonOptPhis.insert(MPhi);
    }

    for (auto &MPhi : NewInsertedPHIs) {
      auto *BBIDF = MPhi->getBlock();
      for (auto *Pred : predecessors(BBIDF)) {
        DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> CachedPreviousDef;
        MPhi->addIncoming(getPreviousDefFromEnd(Pred, CachedPreviousDef),
                          Pred);
      }
    }

    // Re-take the index where we're adding the new phis, because the above
    // call to getPreviousDefFromEnd, may have inserted into InsertedPHIs.
    NewPhiIndex = InsertedPHIs.size();
    for (auto &MPhi : NewInsertedPHIs) {
      InsertedPHIs.push_back(&*MPhi);
      FixupList.push_back(&*MPhi);
    }

    FixupList.push_back(MD);
  }

  // Remember the index where we stopped inserting new phis above, since the
  // fixupDefs call in the loop below may insert more, that are already
  // minimal.
  unsigned NewPhiIndexEnd = InsertedPHIs.size();

  while (!FixupList.empty()) {
    unsigned StartingPHISize = InsertedPHIs.size();
    fixupDefs(FixupList);
    FixupList.clear();
    // Put any new phis on the fixup list, and process them
    FixupList.append(InsertedPHIs.begin() + StartingPHISize,
                     InsertedPHIs.end());
  }

  // Optimize potentially non-minimal phis added in this method.
  unsigned NewPhiSize = NewPhiIndexEnd - NewPhiIndex;
  if (NewPhiSize)
    tryRemoveTrivialPhis(
        ArrayRef<WeakVH>(&InsertedPHIs[NewPhiIndex], NewPhiSize));

  // Now that all fixups are done, rename all uses if we are asked.
  if (RenameUses) {
    SmallPtrSet<BasicBlock *, 16> Visited;
    BasicBlock *StartBlock = MD->getBlock();
    // We are guaranteed there is a def in the block, because we just got it
    // handed to us in this function.
    MemoryAccess *FirstDef = &*MSSA->getWritableBlockDefs(StartBlock)->begin();
    // Convert to incoming value if it's a memorydef. A phi *is* already an
    // incoming value.
    if (auto *MD = dyn_cast<MemoryDef>(FirstDef))
      FirstDef = MD->getDefiningAccess();

    MSSA->renamePass(MD->getBlock(), FirstDef, Visited);
    // We just inserted a phi into this block, so the incoming value will
    // become the phi anyway, so it does not matter what we pass.
    for (auto &MP : InsertedPHIs) {
      MemoryPhi *Phi = dyn_cast_or_null<MemoryPhi>(MP);
      if (Phi)
        MSSA->renamePass(Phi->getBlock(), nullptr, Visited);
    }
  }
}
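
// Illustrative usage sketch (client-side; `NewStore` and `Updater` are
// hypothetical names): the def-insertion path mirrors the use-insertion one,
// pairing access creation with insertDef so downstream defs and phis get
// rewired:
//   MemoryAccess *NewMA = Updater.createMemoryAccessInBB(
//       NewStore, /*Definition=*/nullptr, NewStore->getParent(),
//       MemorySSA::Beginning);
//   Updater.insertDef(cast<MemoryDef>(NewMA), /*RenameUses=*/true);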

void MemorySSAUpdater::fixupDefs(const SmallVectorImpl<WeakVH> &Vars) {
  SmallPtrSet<const BasicBlock *, 8> Seen;
  SmallVector<const BasicBlock *, 16> Worklist;
  for (auto &Var : Vars) {
    MemoryAccess *NewDef = dyn_cast_or_null<MemoryAccess>(Var);
    if (!NewDef)
      continue;
    // First, see if there is a local def after the operand.
    auto *Defs = MSSA->getWritableBlockDefs(NewDef->getBlock());
    auto DefIter = NewDef->getDefsIterator();

    // The temporary Phi is being fixed, unmark it for not to optimize.
    if (MemoryPhi *Phi = dyn_cast<MemoryPhi>(NewDef))
      NonOptPhis.erase(Phi);

    // If there is a local def after us, we only have to rename that.
    if (++DefIter != Defs->end()) {
      cast<MemoryDef>(DefIter)->setDefiningAccess(NewDef);
      continue;
    }

    // Otherwise, we need to search down through the CFG.
    // For each of our successors, handle it directly if there is a phi, or
    // place on the fixup worklist.
    for (const auto *S : successors(NewDef->getBlock())) {
      if (auto *MP = MSSA->getMemoryAccess(S))
        setMemoryPhiValueForBlock(MP, NewDef->getBlock(), NewDef);
      else
        Worklist.push_back(S);
    }

    while (!Worklist.empty()) {
      const BasicBlock *FixupBlock = Worklist.back();
      Worklist.pop_back();

      // Get the first def in the block that isn't a phi node.
      if (auto *Defs = MSSA->getWritableBlockDefs(FixupBlock)) {
        auto *FirstDef = &*Defs->begin();
        // The loop above and below should have taken care of phi nodes
        assert(!isa<MemoryPhi>(FirstDef) &&
               "Should have already handled phi nodes!");
        // We are now this def's defining access, make sure we actually
        // dominate it
        assert(MSSA->dominates(NewDef, FirstDef) &&
               "Should have dominated the new access");

        // This may insert new phi nodes, because we are not guaranteed the
        // block we are processing has a single pred, and depending where the
        // store was inserted, it may require phi nodes below it.
        cast<MemoryDef>(FirstDef)->setDefiningAccess(getPreviousDef(FirstDef));
      } else {
        // We didn't find a def, so we must continue.
        for (const auto *S : successors(FixupBlock)) {
          // If there is a phi node, handle it.
          // Otherwise, put the block on the worklist
          if (auto *MP = MSSA->getMemoryAccess(S))
            setMemoryPhiValueForBlock(MP, FixupBlock, NewDef);
          else {
            // If we cycle, we should have ended up at a phi node that we
            // already processed. FIXME: Double check this
            if (!Seen.insert(S).second)
              continue;

            Worklist.push_back(S);
          }
        }
      }
    }
  }
}

void MemorySSAUpdater::removeEdge(BasicBlock *From, BasicBlock *To) {
  if (MemoryPhi *MPhi = MSSA->getMemoryAccess(To)) {
    MPhi->unorderedDeleteIncomingBlock(From);
    tryRemoveTrivialPhi(MPhi);
  }
}

void MemorySSAUpdater::removeDuplicatePhiEdgesBetween(const BasicBlock *From,
                                                      const BasicBlock *To) {
  if (MemoryPhi *MPhi = MSSA->getMemoryAccess(To)) {
    bool Found = false;
    MPhi->unorderedDeleteIncomingIf([&](const MemoryAccess *, BasicBlock *B) {
      if (From != B)
        return false;
      if (Found)
        return true;
      Found = true;
      return false;
    });
    tryRemoveTrivialPhi(MPhi);
  }
}

static MemoryAccess *getNewDefiningAccessForClone(MemoryAccess *MA,
                                                  const ValueToValueMapTy &VMap,
                                                  PhiToDefMap &MPhiMap,
                                                  bool CloneWasSimplified,
                                                  MemorySSA *MSSA) {
  MemoryAccess *InsnDefining = MA;
  if (MemoryDef *DefMUD = dyn_cast<MemoryDef>(InsnDefining)) {
    if (!MSSA->isLiveOnEntryDef(DefMUD)) {
      Instruction *DefMUDI = DefMUD->getMemoryInst();
      assert(DefMUDI && "Found MemoryUseOrDef with no Instruction.");
      if (Instruction *NewDefMUDI =
              cast_or_null<Instruction>(VMap.lookup(DefMUDI))) {
        InsnDefining = MSSA->getMemoryAccess(NewDefMUDI);
        if (!CloneWasSimplified)
          assert(InsnDefining && "Defining instruction cannot be nullptr.");
        else if (!InsnDefining || isa<MemoryUse>(InsnDefining)) {
          // The clone was simplified, it's no longer a MemoryDef, look up.
          auto DefIt = DefMUD->getDefsIterator();
          // Since simplified clones only occur in single block cloning, a
          // previous definition must exist, otherwise NewDefMUDI would not
          // have been found in VMap.
          assert(DefIt != MSSA->getBlockDefs(DefMUD->getBlock())->begin() &&
                 "Previous def must exist");
          InsnDefining = getNewDefiningAccessForClone(
              &*(--DefIt), VMap, MPhiMap, CloneWasSimplified, MSSA);
        }
      }
    }
  } else {
    MemoryPhi *DefPhi = cast<MemoryPhi>(InsnDefining);
    if (MemoryAccess *NewDefPhi = MPhiMap.lookup(DefPhi))
      InsnDefining = NewDefPhi;
  }
  assert(InsnDefining && "Defining instruction cannot be nullptr.");
  return InsnDefining;
}

void MemorySSAUpdater::cloneUsesAndDefs(BasicBlock *BB, BasicBlock *NewBB,
                                        const ValueToValueMapTy &VMap,
                                        PhiToDefMap &MPhiMap,
                                        bool CloneWasSimplified) {
  const MemorySSA::AccessList *Acc = MSSA->getBlockAccesses(BB);
  if (!Acc)
    return;
  for (const MemoryAccess &MA : *Acc) {
    if (const MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(&MA)) {
      Instruction *Insn = MUD->getMemoryInst();
      // Entry does not exist if the clone of the block did not clone all
      // instructions. This occurs in LoopRotate when cloning instructions
      // from the old header to the old preheader. The cloned instruction may
      // also be a simplified Value, not an Instruction (see LoopRotate).
      // Also in LoopRotate, even when it's an instruction, due to it being
      // simplified, it may be a Use rather than a Def, so we cannot use MUD as
      // template. Calls coming from updateForClonedBlockIntoPred, ensure this.
      if (Instruction *NewInsn =
              dyn_cast_or_null<Instruction>(VMap.lookup(Insn))) {
        MemoryAccess *NewUseOrDef = MSSA->createDefinedAccess(
            NewInsn,
            getNewDefiningAccessForClone(MUD->getDefiningAccess(), VMap,
                                         MPhiMap, CloneWasSimplified, MSSA),
            /*Template=*/CloneWasSimplified ? nullptr : MUD,
            /*CreationMustSucceed=*/CloneWasSimplified ? false : true);
        if (NewUseOrDef)
          MSSA->insertIntoListsForBlock(NewUseOrDef, NewBB, MemorySSA::End);
      }
    }
  }
}

void MemorySSAUpdater::updatePhisWhenInsertingUniqueBackedgeBlock(
    BasicBlock *Header, BasicBlock *Preheader, BasicBlock *BEBlock) {
  auto *MPhi = MSSA->getMemoryAccess(Header);
  if (!MPhi)
    return;

  // Create phi node in the backedge block and populate it with the same
  // incoming values as MPhi. Skip incoming values coming from Preheader.
  auto *NewMPhi = MSSA->createMemoryPhi(BEBlock);
  bool HasUniqueIncomingValue = true;
  MemoryAccess *UniqueValue = nullptr;
  for (unsigned I = 0, E = MPhi->getNumIncomingValues(); I != E; ++I) {
    BasicBlock *IBB = MPhi->getIncomingBlock(I);
    MemoryAccess *IV = MPhi->getIncomingValue(I);
    if (IBB != Preheader) {
      NewMPhi->addIncoming(IV, IBB);
      if (HasUniqueIncomingValue) {
        if (!UniqueValue)
          UniqueValue = IV;
        else if (UniqueValue != IV)
          HasUniqueIncomingValue = false;
      }
    }
  }

  // Update incoming edges into MPhi. Remove all but the incoming edge from
  // Preheader. Add an edge from NewMPhi
  auto *AccFromPreheader = MPhi->getIncomingValueForBlock(Preheader);
  MPhi->setIncomingValue(0, AccFromPreheader);
  MPhi->setIncomingBlock(0, Preheader);
  for (unsigned I = MPhi->getNumIncomingValues() - 1; I >= 1; --I)
    MPhi->unorderedDeleteIncoming(I);
  MPhi->addIncoming(NewMPhi, BEBlock);

  // If NewMPhi is a trivial phi, remove it. Its use in the header MPhi will be
  // replaced with the unique value.
  tryRemoveTrivialPhi(NewMPhi);
}
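
// Illustrative sketch (hypothetical accesses a/b/c): splitting latches L1 and
// L2 out into a unique backedge block BE rewrites the header phi as follows:
//   Before: H:  phi = MemoryPhi([Preheader, a], [L1, b], [L2, c])
//   After:  BE: bephi = MemoryPhi([L1, b], [L2, c])
//           H:  phi = MemoryPhi([Preheader, a], [BE, bephi])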

void MemorySSAUpdater::updateForClonedLoop(const LoopBlocksRPO &LoopBlocks,
                                           ArrayRef<BasicBlock *> ExitBlocks,
                                           const ValueToValueMapTy &VMap,
                                           bool IgnoreIncomingWithNoClones) {
  PhiToDefMap MPhiMap;

  auto FixPhiIncomingValues = [&](MemoryPhi *Phi, MemoryPhi *NewPhi) {
    assert(Phi && NewPhi && "Invalid Phi nodes.");
    BasicBlock *NewPhiBB = NewPhi->getBlock();
    SmallPtrSet<BasicBlock *, 4> NewPhiBBPreds(pred_begin(NewPhiBB),
                                               pred_end(NewPhiBB));
    for (unsigned It = 0, E = Phi->getNumIncomingValues(); It < E; ++It) {
      MemoryAccess *IncomingAccess = Phi->getIncomingValue(It);
      BasicBlock *IncBB = Phi->getIncomingBlock(It);

      if (BasicBlock *NewIncBB = cast_or_null<BasicBlock>(VMap.lookup(IncBB)))
        IncBB = NewIncBB;
      else if (IgnoreIncomingWithNoClones)
        continue;

      // Now we have IncBB, and will need to add incoming from it to NewPhi.

      // If IncBB is not a predecessor of NewPhiBB, then do not add it.
      // NewPhiBB was cloned without that edge.
      if (!NewPhiBBPreds.count(IncBB))
        continue;

      // Determine incoming value and add it as incoming from IncBB.
      if (MemoryUseOrDef *IncMUD = dyn_cast<MemoryUseOrDef>(IncomingAccess)) {
        if (!MSSA->isLiveOnEntryDef(IncMUD)) {
          Instruction *IncI = IncMUD->getMemoryInst();
          assert(IncI && "Found MemoryUseOrDef with no Instruction.");
          if (Instruction *NewIncI =
                  cast_or_null<Instruction>(VMap.lookup(IncI))) {
            IncMUD = MSSA->getMemoryAccess(NewIncI);
            assert(IncMUD &&
                   "MemoryUseOrDef cannot be null, all preds processed.");
          }
        }
        NewPhi->addIncoming(IncMUD, IncBB);
      } else {
        MemoryPhi *IncPhi = cast<MemoryPhi>(IncomingAccess);
        if (MemoryAccess *NewDefPhi = MPhiMap.lookup(IncPhi))
          NewPhi->addIncoming(NewDefPhi, IncBB);
        else
          NewPhi->addIncoming(IncPhi, IncBB);
      }
    }
  };

  auto ProcessBlock = [&](BasicBlock *BB) {
    BasicBlock *NewBlock = cast_or_null<BasicBlock>(VMap.lookup(BB));
    if (!NewBlock)
      return;

    assert(!MSSA->getWritableBlockAccesses(NewBlock) &&
           "Cloned block should have no accesses");

    // Add MemoryPhi.
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(BB)) {
      MemoryPhi *NewPhi = MSSA->createMemoryPhi(NewBlock);
      MPhiMap[MPhi] = NewPhi;
    }
    // Update Uses and Defs.
    cloneUsesAndDefs(BB, NewBlock, VMap, MPhiMap);
  };

  for (auto BB : llvm::concat<BasicBlock *const>(LoopBlocks, ExitBlocks))
    ProcessBlock(BB);

  for (auto BB : llvm::concat<BasicBlock *const>(LoopBlocks, ExitBlocks))
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(BB))
      if (MemoryAccess *NewPhi = MPhiMap.lookup(MPhi))
        FixPhiIncomingValues(MPhi, cast<MemoryPhi>(NewPhi));
}

void MemorySSAUpdater::updateForClonedBlockIntoPred(
    BasicBlock *BB, BasicBlock *P1, const ValueToValueMapTy &VM) {
  // All defs/phis from outside BB that are used in BB, are valid uses in P1.
  // Since those defs/phis must have dominated BB, and also dominate P1.
  // Defs from BB being used in BB will be replaced with the cloned defs from
  // VM. The uses of BB's Phi (if it exists) in BB will be replaced by the
  // incoming def into the Phi from P1.
  // Instructions cloned into the predecessor are in practice sometimes
  // simplified, so disable the use of the template, and create an access from
  // scratch.
  PhiToDefMap MPhiMap;
  if (MemoryPhi *MPhi = MSSA->getMemoryAccess(BB))
    MPhiMap[MPhi] = MPhi->getIncomingValueForBlock(P1);
  cloneUsesAndDefs(BB, P1, VM, MPhiMap, /*CloneWasSimplified=*/true);
}

template <typename Iter>
void MemorySSAUpdater::privateUpdateExitBlocksForClonedLoop(
    ArrayRef<BasicBlock *> ExitBlocks, Iter ValuesBegin, Iter ValuesEnd,
    DominatorTree &DT) {
  SmallVector<CFGUpdate, 4> Updates;
  // Update/insert phis in all successors of exit blocks.
  for (auto *Exit : ExitBlocks)
    for (const ValueToValueMapTy *VMap : make_range(ValuesBegin, ValuesEnd))
      if (BasicBlock *NewExit = cast_or_null<BasicBlock>(VMap->lookup(Exit))) {
        BasicBlock *ExitSucc = NewExit->getTerminator()->getSuccessor(0);
        Updates.push_back({DT.Insert, NewExit, ExitSucc});
      }
  applyInsertUpdates(Updates, DT);
}

void MemorySSAUpdater::updateExitBlocksForClonedLoop(
    ArrayRef<BasicBlock *> ExitBlocks, const ValueToValueMapTy &VMap,
    DominatorTree &DT) {
  const ValueToValueMapTy *const Arr[] = {&VMap};
  privateUpdateExitBlocksForClonedLoop(ExitBlocks, std::begin(Arr),
                                       std::end(Arr), DT);
}

void MemorySSAUpdater::updateExitBlocksForClonedLoop(
    ArrayRef<BasicBlock *> ExitBlocks,
    ArrayRef<std::unique_ptr<ValueToValueMapTy>> VMaps, DominatorTree &DT) {
  auto GetPtr = [&](const std::unique_ptr<ValueToValueMapTy> &I) {
    return I.get();
  };
  using MappedIteratorType =
      mapped_iterator<const std::unique_ptr<ValueToValueMapTy> *,
                      decltype(GetPtr)>;
  auto MapBegin = MappedIteratorType(VMaps.begin(), GetPtr);
  auto MapEnd = MappedIteratorType(VMaps.end(), GetPtr);
  privateUpdateExitBlocksForClonedLoop(ExitBlocks, MapBegin, MapEnd, DT);
}

void MemorySSAUpdater::applyUpdates(ArrayRef<CFGUpdate> Updates,
                                    DominatorTree &DT) {
  SmallVector<CFGUpdate, 4> RevDeleteUpdates;
  SmallVector<CFGUpdate, 4> InsertUpdates;
  for (auto &Update : Updates) {
    if (Update.getKind() == DT.Insert)
      InsertUpdates.push_back({DT.Insert, Update.getFrom(), Update.getTo()});
    else
      RevDeleteUpdates.push_back({DT.Insert, Update.getFrom(), Update.getTo()});
  }

  if (!RevDeleteUpdates.empty()) {
    // Update for inserted edges: use newDT and snapshot CFG as if deletes had
    // not occurred.
    // FIXME: This creates a new DT, so it's more expensive to do mix
    // delete/inserts vs just inserts. We can do an incremental update on the
    // DT to revert deletes, then re-delete the edges. Teaching DT to do this,
    // is part of a pending cleanup.
    DominatorTree NewDT(DT, RevDeleteUpdates);
    GraphDiff<BasicBlock *> GD(RevDeleteUpdates);
    applyInsertUpdates(InsertUpdates, NewDT, &GD);
  } else {
    GraphDiff<BasicBlock *> GD;
    applyInsertUpdates(InsertUpdates, DT, &GD);
  }

  // Update for deleted edges
  for (auto &Update : RevDeleteUpdates)
    removeEdge(Update.getFrom(), Update.getTo());
}
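
// Illustrative usage sketch (hypothetical blocks; `Updater` is an updater
// instance): a transform redirecting an edge batches the delete and the
// insert into one call so MemorySSA sees a consistent snapshot:
//   SmallVector<CFGUpdate, 2> Updates;
//   Updates.push_back({DominatorTree::Delete, Pred, OldSucc});
//   Updates.push_back({DominatorTree::Insert, Pred, NewSucc});
//   Updater.applyUpdates(Updates, DT);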

void MemorySSAUpdater::applyInsertUpdates(ArrayRef<CFGUpdate> Updates,
                                          DominatorTree &DT) {
  GraphDiff<BasicBlock *> GD;
  applyInsertUpdates(Updates, DT, &GD);
}

void MemorySSAUpdater::applyInsertUpdates(ArrayRef<CFGUpdate> Updates,
                                          DominatorTree &DT,
                                          const GraphDiff<BasicBlock *> *GD) {
  // Get recursive last Def, assuming well formed MSSA and updated DT.
  auto GetLastDef = [&](BasicBlock *BB) -> MemoryAccess * {
    while (true) {
      MemorySSA::DefsList *Defs = MSSA->getWritableBlockDefs(BB);
      // Return last Def or Phi in BB, if it exists.
      if (Defs)
        return &*(--Defs->end());

      // Check number of predecessors, we only care if there's more than one.
      unsigned Count = 0;
      BasicBlock *Pred = nullptr;
      for (auto &Pair : children<GraphDiffInvBBPair>({GD, BB})) {
        Pred = Pair.second;
        Count++;
        if (Count == 2)
          break;
      }

      // If BB has multiple predecessors, get last definition from IDom.
      if (Count != 1) {
        // [SimpleLoopUnswitch] If BB is a dead block, about to be deleted, its
        // DT is invalidated. Return LoE as its last def. This will be added to
        // MemoryPhi node, and later deleted when the block is deleted.
        if (!DT.getNode(BB))
          return MSSA->getLiveOnEntryDef();
        if (auto *IDom = DT.getNode(BB)->getIDom())
          if (IDom->getBlock() != BB) {
            BB = IDom->getBlock();
            continue;
          }
        return MSSA->getLiveOnEntryDef();
      }

      // Single predecessor, BB cannot be dead. GetLastDef of Pred.
      assert(Count == 1 && Pred && "Single predecessor expected.");
      // BB can be unreachable though, return LoE if that is the case.
      if (!DT.getNode(BB))
        return MSSA->getLiveOnEntryDef();
      BB = Pred;
    }
    llvm_unreachable("Unable to get last definition.");
  };

  // Get nearest IDom given a set of blocks.
  // TODO: this can be optimized by starting the search at the node with the
  // lowest level (highest in the tree).
  auto FindNearestCommonDominator =
      [&](const SmallSetVector<BasicBlock *, 2> &BBSet) -> BasicBlock * {
    BasicBlock *PrevIDom = *BBSet.begin();
    for (auto *BB : BBSet)
      PrevIDom = DT.findNearestCommonDominator(PrevIDom, BB);
    return PrevIDom;
  };

  // Get all blocks that dominate PrevIDom, stop when reaching CurrIDom. Do not
  // include CurrIDom.
  auto GetNoLongerDomBlocks =
      [&](BasicBlock *PrevIDom, BasicBlock *CurrIDom,
          SmallVectorImpl<BasicBlock *> &BlocksPrevDom) {
        if (PrevIDom == CurrIDom)
          return;
        BlocksPrevDom.push_back(PrevIDom);
        BasicBlock *NextIDom = PrevIDom;
        while (BasicBlock *UpIDom =
                   DT.getNode(NextIDom)->getIDom()->getBlock()) {
          if (UpIDom == CurrIDom)
            break;
          BlocksPrevDom.push_back(UpIDom);
          NextIDom = UpIDom;
        }
      };

  // Map a BB to its predecessors: added + previously existing. To get a
  // deterministic order, store predecessors as SetVectors. The order in each
  // will be defined by the order in Updates (fixed) and the order given by
  // children<> (also fixed). Since we further iterate over these ordered sets,
  // we lose the information of multiple edges possibly existing between two
  // blocks, so we'll keep an EdgeCount map for that.
  // An alternate implementation could keep unordered sets for the
  // predecessors, traverse either Updates or children<> each time to get the
  // deterministic order, and drop the usage of EdgeCount. This alternate
  // approach would still require querying the maps for each predecessor, and
  // the children<> call has additional computation inside for creating the
  // snapshot-graph predecessors. As such, we favor using a little additional
  // storage and less compute time. This decision can be revisited if we find
  // the alternative more favorable.

  struct PredInfo {
    SmallSetVector<BasicBlock *, 2> Added;
    SmallSetVector<BasicBlock *, 2> Prev;
  };
  SmallDenseMap<BasicBlock *, PredInfo> PredMap;

  for (auto &Edge : Updates) {
    BasicBlock *BB = Edge.getTo();
    auto &AddedBlockSet = PredMap[BB].Added;
    AddedBlockSet.insert(Edge.getFrom());
  }

  // Store all existing predecessor for each BB, at least one must exist.
  SmallDenseMap<std::pair<BasicBlock *, BasicBlock *>, int> EdgeCountMap;
  SmallPtrSet<BasicBlock *, 2> NewBlocks;
  for (auto &BBPredPair : PredMap) {
    auto *BB = BBPredPair.first;
    const auto &AddedBlockSet = BBPredPair.second.Added;
    auto &PrevBlockSet = BBPredPair.second.Prev;
    for (auto &Pair : children<GraphDiffInvBBPair>({GD, BB})) {
      BasicBlock *Pi = Pair.second;
      if (!AddedBlockSet.count(Pi))
        PrevBlockSet.insert(Pi);
      EdgeCountMap[{Pi, BB}]++;
    }

    if (PrevBlockSet.empty()) {
      assert(pred_size(BB) == AddedBlockSet.size() && "Duplicate edges added.");
      LLVM_DEBUG(
          dbgs()
          << "Adding a predecessor to a block with no predecessors. "
             "This must be an edge added to a new, likely cloned, block. "
             "Its memory accesses must be already correct, assuming completed "
             "via the updateExitBlocksForClonedLoop API. "
             "Assert a single such edge is added so no phi addition or "
             "additional processing is required.\n");
      assert(AddedBlockSet.size() == 1 &&
             "Can only handle adding one predecessor to a new block.");
      // Need to remove new blocks from PredMap. Remove below to not invalidate
      // the iterator here.
      NewBlocks.insert(BB);
    }
  }
  // Nothing to process for new/cloned blocks.
  for (auto *BB : NewBlocks)
    PredMap.erase(BB);

  SmallVector<BasicBlock *, 16> BlocksWithDefsToReplace;
  SmallVector<WeakVH, 8> InsertedPhis;

  // First create MemoryPhis in all blocks that don't have one. Create in the
  // order found in Updates, not in PredMap, to get deterministic numbering.
  for (auto &Edge : Updates) {
    BasicBlock *BB = Edge.getTo();
    if (PredMap.count(BB) && !MSSA->getMemoryAccess(BB))
      InsertedPhis.push_back(MSSA->createMemoryPhi(BB));
  }

  // Now we'll fill in the MemoryPhis with the right incoming values.
  for (auto &BBPredPair : PredMap) {
    auto *BB = BBPredPair.first;
    const auto &PrevBlockSet = BBPredPair.second.Prev;
    const auto &AddedBlockSet = BBPredPair.second.Added;
    assert(!PrevBlockSet.empty() &&
           "At least one previous predecessor must exist.");

    // TODO: if this becomes a bottleneck, we can save on GetLastDef calls by
    // keeping this map before the loop. We can reuse already populated entries
    // if an edge is added from the same predecessor to two different blocks,
    // and this does happen in rotate. Note that the map needs to be updated
    // when deleting non-necessary phis below, if the phi is in the map by
    // replacing the value with DefP1.
    SmallDenseMap<BasicBlock *, MemoryAccess *> LastDefAddedPred;
    for (auto *AddedPred : AddedBlockSet) {
      auto *DefPn = GetLastDef(AddedPred);
      assert(DefPn != nullptr && "Unable to find last definition.");
      LastDefAddedPred[AddedPred] = DefPn;
    }

    MemoryPhi *NewPhi = MSSA->getMemoryAccess(BB);
    // If Phi is not empty, add an incoming edge from each added pred. Must
    // still compute blocks with defs to replace for this block below.
    if (NewPhi->getNumOperands()) {
      for (auto *Pred : AddedBlockSet) {
        auto *LastDefForPred = LastDefAddedPred[Pred];
        for (int I = 0, E = EdgeCountMap[{Pred, BB}]; I < E; ++I)
          NewPhi->addIncoming(LastDefForPred, Pred);
      }
    } else {
      // Pick any existing predecessor and get its definition. All other
      // existing predecessors should have the same one, since no phi existed.
      auto *P1 = *PrevBlockSet.begin();
      MemoryAccess *DefP1 = GetLastDef(P1);

      // Check DefP1 against all Defs in LastDefPredPair. If all the same,
      // there is no need for a phi.
      bool InsertPhi = false;
      for (auto LastDefPredPair : LastDefAddedPred)
        if (DefP1 != LastDefPredPair.second) {
          InsertPhi = true;
          break;
        }
      if (!InsertPhi) {
        // Since NewPhi may be used in other newly added Phis, replace all uses
        // of NewPhi with the definition coming from all predecessors (DefP1),
        // before deleting it.
        NewPhi->replaceAllUsesWith(DefP1);
        removeMemoryAccess(NewPhi);
        continue;
      }

      // Update Phi with new values for new predecessors and old value for all
      // other predecessors. Since AddedBlockSet and PrevBlockSet are ordered
      // sets, the order of entries in NewPhi is deterministic.
      for (auto *Pred : AddedBlockSet) {
        auto *LastDefForPred = LastDefAddedPred[Pred];
        for (int I = 0, E = EdgeCountMap[{Pred, BB}]; I < E; ++I)
          NewPhi->addIncoming(LastDefForPred, Pred);
      }
      for (auto *Pred : PrevBlockSet)
        for (int I = 0, E = EdgeCountMap[{Pred, BB}]; I < E; ++I)
          NewPhi->addIncoming(DefP1, Pred);
    }

    // Get all blocks that used to dominate BB and no longer do after adding
    // AddedBlockSet, where PrevBlockSet are the previously known predecessors.
    assert(DT.getNode(BB)->getIDom() && "BB does not have valid idom");
    BasicBlock *PrevIDom = FindNearestCommonDominator(PrevBlockSet);
    assert(PrevIDom && "Previous IDom should exists");
    BasicBlock *NewIDom = DT.getNode(BB)->getIDom()->getBlock();
    assert(NewIDom && "BB should have a new valid idom");
    assert(DT.dominates(NewIDom, PrevIDom) &&
           "New idom should dominate old idom");
    GetNoLongerDomBlocks(PrevIDom, NewIDom, BlocksWithDefsToReplace);
  }

  tryRemoveTrivialPhis(InsertedPhis);
  // Create the set of blocks that now have a definition. We'll use this to
  // compute IDF and add Phis there next.
  SmallVector<BasicBlock *, 8> BlocksToProcess;
  for (auto &VH : InsertedPhis)
    if (auto *MPhi = cast_or_null<MemoryPhi>(VH))
      BlocksToProcess.push_back(MPhi->getBlock());

  // Compute IDF and add Phis in all IDF blocks that do not have one.
  SmallVector<BasicBlock *, 32> IDFBlocks;
  if (!BlocksToProcess.empty()) {
    ForwardIDFCalculator IDFs(DT, GD);
    SmallPtrSet<BasicBlock *, 16> DefiningBlocks(BlocksToProcess.begin(),
                                                 BlocksToProcess.end());
    IDFs.setDefiningBlocks(DefiningBlocks);
    IDFs.calculate(IDFBlocks);

    SmallSetVector<MemoryPhi *, 4> PhisToFill;
    // First create all needed Phis.
    for (auto *BBIDF : IDFBlocks)
      if (!MSSA->getMemoryAccess(BBIDF)) {
        auto *IDFPhi = MSSA->createMemoryPhi(BBIDF);
        InsertedPhis.push_back(IDFPhi);
        PhisToFill.insert(IDFPhi);
      }
    // Then update or insert their correct incoming values.
    for (auto *BBIDF : IDFBlocks) {
      auto *IDFPhi = MSSA->getMemoryAccess(BBIDF);
      assert(IDFPhi && "Phi must exist");
      if (!PhisToFill.count(IDFPhi)) {
        // Update existing Phi.
        // FIXME: some updates may be redundant, try to optimize and skip some.
        for (unsigned I = 0, E = IDFPhi->getNumIncomingValues(); I < E; ++I)
          IDFPhi->setIncomingValue(I, GetLastDef(IDFPhi->getIncomingBlock(I)));
      } else {
        for (auto &Pair : children<GraphDiffInvBBPair>({GD, BBIDF})) {
          BasicBlock *Pi = Pair.second;
          IDFPhi->addIncoming(GetLastDef(Pi), Pi);
        }
      }
    }
  }

  // Now for all defs in BlocksWithDefsToReplace, if there are uses they no
  // longer dominate, replace those with the closest dominating def.
  // This will also update optimized accesses, as they're also uses.
  for (auto *BlockWithDefsToReplace : BlocksWithDefsToReplace) {
    if (auto DefsList = MSSA->getWritableBlockDefs(BlockWithDefsToReplace)) {
      for (auto &DefToReplaceUses : *DefsList) {
        BasicBlock *DominatingBlock = DefToReplaceUses.getBlock();
        Value::use_iterator UI = DefToReplaceUses.use_begin(),
                            E = DefToReplaceUses.use_end();
        for (; UI != E;) {
          Use &U = *UI;
          ++UI;
          MemoryAccess *Usr = cast<MemoryAccess>(U.getUser());
          if (MemoryPhi *UsrPhi = dyn_cast<MemoryPhi>(Usr)) {
            BasicBlock *DominatedBlock = UsrPhi->getIncomingBlock(U);
            if (!DT.dominates(DominatingBlock, DominatedBlock))
              U.set(GetLastDef(DominatedBlock));
          } else {
            BasicBlock *DominatedBlock = Usr->getBlock();
            if (!DT.dominates(DominatingBlock, DominatedBlock)) {
              if (auto *DomBlPhi = MSSA->getMemoryAccess(DominatedBlock))
                U.set(DomBlPhi);
              else {
                auto *IDom = DT.getNode(DominatedBlock)->getIDom();
                assert(IDom && "Block must have a valid IDom.");
                U.set(GetLastDef(IDom->getBlock()));
              }
              cast<MemoryUseOrDef>(Usr)->resetOptimized();
            }
          }
        }
      }
    }
  }
  tryRemoveTrivialPhis(InsertedPhis);
}

// Move What before Where in the MemorySSA IR.
template <class WhereType>
void MemorySSAUpdater::moveTo(MemoryUseOrDef *What, BasicBlock *BB,
                              WhereType Where) {
  // Mark MemoryPhi users of What not to be optimized.
  for (auto *U : What->users())
    if (MemoryPhi *PhiUser = dyn_cast<MemoryPhi>(U))
      NonOptPhis.insert(PhiUser);

  // Replace all our users with our defining access.
  What->replaceAllUsesWith(What->getDefiningAccess());

  // Let MemorySSA take care of moving it around in the lists.
  MSSA->moveTo(What, BB, Where);

  // Now reinsert it into the IR and do whatever fixups needed.
  if (auto *MD = dyn_cast<MemoryDef>(What))
    insertDef(MD, /*RenameUses=*/true);
  else
    insertUse(cast<MemoryUse>(What), /*RenameUses=*/true);

  // Clear dangling pointers. We added all MemoryPhi users, but not all
  // of them are removed by fixupDefs().
  NonOptPhis.clear();
}

// Move What before Where in the MemorySSA IR.
void MemorySSAUpdater::moveBefore(MemoryUseOrDef *What, MemoryUseOrDef *Where) {
  moveTo(What, Where->getBlock(), Where->getIterator());
}

// Move What after Where in the MemorySSA IR.
void MemorySSAUpdater::moveAfter(MemoryUseOrDef *What, MemoryUseOrDef *Where) {
  moveTo(What, Where->getBlock(), ++Where->getIterator());
}

void MemorySSAUpdater::moveToPlace(MemoryUseOrDef *What, BasicBlock *BB,
                                   MemorySSA::InsertionPlace Where) {
  return moveTo(What, BB, Where);
}
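
// Illustrative usage sketch (hypothetical instructions; `Updater` is an
// updater instance): after moving a memory instruction `I` next to `Target`
// in the IR, mirror the move in MemorySSA with:
//   Updater.moveAfter(MSSA->getMemoryAccess(I),
//                     MSSA->getMemoryAccess(Target));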

// All accesses in To used to be in From. Move to end and update access lists.
void MemorySSAUpdater::moveAllAccesses(BasicBlock *From, BasicBlock *To,
                                       Instruction *Start) {

  MemorySSA::AccessList *Accs = MSSA->getWritableBlockAccesses(From);
  if (!Accs)
    return;

  assert(Start->getParent() == To && "Incorrect Start instruction");
  MemoryAccess *FirstInNew = nullptr;
  for (Instruction &I : make_range(Start->getIterator(), To->end()))
    if ((FirstInNew = MSSA->getMemoryAccess(&I)))
      break;
  if (FirstInNew) {
    auto *MUD = cast<MemoryUseOrDef>(FirstInNew);
    do {
      auto NextIt = ++MUD->getIterator();
      MemoryUseOrDef *NextMUD = (!Accs || NextIt == Accs->end())
                                    ? nullptr
                                    : cast<MemoryUseOrDef>(&*NextIt);
      MSSA->moveTo(MUD, To, MemorySSA::End);
      // Moving MUD from Accs in the moveTo above, may delete Accs, so we need
      // to retrieve it again.
      Accs = MSSA->getWritableBlockAccesses(From);
      MUD = NextMUD;
    } while (MUD);
  }

  // If all accesses were moved and only a trivial Phi remains, we try to
  // remove that Phi. This is needed when From is going to be deleted.
  auto *Defs = MSSA->getWritableBlockDefs(From);
  if (Defs && !Defs->empty())
    if (auto *Phi = dyn_cast<MemoryPhi>(&*Defs->begin()))
      tryRemoveTrivialPhi(Phi);
}

void MemorySSAUpdater::moveAllAfterSpliceBlocks(BasicBlock *From,
                                                BasicBlock *To,
                                                Instruction *Start) {
  assert(MSSA->getBlockAccesses(To) == nullptr &&
         "To block is expected to be free of MemoryAccesses.");
  moveAllAccesses(From, To, Start);
  for (BasicBlock *Succ : successors(To))
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(Succ))
      MPhi->setIncomingBlock(MPhi->getBasicBlockIndex(From), To);
}

void MemorySSAUpdater::moveAllAfterMergeBlocks(BasicBlock *From, BasicBlock *To,
                                               Instruction *Start) {
  assert(From->getUniquePredecessor() == To &&
         "From block is expected to have a single predecessor (To).");
  moveAllAccesses(From, To, Start);
  for (BasicBlock *Succ : successors(From))
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(Succ))
      MPhi->setIncomingBlock(MPhi->getBasicBlockIndex(From), To);
}

/// If all arguments of a MemoryPHI are defined by the same incoming
/// argument, return that argument.
static MemoryAccess *onlySingleValue(MemoryPhi *MP) {
  MemoryAccess *MA = nullptr;

  for (auto &Arg : MP->operands()) {
    if (!MA)
      MA = cast<MemoryAccess>(Arg);
    else if (MA != Arg)
      return nullptr;
  }
  return MA;
}
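
// For example (illustrative pseudo-notation): onlySingleValue on
// MemoryPhi(a, a, a) returns a, while on MemoryPhi(a, b) it returns nullptr.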

void MemorySSAUpdater::wireOldPredecessorsToNewImmediatePredecessor(
    BasicBlock *Old, BasicBlock *New, ArrayRef<BasicBlock *> Preds,
    bool IdenticalEdgesWereMerged) {
  assert(!MSSA->getWritableBlockAccesses(New) &&
         "Access list should be null for a new block.");
  MemoryPhi *Phi = MSSA->getMemoryAccess(Old);
  if (!Phi)
    return;
  if (Old->hasNPredecessors(1)) {
    assert(pred_size(New) == Preds.size() &&
           "Should have moved all predecessors.");
    MSSA->moveTo(Phi, New, MemorySSA::Beginning);
  } else {
    assert(!Preds.empty() && "Must be moving at least one predecessor to the "
                             "new immediate predecessor.");
    MemoryPhi *NewPhi = MSSA->createMemoryPhi(New);
    SmallPtrSet<BasicBlock *, 16> PredsSet(Preds.begin(), Preds.end());
    // Currently only support the case of removing a single incoming edge when
    // identical edges were not merged.
    if (!IdenticalEdgesWereMerged)
      assert(PredsSet.size() == Preds.size() &&
             "If identical edges were not merged, we cannot have duplicate "
             "blocks in the predecessors");
    Phi->unorderedDeleteIncomingIf([&](MemoryAccess *MA, BasicBlock *B) {
      if (PredsSet.count(B)) {
        NewPhi->addIncoming(MA, B);
        if (!IdenticalEdgesWereMerged)
          PredsSet.erase(B);
        return true;
      }
      return false;
    });
    Phi->addIncoming(NewPhi, New);
    tryRemoveTrivialPhi(NewPhi);
  }
}

void MemorySSAUpdater::removeMemoryAccess(MemoryAccess *MA, bool OptimizePhis) {
  assert(!MSSA->isLiveOnEntryDef(MA) &&
         "Trying to remove the live on entry def");
  // We can only delete phi nodes if they have no uses, or we can replace all
  // uses with a single definition.
  MemoryAccess *NewDefTarget = nullptr;
  if (MemoryPhi *MP = dyn_cast<MemoryPhi>(MA)) {
    // Note that it is sufficient to know that all edges of the phi node have
    // the same argument. If they do, by the definition of dominance frontiers
    // (which we used to place this phi), that argument must dominate this phi,
    // and thus, must dominate the phi's uses, and so we will not hit the
    // assert below.
    NewDefTarget = onlySingleValue(MP);
    assert((NewDefTarget || MP->use_empty()) &&
           "We can't delete this memory phi");
  } else {
    NewDefTarget = cast<MemoryUseOrDef>(MA)->getDefiningAccess();
  }

  SmallSetVector<MemoryPhi *, 4> PhisToCheck;

  // Re-point the uses at our defining access
  if (!isa<MemoryUse>(MA) && !MA->use_empty()) {
    // Reset optimized on users of this store, and reset the uses.
    // A few notes:
    // 1. This is a slightly modified version of RAUW to avoid walking the
    // uses twice here.
    // 2. If we wanted to be complete, we would have to reset the optimized
    // flags on users of phi nodes if doing the below makes a phi node have all
    // the same arguments. Instead, we prefer users to removeMemoryAccess those
    // phi nodes, because doing it here would be N^3.
    if (MA->hasValueHandle())
      ValueHandleBase::ValueIsRAUWd(MA, NewDefTarget);
    // Note: We assume MemorySSA is not used in metadata since it's not really
    // part of the IR.

    while (!MA->use_empty()) {
      Use &U = *MA->use_begin();
      if (auto *MUD = dyn_cast<MemoryUseOrDef>(U.getUser()))
        MUD->resetOptimized();
      if (OptimizePhis)
        if (MemoryPhi *MP = dyn_cast<MemoryPhi>(U.getUser()))
          PhisToCheck.insert(MP);
      U.set(NewDefTarget);
    }
  }

  // The call below to erase will destroy MA, so we can't change the order we
  // are doing things here
  MSSA->removeFromLookups(MA);
  MSSA->removeFromLists(MA);

  // Optionally optimize Phi uses. This will recursively remove trivial phis.
  if (!PhisToCheck.empty()) {
    SmallVector<WeakVH, 16> PhisToOptimize{PhisToCheck.begin(),
                                           PhisToCheck.end()};
    PhisToCheck.clear();

    unsigned PhisSize = PhisToOptimize.size();
    while (PhisSize-- > 0)
      if (MemoryPhi *MP =
              cast_or_null<MemoryPhi>(PhisToOptimize.pop_back_val()))
        tryRemoveTrivialPhi(MP);
  }
}

void MemorySSAUpdater::removeBlocks(
    const SmallSetVector<BasicBlock *, 8> &DeadBlocks) {
  // First delete all uses of BB in MemoryPhis.
  for (BasicBlock *BB : DeadBlocks) {
    Instruction *TI = BB->getTerminator();
    assert(TI && "Basic block expected to have a terminator instruction");
    for (BasicBlock *Succ : successors(TI))
      if (!DeadBlocks.count(Succ))
        if (MemoryPhi *MP = MSSA->getMemoryAccess(Succ)) {
          MP->unorderedDeleteIncomingBlock(BB);
          tryRemoveTrivialPhi(MP);
        }
    // Drop all references of all accesses in BB
    if (MemorySSA::AccessList *Acc = MSSA->getWritableBlockAccesses(BB))
      for (MemoryAccess &MA : *Acc)
        MA.dropAllReferences();
  }

  // Next, delete all memory accesses in each block
  for (BasicBlock *BB : DeadBlocks) {
    MemorySSA::AccessList *Acc = MSSA->getWritableBlockAccesses(BB);
    if (!Acc)
      continue;
    for (auto AB = Acc->begin(), AE = Acc->end(); AB != AE;) {
      MemoryAccess *MA = &*AB;
      ++AB;
      MSSA->removeFromLookups(MA);
      MSSA->removeFromLists(MA);
    }
  }
}

void MemorySSAUpdater::tryRemoveTrivialPhis(ArrayRef<WeakVH> UpdatedPHIs) {
  for (auto &VH : UpdatedPHIs)
    if (auto *MPhi = cast_or_null<MemoryPhi>(VH))
      tryRemoveTrivialPhi(MPhi);
}

void MemorySSAUpdater::changeToUnreachable(const Instruction *I) {
  const BasicBlock *BB = I->getParent();
  // Remove memory accesses in BB for I and all following instructions.
  auto BBI = I->getIterator(), BBE = BB->end();
  // FIXME: If this becomes too expensive, iterate until the first instruction
  // with a memory access, then iterate over MemoryAccesses.
  while (BBI != BBE)
    removeMemoryAccess(&*(BBI++));
  // Update phis in BB's successors to remove BB.
  SmallVector<WeakVH, 16> UpdatedPHIs;
  for (const BasicBlock *Successor : successors(BB)) {
    removeDuplicatePhiEdgesBetween(BB, Successor);
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(Successor)) {
      MPhi->unorderedDeleteIncomingBlock(BB);
      UpdatedPHIs.push_back(MPhi);
    }
  }
  // Optimize trivial phis.
  tryRemoveTrivialPhis(UpdatedPHIs);
}

void MemorySSAUpdater::changeCondBranchToUnconditionalTo(const BranchInst *BI,
                                                         const BasicBlock *To) {
  const BasicBlock *BB = BI->getParent();
  SmallVector<WeakVH, 16> UpdatedPHIs;
  for (const BasicBlock *Succ : successors(BB)) {
    removeDuplicatePhiEdgesBetween(BB, Succ);
    if (Succ != To)
      if (auto *MPhi = MSSA->getMemoryAccess(Succ)) {
        MPhi->unorderedDeleteIncomingBlock(BB);
        UpdatedPHIs.push_back(MPhi);
      }
  }
  // Optimize trivial phis.
  tryRemoveTrivialPhis(UpdatedPHIs);
}

MemoryAccess *MemorySSAUpdater::createMemoryAccessInBB(
    Instruction *I, MemoryAccess *Definition, const BasicBlock *BB,
    MemorySSA::InsertionPlace Point) {
  MemoryUseOrDef *NewAccess = MSSA->createDefinedAccess(I, Definition);
  MSSA->insertIntoListsForBlock(NewAccess, BB, Point);
  return NewAccess;
}
*MemorySSAUpdater::createMemoryAccessBefore(
1423 Instruction
*I
, MemoryAccess
*Definition
, MemoryUseOrDef
*InsertPt
) {
1424 assert(I
->getParent() == InsertPt
->getBlock() &&
1425 "New and old access must be in the same block");
1426 MemoryUseOrDef
*NewAccess
= MSSA
->createDefinedAccess(I
, Definition
);
1427 MSSA
->insertIntoListsBefore(NewAccess
, InsertPt
->getBlock(),
1428 InsertPt
->getIterator());
1432 MemoryUseOrDef
*MemorySSAUpdater::createMemoryAccessAfter(
1433 Instruction
*I
, MemoryAccess
*Definition
, MemoryAccess
*InsertPt
) {
1434 assert(I
->getParent() == InsertPt
->getBlock() &&
1435 "New and old access must be in the same block");
1436 MemoryUseOrDef
*NewAccess
= MSSA
->createDefinedAccess(I
, Definition
);
1437 MSSA
->insertIntoListsBefore(NewAccess
, InsertPt
->getBlock(),
1438 ++InsertPt
->getIterator());
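
// Illustrative usage sketch (hypothetical values; `OldMA` is an existing
// MemoryUseOrDef and `NewLoadInst` a freshly cloned load): placing the clone
// immediately after the existing access keeps IR and MemorySSA in sync:
//   MemoryUseOrDef *NewMA = Updater.createMemoryAccessAfter(
//       NewLoadInst, OldMA->getDefiningAccess(), OldMA);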