//===-- MemorySSAUpdater.cpp - Memory SSA Updater--------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------===//
//
// This file implements the MemorySSAUpdater class.
//
//===----------------------------------------------------------------===//

#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/IteratedDominanceFrontier.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/FormattedStream.h"
#include <algorithm>

#define DEBUG_TYPE "memoryssa"
using namespace llvm;

// This is the marker algorithm from "Simple and Efficient Construction of
// Static Single Assignment Form".
// The simple, non-marker algorithm places phi nodes at any join.
// Here, we place markers, and only place phi nodes if they end up necessary.
// They are only necessary if they break a cycle (IE we recursively visit
// ourselves again), or we discover, while getting the value of the operands,
// that there are two or more definitions needing to be merged.
// This still will leave non-minimal form in the case of irreducible control
// flow, where phi nodes may be in cycles with themselves, but unnecessary.
MemoryAccess *MemorySSAUpdater::getPreviousDefRecursive(
    BasicBlock *BB,
    DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> &CachedPreviousDef) {
  // First, do a cache lookup. Without this cache, certain CFG structures
  // (like a series of if statements) take exponential time to visit.
  auto Cached = CachedPreviousDef.find(BB);
  if (Cached != CachedPreviousDef.end()) {
    return Cached->second;
  }

  if (BasicBlock *Pred = BB->getSinglePredecessor()) {
    // Single predecessor case, just recurse, we can only have one definition.
    MemoryAccess *Result = getPreviousDefFromEnd(Pred, CachedPreviousDef);
    CachedPreviousDef.insert({BB, Result});
    return Result;
  }

  if (VisitedBlocks.count(BB)) {
    // We hit our node again, meaning we had a cycle, we must insert a phi
    // node to break it so we have an operand. The only case this will
    // insert useless phis is if we have irreducible control flow.
    MemoryAccess *Result = MSSA->createMemoryPhi(BB);
    CachedPreviousDef.insert({BB, Result});
    return Result;
  }

  if (VisitedBlocks.insert(BB).second) {
    // Mark us visited so we can detect a cycle
    SmallVector<TrackingVH<MemoryAccess>, 8> PhiOps;

    // Recurse to get the values in our predecessors for placement of a
    // potential phi node. This will insert phi nodes if we cycle in order to
    // break the cycle and have an operand.
    for (auto *Pred : predecessors(BB))
      PhiOps.push_back(getPreviousDefFromEnd(Pred, CachedPreviousDef));

    // Now try to simplify the ops to avoid placing a phi.
    // This may return null if we never created a phi yet, that's okay
    MemoryPhi *Phi = dyn_cast_or_null<MemoryPhi>(MSSA->getMemoryAccess(BB));

    // See if we can avoid the phi by simplifying it.
    auto *Result = tryRemoveTrivialPhi(Phi, PhiOps);
    // If we couldn't simplify, we may have to create a phi
    if (Result == Phi) {
      if (!Phi)
        Phi = MSSA->createMemoryPhi(BB);

      // See if the existing phi operands match what we need.
      // Unlike normal SSA, we only allow one phi node per block, so we can't
      // just create a new one.
      if (Phi->getNumOperands() != 0) {
        // FIXME: Figure out whether this is dead code and if so remove it.
        if (!std::equal(Phi->op_begin(), Phi->op_end(), PhiOps.begin())) {
          // These will have been filled in by the recursive read we did above.
          llvm::copy(PhiOps, Phi->op_begin());
          std::copy(pred_begin(BB), pred_end(BB), Phi->block_begin());
        }
      } else {
        unsigned i = 0;
        for (auto *Pred : predecessors(BB))
          Phi->addIncoming(&*PhiOps[i++], Pred);
        InsertedPHIs.push_back(Phi);
      }
      Result = Phi;
    }

    // Set ourselves up for the next variable by resetting visited state.
    VisitedBlocks.erase(BB);
    CachedPreviousDef.insert({BB, Result});
    return Result;
  }

  llvm_unreachable("Should have hit one of the three cases above");
}
// This starts at the memory access, and goes backwards in the block to find
// the previous definition. If a definition is not found in the block of the
// access, it continues globally, creating phi nodes to ensure we have a
// single definition.
MemoryAccess *MemorySSAUpdater::getPreviousDef(MemoryAccess *MA) {
  if (auto *LocalResult = getPreviousDefInBlock(MA))
    return LocalResult;
  DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> CachedPreviousDef;
  return getPreviousDefRecursive(MA->getBlock(), CachedPreviousDef);
}

// This starts at the memory access, and goes backwards in the block to find
// the previous definition. If the definition is not found in the block of the
// access, it returns nullptr.
MemoryAccess *MemorySSAUpdater::getPreviousDefInBlock(MemoryAccess *MA) {
  auto *Defs = MSSA->getWritableBlockDefs(MA->getBlock());

  // It's possible there are no defs, or we got handed the first def to start.
  if (Defs) {
    // If this is a def, we can just use the def iterators.
    if (!isa<MemoryUse>(MA)) {
      auto Iter = MA->getReverseDefsIterator();
      ++Iter;
      if (Iter != Defs->rend())
        return &*Iter;
    } else {
      // Otherwise, have to walk the all access iterator.
      auto End = MSSA->getWritableBlockAccesses(MA->getBlock())->rend();
      for (auto &U : make_range(++MA->getReverseIterator(), End))
        if (!isa<MemoryUse>(U))
          return cast<MemoryAccess>(&U);
      // Note that if MA comes before Defs->begin(), we won't hit a def.
      return nullptr;
    }
  }
  return nullptr;
}

// This starts at the end of the block.
MemoryAccess *MemorySSAUpdater::getPreviousDefFromEnd(
    BasicBlock *BB,
    DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> &CachedPreviousDef) {
  auto *Defs = MSSA->getWritableBlockDefs(BB);

  if (Defs)
    return &*Defs->rbegin();

  return getPreviousDefRecursive(BB, CachedPreviousDef);
}
// Recurse over a set of phi uses to eliminate the trivial ones
MemoryAccess *MemorySSAUpdater::recursePhi(MemoryAccess *Phi) {
  if (!Phi)
    return nullptr;
  TrackingVH<MemoryAccess> Res(Phi);
  SmallVector<TrackingVH<Value>, 8> Uses;
  std::copy(Phi->user_begin(), Phi->user_end(), std::back_inserter(Uses));
  for (auto &U : Uses) {
    if (MemoryPhi *UsePhi = dyn_cast<MemoryPhi>(&*U)) {
      auto OperRange = UsePhi->operands();
      tryRemoveTrivialPhi(UsePhi, OperRange);
    }
  }
  return Res;
}

// Eliminate trivial phis
// Phis are trivial if they are defined either by themselves, or all the same
// argument.
// IE phi(a, a) or b = phi(a, b) or c = phi(a, a, c)
// We recursively try to remove them.
template <class RangeType>
MemoryAccess *MemorySSAUpdater::tryRemoveTrivialPhi(MemoryPhi *Phi,
                                                    RangeType &Operands) {
  // Bail out on non-opt Phis.
  if (NonOptPhis.count(Phi))
    return Phi;

  // Detect equal or self arguments
  MemoryAccess *Same = nullptr;
  for (auto &Op : Operands) {
    // If the same or self, good so far
    if (Op == Phi || Op == Same)
      continue;
    // not the same, return the phi since it's not eliminatable by us
    if (Same)
      return Phi;
    Same = cast<MemoryAccess>(&*Op);
  }
  // Never found a non-self reference, the phi is undef
  if (Same == nullptr)
    return MSSA->getLiveOnEntryDef();
  if (Phi) {
    Phi->replaceAllUsesWith(Same);
    removeMemoryAccess(Phi);
  }

  // We should only end up recursing in case we replaced something, in which
  // case, we may have made other Phis trivial.
  return recursePhi(Same);
}
void MemorySSAUpdater::insertUse(MemoryUse *MU) {
  InsertedPHIs.clear();
  MU->setDefiningAccess(getPreviousDef(MU));
  // Unlike for defs, there is no extra work to do. Because uses do not create
  // new may-defs, there are only two cases:
  //
  // 1. There was a def already below us, and therefore, we should not have
  // created a phi node because it was already needed for the def.
  //
  // 2. There is no def below us, and therefore, there is no extra renaming
  // work to do.
}

// Set every incoming edge {BB, MP->getBlock()} of MemoryPhi MP to NewDef.
static void setMemoryPhiValueForBlock(MemoryPhi *MP, const BasicBlock *BB,
                                      MemoryAccess *NewDef) {
  // Replace the incoming value of every operand whose incoming block is BB
  // with the new defining access.
  int i = MP->getBasicBlockIndex(BB);
  assert(i != -1 && "Should have found the basic block in the phi");
  // We can't just compare i against getNumOperands since one is signed and
  // the other not. So use it to index into the block iterator.
  for (auto BBIter = MP->block_begin() + i; BBIter != MP->block_end();
       ++BBIter) {
    if (*BBIter != BB)
      break;
    MP->setIncomingValue(i, NewDef);
    ++i;
  }
}
// A brief description of the algorithm:
// First, we compute what should define the new def, using the SSA
// construction algorithm.
// Then, we update the defs below us (and any new phi nodes) in the graph to
// point to the correct new defs, to ensure we only have one variable, and no
// disconnected stores.
void MemorySSAUpdater::insertDef(MemoryDef *MD, bool RenameUses) {
  InsertedPHIs.clear();

  // See if we had a local def, and if not, go hunting.
  MemoryAccess *DefBefore = getPreviousDef(MD);
  bool DefBeforeSameBlock = DefBefore->getBlock() == MD->getBlock();

  // There is a def before us, which means we can replace any store/phi uses
  // of that thing with us, since we are in the way of whatever was there
  // before.
  // We now define that def's memorydefs and memoryphis
  if (DefBeforeSameBlock) {
    for (auto UI = DefBefore->use_begin(), UE = DefBefore->use_end();
         UI != UE;) {
      Use &U = *UI++;
      // Leave the MemoryUses alone.
      // Also make sure we skip ourselves to avoid self references.
      if (isa<MemoryUse>(U.getUser()) || U.getUser() == MD)
        continue;
      // Defs are automatically unoptimized when the user is set to MD below,
      // because the isOptimized() call will fail to find the same ID.
      U.set(MD);
    }
  }

  // and that def is now our defining access.
  MD->setDefiningAccess(DefBefore);

  // Remember the index where we may insert new phis below.
  unsigned NewPhiIndex = InsertedPHIs.size();

  SmallVector<WeakVH, 8> FixupList(InsertedPHIs.begin(), InsertedPHIs.end());
  if (!DefBeforeSameBlock) {
    // If there was a local def before us, we must have the same effect it
    // did. Because every may-def is the same, any phis/etc we would create,
    // it would also have created. If there was no local def before us, we
    // performed a global update, and have to search all successors and make
    // sure we update the first def in each of them (following all paths
    // until we hit the first def along each path). This may also insert phi
    // nodes.
    // TODO: There are other cases where we can skip this work, such as when
    // we have a single successor, and only used a straight line of single
    // pred blocks backwards to find the def. To make that work, we'd have to
    // track whether getDefRecursive only ever used the single predecessor
    // case. These types of paths also only exist in between CFG
    // simplifications.

    // If this is the first def in the block and this insert is in an
    // arbitrary place, compute IDF and place phis.
    auto Iter = MD->getDefsIterator();
    ++Iter;
    auto IterEnd = MSSA->getBlockDefs(MD->getBlock())->end();
    if (Iter == IterEnd) {
      ForwardIDFCalculator IDFs(*MSSA->DT);
      SmallVector<BasicBlock *, 32> IDFBlocks;
      SmallPtrSet<BasicBlock *, 2> DefiningBlocks;
      DefiningBlocks.insert(MD->getBlock());
      IDFs.setDefiningBlocks(DefiningBlocks);
      IDFs.calculate(IDFBlocks);
      SmallVector<AssertingVH<MemoryPhi>, 4> NewInsertedPHIs;
      for (auto *BBIDF : IDFBlocks)
        if (!MSSA->getMemoryAccess(BBIDF))
          NewInsertedPHIs.push_back(MSSA->createMemoryPhi(BBIDF));

      for (auto &MPhi : NewInsertedPHIs) {
        auto *BBIDF = MPhi->getBlock();
        for (auto *Pred : predecessors(BBIDF)) {
          DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> CachedPreviousDef;
          MPhi->addIncoming(getPreviousDefFromEnd(Pred, CachedPreviousDef),
                            Pred);
        }
      }

      // Re-take the index where we're adding the new phis, because the above
      // call to getPreviousDefFromEnd may have inserted into InsertedPHIs.
      NewPhiIndex = InsertedPHIs.size();
      for (auto &MPhi : NewInsertedPHIs) {
        InsertedPHIs.push_back(&*MPhi);
        FixupList.push_back(&*MPhi);
      }
    }

    FixupList.push_back(MD);
  }

  // Remember the index where we stopped inserting new phis above, since the
  // fixupDefs call in the loop below may insert more that are already
  // minimal.
  unsigned NewPhiIndexEnd = InsertedPHIs.size();

  while (!FixupList.empty()) {
    unsigned StartingPHISize = InsertedPHIs.size();
    fixupDefs(FixupList);
    FixupList.clear();
    // Put any new phis on the fixup list, and process them
    FixupList.append(InsertedPHIs.begin() + StartingPHISize,
                     InsertedPHIs.end());
  }

  // Optimize potentially non-minimal phis added in this method.
  for (unsigned Idx = NewPhiIndex; Idx < NewPhiIndexEnd; ++Idx) {
    if (auto *MPhi = cast_or_null<MemoryPhi>(InsertedPHIs[Idx])) {
      auto OperRange = MPhi->operands();
      tryRemoveTrivialPhi(MPhi, OperRange);
    }
  }

  // Now that all fixups are done, rename all uses if we are asked.
  if (RenameUses) {
    SmallPtrSet<BasicBlock *, 16> Visited;
    BasicBlock *StartBlock = MD->getBlock();
    // We are guaranteed there is a def in the block, because we just got it
    // handed to us in this function.
    MemoryAccess *FirstDef = &*MSSA->getWritableBlockDefs(StartBlock)->begin();
    // Convert to incoming value if it's a memorydef. A phi *is* already an
    // incoming value.
    if (auto *MD = dyn_cast<MemoryDef>(FirstDef))
      FirstDef = MD->getDefiningAccess();

    MSSA->renamePass(MD->getBlock(), FirstDef, Visited);
    // We just inserted a phi into this block, so the incoming value will
    // become the phi anyway, so it does not matter what we pass.
    for (auto &MP : InsertedPHIs) {
      MemoryPhi *Phi = dyn_cast_or_null<MemoryPhi>(MP);
      if (Phi)
        MSSA->renamePass(Phi->getBlock(), nullptr, Visited);
    }
  }
}
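// For each new def (or newly created MemoryPhi) in Vars, rewire whatever is
// downstream of it: the next def in the same block if one exists, otherwise
// the MemoryPhis and first defs reachable through the successor blocks.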
void MemorySSAUpdater::fixupDefs(const SmallVectorImpl<WeakVH> &Vars) {
  SmallPtrSet<const BasicBlock *, 8> Seen;
  SmallVector<const BasicBlock *, 16> Worklist;
  for (auto &Var : Vars) {
    MemoryAccess *NewDef = dyn_cast_or_null<MemoryAccess>(Var);
    if (!NewDef)
      continue;
    // First, see if there is a local def after the operand.
    auto *Defs = MSSA->getWritableBlockDefs(NewDef->getBlock());
    auto DefIter = NewDef->getDefsIterator();

    // The temporary Phi is being fixed, unmark it so it can be optimized
    // again.
    if (MemoryPhi *Phi = dyn_cast<MemoryPhi>(NewDef))
      NonOptPhis.erase(Phi);

    // If there is a local def after us, we only have to rename that.
    if (++DefIter != Defs->end()) {
      cast<MemoryDef>(DefIter)->setDefiningAccess(NewDef);
      continue;
    }

    // Otherwise, we need to search down through the CFG.
    // For each of our successors, handle it directly if there is a phi, or
    // place on the fixup worklist.
    for (const auto *S : successors(NewDef->getBlock())) {
      if (auto *MP = MSSA->getMemoryAccess(S))
        setMemoryPhiValueForBlock(MP, NewDef->getBlock(), NewDef);
      else
        Worklist.push_back(S);
    }

    while (!Worklist.empty()) {
      const BasicBlock *FixupBlock = Worklist.back();
      Worklist.pop_back();

      // Get the first def in the block that isn't a phi node.
      if (auto *Defs = MSSA->getWritableBlockDefs(FixupBlock)) {
        auto *FirstDef = &*Defs->begin();
        // The loop above and below should have taken care of phi nodes
        assert(!isa<MemoryPhi>(FirstDef) &&
               "Should have already handled phi nodes!");
        // We are now this def's defining access, make sure we actually
        // dominate it
        assert(MSSA->dominates(NewDef, FirstDef) &&
               "Should have dominated the new access");

        // This may insert new phi nodes, because we are not guaranteed the
        // block we are processing has a single pred, and depending where the
        // store was inserted, it may require phi nodes below it.
        cast<MemoryDef>(FirstDef)->setDefiningAccess(getPreviousDef(FirstDef));
        return;
      }
      // We didn't find a def, so we must continue.
      for (const auto *S : successors(FixupBlock)) {
        // If there is a phi node, handle it.
        // Otherwise, put the block on the worklist
        if (auto *MP = MSSA->getMemoryAccess(S))
          setMemoryPhiValueForBlock(MP, FixupBlock, NewDef);
        else {
          // If we cycle, we should have ended up at a phi node that we
          // already processed. FIXME: Double check this
          if (!Seen.insert(S).second)
            continue;
          Worklist.push_back(S);
        }
      }
    }
  }
}
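// Remove the incoming value for the edge From->To from To's MemoryPhi, and
// delete the Phi entirely if it is left with a single incoming value.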
void MemorySSAUpdater::removeEdge(BasicBlock *From, BasicBlock *To) {
  if (MemoryPhi *MPhi = MSSA->getMemoryAccess(To)) {
    MPhi->unorderedDeleteIncomingBlock(From);
    if (MPhi->getNumIncomingValues() == 1)
      removeMemoryAccess(MPhi);
  }
}
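// Keep only one incoming entry for From in To's MemoryPhi, deleting the
// duplicates that correspond to duplicate CFG edges.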
void MemorySSAUpdater::removeDuplicatePhiEdgesBetween(BasicBlock *From,
                                                      BasicBlock *To) {
  if (MemoryPhi *MPhi = MSSA->getMemoryAccess(To)) {
    bool Found = false;
    MPhi->unorderedDeleteIncomingIf([&](const MemoryAccess *, BasicBlock *B) {
      if (From != B)
        return false;
      if (Found)
        return true;
      Found = true;
      return false;
    });
    if (MPhi->getNumIncomingValues() == 1)
      removeMemoryAccess(MPhi);
  }
}
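// Create accesses in NewBB mirroring those in BB, using VMap to find the
// cloned instructions and MPhiMap to remap defining MemoryPhis.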
void MemorySSAUpdater::cloneUsesAndDefs(BasicBlock *BB, BasicBlock *NewBB,
                                        const ValueToValueMapTy &VMap,
                                        PhiToDefMap &MPhiMap) {
  auto GetNewDefiningAccess = [&](MemoryAccess *MA) -> MemoryAccess * {
    MemoryAccess *InsnDefining = MA;
    if (MemoryUseOrDef *DefMUD = dyn_cast<MemoryUseOrDef>(InsnDefining)) {
      if (!MSSA->isLiveOnEntryDef(DefMUD)) {
        Instruction *DefMUDI = DefMUD->getMemoryInst();
        assert(DefMUDI && "Found MemoryUseOrDef with no Instruction.");
        if (Instruction *NewDefMUDI =
                cast_or_null<Instruction>(VMap.lookup(DefMUDI)))
          InsnDefining = MSSA->getMemoryAccess(NewDefMUDI);
      }
    } else {
      MemoryPhi *DefPhi = cast<MemoryPhi>(InsnDefining);
      if (MemoryAccess *NewDefPhi = MPhiMap.lookup(DefPhi))
        InsnDefining = NewDefPhi;
    }
    assert(InsnDefining && "Defining instruction cannot be nullptr.");
    return InsnDefining;
  };

  const MemorySSA::AccessList *Acc = MSSA->getBlockAccesses(BB);
  if (!Acc)
    return;
  for (const MemoryAccess &MA : *Acc) {
    if (const MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(&MA)) {
      Instruction *Insn = MUD->getMemoryInst();
      // Entry does not exist if the clone of the block did not clone all
      // instructions. This occurs in LoopRotate when cloning instructions
      // from the old header to the old preheader. The cloned instruction may
      // also be a simplified Value, not an Instruction (see LoopRotate).
      if (Instruction *NewInsn =
              dyn_cast_or_null<Instruction>(VMap.lookup(Insn))) {
        MemoryAccess *NewUseOrDef = MSSA->createDefinedAccess(
            NewInsn, GetNewDefiningAccess(MUD->getDefiningAccess()), MUD);
        MSSA->insertIntoListsForBlock(NewUseOrDef, NewBB, MemorySSA::End);
      }
    }
  }
}
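// Update MemorySSA after a loop and its exit blocks have been cloned (as
// recorded in VMap): create MemoryPhis and accesses for the cloned blocks,
// then fix up the incoming values of the new Phis.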
void MemorySSAUpdater::updateForClonedLoop(const LoopBlocksRPO &LoopBlocks,
                                           ArrayRef<BasicBlock *> ExitBlocks,
                                           const ValueToValueMapTy &VMap,
                                           bool IgnoreIncomingWithNoClones) {
  PhiToDefMap MPhiMap;

  auto FixPhiIncomingValues = [&](MemoryPhi *Phi, MemoryPhi *NewPhi) {
    assert(Phi && NewPhi && "Invalid Phi nodes.");
    BasicBlock *NewPhiBB = NewPhi->getBlock();
    SmallPtrSet<BasicBlock *, 4> NewPhiBBPreds(pred_begin(NewPhiBB),
                                               pred_end(NewPhiBB));
    for (unsigned It = 0, E = Phi->getNumIncomingValues(); It < E; ++It) {
      MemoryAccess *IncomingAccess = Phi->getIncomingValue(It);
      BasicBlock *IncBB = Phi->getIncomingBlock(It);

      if (BasicBlock *NewIncBB = cast_or_null<BasicBlock>(VMap.lookup(IncBB)))
        IncBB = NewIncBB;
      else if (IgnoreIncomingWithNoClones)
        continue;

      // Now we have IncBB, and will need to add incoming from it to NewPhi.

      // If IncBB is not a predecessor of NewPhiBB, then do not add it.
      // NewPhiBB was cloned without that edge.
      if (!NewPhiBBPreds.count(IncBB))
        continue;

      // Determine incoming value and add it as incoming from IncBB.
      if (MemoryUseOrDef *IncMUD = dyn_cast<MemoryUseOrDef>(IncomingAccess)) {
        if (!MSSA->isLiveOnEntryDef(IncMUD)) {
          Instruction *IncI = IncMUD->getMemoryInst();
          assert(IncI && "Found MemoryUseOrDef with no Instruction.");
          if (Instruction *NewIncI =
                  cast_or_null<Instruction>(VMap.lookup(IncI))) {
            IncMUD = MSSA->getMemoryAccess(NewIncI);
            assert(IncMUD &&
                   "MemoryUseOrDef cannot be null, all preds processed.");
          }
        }
        NewPhi->addIncoming(IncMUD, IncBB);
      } else {
        MemoryPhi *IncPhi = cast<MemoryPhi>(IncomingAccess);
        if (MemoryAccess *NewDefPhi = MPhiMap.lookup(IncPhi))
          NewPhi->addIncoming(NewDefPhi, IncBB);
        else
          NewPhi->addIncoming(IncPhi, IncBB);
      }
    }
  };

  auto ProcessBlock = [&](BasicBlock *BB) {
    BasicBlock *NewBlock = cast_or_null<BasicBlock>(VMap.lookup(BB));
    if (!NewBlock)
      return;

    assert(!MSSA->getWritableBlockAccesses(NewBlock) &&
           "Cloned block should have no accesses");

    // Add MemoryPhi.
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(BB)) {
      MemoryPhi *NewPhi = MSSA->createMemoryPhi(NewBlock);
      MPhiMap[MPhi] = NewPhi;
    }
    // Update Uses and Defs.
    cloneUsesAndDefs(BB, NewBlock, VMap, MPhiMap);
  };

  for (auto BB : llvm::concat<BasicBlock *const>(LoopBlocks, ExitBlocks))
    ProcessBlock(BB);

  for (auto BB : llvm::concat<BasicBlock *const>(LoopBlocks, ExitBlocks))
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(BB))
      if (MemoryAccess *NewPhi = MPhiMap.lookup(MPhi))
        FixPhiIncomingValues(MPhi, cast<MemoryPhi>(NewPhi));
}
void MemorySSAUpdater::updateForClonedBlockIntoPred(
    BasicBlock *BB, BasicBlock *P1, const ValueToValueMapTy &VM) {
  // All defs/phis from outside BB that are used in BB are valid uses in P1,
  // since those defs/phis must have dominated BB and so also dominate P1.
  // Defs from BB being used in BB will be replaced with the cloned defs from
  // VM. The uses of BB's Phi (if it exists) in BB will be replaced by the
  // incoming def into the Phi from P1.
  PhiToDefMap MPhiMap;
  if (MemoryPhi *MPhi = MSSA->getMemoryAccess(BB))
    MPhiMap[MPhi] = MPhi->getIncomingValueForBlock(P1);
  cloneUsesAndDefs(BB, P1, VM, MPhiMap);
}

template <typename Iter>
void MemorySSAUpdater::privateUpdateExitBlocksForClonedLoop(
    ArrayRef<BasicBlock *> ExitBlocks, Iter ValuesBegin, Iter ValuesEnd,
    DominatorTree &DT) {
  SmallVector<CFGUpdate, 4> Updates;
  // Update/insert phis in all successors of exit blocks.
  for (auto *Exit : ExitBlocks)
    for (const ValueToValueMapTy *VMap : make_range(ValuesBegin, ValuesEnd))
      if (BasicBlock *NewExit = cast_or_null<BasicBlock>(VMap->lookup(Exit))) {
        BasicBlock *ExitSucc = NewExit->getTerminator()->getSuccessor(0);
        Updates.push_back({DT.Insert, NewExit, ExitSucc});
      }
  applyInsertUpdates(Updates, DT);
}

void MemorySSAUpdater::updateExitBlocksForClonedLoop(
    ArrayRef<BasicBlock *> ExitBlocks, const ValueToValueMapTy &VMap,
    DominatorTree &DT) {
  const ValueToValueMapTy *const Arr[] = {&VMap};
  privateUpdateExitBlocksForClonedLoop(ExitBlocks, std::begin(Arr),
                                       std::end(Arr), DT);
}

void MemorySSAUpdater::updateExitBlocksForClonedLoop(
    ArrayRef<BasicBlock *> ExitBlocks,
    ArrayRef<std::unique_ptr<ValueToValueMapTy>> VMaps, DominatorTree &DT) {
  auto GetPtr = [&](const std::unique_ptr<ValueToValueMapTy> &I) {
    return I.get();
  };
  using MappedIteratorType =
      mapped_iterator<const std::unique_ptr<ValueToValueMapTy> *,
                      decltype(GetPtr)>;
  auto MapBegin = MappedIteratorType(VMaps.begin(), GetPtr);
  auto MapEnd = MappedIteratorType(VMaps.end(), GetPtr);
  privateUpdateExitBlocksForClonedLoop(ExitBlocks, MapBegin, MapEnd, DT);
}
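// Apply a mixed list of CFG updates: edge insertions are processed against a
// CFG snapshot in which the deletions have not happened yet, and the deleted
// edges are then removed from the affected MemoryPhis.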
void MemorySSAUpdater::applyUpdates(ArrayRef<CFGUpdate> Updates,
                                    DominatorTree &DT) {
  SmallVector<CFGUpdate, 4> RevDeleteUpdates;
  SmallVector<CFGUpdate, 4> InsertUpdates;
  for (auto &Update : Updates) {
    if (Update.getKind() == DT.Insert)
      InsertUpdates.push_back({DT.Insert, Update.getFrom(), Update.getTo()});
    else
      RevDeleteUpdates.push_back({DT.Insert, Update.getFrom(), Update.getTo()});
  }

  if (!RevDeleteUpdates.empty()) {
    // Update for inserted edges: use newDT and snapshot CFG as if deletes had
    // not occurred.
    // FIXME: This creates a new DT, so it's more expensive to mix deletes and
    // inserts than to do just inserts. We could instead do an incremental
    // update on the DT to revert the deletes, then re-delete the edges.
    // Teaching DT to do this is part of a pending cleanup.
    DominatorTree NewDT(DT, RevDeleteUpdates);
    GraphDiff<BasicBlock *> GD(RevDeleteUpdates);
    applyInsertUpdates(InsertUpdates, NewDT, &GD);
  } else {
    GraphDiff<BasicBlock *> GD;
    applyInsertUpdates(InsertUpdates, DT, &GD);
  }

  // Update for deleted edges
  for (auto &Update : RevDeleteUpdates)
    removeEdge(Update.getFrom(), Update.getTo());
}
void MemorySSAUpdater::applyInsertUpdates(ArrayRef<CFGUpdate> Updates,
                                          DominatorTree &DT) {
  GraphDiff<BasicBlock *> GD;
  applyInsertUpdates(Updates, DT, &GD);
}
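// Apply CFG edge insertions only. GD provides the snapshot of the CFG to use
// when walking predecessors (e.g. with pending deletions reverted).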
void MemorySSAUpdater::applyInsertUpdates(ArrayRef<CFGUpdate> Updates,
                                          DominatorTree &DT,
                                          const GraphDiff<BasicBlock *> *GD) {
  // Get recursive last Def, assuming well formed MSSA and updated DT.
  auto GetLastDef = [&](BasicBlock *BB) -> MemoryAccess * {
    while (true) {
      MemorySSA::DefsList *Defs = MSSA->getWritableBlockDefs(BB);
      // Return last Def or Phi in BB, if it exists.
      if (Defs)
        return &*(--Defs->end());

      // Check number of predecessors, we only care if there's more than one.
      unsigned Count = 0;
      BasicBlock *Pred = nullptr;
      for (auto &Pair : children<GraphDiffInvBBPair>({GD, BB})) {
        Pred = Pair.second;
        Count++;
        if (Count == 2)
          break;
      }

      // If BB has multiple predecessors, get last definition from IDom.
      if (Count != 1) {
        // [SimpleLoopUnswitch] If BB is a dead block, about to be deleted,
        // its DT is invalidated. Return LoE as its last def. This will be
        // added to the MemoryPhi node, and later deleted when the block is
        // deleted.
        if (!DT.getNode(BB))
          return MSSA->getLiveOnEntryDef();
        if (auto *IDom = DT.getNode(BB)->getIDom())
          if (IDom->getBlock() != BB) {
            BB = IDom->getBlock();
            continue;
          }
        return MSSA->getLiveOnEntryDef();
      } else {
        // Single predecessor, BB cannot be dead. GetLastDef of Pred.
        assert(Count == 1 && Pred && "Single predecessor expected.");
        BB = Pred;
      }
    }
    llvm_unreachable("Unable to get last definition.");
  };

  // Get nearest IDom given a set of blocks.
  // TODO: this can be optimized by starting the search at the node with the
  // lowest level (highest in the tree).
  auto FindNearestCommonDominator =
      [&](const SmallSetVector<BasicBlock *, 2> &BBSet) -> BasicBlock * {
    BasicBlock *PrevIDom = *BBSet.begin();
    for (auto *BB : BBSet)
      PrevIDom = DT.findNearestCommonDominator(PrevIDom, BB);
    return PrevIDom;
  };

  // Get all blocks that dominate PrevIDom, stop when reaching CurrIDom. Do
  // not include CurrIDom.
  auto GetNoLongerDomBlocks =
      [&](BasicBlock *PrevIDom, BasicBlock *CurrIDom,
          SmallVectorImpl<BasicBlock *> &BlocksPrevDom) {
        if (PrevIDom == CurrIDom)
          return;
        BlocksPrevDom.push_back(PrevIDom);
        BasicBlock *NextIDom = PrevIDom;
        while (BasicBlock *UpIDom =
                   DT.getNode(NextIDom)->getIDom()->getBlock()) {
          if (UpIDom == CurrIDom)
            break;
          BlocksPrevDom.push_back(UpIDom);
          NextIDom = UpIDom;
        }
      };

  // Map a BB to its predecessors: added + previously existing. To get a
  // deterministic order, store predecessors as SetVectors. The order in each
  // will be defined by the order in Updates (fixed) and the order given by
  // children<> (also fixed). Since we further iterate over these ordered
  // sets, we lose the information of multiple edges possibly existing
  // between two blocks, so we'll keep an EdgeCount map for that.
  // An alternate implementation could keep an unordered set for the
  // predecessors, traverse either Updates or children<> each time to get the
  // deterministic order, and drop the usage of EdgeCount. This alternate
  // approach would still require querying the maps for each predecessor, and
  // the children<> call has additional computation inside for creating the
  // snapshot-graph predecessors. As such, we favor using a little additional
  // storage and less compute time. This decision can be revisited if we find
  // the alternative more favorable.

  struct PredInfo {
    SmallSetVector<BasicBlock *, 2> Added;
    SmallSetVector<BasicBlock *, 2> Prev;
  };
  SmallDenseMap<BasicBlock *, PredInfo> PredMap;

  for (auto &Edge : Updates) {
    BasicBlock *BB = Edge.getTo();
    auto &AddedBlockSet = PredMap[BB].Added;
    AddedBlockSet.insert(Edge.getFrom());
  }

  // Store all existing predecessors for each BB, at least one must exist.
  SmallDenseMap<std::pair<BasicBlock *, BasicBlock *>, int> EdgeCountMap;
  SmallPtrSet<BasicBlock *, 2> NewBlocks;
  for (auto &BBPredPair : PredMap) {
    auto *BB = BBPredPair.first;
    const auto &AddedBlockSet = BBPredPair.second.Added;
    auto &PrevBlockSet = BBPredPair.second.Prev;
    for (auto &Pair : children<GraphDiffInvBBPair>({GD, BB})) {
      BasicBlock *Pi = Pair.second;
      if (!AddedBlockSet.count(Pi))
        PrevBlockSet.insert(Pi);
      EdgeCountMap[{Pi, BB}]++;
    }

    if (PrevBlockSet.empty()) {
      assert(pred_size(BB) == AddedBlockSet.size() && "Duplicate edges added.");
      LLVM_DEBUG(
          dbgs()
          << "Adding a predecessor to a block with no predecessors. "
             "This must be an edge added to a new, likely cloned, block. "
             "Its memory accesses must be already correct, assuming completed "
             "via the updateExitBlocksForClonedLoop API. "
             "Assert a single such edge is added so no phi addition or "
             "additional processing is required.\n");
      assert(AddedBlockSet.size() == 1 &&
             "Can only handle adding one predecessor to a new block.");
      // Need to remove new blocks from PredMap. Remove below to not
      // invalidate the iterator here.
      NewBlocks.insert(BB);
    }
  }
  // Nothing to process for new/cloned blocks.
  for (auto *BB : NewBlocks)
    PredMap.erase(BB);

  SmallVector<BasicBlock *, 8> BlocksToProcess;
  SmallVector<BasicBlock *, 16> BlocksWithDefsToReplace;

  // First create MemoryPhis in all blocks that don't have one. Create in the
  // order found in Updates, not in PredMap, to get deterministic numbering.
  for (auto &Edge : Updates) {
    BasicBlock *BB = Edge.getTo();
    if (PredMap.count(BB) && !MSSA->getMemoryAccess(BB))
      MSSA->createMemoryPhi(BB);
  }

  // Now we'll fill in the MemoryPhis with the right incoming values.
  for (auto &BBPredPair : PredMap) {
    auto *BB = BBPredPair.first;
    const auto &PrevBlockSet = BBPredPair.second.Prev;
    const auto &AddedBlockSet = BBPredPair.second.Added;
    assert(!PrevBlockSet.empty() &&
           "At least one previous predecessor must exist.");

    // TODO: if this becomes a bottleneck, we can save on GetLastDef calls by
    // keeping this map before the loop. We can reuse already populated
    // entries if an edge is added from the same predecessor to two different
    // blocks, and this does happen in rotate. Note that the map needs to be
    // updated when deleting non-necessary phis below, if the phi is in the
    // map by replacing the value with DefP1.
    SmallDenseMap<BasicBlock *, MemoryAccess *> LastDefAddedPred;
    for (auto *AddedPred : AddedBlockSet) {
      auto *DefPn = GetLastDef(AddedPred);
      assert(DefPn != nullptr && "Unable to find last definition.");
      LastDefAddedPred[AddedPred] = DefPn;
    }

    MemoryPhi *NewPhi = MSSA->getMemoryAccess(BB);
    // If Phi is not empty, add an incoming edge from each added pred. Must
    // still compute blocks with defs to replace for this block below.
    if (NewPhi->getNumOperands()) {
      for (auto *Pred : AddedBlockSet) {
        auto *LastDefForPred = LastDefAddedPred[Pred];
        for (int I = 0, E = EdgeCountMap[{Pred, BB}]; I < E; ++I)
          NewPhi->addIncoming(LastDefForPred, Pred);
      }
    } else {
      // Pick any existing predecessor and get its definition. All other
      // existing predecessors should have the same one, since no phi existed.
      auto *P1 = *PrevBlockSet.begin();
      MemoryAccess *DefP1 = GetLastDef(P1);

      // Check DefP1 against all Defs in LastDefPredPair. If all the same,
      // nothing to add.
      bool InsertPhi = false;
      for (auto LastDefPredPair : LastDefAddedPred)
        if (DefP1 != LastDefPredPair.second) {
          InsertPhi = true;
          break;
        }
      if (!InsertPhi) {
        // Since NewPhi may be used in other newly added Phis, replace all
        // uses of NewPhi with the definition coming from all predecessors
        // (DefP1), before deleting it.
        NewPhi->replaceAllUsesWith(DefP1);
        removeMemoryAccess(NewPhi);
        continue;
      }

      // Update Phi with new values for new predecessors and old value for
      // all other predecessors. Since AddedBlockSet and PrevBlockSet are
      // ordered sets, the order of entries in NewPhi is deterministic.
      for (auto *Pred : AddedBlockSet) {
        auto *LastDefForPred = LastDefAddedPred[Pred];
        for (int I = 0, E = EdgeCountMap[{Pred, BB}]; I < E; ++I)
          NewPhi->addIncoming(LastDefForPred, Pred);
      }
      for (auto *Pred : PrevBlockSet)
        for (int I = 0, E = EdgeCountMap[{Pred, BB}]; I < E; ++I)
          NewPhi->addIncoming(DefP1, Pred);
    }

    // Insert BB in the set of blocks that now have definition. We'll use
    // this to compute IDF and add Phis there next.
    BlocksToProcess.push_back(BB);

    // Get all blocks that used to dominate BB and no longer do after adding
    // AddedBlockSet, where PrevBlockSet are the previously known
    // predecessors.
    assert(DT.getNode(BB)->getIDom() && "BB does not have valid idom");
    BasicBlock *PrevIDom = FindNearestCommonDominator(PrevBlockSet);
    assert(PrevIDom && "Previous IDom should exist");
    BasicBlock *NewIDom = DT.getNode(BB)->getIDom()->getBlock();
    assert(NewIDom && "BB should have a new valid idom");
    assert(DT.dominates(NewIDom, PrevIDom) &&
           "New idom should dominate old idom");
    GetNoLongerDomBlocks(PrevIDom, NewIDom, BlocksWithDefsToReplace);
  }

  // Compute IDF and add Phis in all IDF blocks that do not have one.
  SmallVector<BasicBlock *, 32> IDFBlocks;
  if (!BlocksToProcess.empty()) {
    ForwardIDFCalculator IDFs(DT);
    SmallPtrSet<BasicBlock *, 16> DefiningBlocks(BlocksToProcess.begin(),
                                                 BlocksToProcess.end());
    IDFs.setDefiningBlocks(DefiningBlocks);
    IDFs.calculate(IDFBlocks);
    for (auto *BBIDF : IDFBlocks) {
      if (auto *IDFPhi = MSSA->getMemoryAccess(BBIDF)) {
        // Update existing Phi.
        // FIXME: some updates may be redundant, try to optimize and skip
        // some.
        for (unsigned I = 0, E = IDFPhi->getNumIncomingValues(); I < E; ++I)
          IDFPhi->setIncomingValue(I, GetLastDef(IDFPhi->getIncomingBlock(I)));
      } else {
        IDFPhi = MSSA->createMemoryPhi(BBIDF);
        for (auto &Pair : children<GraphDiffInvBBPair>({GD, BBIDF})) {
          BasicBlock *Pi = Pair.second;
          IDFPhi->addIncoming(GetLastDef(Pi), Pi);
        }
      }
    }
  }

  // Now for all defs in BlocksWithDefsToReplace, if there are uses they no
  // longer dominate, replace those with the closest dominating def.
  // This will also update optimized accesses, as they're also uses.
  for (auto *BlockWithDefsToReplace : BlocksWithDefsToReplace) {
    if (auto DefsList = MSSA->getWritableBlockDefs(BlockWithDefsToReplace)) {
      for (auto &DefToReplaceUses : *DefsList) {
        BasicBlock *DominatingBlock = DefToReplaceUses.getBlock();
        Value::use_iterator UI = DefToReplaceUses.use_begin(),
                            E = DefToReplaceUses.use_end();
        for (; UI != E;) {
          Use &U = *UI;
          ++UI;
          MemoryAccess *Usr = dyn_cast<MemoryAccess>(U.getUser());
          if (MemoryPhi *UsrPhi = dyn_cast<MemoryPhi>(Usr)) {
            BasicBlock *DominatedBlock = UsrPhi->getIncomingBlock(U);
            if (!DT.dominates(DominatingBlock, DominatedBlock))
              U.set(GetLastDef(DominatedBlock));
          } else {
            BasicBlock *DominatedBlock = Usr->getBlock();
            if (!DT.dominates(DominatingBlock, DominatedBlock)) {
              if (auto *DomBlPhi = MSSA->getMemoryAccess(DominatedBlock))
                U.set(DomBlPhi);
              else {
                auto *IDom = DT.getNode(DominatedBlock)->getIDom();
                assert(IDom && "Block must have a valid IDom.");
                U.set(GetLastDef(IDom->getBlock()));
              }
              cast<MemoryUseOrDef>(Usr)->resetOptimized();
            }
          }
        }
      }
    }
  }
}
// Move What before Where in the MemorySSA IR.
template <class WhereType>
void MemorySSAUpdater::moveTo(MemoryUseOrDef *What, BasicBlock *BB,
                              WhereType Where) {
  // Mark MemoryPhi users of What not to be optimized.
  for (auto *U : What->users())
    if (MemoryPhi *PhiUser = dyn_cast<MemoryPhi>(U))
      NonOptPhis.insert(PhiUser);

  // Replace all our users with our defining access.
  What->replaceAllUsesWith(What->getDefiningAccess());

  // Let MemorySSA take care of moving it around in the lists.
  MSSA->moveTo(What, BB, Where);

  // Now reinsert it into the IR and do whatever fixups needed.
  if (auto *MD = dyn_cast<MemoryDef>(What))
    insertDef(MD);
  else
    insertUse(cast<MemoryUse>(What));

  // Clear dangling pointers. We added all MemoryPhi users, but not all
  // of them are removed by fixupDefs().
  NonOptPhis.clear();
}

// Move What before Where in the MemorySSA IR.
void MemorySSAUpdater::moveBefore(MemoryUseOrDef *What, MemoryUseOrDef *Where) {
  moveTo(What, Where->getBlock(), Where->getIterator());
}

// Move What after Where in the MemorySSA IR.
void MemorySSAUpdater::moveAfter(MemoryUseOrDef *What, MemoryUseOrDef *Where) {
  moveTo(What, Where->getBlock(), ++Where->getIterator());
}
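// Move What to the given insertion place (beginning or end) of BB in the
// MemorySSA IR.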
void MemorySSAUpdater::moveToPlace(MemoryUseOrDef *What, BasicBlock *BB,
                                   MemorySSA::InsertionPlace Where) {
  return moveTo(What, BB, Where);
}
// All accesses in To used to be in From. Move to end and update access lists.
void MemorySSAUpdater::moveAllAccesses(BasicBlock *From, BasicBlock *To,
                                       Instruction *Start) {

  MemorySSA::AccessList *Accs = MSSA->getWritableBlockAccesses(From);
  if (!Accs)
    return;

  MemoryAccess *FirstInNew = nullptr;
  for (Instruction &I : make_range(Start->getIterator(), To->end()))
    if ((FirstInNew = MSSA->getMemoryAccess(&I)))
      break;
  if (!FirstInNew)
    return;

  auto *MUD = cast<MemoryUseOrDef>(FirstInNew);
  do {
    auto NextIt = ++MUD->getIterator();
    MemoryUseOrDef *NextMUD = (!Accs || NextIt == Accs->end())
                                  ? nullptr
                                  : cast<MemoryUseOrDef>(&*NextIt);
    MSSA->moveTo(MUD, To, MemorySSA::End);
    // Moving MUD from Accs in the moveTo above, may delete Accs, so we need
    // to retrieve it again.
    Accs = MSSA->getWritableBlockAccesses(From);
    MUD = NextMUD;
  } while (MUD);
}

void MemorySSAUpdater::moveAllAfterSpliceBlocks(BasicBlock *From,
                                                BasicBlock *To,
                                                Instruction *Start) {
  assert(MSSA->getBlockAccesses(To) == nullptr &&
         "To block is expected to be free of MemoryAccesses.");
  moveAllAccesses(From, To, Start);
  for (BasicBlock *Succ : successors(To))
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(Succ))
      MPhi->setIncomingBlock(MPhi->getBasicBlockIndex(From), To);
}

void MemorySSAUpdater::moveAllAfterMergeBlocks(BasicBlock *From, BasicBlock *To,
                                               Instruction *Start) {
  assert(From->getSinglePredecessor() == To &&
         "From block is expected to have a single predecessor (To).");
  moveAllAccesses(From, To, Start);
  for (BasicBlock *Succ : successors(From))
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(Succ))
      MPhi->setIncomingBlock(MPhi->getBasicBlockIndex(From), To);
}
/// If all arguments of a MemoryPHI are defined by the same incoming
/// argument, return that argument.
static MemoryAccess *onlySingleValue(MemoryPhi *MP) {
  MemoryAccess *MA = nullptr;

  for (auto &Arg : MP->operands()) {
    if (!MA)
      MA = cast<MemoryAccess>(Arg);
    else if (MA != Arg)
      return nullptr;
  }
  return MA;
}
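// The predecessors in Preds have been rerouted from Old to the new block New,
// which is now an immediate predecessor of Old. Split Old's MemoryPhi
// accordingly: either move it wholesale to New (when all predecessors moved),
// or create a new Phi in New collecting the incoming values for the moved
// edges.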
void MemorySSAUpdater::wireOldPredecessorsToNewImmediatePredecessor(
    BasicBlock *Old, BasicBlock *New, ArrayRef<BasicBlock *> Preds,
    bool IdenticalEdgesWereMerged) {
  assert(!MSSA->getWritableBlockAccesses(New) &&
         "Access list should be null for a new block.");
  MemoryPhi *Phi = MSSA->getMemoryAccess(Old);
  if (!Phi)
    return;
  if (Old->hasNPredecessors(1)) {
    assert(pred_size(New) == Preds.size() &&
           "Should have moved all predecessors.");
    MSSA->moveTo(Phi, New, MemorySSA::Beginning);
  } else {
    assert(!Preds.empty() && "Must be moving at least one predecessor to the "
                             "new immediate predecessor.");
    MemoryPhi *NewPhi = MSSA->createMemoryPhi(New);
    SmallPtrSet<BasicBlock *, 16> PredsSet(Preds.begin(), Preds.end());
    // Currently only support the case of removing a single incoming edge when
    // identical edges were not merged.
    if (!IdenticalEdgesWereMerged)
      assert(PredsSet.size() == Preds.size() &&
             "If identical edges were not merged, we cannot have duplicate "
             "blocks in the predecessors");
    Phi->unorderedDeleteIncomingIf([&](MemoryAccess *MA, BasicBlock *B) {
      if (PredsSet.count(B)) {
        NewPhi->addIncoming(MA, B);
        if (!IdenticalEdgesWereMerged)
          PredsSet.erase(B);
        return true;
      }
      return false;
    });
    Phi->addIncoming(NewPhi, New);
    if (onlySingleValue(NewPhi))
      removeMemoryAccess(NewPhi);
  }
}
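// Remove MA from MemorySSA, re-pointing its uses at its defining access (or,
// for a MemoryPhi, at its single incoming value). With OptimizePhis set, Phis
// that become trivial as a result are also removed.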
void MemorySSAUpdater::removeMemoryAccess(MemoryAccess *MA, bool OptimizePhis) {
  assert(!MSSA->isLiveOnEntryDef(MA) &&
         "Trying to remove the live on entry def");
  // We can only delete phi nodes if they have no uses, or we can replace all
  // uses with a single definition.
  MemoryAccess *NewDefTarget = nullptr;
  if (MemoryPhi *MP = dyn_cast<MemoryPhi>(MA)) {
    // Note that it is sufficient to know that all edges of the phi node have
    // the same argument. If they do, by the definition of dominance frontiers
    // (which we used to place this phi), that argument must dominate this phi,
    // and thus, must dominate the phi's uses, and so we will not hit the
    // assert below.
    NewDefTarget = onlySingleValue(MP);
    assert((NewDefTarget || MP->use_empty()) &&
           "We can't delete this memory phi");
  } else {
    NewDefTarget = cast<MemoryUseOrDef>(MA)->getDefiningAccess();
  }

  SmallSetVector<MemoryPhi *, 4> PhisToCheck;

  // Re-point the uses at our defining access
  if (!isa<MemoryUse>(MA) && !MA->use_empty()) {
    // Reset optimized on users of this store, and reset the uses.
    // A few notes:
    // 1. This is a slightly modified version of RAUW to avoid walking the
    // uses twice here.
    // 2. If we wanted to be complete, we would have to reset the optimized
    // flags on users of phi nodes if doing the below makes a phi node have
    // all the same arguments. Instead, we prefer users to removeMemoryAccess
    // those phi nodes, because doing it here would be N^3.
    if (MA->hasValueHandle())
      ValueHandleBase::ValueIsRAUWd(MA, NewDefTarget);
    // Note: We assume MemorySSA is not used in metadata since it's not really
    // part of the IR.

    while (!MA->use_empty()) {
      Use &U = *MA->use_begin();
      if (auto *MUD = dyn_cast<MemoryUseOrDef>(U.getUser()))
        MUD->resetOptimized();
      if (OptimizePhis)
        if (MemoryPhi *MP = dyn_cast<MemoryPhi>(U.getUser()))
          PhisToCheck.insert(MP);
      U.set(NewDefTarget);
    }
  }

  // The call below to erase will destroy MA, so we can't change the order we
  // are doing things here
  MSSA->removeFromLookups(MA);
  MSSA->removeFromLists(MA);

  // Optionally optimize Phi uses. This will recursively remove trivial phis.
  if (!PhisToCheck.empty()) {
    SmallVector<WeakVH, 16> PhisToOptimize{PhisToCheck.begin(),
                                           PhisToCheck.end()};
    PhisToCheck.clear();

    unsigned PhisSize = PhisToOptimize.size();
    while (PhisSize-- > 0)
      if (MemoryPhi *MP =
              cast_or_null<MemoryPhi>(PhisToOptimize.pop_back_val())) {
        auto OperRange = MP->operands();
        tryRemoveTrivialPhi(MP, OperRange);
      }
  }
}
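// Remove all MemorySSA information for the blocks in DeadBlocks, which are
// about to be deleted, including their incoming entries in the MemoryPhis of
// surviving successors.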
void MemorySSAUpdater::removeBlocks(
    const SmallPtrSetImpl<BasicBlock *> &DeadBlocks) {
  // First delete all uses of BB in MemoryPhis.
  for (BasicBlock *BB : DeadBlocks) {
    Instruction *TI = BB->getTerminator();
    assert(TI && "Basic block expected to have a terminator instruction");
    for (BasicBlock *Succ : successors(TI))
      if (!DeadBlocks.count(Succ))
        if (MemoryPhi *MP = MSSA->getMemoryAccess(Succ)) {
          MP->unorderedDeleteIncomingBlock(BB);
          if (MP->getNumIncomingValues() == 1)
            removeMemoryAccess(MP);
        }
    // Drop all references of all accesses in BB
    if (MemorySSA::AccessList *Acc = MSSA->getWritableBlockAccesses(BB))
      for (MemoryAccess &MA : *Acc)
        MA.dropAllReferences();
  }

  // Next, delete all memory accesses in each block
  for (BasicBlock *BB : DeadBlocks) {
    MemorySSA::AccessList *Acc = MSSA->getWritableBlockAccesses(BB);
    if (!Acc)
      continue;
    for (auto AB = Acc->begin(), AE = Acc->end(); AB != AE;) {
      MemoryAccess *MA = &*AB;
      ++AB;
      MSSA->removeFromLookups(MA);
      MSSA->removeFromLists(MA);
    }
  }
}
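// Create a new MemoryAccess for I with the given defining access and insert
// it into BB at the requested position.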
MemoryAccess *MemorySSAUpdater::createMemoryAccessInBB(
    Instruction *I, MemoryAccess *Definition, const BasicBlock *BB,
    MemorySSA::InsertionPlace Point) {
  MemoryUseOrDef *NewAccess = MSSA->createDefinedAccess(I, Definition);
  MSSA->insertIntoListsForBlock(NewAccess, BB, Point);
  return NewAccess;
}

MemoryUseOrDef *MemorySSAUpdater::createMemoryAccessBefore(
    Instruction *I, MemoryAccess *Definition, MemoryUseOrDef *InsertPt) {
  assert(I->getParent() == InsertPt->getBlock() &&
         "New and old access must be in the same block");
  MemoryUseOrDef *NewAccess = MSSA->createDefinedAccess(I, Definition);
  MSSA->insertIntoListsBefore(NewAccess, InsertPt->getBlock(),
                              InsertPt->getIterator());
  return NewAccess;
}

MemoryUseOrDef *MemorySSAUpdater::createMemoryAccessAfter(
    Instruction *I, MemoryAccess *Definition, MemoryAccess *InsertPt) {
  assert(I->getParent() == InsertPt->getBlock() &&
         "New and old access must be in the same block");
  MemoryUseOrDef *NewAccess = MSSA->createDefinedAccess(I, Definition);
  MSSA->insertIntoListsBefore(NewAccess, InsertPt->getBlock(),
                              ++InsertPt->getIterator());
  return NewAccess;
}