1 //===- RDFGraph.cpp -------------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // Target-independent, SSA-based data flow graph for register data flow (RDF).
11 #include "RDFGraph.h"
12 #include "RDFRegisters.h"
13 #include "llvm/ADT/BitVector.h"
14 #include "llvm/ADT/STLExtras.h"
15 #include "llvm/ADT/SetVector.h"
16 #include "llvm/CodeGen/MachineBasicBlock.h"
17 #include "llvm/CodeGen/MachineDominanceFrontier.h"
18 #include "llvm/CodeGen/MachineDominators.h"
19 #include "llvm/CodeGen/MachineFunction.h"
20 #include "llvm/CodeGen/MachineInstr.h"
21 #include "llvm/CodeGen/MachineOperand.h"
22 #include "llvm/CodeGen/MachineRegisterInfo.h"
23 #include "llvm/CodeGen/TargetInstrInfo.h"
24 #include "llvm/CodeGen/TargetLowering.h"
25 #include "llvm/CodeGen/TargetRegisterInfo.h"
26 #include "llvm/CodeGen/TargetSubtargetInfo.h"
27 #include "llvm/IR/Function.h"
28 #include "llvm/MC/LaneBitmask.h"
29 #include "llvm/MC/MCInstrDesc.h"
30 #include "llvm/MC/MCRegisterInfo.h"
31 #include "llvm/Support/Debug.h"
32 #include "llvm/Support/ErrorHandling.h"
33 #include "llvm/Support/raw_ostream.h"
34 #include <algorithm>
35 #include <cassert>
36 #include <cstdint>
37 #include <cstring>
38 #include <iterator>
39 #include <set>
40 #include <utility>
41 #include <vector>
43 using namespace llvm;
44 using namespace rdf;
46 // Printing functions. Have them here first, so that the rest of the code
47 // can use them.
48 namespace llvm {
49 namespace rdf {
51 raw_ostream &operator<< (raw_ostream &OS, const PrintLaneMaskOpt &P) {
52 if (!P.Mask.all())
53 OS << ':' << PrintLaneMask(P.Mask);
54 return OS;
57 template<>
58 raw_ostream &operator<< (raw_ostream &OS, const Print<RegisterRef> &P) {
59 auto &TRI = P.G.getTRI();
60 if (P.Obj.Reg > 0 && P.Obj.Reg < TRI.getNumRegs())
61 OS << TRI.getName(P.Obj.Reg);
62 else
63 OS << '#' << P.Obj.Reg;
64 OS << PrintLaneMaskOpt(P.Obj.Mask);
65 return OS;
68 template<>
69 raw_ostream &operator<< (raw_ostream &OS, const Print<NodeId> &P) {
70 auto NA = P.G.addr<NodeBase*>(P.Obj);
71 uint16_t Attrs = NA.Addr->getAttrs();
72 uint16_t Kind = NodeAttrs::kind(Attrs);
73 uint16_t Flags = NodeAttrs::flags(Attrs);
74 switch (NodeAttrs::type(Attrs)) {
75 case NodeAttrs::Code:
76 switch (Kind) {
77 case NodeAttrs::Func: OS << 'f'; break;
78 case NodeAttrs::Block: OS << 'b'; break;
79 case NodeAttrs::Stmt: OS << 's'; break;
80 case NodeAttrs::Phi: OS << 'p'; break;
81 default: OS << "c?"; break;
83 break;
84 case NodeAttrs::Ref:
85 if (Flags & NodeAttrs::Undef)
86 OS << '/';
87 if (Flags & NodeAttrs::Dead)
88 OS << '\\';
89 if (Flags & NodeAttrs::Preserving)
90 OS << '+';
91 if (Flags & NodeAttrs::Clobbering)
92 OS << '~';
93 switch (Kind) {
94 case NodeAttrs::Use: OS << 'u'; break;
95 case NodeAttrs::Def: OS << 'd'; break;
96 case NodeAttrs::Block: OS << 'b'; break;
97 default: OS << "r?"; break;
99 break;
100 default:
101 OS << '?';
102 break;
104 OS << P.Obj;
105 if (Flags & NodeAttrs::Shadow)
106 OS << '"';
107 return OS;
110 static void printRefHeader(raw_ostream &OS, const NodeAddr<RefNode*> RA,
111 const DataFlowGraph &G) {
112 OS << Print<NodeId>(RA.Id, G) << '<'
113 << Print<RegisterRef>(RA.Addr->getRegRef(G), G) << '>';
114 if (RA.Addr->getFlags() & NodeAttrs::Fixed)
115 OS << '!';
118 template<>
119 raw_ostream &operator<< (raw_ostream &OS, const Print<NodeAddr<DefNode*>> &P) {
120 printRefHeader(OS, P.Obj, P.G);
121 OS << '(';
122 if (NodeId N = P.Obj.Addr->getReachingDef())
123 OS << Print<NodeId>(N, P.G);
124 OS << ',';
125 if (NodeId N = P.Obj.Addr->getReachedDef())
126 OS << Print<NodeId>(N, P.G);
127 OS << ',';
128 if (NodeId N = P.Obj.Addr->getReachedUse())
129 OS << Print<NodeId>(N, P.G);
130 OS << "):";
131 if (NodeId N = P.Obj.Addr->getSibling())
132 OS << Print<NodeId>(N, P.G);
133 return OS;
136 template<>
137 raw_ostream &operator<< (raw_ostream &OS, const Print<NodeAddr<UseNode*>> &P) {
138 printRefHeader(OS, P.Obj, P.G);
139 OS << '(';
140 if (NodeId N = P.Obj.Addr->getReachingDef())
141 OS << Print<NodeId>(N, P.G);
142 OS << "):";
143 if (NodeId N = P.Obj.Addr->getSibling())
144 OS << Print<NodeId>(N, P.G);
145 return OS;
148 template<>
149 raw_ostream &operator<< (raw_ostream &OS,
150 const Print<NodeAddr<PhiUseNode*>> &P) {
151 printRefHeader(OS, P.Obj, P.G);
152 OS << '(';
153 if (NodeId N = P.Obj.Addr->getReachingDef())
154 OS << Print<NodeId>(N, P.G);
155 OS << ',';
156 if (NodeId N = P.Obj.Addr->getPredecessor())
157 OS << Print<NodeId>(N, P.G);
158 OS << "):";
159 if (NodeId N = P.Obj.Addr->getSibling())
160 OS << Print<NodeId>(N, P.G);
161 return OS;
164 template<>
165 raw_ostream &operator<< (raw_ostream &OS, const Print<NodeAddr<RefNode*>> &P) {
166 switch (P.Obj.Addr->getKind()) {
167 case NodeAttrs::Def:
168 OS << PrintNode<DefNode*>(P.Obj, P.G);
169 break;
170 case NodeAttrs::Use:
171 if (P.Obj.Addr->getFlags() & NodeAttrs::PhiRef)
172 OS << PrintNode<PhiUseNode*>(P.Obj, P.G);
173 else
174 OS << PrintNode<UseNode*>(P.Obj, P.G);
175 break;
177 return OS;
180 template<>
181 raw_ostream &operator<< (raw_ostream &OS, const Print<NodeList> &P) {
182 unsigned N = P.Obj.size();
183 for (auto I : P.Obj) {
184 OS << Print<NodeId>(I.Id, P.G);
185 if (--N)
186 OS << ' ';
188 return OS;
191 template<>
192 raw_ostream &operator<< (raw_ostream &OS, const Print<NodeSet> &P) {
193 unsigned N = P.Obj.size();
194 for (auto I : P.Obj) {
195 OS << Print<NodeId>(I, P.G);
196 if (--N)
197 OS << ' ';
199 return OS;
202 namespace {
204 template <typename T>
205 struct PrintListV {
206 PrintListV(const NodeList &L, const DataFlowGraph &G) : List(L), G(G) {}
208 using Type = T;
209 const NodeList &List;
210 const DataFlowGraph &G;
213 template <typename T>
214 raw_ostream &operator<< (raw_ostream &OS, const PrintListV<T> &P) {
215 unsigned N = P.List.size();
216 for (NodeAddr<T> A : P.List) {
217 OS << PrintNode<T>(A, P.G);
218 if (--N)
219 OS << ", ";
221 return OS;
224 } // end anonymous namespace
226 template<>
227 raw_ostream &operator<< (raw_ostream &OS, const Print<NodeAddr<PhiNode*>> &P) {
228 OS << Print<NodeId>(P.Obj.Id, P.G) << ": phi ["
229 << PrintListV<RefNode*>(P.Obj.Addr->members(P.G), P.G) << ']';
230 return OS;
233 template<>
234 raw_ostream &operator<< (raw_ostream &OS,
235 const Print<NodeAddr<StmtNode*>> &P) {
236 const MachineInstr &MI = *P.Obj.Addr->getCode();
237 unsigned Opc = MI.getOpcode();
238 OS << Print<NodeId>(P.Obj.Id, P.G) << ": " << P.G.getTII().getName(Opc);
239 // Print the target for calls and branches (for readability).
240 if (MI.isCall() || MI.isBranch()) {
241 MachineInstr::const_mop_iterator T =
242 llvm::find_if(MI.operands(),
243 [] (const MachineOperand &Op) -> bool {
244 return Op.isMBB() || Op.isGlobal() || Op.isSymbol();
246 if (T != MI.operands_end()) {
247 OS << ' ';
248 if (T->isMBB())
249 OS << printMBBReference(*T->getMBB());
250 else if (T->isGlobal())
251 OS << T->getGlobal()->getName();
252 else if (T->isSymbol())
253 OS << T->getSymbolName();
256 OS << " [" << PrintListV<RefNode*>(P.Obj.Addr->members(P.G), P.G) << ']';
257 return OS;
260 template<>
261 raw_ostream &operator<< (raw_ostream &OS,
262 const Print<NodeAddr<InstrNode*>> &P) {
263 switch (P.Obj.Addr->getKind()) {
264 case NodeAttrs::Phi:
265 OS << PrintNode<PhiNode*>(P.Obj, P.G);
266 break;
267 case NodeAttrs::Stmt:
268 OS << PrintNode<StmtNode*>(P.Obj, P.G);
269 break;
270 default:
271 OS << "instr? " << Print<NodeId>(P.Obj.Id, P.G);
272 break;
274 return OS;
277 template<>
278 raw_ostream &operator<< (raw_ostream &OS,
279 const Print<NodeAddr<BlockNode*>> &P) {
280 MachineBasicBlock *BB = P.Obj.Addr->getCode();
281 unsigned NP = BB->pred_size();
282 std::vector<int> Ns;
283 auto PrintBBs = [&OS] (std::vector<int> Ns) -> void {
284 unsigned N = Ns.size();
285 for (int I : Ns) {
286 OS << "%bb." << I;
287 if (--N)
288 OS << ", ";
292 OS << Print<NodeId>(P.Obj.Id, P.G) << ": --- " << printMBBReference(*BB)
293 << " --- preds(" << NP << "): ";
294 for (MachineBasicBlock *B : BB->predecessors())
295 Ns.push_back(B->getNumber());
296 PrintBBs(Ns);
298 unsigned NS = BB->succ_size();
299 OS << " succs(" << NS << "): ";
300 Ns.clear();
301 for (MachineBasicBlock *B : BB->successors())
302 Ns.push_back(B->getNumber());
303 PrintBBs(Ns);
304 OS << '\n';
306 for (auto I : P.Obj.Addr->members(P.G))
307 OS << PrintNode<InstrNode*>(I, P.G) << '\n';
308 return OS;
311 template<>
312 raw_ostream &operator<< (raw_ostream &OS,
313 const Print<NodeAddr<FuncNode*>> &P) {
314 OS << "DFG dump:[\n" << Print<NodeId>(P.Obj.Id, P.G) << ": Function: "
315 << P.Obj.Addr->getCode()->getName() << '\n';
316 for (auto I : P.Obj.Addr->members(P.G))
317 OS << PrintNode<BlockNode*>(I, P.G) << '\n';
318 OS << "]\n";
319 return OS;
322 template<>
323 raw_ostream &operator<< (raw_ostream &OS, const Print<RegisterSet> &P) {
324 OS << '{';
325 for (auto I : P.Obj)
326 OS << ' ' << Print<RegisterRef>(I, P.G);
327 OS << " }";
328 return OS;
331 template<>
332 raw_ostream &operator<< (raw_ostream &OS, const Print<RegisterAggr> &P) {
333 P.Obj.print(OS);
334 return OS;
337 template<>
338 raw_ostream &operator<< (raw_ostream &OS,
339 const Print<DataFlowGraph::DefStack> &P) {
340 for (auto I = P.Obj.top(), E = P.Obj.bottom(); I != E; ) {
341 OS << Print<NodeId>(I->Id, P.G)
342 << '<' << Print<RegisterRef>(I->Addr->getRegRef(P.G), P.G) << '>';
343 I.down();
344 if (I != E)
345 OS << ' ';
347 return OS;
350 } // end namespace rdf
351 } // end namespace llvm
353 // Node allocation functions.
355 // Node allocator is like a slab memory allocator: it allocates blocks of
356 // memory in sizes that are multiples of the size of a node. Each block has
357 // the same size. Nodes are allocated from the currently active block, and
358 // when it becomes full, a new one is created.
359 // The mapping scheme between a node id and its location (the block and the
360 // position within that block) is described in the header file.
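// Illustrative sketch only (the authoritative definition is in RDFGraph.h):
// with the low BitsPerIndex bits holding a node's position within its block,
// an id can be formed roughly as
//   NodeId Id = (BlockNumber << BitsPerIndex) | IndexWithinBlock;
// with the value 0 reserved to mean "no node". The exact packing (including
// how id 0 is avoided for the first node) is the one described in the header.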
362 void NodeAllocator::startNewBlock() {
363 void *T = MemPool.Allocate(NodesPerBlock*NodeMemSize, NodeMemSize);
364 char *P = static_cast<char*>(T);
365 Blocks.push_back(P);
366 // Check if the block index is still within the allowed range, i.e. less
367 // than 2^N, where N is the number of bits in NodeId for the block index.
368 // BitsPerIndex is the number of bits per node index.
369 assert((Blocks.size() < ((size_t)1 << (8*sizeof(NodeId)-BitsPerIndex))) &&
370 "Out of bits for block index");
371 ActiveEnd = P;
374 bool NodeAllocator::needNewBlock() {
375 if (Blocks.empty())
376 return true;
378 char *ActiveBegin = Blocks.back();
379 uint32_t Index = (ActiveEnd-ActiveBegin)/NodeMemSize;
380 return Index >= NodesPerBlock;
383 NodeAddr<NodeBase*> NodeAllocator::New() {
384 if (needNewBlock())
385 startNewBlock();
387 uint32_t ActiveB = Blocks.size()-1;
388 uint32_t Index = (ActiveEnd - Blocks[ActiveB])/NodeMemSize;
389 NodeAddr<NodeBase*> NA = { reinterpret_cast<NodeBase*>(ActiveEnd),
390 makeId(ActiveB, Index) };
391 ActiveEnd += NodeMemSize;
392 return NA;
395 NodeId NodeAllocator::id(const NodeBase *P) const {
396 uintptr_t A = reinterpret_cast<uintptr_t>(P);
397 for (unsigned i = 0, n = Blocks.size(); i != n; ++i) {
398 uintptr_t B = reinterpret_cast<uintptr_t>(Blocks[i]);
399 if (A < B || A >= B + NodesPerBlock*NodeMemSize)
400 continue;
401 uint32_t Idx = (A-B)/NodeMemSize;
402 return makeId(i, Idx);
404 llvm_unreachable("Invalid node address");
407 void NodeAllocator::clear() {
408 MemPool.Reset();
409 Blocks.clear();
410 ActiveEnd = nullptr;
413 // Insert node NA after "this" in the circular chain.
414 void NodeBase::append(NodeAddr<NodeBase*> NA) {
415 NodeId Nx = Next;
416 // If NA is already "next", do nothing.
417 if (Next != NA.Id) {
418 Next = NA.Id;
419 NA.Addr->Next = Nx;
423 // Fundamental node manipulator functions.
425 // Obtain the register reference from a reference node.
426 RegisterRef RefNode::getRegRef(const DataFlowGraph &G) const {
427 assert(NodeAttrs::type(Attrs) == NodeAttrs::Ref);
428 if (NodeAttrs::flags(Attrs) & NodeAttrs::PhiRef)
429 return G.unpack(Ref.PR);
430 assert(Ref.Op != nullptr);
431 return G.makeRegRef(*Ref.Op);
434 // Set the register reference in the reference node directly (for references
435 // in phi nodes).
436 void RefNode::setRegRef(RegisterRef RR, DataFlowGraph &G) {
437 assert(NodeAttrs::type(Attrs) == NodeAttrs::Ref);
438 assert(NodeAttrs::flags(Attrs) & NodeAttrs::PhiRef);
439 Ref.PR = G.pack(RR);
442 // Set the register reference in the reference node based on a machine
443 // operand (for references in statement nodes).
444 void RefNode::setRegRef(MachineOperand *Op, DataFlowGraph &G) {
445 assert(NodeAttrs::type(Attrs) == NodeAttrs::Ref);
446 assert(!(NodeAttrs::flags(Attrs) & NodeAttrs::PhiRef));
447 (void)G;
448 Ref.Op = Op;
451 // Get the owner of a given reference node.
452 NodeAddr<NodeBase*> RefNode::getOwner(const DataFlowGraph &G) {
453 NodeAddr<NodeBase*> NA = G.addr<NodeBase*>(getNext());
455 while (NA.Addr != this) {
456 if (NA.Addr->getType() == NodeAttrs::Code)
457 return NA;
458 NA = G.addr<NodeBase*>(NA.Addr->getNext());
460 llvm_unreachable("No owner in circular list");
463 // Connect the def node to the reaching def node.
464 void DefNode::linkToDef(NodeId Self, NodeAddr<DefNode*> DA) {
465 Ref.RD = DA.Id;
466 Ref.Sib = DA.Addr->getReachedDef();
467 DA.Addr->setReachedDef(Self);
470 // Connect the use node to the reaching def node.
471 void UseNode::linkToDef(NodeId Self, NodeAddr<DefNode*> DA) {
472 Ref.RD = DA.Id;
473 Ref.Sib = DA.Addr->getReachedUse();
474 DA.Addr->setReachedUse(Self);
477 // Get the first member of the code node.
478 NodeAddr<NodeBase*> CodeNode::getFirstMember(const DataFlowGraph &G) const {
479 if (Code.FirstM == 0)
480 return NodeAddr<NodeBase*>();
481 return G.addr<NodeBase*>(Code.FirstM);
484 // Get the last member of the code node.
485 NodeAddr<NodeBase*> CodeNode::getLastMember(const DataFlowGraph &G) const {
486 if (Code.LastM == 0)
487 return NodeAddr<NodeBase*>();
488 return G.addr<NodeBase*>(Code.LastM);
491 // Add node NA at the end of the member list of the given code node.
492 void CodeNode::addMember(NodeAddr<NodeBase*> NA, const DataFlowGraph &G) {
493 NodeAddr<NodeBase*> ML = getLastMember(G);
494 if (ML.Id != 0) {
495 ML.Addr->append(NA);
496 } else {
497 Code.FirstM = NA.Id;
498 NodeId Self = G.id(this);
499 NA.Addr->setNext(Self);
501 Code.LastM = NA.Id;
504 // Add node NA after member node MA in the given code node.
505 void CodeNode::addMemberAfter(NodeAddr<NodeBase*> MA, NodeAddr<NodeBase*> NA,
506 const DataFlowGraph &G) {
507 MA.Addr->append(NA);
508 if (Code.LastM == MA.Id)
509 Code.LastM = NA.Id;
512 // Remove member node NA from the given code node.
513 void CodeNode::removeMember(NodeAddr<NodeBase*> NA, const DataFlowGraph &G) {
514 NodeAddr<NodeBase*> MA = getFirstMember(G);
515 assert(MA.Id != 0);
517 // Special handling if the member to remove is the first member.
518 if (MA.Id == NA.Id) {
519 if (Code.LastM == MA.Id) {
520 // If it is the only member, set both first and last to 0.
521 Code.FirstM = Code.LastM = 0;
522 } else {
523 // Otherwise, advance the first member.
524 Code.FirstM = MA.Addr->getNext();
526 return;
529 while (MA.Addr != this) {
530 NodeId MX = MA.Addr->getNext();
531 if (MX == NA.Id) {
532 MA.Addr->setNext(NA.Addr->getNext());
533 // If the member to remove happens to be the last one, update the
534 // LastM indicator.
535 if (Code.LastM == NA.Id)
536 Code.LastM = MA.Id;
537 return;
539 MA = G.addr<NodeBase*>(MX);
541 llvm_unreachable("No such member");
544 // Return the list of all members of the code node.
545 NodeList CodeNode::members(const DataFlowGraph &G) const {
546 static auto True = [] (NodeAddr<NodeBase*>) -> bool { return true; };
547 return members_if(True, G);
550 // Return the owner of the given instr node.
551 NodeAddr<NodeBase*> InstrNode::getOwner(const DataFlowGraph &G) {
552 NodeAddr<NodeBase*> NA = G.addr<NodeBase*>(getNext());
554 while (NA.Addr != this) {
555 assert(NA.Addr->getType() == NodeAttrs::Code);
556 if (NA.Addr->getKind() == NodeAttrs::Block)
557 return NA;
558 NA = G.addr<NodeBase*>(NA.Addr->getNext());
560 llvm_unreachable("No owner in circular list");
563 // Add the phi node PA to the given block node.
564 void BlockNode::addPhi(NodeAddr<PhiNode*> PA, const DataFlowGraph &G) {
565 NodeAddr<NodeBase*> M = getFirstMember(G);
566 if (M.Id == 0) {
567 addMember(PA, G);
568 return;
571 assert(M.Addr->getType() == NodeAttrs::Code);
572 if (M.Addr->getKind() == NodeAttrs::Stmt) {
573 // If the first member of the block is a statement, insert the phi as
574 // the first member.
575 Code.FirstM = PA.Id;
576 PA.Addr->setNext(M.Id);
577 } else {
578 // If the first member is a phi, find the last phi, and append PA to it.
579 assert(M.Addr->getKind() == NodeAttrs::Phi);
580 NodeAddr<NodeBase*> MN = M;
581 do {
582 M = MN;
583 MN = G.addr<NodeBase*>(M.Addr->getNext());
584 assert(MN.Addr->getType() == NodeAttrs::Code);
585 } while (MN.Addr->getKind() == NodeAttrs::Phi);
587 // M is the last phi.
588 addMemberAfter(M, PA, G);
592 // Find the block node corresponding to the machine basic block BB in the
593 // given func node.
594 NodeAddr<BlockNode*> FuncNode::findBlock(const MachineBasicBlock *BB,
595 const DataFlowGraph &G) const {
596 auto EqBB = [BB] (NodeAddr<NodeBase*> NA) -> bool {
597 return NodeAddr<BlockNode*>(NA).Addr->getCode() == BB;
599 NodeList Ms = members_if(EqBB, G);
600 if (!Ms.empty())
601 return Ms[0];
602 return NodeAddr<BlockNode*>();
605 // Get the block node for the entry block in the given function.
606 NodeAddr<BlockNode*> FuncNode::getEntryBlock(const DataFlowGraph &G) {
607 MachineBasicBlock *EntryB = &getCode()->front();
608 return findBlock(EntryB, G);
611 // Target operand information.
614 // For a given instruction, check if there are any bits of RR that can remain
615 // unchanged across this def.
616 bool TargetOperandInfo::isPreserving(const MachineInstr &In, unsigned OpNum)
617 const {
618 return TII.isPredicated(In);
621 // Check if the definition of RR produces an unspecified value.
622 bool TargetOperandInfo::isClobbering(const MachineInstr &In, unsigned OpNum)
623 const {
624 const MachineOperand &Op = In.getOperand(OpNum);
625 if (Op.isRegMask())
626 return true;
627 assert(Op.isReg());
628 if (In.isCall())
629 if (Op.isDef() && Op.isDead())
630 return true;
631 return false;
634 // Check if the given instruction specifically requires the register in operand OpNum (i.e. whether it is a "fixed" register).
635 bool TargetOperandInfo::isFixedReg(const MachineInstr &In, unsigned OpNum)
636 const {
637 if (In.isCall() || In.isReturn() || In.isInlineAsm())
638 return true;
639 // Check for a tail call.
640 if (In.isBranch())
641 for (const MachineOperand &O : In.operands())
642 if (O.isGlobal() || O.isSymbol())
643 return true;
645 const MCInstrDesc &D = In.getDesc();
646 if (!D.getImplicitDefs() && !D.getImplicitUses())
647 return false;
648 const MachineOperand &Op = In.getOperand(OpNum);
649 // If there is a sub-register, treat the operand as non-fixed. Currently,
650 // fixed registers are those that are listed in the descriptor as implicit
651 // uses or defs, and those lists do not allow sub-registers.
652 if (Op.getSubReg() != 0)
653 return false;
654 RegisterId Reg = Op.getReg();
655 const MCPhysReg *ImpR = Op.isDef() ? D.getImplicitDefs()
656 : D.getImplicitUses();
657 if (!ImpR)
658 return false;
659 while (*ImpR)
660 if (*ImpR++ == Reg)
661 return true;
662 return false;
666 // The data flow graph construction.
669 DataFlowGraph::DataFlowGraph(MachineFunction &mf, const TargetInstrInfo &tii,
670 const TargetRegisterInfo &tri, const MachineDominatorTree &mdt,
671 const MachineDominanceFrontier &mdf, const TargetOperandInfo &toi)
672 : MF(mf), TII(tii), TRI(tri), PRI(tri, mf), MDT(mdt), MDF(mdf), TOI(toi),
673 LiveIns(PRI) {
676 // The implementation of the definition stack.
677 // Each register reference has its own definition stack. In particular,
678 // the register references "Reg" and "Reg:subreg" will each have their
679 // own definition stacks.
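// For example (illustration only): a def of a 64-bit register R and a def of
// one of R's 32-bit subregisters land on two different stacks, keyed by their
// respective register ids. A def is also pushed onto the stacks of all aliases
// of its register (see pushDefs/pushClobbers), and the exact aliasing is
// checked later, when the stacks are traversed in linkRefUp.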
681 // Construct a stack iterator.
682 DataFlowGraph::DefStack::Iterator::Iterator(const DataFlowGraph::DefStack &S,
683 bool Top) : DS(S) {
684 if (!Top) {
685 // Initialize to bottom.
686 Pos = 0;
687 return;
689 // Initialize to the top, i.e. top-most non-delimiter (or 0, if empty).
690 Pos = DS.Stack.size();
691 while (Pos > 0 && DS.isDelimiter(DS.Stack[Pos-1]))
692 Pos--;
695 // Return the size of the stack, including block delimiters.
696 unsigned DataFlowGraph::DefStack::size() const {
697 unsigned S = 0;
698 for (auto I = top(), E = bottom(); I != E; I.down())
699 S++;
700 return S;
703 // Remove the top entry from the stack. Remove all intervening delimiters
704 // so that after this, the stack is either empty, or the top of the stack
705 // is a non-delimiter.
706 void DataFlowGraph::DefStack::pop() {
707 assert(!empty());
708 unsigned P = nextDown(Stack.size());
709 Stack.resize(P);
712 // Push a delimiter for block node N on the stack.
713 void DataFlowGraph::DefStack::start_block(NodeId N) {
714 assert(N != 0);
715 Stack.push_back(NodeAddr<DefNode*>(nullptr, N));
718 // Remove all nodes from the top of the stack, until the delimiter for
719 // block node N is encountered. Remove the delimiter as well. In effect,
720 // this will remove from the stack all definitions from block N.
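// For example, if the stack is (bottom to top) [delim(B1) d1 d2 delim(B2) d3],
// then clear_block(B2) leaves [delim(B1) d1 d2], and a subsequent
// clear_block(B1) leaves an empty stack.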
721 void DataFlowGraph::DefStack::clear_block(NodeId N) {
722 assert(N != 0);
723 unsigned P = Stack.size();
724 while (P > 0) {
725 bool Found = isDelimiter(Stack[P-1], N);
726 P--;
727 if (Found)
728 break;
730 // This will also remove the delimiter, if found.
731 Stack.resize(P);
734 // Move the stack iterator up by one.
735 unsigned DataFlowGraph::DefStack::nextUp(unsigned P) const {
736 // Get the next valid position after P (skipping all delimiters).
737 // The input position P does not have to point to a non-delimiter.
738 unsigned SS = Stack.size();
739 bool IsDelim;
740 assert(P < SS);
741 do {
742 P++;
743 IsDelim = isDelimiter(Stack[P-1]);
744 } while (P < SS && IsDelim);
745 assert(!IsDelim);
746 return P;
749 // Move the stack iterator down by one.
750 unsigned DataFlowGraph::DefStack::nextDown(unsigned P) const {
751 // Get the preceding valid position before P (skipping all delimiters).
752 // The input position P does not have to point to a non-delimiter.
753 assert(P > 0 && P <= Stack.size());
754 bool IsDelim = isDelimiter(Stack[P-1]);
755 do {
756 if (--P == 0)
757 break;
758 IsDelim = isDelimiter(Stack[P-1]);
759 } while (P > 0 && IsDelim);
760 assert(!IsDelim);
761 return P;
764 // Register information.
766 RegisterSet DataFlowGraph::getLandingPadLiveIns() const {
767 RegisterSet LR;
768 const Function &F = MF.getFunction();
769 const Constant *PF = F.hasPersonalityFn() ? F.getPersonalityFn()
770 : nullptr;
771 const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
772 if (RegisterId R = TLI.getExceptionPointerRegister(PF))
773 LR.insert(RegisterRef(R));
774 if (RegisterId R = TLI.getExceptionSelectorRegister(PF))
775 LR.insert(RegisterRef(R));
776 return LR;
779 // Node management functions.
781 // Get the pointer to the node with the id N.
782 NodeBase *DataFlowGraph::ptr(NodeId N) const {
783 if (N == 0)
784 return nullptr;
785 return Memory.ptr(N);
788 // Get the id of the node at the address P.
789 NodeId DataFlowGraph::id(const NodeBase *P) const {
790 if (P == nullptr)
791 return 0;
792 return Memory.id(P);
795 // Allocate a new node and set the attributes to Attrs.
796 NodeAddr<NodeBase*> DataFlowGraph::newNode(uint16_t Attrs) {
797 NodeAddr<NodeBase*> P = Memory.New();
798 P.Addr->init();
799 P.Addr->setAttrs(Attrs);
800 return P;
803 // Make a copy of the given node B, except for the data-flow links, which
804 // are set to 0.
805 NodeAddr<NodeBase*> DataFlowGraph::cloneNode(const NodeAddr<NodeBase*> B) {
806 NodeAddr<NodeBase*> NA = newNode(0);
807 memcpy(NA.Addr, B.Addr, sizeof(NodeBase));
808 // Ref nodes need to have the data-flow links reset.
809 if (NA.Addr->getType() == NodeAttrs::Ref) {
810 NodeAddr<RefNode*> RA = NA;
811 RA.Addr->setReachingDef(0);
812 RA.Addr->setSibling(0);
813 if (NA.Addr->getKind() == NodeAttrs::Def) {
814 NodeAddr<DefNode*> DA = NA;
815 DA.Addr->setReachedDef(0);
816 DA.Addr->setReachedUse(0);
819 return NA;
822 // Allocation routines for specific node types/kinds.
824 NodeAddr<UseNode*> DataFlowGraph::newUse(NodeAddr<InstrNode*> Owner,
825 MachineOperand &Op, uint16_t Flags) {
826 NodeAddr<UseNode*> UA = newNode(NodeAttrs::Ref | NodeAttrs::Use | Flags);
827 UA.Addr->setRegRef(&Op, *this);
828 return UA;
831 NodeAddr<PhiUseNode*> DataFlowGraph::newPhiUse(NodeAddr<PhiNode*> Owner,
832 RegisterRef RR, NodeAddr<BlockNode*> PredB, uint16_t Flags) {
833 NodeAddr<PhiUseNode*> PUA = newNode(NodeAttrs::Ref | NodeAttrs::Use | Flags);
834 assert(Flags & NodeAttrs::PhiRef);
835 PUA.Addr->setRegRef(RR, *this);
836 PUA.Addr->setPredecessor(PredB.Id);
837 return PUA;
840 NodeAddr<DefNode*> DataFlowGraph::newDef(NodeAddr<InstrNode*> Owner,
841 MachineOperand &Op, uint16_t Flags) {
842 NodeAddr<DefNode*> DA = newNode(NodeAttrs::Ref | NodeAttrs::Def | Flags);
843 DA.Addr->setRegRef(&Op, *this);
844 return DA;
847 NodeAddr<DefNode*> DataFlowGraph::newDef(NodeAddr<InstrNode*> Owner,
848 RegisterRef RR, uint16_t Flags) {
849 NodeAddr<DefNode*> DA = newNode(NodeAttrs::Ref | NodeAttrs::Def | Flags);
850 assert(Flags & NodeAttrs::PhiRef);
851 DA.Addr->setRegRef(RR, *this);
852 return DA;
855 NodeAddr<PhiNode*> DataFlowGraph::newPhi(NodeAddr<BlockNode*> Owner) {
856 NodeAddr<PhiNode*> PA = newNode(NodeAttrs::Code | NodeAttrs::Phi);
857 Owner.Addr->addPhi(PA, *this);
858 return PA;
861 NodeAddr<StmtNode*> DataFlowGraph::newStmt(NodeAddr<BlockNode*> Owner,
862 MachineInstr *MI) {
863 NodeAddr<StmtNode*> SA = newNode(NodeAttrs::Code | NodeAttrs::Stmt);
864 SA.Addr->setCode(MI);
865 Owner.Addr->addMember(SA, *this);
866 return SA;
869 NodeAddr<BlockNode*> DataFlowGraph::newBlock(NodeAddr<FuncNode*> Owner,
870 MachineBasicBlock *BB) {
871 NodeAddr<BlockNode*> BA = newNode(NodeAttrs::Code | NodeAttrs::Block);
872 BA.Addr->setCode(BB);
873 Owner.Addr->addMember(BA, *this);
874 return BA;
877 NodeAddr<FuncNode*> DataFlowGraph::newFunc(MachineFunction *MF) {
878 NodeAddr<FuncNode*> FA = newNode(NodeAttrs::Code | NodeAttrs::Func);
879 FA.Addr->setCode(MF);
880 return FA;
883 // Build the data flow graph.
884 void DataFlowGraph::build(unsigned Options) {
885 reset();
886 Func = newFunc(&MF);
888 if (MF.empty())
889 return;
891 for (MachineBasicBlock &B : MF) {
892 NodeAddr<BlockNode*> BA = newBlock(Func, &B);
893 BlockNodes.insert(std::make_pair(&B, BA));
894 for (MachineInstr &I : B) {
895 if (I.isDebugInstr())
896 continue;
897 buildStmt(BA, I);
901 NodeAddr<BlockNode*> EA = Func.Addr->getEntryBlock(*this);
902 NodeList Blocks = Func.Addr->members(*this);
904 // Collect information about block references.
905 RegisterSet AllRefs;
906 for (NodeAddr<BlockNode*> BA : Blocks)
907 for (NodeAddr<InstrNode*> IA : BA.Addr->members(*this))
908 for (NodeAddr<RefNode*> RA : IA.Addr->members(*this))
909 AllRefs.insert(RA.Addr->getRegRef(*this));
911 // Collect function live-ins and entry block live-ins.
912 MachineRegisterInfo &MRI = MF.getRegInfo();
913 MachineBasicBlock &EntryB = *EA.Addr->getCode();
914 assert(EntryB.pred_empty() && "Function entry block has predecessors");
915 for (std::pair<unsigned,unsigned> P : MRI.liveins())
916 LiveIns.insert(RegisterRef(P.first));
917 if (MRI.tracksLiveness()) {
918 for (auto I : EntryB.liveins())
919 LiveIns.insert(RegisterRef(I.PhysReg, I.LaneMask));
922 // Add function-entry phi nodes for the live-in registers.
923 //for (std::pair<RegisterId,LaneBitmask> P : LiveIns) {
924 for (auto I = LiveIns.rr_begin(), E = LiveIns.rr_end(); I != E; ++I) {
925 RegisterRef RR = *I;
926 NodeAddr<PhiNode*> PA = newPhi(EA);
927 uint16_t PhiFlags = NodeAttrs::PhiRef | NodeAttrs::Preserving;
928 NodeAddr<DefNode*> DA = newDef(PA, RR, PhiFlags);
929 PA.Addr->addMember(DA, *this);
932 // Add phis for landing pads.
933 // Landing pads, unlike usual basic blocks, are not entered through
934 // branches in the program or fall-throughs from other blocks. They
935 // are entered from the exception handling runtime, and the target's ABI
936 // may define certain registers as defined on entry to such a block.
937 RegisterSet EHRegs = getLandingPadLiveIns();
938 if (!EHRegs.empty()) {
939 for (NodeAddr<BlockNode*> BA : Blocks) {
940 const MachineBasicBlock &B = *BA.Addr->getCode();
941 if (!B.isEHPad())
942 continue;
944 // Prepare a list of NodeIds of the block's predecessors.
945 NodeList Preds;
946 for (MachineBasicBlock *PB : B.predecessors())
947 Preds.push_back(findBlock(PB));
949 // Build phi nodes for each live-in.
950 for (RegisterRef RR : EHRegs) {
951 NodeAddr<PhiNode*> PA = newPhi(BA);
952 uint16_t PhiFlags = NodeAttrs::PhiRef | NodeAttrs::Preserving;
953 // Add def:
954 NodeAddr<DefNode*> DA = newDef(PA, RR, PhiFlags);
955 PA.Addr->addMember(DA, *this);
956 // Add uses (no reaching defs for phi uses):
957 for (NodeAddr<BlockNode*> PBA : Preds) {
958 NodeAddr<PhiUseNode*> PUA = newPhiUse(PA, RR, PBA);
959 PA.Addr->addMember(PUA, *this);
965 // Build a map "PhiM" which will contain, for each block, the set
966 // of references that will require phi definitions in that block.
967 BlockRefsMap PhiM;
968 for (NodeAddr<BlockNode*> BA : Blocks)
969 recordDefsForDF(PhiM, BA);
970 for (NodeAddr<BlockNode*> BA : Blocks)
971 buildPhis(PhiM, AllRefs, BA);
973 // Link all the refs. This will recursively traverse the dominator tree.
974 DefStackMap DM;
975 linkBlockRefs(DM, EA);
977 // Finally, remove all unused phi nodes.
978 if (!(Options & BuildOptions::KeepDeadPhis))
979 removeUnusedPhis();
982 RegisterRef DataFlowGraph::makeRegRef(unsigned Reg, unsigned Sub) const {
983 assert(PhysicalRegisterInfo::isRegMaskId(Reg) ||
984 TargetRegisterInfo::isPhysicalRegister(Reg));
985 assert(Reg != 0);
986 if (Sub != 0)
987 Reg = TRI.getSubReg(Reg, Sub);
988 return RegisterRef(Reg);
991 RegisterRef DataFlowGraph::makeRegRef(const MachineOperand &Op) const {
992 assert(Op.isReg() || Op.isRegMask());
993 if (Op.isReg())
994 return makeRegRef(Op.getReg(), Op.getSubReg());
995 return RegisterRef(PRI.getRegMaskId(Op.getRegMask()), LaneBitmask::getAll());
998 RegisterRef DataFlowGraph::restrictRef(RegisterRef AR, RegisterRef BR) const {
999 if (AR.Reg == BR.Reg) {
1000 LaneBitmask M = AR.Mask & BR.Mask;
1001 return M.any() ? RegisterRef(AR.Reg, M) : RegisterRef();
1003 #ifndef NDEBUG
1004 // RegisterRef NAR = PRI.normalize(AR);
1005 // RegisterRef NBR = PRI.normalize(BR);
1006 // assert(NAR.Reg != NBR.Reg);
1007 #endif
1008 // This isn't strictly correct, because the overlap may happen in the
1009 // part masked out.
1010 if (PRI.alias(AR, BR))
1011 return AR;
1012 return RegisterRef();
1015 // For each stack in the map DefM, push the delimiter for block B on it.
1016 void DataFlowGraph::markBlock(NodeId B, DefStackMap &DefM) {
1017 // Push block delimiters.
1018 for (auto I = DefM.begin(), E = DefM.end(); I != E; ++I)
1019 I->second.start_block(B);
1022 // Remove all definitions coming from block B from each stack in DefM.
1023 void DataFlowGraph::releaseBlock(NodeId B, DefStackMap &DefM) {
1024 // Pop all defs from this block from the definition stack. Defs that were
1025 // added to the map during the traversal of instructions will not have a
1026 // delimiter, but for those, the whole stack will be emptied.
1027 for (auto I = DefM.begin(), E = DefM.end(); I != E; ++I)
1028 I->second.clear_block(B);
1030 // Finally, remove empty stacks from the map.
1031 for (auto I = DefM.begin(), E = DefM.end(), NextI = I; I != E; I = NextI) {
1032 NextI = std::next(I);
1033 // This preserves the validity of iterators other than I.
1034 if (I->second.empty())
1035 DefM.erase(I);
1039 // Push all definitions from the instruction node IA to an appropriate
1040 // stack in DefM.
1041 void DataFlowGraph::pushAllDefs(NodeAddr<InstrNode*> IA, DefStackMap &DefM) {
1042 pushClobbers(IA, DefM);
1043 pushDefs(IA, DefM);
1046 // Push all definitions from the instruction node IA to an appropriate
1047 // stack in DefM.
1048 void DataFlowGraph::pushClobbers(NodeAddr<InstrNode*> IA, DefStackMap &DefM) {
1049 NodeSet Visited;
1050 std::set<RegisterId> Defined;
1052 // The important objectives of this function are:
1053 // - to be able to handle instructions both while the graph is being
1054 // constructed, and after the graph has been constructed, and
1055 // - maintain proper ordering of definitions on the stack for each
1056 // register reference:
1057 // - if there are two or more related defs in IA (i.e. coming from
1058 // the same machine operand), then only push one def on the stack,
1059 // - if there are multiple unrelated defs of non-overlapping
1060 // subregisters of S, then the stack for S will have both (in an
1061 // unspecified order), but the order does not matter from the data-
1062 // -flow perspective.
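// For illustration: if a single machine operand gives rise to a def and its
// shadow copies (related defs), only one of them is pushed; if an instruction
// independently defines two non-overlapping subregisters of S, both defs end
// up on S's stack, in an unspecified order.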
1064 for (NodeAddr<DefNode*> DA : IA.Addr->members_if(IsDef, *this)) {
1065 if (Visited.count(DA.Id))
1066 continue;
1067 if (!(DA.Addr->getFlags() & NodeAttrs::Clobbering))
1068 continue;
1070 NodeList Rel = getRelatedRefs(IA, DA);
1071 NodeAddr<DefNode*> PDA = Rel.front();
1072 RegisterRef RR = PDA.Addr->getRegRef(*this);
1074 // Push the definition on the stack for the register and all aliases.
1075 // The def stack traversal in linkRefUp will check the exact aliasing.
1076 DefM[RR.Reg].push(DA);
1077 Defined.insert(RR.Reg);
1078 for (RegisterId A : PRI.getAliasSet(RR.Reg)) {
1079 // Check that we don't push the same def twice.
1080 assert(A != RR.Reg);
1081 if (!Defined.count(A))
1082 DefM[A].push(DA);
1084 // Mark all the related defs as visited.
1085 for (NodeAddr<NodeBase*> T : Rel)
1086 Visited.insert(T.Id);
1090 // Push all definitions from the instruction node IA to an appropriate
1091 // stack in DefM.
1092 void DataFlowGraph::pushDefs(NodeAddr<InstrNode*> IA, DefStackMap &DefM) {
1093 NodeSet Visited;
1094 #ifndef NDEBUG
1095 std::set<RegisterId> Defined;
1096 #endif
1098 // The important objectives of this function are:
1099 // - to be able to handle instructions both while the graph is being
1100 // constructed, and after the graph has been constructed, and
1101 // - maintain proper ordering of definitions on the stack for each
1102 // register reference:
1103 // - if there are two or more related defs in IA (i.e. coming from
1104 // the same machine operand), then only push one def on the stack,
1105 // - if there are multiple unrelated defs of non-overlapping
1106 // subregisters of S, then the stack for S will have both (in an
1107 // unspecified order), but the order does not matter from the data-
1108 // -flow perspective.
1110 for (NodeAddr<DefNode*> DA : IA.Addr->members_if(IsDef, *this)) {
1111 if (Visited.count(DA.Id))
1112 continue;
1113 if (DA.Addr->getFlags() & NodeAttrs::Clobbering)
1114 continue;
1116 NodeList Rel = getRelatedRefs(IA, DA);
1117 NodeAddr<DefNode*> PDA = Rel.front();
1118 RegisterRef RR = PDA.Addr->getRegRef(*this);
1119 #ifndef NDEBUG
1120 // Assert if the register is defined in two or more unrelated defs.
1121 // This could happen if there are two or more def operands defining it.
1122 if (!Defined.insert(RR.Reg).second) {
1123 MachineInstr *MI = NodeAddr<StmtNode*>(IA).Addr->getCode();
1124 dbgs() << "Multiple definitions of register: "
1125 << Print<RegisterRef>(RR, *this) << " in\n " << *MI << "in "
1126 << printMBBReference(*MI->getParent()) << '\n';
1127 llvm_unreachable(nullptr);
1129 #endif
1130 // Push the definition on the stack for the register and all aliases.
1131 // The def stack traversal in linkRefUp will check the exact aliasing.
1132 DefM[RR.Reg].push(DA);
1133 for (RegisterId A : PRI.getAliasSet(RR.Reg)) {
1134 // Check that we don't push the same def twice.
1135 assert(A != RR.Reg);
1136 DefM[A].push(DA);
1138 // Mark all the related defs as visited.
1139 for (NodeAddr<NodeBase*> T : Rel)
1140 Visited.insert(T.Id);
1144 // Return the list of all reference nodes related to RA, including RA itself.
1145 // See "getNextRelated" for the meaning of a "related reference".
1146 NodeList DataFlowGraph::getRelatedRefs(NodeAddr<InstrNode*> IA,
1147 NodeAddr<RefNode*> RA) const {
1148 assert(IA.Id != 0 && RA.Id != 0);
1150 NodeList Refs;
1151 NodeId Start = RA.Id;
1152 do {
1153 Refs.push_back(RA);
1154 RA = getNextRelated(IA, RA);
1155 } while (RA.Id != 0 && RA.Id != Start);
1156 return Refs;
1159 // Clear all information in the graph.
1160 void DataFlowGraph::reset() {
1161 Memory.clear();
1162 BlockNodes.clear();
1163 Func = NodeAddr<FuncNode*>();
1166 // Return the next reference node in the instruction node IA that is related
1167 // to RA. Conceptually, two reference nodes are related if they refer to the
1168 // same instance of a register access, but differ in flags or other minor
1169 // characteristics. Specific examples of related nodes are shadow reference
1170 // nodes.
1171 // Return the equivalent of nullptr if there are no more related references.
1172 NodeAddr<RefNode*> DataFlowGraph::getNextRelated(NodeAddr<InstrNode*> IA,
1173 NodeAddr<RefNode*> RA) const {
1174 assert(IA.Id != 0 && RA.Id != 0);
1176 auto Related = [this,RA](NodeAddr<RefNode*> TA) -> bool {
1177 if (TA.Addr->getKind() != RA.Addr->getKind())
1178 return false;
1179 if (TA.Addr->getRegRef(*this) != RA.Addr->getRegRef(*this))
1180 return false;
1181 return true;
1183 auto RelatedStmt = [&Related,RA](NodeAddr<RefNode*> TA) -> bool {
1184 return Related(TA) &&
1185 &RA.Addr->getOp() == &TA.Addr->getOp();
1187 auto RelatedPhi = [&Related,RA](NodeAddr<RefNode*> TA) -> bool {
1188 if (!Related(TA))
1189 return false;
1190 if (TA.Addr->getKind() != NodeAttrs::Use)
1191 return true;
1192 // For phi uses, compare predecessor blocks.
1193 const NodeAddr<const PhiUseNode*> TUA = TA;
1194 const NodeAddr<const PhiUseNode*> RUA = RA;
1195 return TUA.Addr->getPredecessor() == RUA.Addr->getPredecessor();
1198 RegisterRef RR = RA.Addr->getRegRef(*this);
1199 if (IA.Addr->getKind() == NodeAttrs::Stmt)
1200 return RA.Addr->getNextRef(RR, RelatedStmt, true, *this);
1201 return RA.Addr->getNextRef(RR, RelatedPhi, true, *this);
1204 // Find the next node related to RA in IA that satisfies condition P.
1205 // If such a node was found, return a pair where the second element is the
1206 // located node. If such a node does not exist, return a pair where the
1207 // first element is the element after which such a node should be inserted,
1208 // and the second element is a null-address.
1209 template <typename Predicate>
1210 std::pair<NodeAddr<RefNode*>,NodeAddr<RefNode*>>
1211 DataFlowGraph::locateNextRef(NodeAddr<InstrNode*> IA, NodeAddr<RefNode*> RA,
1212 Predicate P) const {
1213 assert(IA.Id != 0 && RA.Id != 0);
1215 NodeAddr<RefNode*> NA;
1216 NodeId Start = RA.Id;
1217 while (true) {
1218 NA = getNextRelated(IA, RA);
1219 if (NA.Id == 0 || NA.Id == Start)
1220 break;
1221 if (P(NA))
1222 break;
1223 RA = NA;
1226 if (NA.Id != 0 && NA.Id != Start)
1227 return std::make_pair(RA, NA);
1228 return std::make_pair(RA, NodeAddr<RefNode*>());
1231 // Get the next shadow node in IA corresponding to RA, and optionally create
1232 // such a node if it does not exist.
1233 NodeAddr<RefNode*> DataFlowGraph::getNextShadow(NodeAddr<InstrNode*> IA,
1234 NodeAddr<RefNode*> RA, bool Create) {
1235 assert(IA.Id != 0 && RA.Id != 0);
1237 uint16_t Flags = RA.Addr->getFlags() | NodeAttrs::Shadow;
1238 auto IsShadow = [Flags] (NodeAddr<RefNode*> TA) -> bool {
1239 return TA.Addr->getFlags() == Flags;
1241 auto Loc = locateNextRef(IA, RA, IsShadow);
1242 if (Loc.second.Id != 0 || !Create)
1243 return Loc.second;
1245 // Create a copy of RA and mark it as a shadow.
1246 NodeAddr<RefNode*> NA = cloneNode(RA);
1247 NA.Addr->setFlags(Flags | NodeAttrs::Shadow);
1248 IA.Addr->addMemberAfter(Loc.first, NA, *this);
1249 return NA;
1252 // Get the next shadow node in IA corresponding to RA. Return null-address
1253 // if such a node does not exist.
1254 NodeAddr<RefNode*> DataFlowGraph::getNextShadow(NodeAddr<InstrNode*> IA,
1255 NodeAddr<RefNode*> RA) const {
1256 assert(IA.Id != 0 && RA.Id != 0);
1257 uint16_t Flags = RA.Addr->getFlags() | NodeAttrs::Shadow;
1258 auto IsShadow = [Flags] (NodeAddr<RefNode*> TA) -> bool {
1259 return TA.Addr->getFlags() == Flags;
1261 return locateNextRef(IA, RA, IsShadow).second;
1264 // Create a new statement node in the block node BA that corresponds to
1265 // the machine instruction MI.
1266 void DataFlowGraph::buildStmt(NodeAddr<BlockNode*> BA, MachineInstr &In) {
1267 NodeAddr<StmtNode*> SA = newStmt(BA, &In);
1269 auto isCall = [] (const MachineInstr &In) -> bool {
1270 if (In.isCall())
1271 return true;
1272 // Is tail call?
1273 if (In.isBranch()) {
1274 for (const MachineOperand &Op : In.operands())
1275 if (Op.isGlobal() || Op.isSymbol())
1276 return true;
1277 // Assume indirect branches are calls. This is for the purpose of
1278 // keeping implicit operands, and so it won't hurt on intra-function
1279 // indirect branches.
1280 if (In.isIndirectBranch())
1281 return true;
1283 return false;
1286 auto isDefUndef = [this] (const MachineInstr &In, RegisterRef DR) -> bool {
1287 // This instruction defines DR. Check if there is a use operand that
1288 // would make DR live on entry to the instruction.
1289 for (const MachineOperand &Op : In.operands()) {
1290 if (!Op.isReg() || Op.getReg() == 0 || !Op.isUse() || Op.isUndef())
1291 continue;
1292 RegisterRef UR = makeRegRef(Op);
1293 if (PRI.alias(DR, UR))
1294 return false;
1296 return true;
1299 bool IsCall = isCall(In);
1300 unsigned NumOps = In.getNumOperands();
1302 // Avoid duplicate implicit defs. This will not detect cases of implicit
1303 // defs that define registers that overlap, but it is not clear how to
1304 // interpret that in the absence of explicit defs. Overlapping explicit
1305 // defs are likely illegal already.
1306 BitVector DoneDefs(TRI.getNumRegs());
1307 // Process explicit defs first.
1308 for (unsigned OpN = 0; OpN < NumOps; ++OpN) {
1309 MachineOperand &Op = In.getOperand(OpN);
1310 if (!Op.isReg() || !Op.isDef() || Op.isImplicit())
1311 continue;
1312 unsigned R = Op.getReg();
1313 if (!R || !TargetRegisterInfo::isPhysicalRegister(R))
1314 continue;
1315 uint16_t Flags = NodeAttrs::None;
1316 if (TOI.isPreserving(In, OpN)) {
1317 Flags |= NodeAttrs::Preserving;
1318 // If the def is preserving, check if it is also undefined.
1319 if (isDefUndef(In, makeRegRef(Op)))
1320 Flags |= NodeAttrs::Undef;
1322 if (TOI.isClobbering(In, OpN))
1323 Flags |= NodeAttrs::Clobbering;
1324 if (TOI.isFixedReg(In, OpN))
1325 Flags |= NodeAttrs::Fixed;
1326 if (IsCall && Op.isDead())
1327 Flags |= NodeAttrs::Dead;
1328 NodeAddr<DefNode*> DA = newDef(SA, Op, Flags);
1329 SA.Addr->addMember(DA, *this);
1330 assert(!DoneDefs.test(R));
1331 DoneDefs.set(R);
1334 // Process reg-masks (as clobbers).
1335 BitVector DoneClobbers(TRI.getNumRegs());
1336 for (unsigned OpN = 0; OpN < NumOps; ++OpN) {
1337 MachineOperand &Op = In.getOperand(OpN);
1338 if (!Op.isRegMask())
1339 continue;
1340 uint16_t Flags = NodeAttrs::Clobbering | NodeAttrs::Fixed |
1341 NodeAttrs::Dead;
1342 NodeAddr<DefNode*> DA = newDef(SA, Op, Flags);
1343 SA.Addr->addMember(DA, *this);
1344 // Record all clobbered registers in DoneClobbers.
1345 const uint32_t *RM = Op.getRegMask();
1346 for (unsigned i = 1, e = TRI.getNumRegs(); i != e; ++i)
1347 if (!(RM[i/32] & (1u << (i%32))))
1348 DoneClobbers.set(i);
1351 // Process implicit defs, skipping those that have already been added
1352 // as explicit.
1353 for (unsigned OpN = 0; OpN < NumOps; ++OpN) {
1354 MachineOperand &Op = In.getOperand(OpN);
1355 if (!Op.isReg() || !Op.isDef() || !Op.isImplicit())
1356 continue;
1357 unsigned R = Op.getReg();
1358 if (!R || !TargetRegisterInfo::isPhysicalRegister(R) || DoneDefs.test(R))
1359 continue;
1360 RegisterRef RR = makeRegRef(Op);
1361 uint16_t Flags = NodeAttrs::None;
1362 if (TOI.isPreserving(In, OpN)) {
1363 Flags |= NodeAttrs::Preserving;
1364 // If the def is preserving, check if it is also undefined.
1365 if (isDefUndef(In, RR))
1366 Flags |= NodeAttrs::Undef;
1368 if (TOI.isClobbering(In, OpN))
1369 Flags |= NodeAttrs::Clobbering;
1370 if (TOI.isFixedReg(In, OpN))
1371 Flags |= NodeAttrs::Fixed;
1372 if (IsCall && Op.isDead()) {
1373 if (DoneClobbers.test(R))
1374 continue;
1375 Flags |= NodeAttrs::Dead;
1377 NodeAddr<DefNode*> DA = newDef(SA, Op, Flags);
1378 SA.Addr->addMember(DA, *this);
1379 DoneDefs.set(R);
1382 for (unsigned OpN = 0; OpN < NumOps; ++OpN) {
1383 MachineOperand &Op = In.getOperand(OpN);
1384 if (!Op.isReg() || !Op.isUse())
1385 continue;
1386 unsigned R = Op.getReg();
1387 if (!R || !TargetRegisterInfo::isPhysicalRegister(R))
1388 continue;
1389 uint16_t Flags = NodeAttrs::None;
1390 if (Op.isUndef())
1391 Flags |= NodeAttrs::Undef;
1392 if (TOI.isFixedReg(In, OpN))
1393 Flags |= NodeAttrs::Fixed;
1394 NodeAddr<UseNode*> UA = newUse(SA, Op, Flags);
1395 SA.Addr->addMember(UA, *this);
1399 // Scan all defs in the block node BA and record in PhiM the locations of
1400 // phi nodes corresponding to these defs.
1401 void DataFlowGraph::recordDefsForDF(BlockRefsMap &PhiM,
1402 NodeAddr<BlockNode*> BA) {
1403 // Check all defs from block BA and record them in each block in BA's
1404 // iterated dominance frontier. This information will later be used to
1405 // create phi nodes.
1406 MachineBasicBlock *BB = BA.Addr->getCode();
1407 assert(BB);
1408 auto DFLoc = MDF.find(BB);
1409 if (DFLoc == MDF.end() || DFLoc->second.empty())
1410 return;
1412 // Traverse all instructions in the block and collect the set of all
1413 // defined references. For each reference there will be a phi created
1414 // in the block's iterated dominance frontier.
1415 // This is done to make sure that each defined reference gets only one
1416 // phi node, even if it is defined multiple times.
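// For example, if R is defined in blocks B1 and B2 whose dominance frontiers
// both contain B3, a single phi for R is requested in B3, and the process is
// then iterated on B3's own dominance frontier.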
1417 RegisterSet Defs;
1418 for (NodeAddr<InstrNode*> IA : BA.Addr->members(*this))
1419 for (NodeAddr<RefNode*> RA : IA.Addr->members_if(IsDef, *this))
1420 Defs.insert(RA.Addr->getRegRef(*this));
1422 // Calculate the iterated dominance frontier of BB.
1423 const MachineDominanceFrontier::DomSetType &DF = DFLoc->second;
1424 SetVector<MachineBasicBlock*> IDF(DF.begin(), DF.end());
1425 for (unsigned i = 0; i < IDF.size(); ++i) {
1426 auto F = MDF.find(IDF[i]);
1427 if (F != MDF.end())
1428 IDF.insert(F->second.begin(), F->second.end());
1431 // Finally, add the set of defs to each block in the iterated dominance
1432 // frontier.
1433 for (auto DB : IDF) {
1434 NodeAddr<BlockNode*> DBA = findBlock(DB);
1435 PhiM[DBA.Id].insert(Defs.begin(), Defs.end());
1439 // Given the locations of phi nodes in the map PhiM, create the phi nodes
1440 // that are located in the block node BA.
1441 void DataFlowGraph::buildPhis(BlockRefsMap &PhiM, RegisterSet &AllRefs,
1442 NodeAddr<BlockNode*> BA) {
1443 // Check if this block has any DF defs, i.e. if there are any defs
1444 // that this block is in the iterated dominance frontier of.
1445 auto HasDF = PhiM.find(BA.Id);
1446 if (HasDF == PhiM.end() || HasDF->second.empty())
1447 return;
1449 // First, remove from Refs all R such that there exists another T in Refs
1450 // that covers R. In other words, only leave those refs that
1451 // are not covered by another ref (i.e. maximal with respect to covering).
1453 auto MaxCoverIn = [this] (RegisterRef RR, RegisterSet &RRs) -> RegisterRef {
1454 for (RegisterRef I : RRs)
1455 if (I != RR && RegisterAggr::isCoverOf(I, RR, PRI))
1456 RR = I;
1457 return RR;
1460 RegisterSet MaxDF;
1461 for (RegisterRef I : HasDF->second)
1462 MaxDF.insert(MaxCoverIn(I, HasDF->second));
1464 std::vector<RegisterRef> MaxRefs;
1465 for (RegisterRef I : MaxDF)
1466 MaxRefs.push_back(MaxCoverIn(I, AllRefs));
1468 // Now, for each R in MaxRefs, get the alias closure of R. If the closure
1469 // only has R in it, create a phi with a def for R. Otherwise, create a phi,
1470 // and add a def for each S in the closure.
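// For example (with hypothetical registers): if MaxRefs contains A, B and C,
// where A aliases B but neither aliases C, then {A, B} forms one closure (a
// single phi with a def for A and a def for B), and {C} forms another (its own
// phi with a single def).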
1472 // Sort the refs so that the phis will be created in a deterministic order.
1473 llvm::sort(MaxRefs);
1474 // Remove duplicates.
1475 auto NewEnd = std::unique(MaxRefs.begin(), MaxRefs.end());
1476 MaxRefs.erase(NewEnd, MaxRefs.end());
1478 auto Aliased = [this,&MaxRefs](RegisterRef RR,
1479 std::vector<unsigned> &Closure) -> bool {
1480 for (unsigned I : Closure)
1481 if (PRI.alias(RR, MaxRefs[I]))
1482 return true;
1483 return false;
1486 // Prepare a list of NodeIds of the block's predecessors.
1487 NodeList Preds;
1488 const MachineBasicBlock *MBB = BA.Addr->getCode();
1489 for (MachineBasicBlock *PB : MBB->predecessors())
1490 Preds.push_back(findBlock(PB));
1492 while (!MaxRefs.empty()) {
1493 // Put the first element in the closure, and then add all subsequent
1494 // elements from MaxRefs to it, if they alias at least one element
1495 // already in the closure.
1496 // ClosureIdx: vector of indices in MaxRefs of members of the closure.
1497 std::vector<unsigned> ClosureIdx = { 0 };
1498 for (unsigned i = 1; i != MaxRefs.size(); ++i)
1499 if (Aliased(MaxRefs[i], ClosureIdx))
1500 ClosureIdx.push_back(i);
1502 // Build a phi for the closure.
1503 unsigned CS = ClosureIdx.size();
1504 NodeAddr<PhiNode*> PA = newPhi(BA);
1506 // Add defs.
1507 for (unsigned X = 0; X != CS; ++X) {
1508 RegisterRef RR = MaxRefs[ClosureIdx[X]];
1509 uint16_t PhiFlags = NodeAttrs::PhiRef | NodeAttrs::Preserving;
1510 NodeAddr<DefNode*> DA = newDef(PA, RR, PhiFlags);
1511 PA.Addr->addMember(DA, *this);
1513 // Add phi uses.
1514 for (NodeAddr<BlockNode*> PBA : Preds) {
1515 for (unsigned X = 0; X != CS; ++X) {
1516 RegisterRef RR = MaxRefs[ClosureIdx[X]];
1517 NodeAddr<PhiUseNode*> PUA = newPhiUse(PA, RR, PBA);
1518 PA.Addr->addMember(PUA, *this);
1522 // Erase from MaxRefs all elements in the closure.
1523 auto Begin = MaxRefs.begin();
1524 for (unsigned i = ClosureIdx.size(); i != 0; --i)
1525 MaxRefs.erase(Begin + ClosureIdx[i-1]);
1529 // Remove any unneeded phi nodes that were created during the build process.
1530 void DataFlowGraph::removeUnusedPhis() {
1531 // This will remove unused phis, i.e. phis where each def does not reach
1532 // any uses or other defs. This will not detect or remove circular phi
1533 // chains that are otherwise dead. Unused/dead phis are created during
1534 // the build process and this function is intended to remove these cases
1535 // that are easily determinable to be unnecessary.
1537 SetVector<NodeId> PhiQ;
1538 for (NodeAddr<BlockNode*> BA : Func.Addr->members(*this)) {
1539 for (auto P : BA.Addr->members_if(IsPhi, *this))
1540 PhiQ.insert(P.Id);
1543 static auto HasUsedDef = [](NodeList &Ms) -> bool {
1544 for (NodeAddr<NodeBase*> M : Ms) {
1545 if (M.Addr->getKind() != NodeAttrs::Def)
1546 continue;
1547 NodeAddr<DefNode*> DA = M;
1548 if (DA.Addr->getReachedDef() != 0 || DA.Addr->getReachedUse() != 0)
1549 return true;
1551 return false;
1554 // Any phi, if it is removed, may affect other phis (make them dead).
1555 // For each removed phi, collect the potentially affected phis and add
1556 // them back to the queue.
1557 while (!PhiQ.empty()) {
1558 auto PA = addr<PhiNode*>(PhiQ[0]);
1559 PhiQ.remove(PA.Id);
1560 NodeList Refs = PA.Addr->members(*this);
1561 if (HasUsedDef(Refs))
1562 continue;
1563 for (NodeAddr<RefNode*> RA : Refs) {
1564 if (NodeId RD = RA.Addr->getReachingDef()) {
1565 auto RDA = addr<DefNode*>(RD);
1566 NodeAddr<InstrNode*> OA = RDA.Addr->getOwner(*this);
1567 if (IsPhi(OA))
1568 PhiQ.insert(OA.Id);
1570 if (RA.Addr->isDef())
1571 unlinkDef(RA, true);
1572 else
1573 unlinkUse(RA, true);
1575 NodeAddr<BlockNode*> BA = PA.Addr->getOwner(*this);
1576 BA.Addr->removeMember(PA, *this);
1580 // For a given reference node TA in an instruction node IA, connect the
1581 // reaching def of TA to the appropriate def node. Create any shadow nodes
1582 // as appropriate.
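// For example, when a use of a register is only covered by a combination of
// defs on the stack (say, separate defs of two halves of the register), the
// use is linked to the first such def, and a shadow copy of the use is created
// and linked to the next one, so that each reaching def gets its own link.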
1583 template <typename T>
1584 void DataFlowGraph::linkRefUp(NodeAddr<InstrNode*> IA, NodeAddr<T> TA,
1585 DefStack &DS) {
1586 if (DS.empty())
1587 return;
1588 RegisterRef RR = TA.Addr->getRegRef(*this);
1589 NodeAddr<T> TAP;
1591 // References from the def stack that have been examined so far.
1592 RegisterAggr Defs(PRI);
1594 for (auto I = DS.top(), E = DS.bottom(); I != E; I.down()) {
1595 RegisterRef QR = I->Addr->getRegRef(*this);
1597 // Skip all defs that are aliased to any of the defs that we have already
1598 // seen. If this completes a cover of RR, stop the stack traversal.
1599 bool Alias = Defs.hasAliasOf(QR);
1600 bool Cover = Defs.insert(QR).hasCoverOf(RR);
1601 if (Alias) {
1602 if (Cover)
1603 break;
1604 continue;
1607 // The reaching def.
1608 NodeAddr<DefNode*> RDA = *I;
1610 // Pick the reached node.
1611 if (TAP.Id == 0) {
1612 TAP = TA;
1613 } else {
1614 // Mark the existing ref as "shadow" and create a new shadow.
1615 TAP.Addr->setFlags(TAP.Addr->getFlags() | NodeAttrs::Shadow);
1616 TAP = getNextShadow(IA, TAP, true);
1619 // Create the link.
1620 TAP.Addr->linkToDef(TAP.Id, RDA);
1622 if (Cover)
1623 break;
1627 // Create data-flow links for all reference nodes in the statement node SA.
1628 template <typename Predicate>
1629 void DataFlowGraph::linkStmtRefs(DefStackMap &DefM, NodeAddr<StmtNode*> SA,
1630 Predicate P) {
1631 #ifndef NDEBUG
1632 RegisterSet Defs;
1633 #endif
1635 // Link all nodes (upwards in the data-flow) with their reaching defs.
1636 for (NodeAddr<RefNode*> RA : SA.Addr->members_if(P, *this)) {
1637 uint16_t Kind = RA.Addr->getKind();
1638 assert(Kind == NodeAttrs::Def || Kind == NodeAttrs::Use);
1639 RegisterRef RR = RA.Addr->getRegRef(*this);
1640 #ifndef NDEBUG
1641 // Do not expect multiple defs of the same reference.
1642 assert(Kind != NodeAttrs::Def || !Defs.count(RR));
1643 Defs.insert(RR);
1644 #endif
1646 auto F = DefM.find(RR.Reg);
1647 if (F == DefM.end())
1648 continue;
1649 DefStack &DS = F->second;
1650 if (Kind == NodeAttrs::Use)
1651 linkRefUp<UseNode*>(SA, RA, DS);
1652 else if (Kind == NodeAttrs::Def)
1653 linkRefUp<DefNode*>(SA, RA, DS);
1654 else
1655 llvm_unreachable("Unexpected node in instruction");
1659 // Create data-flow links for all instructions in the block node BA. This
1660 // will include updating any phi nodes in BA.
1661 void DataFlowGraph::linkBlockRefs(DefStackMap &DefM, NodeAddr<BlockNode*> BA) {
1662 // Push block delimiters.
1663 markBlock(BA.Id, DefM);
1665 auto IsClobber = [] (NodeAddr<RefNode*> RA) -> bool {
1666 return IsDef(RA) && (RA.Addr->getFlags() & NodeAttrs::Clobbering);
1668 auto IsNoClobber = [] (NodeAddr<RefNode*> RA) -> bool {
1669 return IsDef(RA) && !(RA.Addr->getFlags() & NodeAttrs::Clobbering);
1672 assert(BA.Addr && "block node address is needed to create a data-flow link");
1673 // For each non-phi instruction in the block, link all the defs and uses
1674 // to their reaching defs. For any member of the block (including phis),
1675 // push the defs on the corresponding stacks.
1676 for (NodeAddr<InstrNode*> IA : BA.Addr->members(*this)) {
1677 // Ignore phi nodes here. They will be linked part by part from the
1678 // predecessors.
1679 if (IA.Addr->getKind() == NodeAttrs::Stmt) {
1680 linkStmtRefs(DefM, IA, IsUse);
1681 linkStmtRefs(DefM, IA, IsClobber);
1684 // Push the definitions on the stack.
1685 pushClobbers(IA, DefM);
1687 if (IA.Addr->getKind() == NodeAttrs::Stmt)
1688 linkStmtRefs(DefM, IA, IsNoClobber);
1690 pushDefs(IA, DefM);
1693 // Recursively process all children in the dominator tree.
1694 MachineDomTreeNode *N = MDT.getNode(BA.Addr->getCode());
1695 for (auto I : *N) {
1696 MachineBasicBlock *SB = I->getBlock();
1697 NodeAddr<BlockNode*> SBA = findBlock(SB);
1698 linkBlockRefs(DefM, SBA);
1701 // Link the phi uses from the successor blocks.
1702 auto IsUseForBA = [BA](NodeAddr<NodeBase*> NA) -> bool {
1703 if (NA.Addr->getKind() != NodeAttrs::Use)
1704 return false;
1705 assert(NA.Addr->getFlags() & NodeAttrs::PhiRef);
1706 NodeAddr<PhiUseNode*> PUA = NA;
1707 return PUA.Addr->getPredecessor() == BA.Id;
1710 RegisterSet EHLiveIns = getLandingPadLiveIns();
1711 MachineBasicBlock *MBB = BA.Addr->getCode();
1713 for (MachineBasicBlock *SB : MBB->successors()) {
1714 bool IsEHPad = SB->isEHPad();
1715 NodeAddr<BlockNode*> SBA = findBlock(SB);
1716 for (NodeAddr<InstrNode*> IA : SBA.Addr->members_if(IsPhi, *this)) {
1717 // Do not link phi uses for landing pad live-ins.
1718 if (IsEHPad) {
1719 // Find what register this phi is for.
1720 NodeAddr<RefNode*> RA = IA.Addr->getFirstMember(*this);
1721 assert(RA.Id != 0);
1722 if (EHLiveIns.count(RA.Addr->getRegRef(*this)))
1723 continue;
1725 // Go over each phi use associated with MBB, and link it.
1726 for (auto U : IA.Addr->members_if(IsUseForBA, *this)) {
1727 NodeAddr<PhiUseNode*> PUA = U;
1728 RegisterRef RR = PUA.Addr->getRegRef(*this);
1729 linkRefUp<UseNode*>(IA, PUA, DefM[RR.Reg]);
1734 // Pop all defs from this block from the definition stacks.
1735 releaseBlock(BA.Id, DefM);
1738 // Remove the use node UA from any data-flow and structural links.
1739 void DataFlowGraph::unlinkUseDF(NodeAddr<UseNode*> UA) {
1740 NodeId RD = UA.Addr->getReachingDef();
1741 NodeId Sib = UA.Addr->getSibling();
1743 if (RD == 0) {
1744 assert(Sib == 0);
1745 return;
1748 auto RDA = addr<DefNode*>(RD);
1749 auto TA = addr<UseNode*>(RDA.Addr->getReachedUse());
1750 if (TA.Id == UA.Id) {
1751 RDA.Addr->setReachedUse(Sib);
1752 return;
1755 while (TA.Id != 0) {
1756 NodeId S = TA.Addr->getSibling();
1757 if (S == UA.Id) {
1758 TA.Addr->setSibling(UA.Addr->getSibling());
1759 return;
1761 TA = addr<UseNode*>(S);
1765 // Remove the def node DA from any data-flow and structural links.
1766 void DataFlowGraph::unlinkDefDF(NodeAddr<DefNode*> DA) {
1768 // RD
1769 // | reached
1770 // | def
1771 // :
1772 // .
1773 // +----+
1774 // ... -- | DA | -- ... -- 0 : sibling chain of DA
1775 // +----+
1776 // | | reached
1777 // | : def
1778 // | .
1779 // | ... : Siblings (defs)
1780 // |
1781 // : reached
1782 // . use
1783 // ... : sibling chain of reached uses
1785 NodeId RD = DA.Addr->getReachingDef();
1787 // Visit all siblings of the reached def and reset their reaching defs.
1788 // Also, defs reached by DA are now "promoted" to being reached by RD,
1789 // so all of them will need to be spliced into the sibling chain where
1790 // DA belongs.
1791 auto getAllNodes = [this] (NodeId N) -> NodeList {
1792 NodeList Res;
1793 while (N) {
1794 auto RA = addr<RefNode*>(N);
1795 // Keep the nodes in the exact sibling order.
1796 Res.push_back(RA);
1797 N = RA.Addr->getSibling();
1799 return Res;
1801 NodeList ReachedDefs = getAllNodes(DA.Addr->getReachedDef());
1802 NodeList ReachedUses = getAllNodes(DA.Addr->getReachedUse());
1804 if (RD == 0) {
1805 for (NodeAddr<RefNode*> I : ReachedDefs)
1806 I.Addr->setSibling(0);
1807 for (NodeAddr<RefNode*> I : ReachedUses)
1808 I.Addr->setSibling(0);
1810 for (NodeAddr<DefNode*> I : ReachedDefs)
1811 I.Addr->setReachingDef(RD);
1812 for (NodeAddr<UseNode*> I : ReachedUses)
1813 I.Addr->setReachingDef(RD);
1815 NodeId Sib = DA.Addr->getSibling();
1816 if (RD == 0) {
1817 assert(Sib == 0);
1818 return;
1821 // Update the reaching def node and remove DA from the sibling list.
1822 auto RDA = addr<DefNode*>(RD);
1823 auto TA = addr<DefNode*>(RDA.Addr->getReachedDef());
1824 if (TA.Id == DA.Id) {
1825 // If DA is the first reached def, just update the RD's reached def
1826 // to the DA's sibling.
1827 RDA.Addr->setReachedDef(Sib);
1828 } else {
1829 // Otherwise, traverse the sibling list of the reached defs and remove
1830 // DA from it.
1831 while (TA.Id != 0) {
1832 NodeId S = TA.Addr->getSibling();
1833 if (S == DA.Id) {
1834 TA.Addr->setSibling(Sib);
1835 break;
1837 TA = addr<DefNode*>(S);
1841 // Splice the DA's reached defs into the RDA's reached def chain.
1842 if (!ReachedDefs.empty()) {
1843 auto Last = NodeAddr<DefNode*>(ReachedDefs.back());
1844 Last.Addr->setSibling(RDA.Addr->getReachedDef());
1845 RDA.Addr->setReachedDef(ReachedDefs.front().Id);
1847 // Splice the DA's reached uses into the RDA's reached use chain.
1848 if (!ReachedUses.empty()) {
1849 auto Last = NodeAddr<UseNode*>(ReachedUses.back());
1850 Last.Addr->setSibling(RDA.Addr->getReachedUse());
1851 RDA.Addr->setReachedUse(ReachedUses.front().Id);