//===- RDFLiveness.cpp ----------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Computation of the liveness information from the data-flow graph.
//
// The main functionality of this code is to compute block live-in
// information. With the live-in information in place, the placement
// of kill flags can also be recalculated.
//
// The block live-in calculation is based on the ideas from the following
// publication:
//
// Dibyendu Das, Ramakrishna Upadrasta, Benoit Dupont de Dinechin.
// "Efficient Liveness Computation Using Merge Sets and DJ-Graphs."
// ACM Transactions on Architecture and Code Optimization, Association for
// Computing Machinery, 2012, ACM TACO Special Issue on "High-Performance
// and Embedded Architectures and Compilers", 8 (4),
// <10.1145/2086696.2086706>. <hal-00647369>
//
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominanceFrontier.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/RDFLiveness.h"
#include "llvm/CodeGen/RDFGraph.h"
#include "llvm/CodeGen/RDFRegisters.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <map>
#include <unordered_map>
#include <utility>
#include <vector>

using namespace llvm;
using namespace rdf;

static cl::opt<unsigned> MaxRecNest("rdf-liveness-max-rec", cl::init(25),
  cl::Hidden, cl::desc("Maximum recursion level"));

namespace llvm {
namespace rdf {
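
// Print a reference map: for each register, the set of reference nodes
// (with their lane masks) associated with it.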
raw_ostream &operator<< (raw_ostream &OS, const Print<Liveness::RefMap> &P) {
  OS << '{';
  for (auto &I : P.Obj) {
    OS << ' ' << printReg(I.first, &P.G.getTRI()) << '{';
    for (auto J = I.second.begin(), E = I.second.end(); J != E; ) {
      OS << Print<NodeId>(J->first, P.G) << PrintLaneMaskOpt(J->second);
      if (++J != E)
        OS << ',';
    }
    OS << '}';
  }
  OS << " }";
  return OS;
}

} // end namespace rdf
} // end namespace llvm

// The order in the returned sequence is the order of reaching defs in the
// upward traversal: the first def is the closest to the given reference RefA,
// the next one is further up, and so on.
// The list ends at a reaching phi def, or when the reference from RefA is
// covered by the defs in the list (see FullChain).
// This function provides two modes of operation:
// (1) Returning the sequence of reaching defs for a particular reference
// node. This sequence will terminate at the first phi node [1].
// (2) Returning a partial sequence of reaching defs, where the final goal
// is to traverse past phi nodes to the actual defs arising from the code
// itself.
// In mode (2), the register reference for which the search was started
// may be different from the reference node RefA, for which this call was
// made, hence the argument RefRR, which holds the original register.
// Also, some definitions may have already been encountered in a previous
// call that will influence register covering. The register references
// already defined are passed in through DefRRs.
// In mode (1), the "continuation" considerations do not apply, and the
// RefRR is the same as the register in RefA, and the set DefRRs is empty.
//
// [1] It is possible for multiple phi nodes to be included in the returned
// sequence:
//   SubA = phi ...
//   SubB = phi ...
//   ...  = SuperAB(rdef:SubA), SuperAB"(rdef:SubB)
// However, these phi nodes are independent from one another in terms of
// the data-flow.

NodeList Liveness::getAllReachingDefs(RegisterRef RefRR,
      NodeAddr<RefNode*> RefA, bool TopShadows, bool FullChain,
      const RegisterAggr &DefRRs) {
  NodeList RDefs; // Return value.
  SetVector<NodeId> DefQ;
  DenseMap<MachineInstr*, uint32_t> OrdMap;

  // Dead defs will be treated as if they were live, since they are actually
  // on the data-flow path. They cannot be ignored because even though they
  // do not generate meaningful values, they still modify registers.

  // If the reference is undefined, there is nothing to do.
  if (RefA.Addr->getFlags() & NodeAttrs::Undef)
    return RDefs;

  // The initial queue should not have reaching defs for shadows. The
  // whole point of a shadow is that it will have a reaching def that
  // is not aliased to the reaching defs of the related shadows.
  NodeId Start = RefA.Id;
  auto SNA = DFG.addr<RefNode*>(Start);
  if (NodeId RD = SNA.Addr->getReachingDef())
    DefQ.insert(RD);
  if (TopShadows) {
    for (auto S : DFG.getRelatedRefs(RefA.Addr->getOwner(DFG), RefA))
      if (NodeId RD = NodeAddr<RefNode*>(S).Addr->getReachingDef())
        DefQ.insert(RD);
  }

  // Collect all the reaching defs, going up until a phi node is encountered,
  // or there are no more reaching defs. From this set, the actual set of
  // reaching defs will be selected.
  // The traversal upwards must go on until a covering def is encountered.
  // It is possible that a collection of non-covering (individually) defs
  // will be sufficient, but keep going until a covering one is found.
  for (unsigned i = 0; i < DefQ.size(); ++i) {
    auto TA = DFG.addr<DefNode*>(DefQ[i]);
    if (TA.Addr->getFlags() & NodeAttrs::PhiRef)
      continue;
    // Stop at the covering/overwriting def of the initial register reference.
    RegisterRef RR = TA.Addr->getRegRef(DFG);
    if (!DFG.IsPreservingDef(TA))
      if (RegisterAggr::isCoverOf(RR, RefRR, PRI))
        continue;
    // Get the next level of reaching defs. This will include multiple
    // reaching defs for shadows.
    for (auto S : DFG.getRelatedRefs(TA.Addr->getOwner(DFG), TA))
      if (NodeId RD = NodeAddr<RefNode*>(S).Addr->getReachingDef())
        DefQ.insert(RD);
    // Don't visit sibling defs. They share the same reaching def (which
    // will be visited anyway), but they define something not aliased to
    // this ref.
  }

  // Return the MachineBasicBlock containing a given instruction.
  auto Block = [this] (NodeAddr<InstrNode*> IA) -> MachineBasicBlock* {
    if (IA.Addr->getKind() == NodeAttrs::Stmt)
      return NodeAddr<StmtNode*>(IA).Addr->getCode()->getParent();
    assert(IA.Addr->getKind() == NodeAttrs::Phi);
    NodeAddr<PhiNode*> PA = IA;
    NodeAddr<BlockNode*> BA = PA.Addr->getOwner(DFG);
    return BA.Addr->getCode();
  };

  SmallSet<NodeId,32> Defs;

  // Remove all non-phi defs that are not aliased to RefRR, and segregate
  // the remaining defs into buckets for containing blocks.
  std::map<NodeId, NodeAddr<InstrNode*>> Owners;
  std::map<MachineBasicBlock*, SmallVector<NodeId,32>> Blocks;
  for (NodeId N : DefQ) {
    auto TA = DFG.addr<DefNode*>(N);
    bool IsPhi = TA.Addr->getFlags() & NodeAttrs::PhiRef;
    if (!IsPhi && !PRI.alias(RefRR, TA.Addr->getRegRef(DFG)))
      continue;
    Defs.insert(TA.Id);
    NodeAddr<InstrNode*> IA = TA.Addr->getOwner(DFG);
    Owners[TA.Id] = IA;
    Blocks[Block(IA)].push_back(IA.Id);
  }

  auto Precedes = [this,&OrdMap] (NodeId A, NodeId B) {
    if (A == B)
      return false;
    NodeAddr<InstrNode*> OA = DFG.addr<InstrNode*>(A);
    NodeAddr<InstrNode*> OB = DFG.addr<InstrNode*>(B);
    bool StmtA = OA.Addr->getKind() == NodeAttrs::Stmt;
    bool StmtB = OB.Addr->getKind() == NodeAttrs::Stmt;
    if (StmtA && StmtB) {
      const MachineInstr *InA = NodeAddr<StmtNode*>(OA).Addr->getCode();
      const MachineInstr *InB = NodeAddr<StmtNode*>(OB).Addr->getCode();
      assert(InA->getParent() == InB->getParent());
      auto FA = OrdMap.find(InA);
      if (FA != OrdMap.end())
        return FA->second < OrdMap.find(InB)->second;
      const MachineBasicBlock *BB = InA->getParent();
      for (auto It = BB->begin(), E = BB->end(); It != E; ++It) {
        if (It == InA->getIterator())
          return true;
        if (It == InB->getIterator())
          return false;
      }
      llvm_unreachable("InA and InB should be in the same block");
    }
    // One of them is a phi node.
    if (!StmtA && !StmtB) {
      // Both are phis, which are unordered. Break the tie by id numbers.
      return A < B;
    }
    // Only one of them is a phi. Phis always precede statements.
    return !StmtA;
  };

  auto GetOrder = [&OrdMap] (MachineBasicBlock &B) {
    uint32_t Pos = 0;
    for (MachineInstr &In : B)
      OrdMap.insert({&In, ++Pos});
  };

  // For each block, sort the nodes in it.
  std::vector<MachineBasicBlock*> TmpBB;
  for (auto &Bucket : Blocks) {
    TmpBB.push_back(Bucket.first);
    if (Bucket.second.size() > 2)
      GetOrder(*Bucket.first);
    llvm::sort(Bucket.second, Precedes);
  }

  // Sort the blocks with respect to dominance.
  llvm::sort(TmpBB,
             [this](auto A, auto B) { return MDT.properlyDominates(A, B); });

  std::vector<NodeId> TmpInst;
  for (MachineBasicBlock *MBB : llvm::reverse(TmpBB)) {
    auto &Bucket = Blocks[MBB];
    TmpInst.insert(TmpInst.end(), Bucket.rbegin(), Bucket.rend());
  }

  // The vector is a list of instructions, so that defs coming from
  // the same instruction don't need to be artificially ordered.
  // Then, when computing the initial segment, and iterating over an
  // instruction, pick the defs that contribute to the covering (i.e. are
  // not covered by previously added defs). Check the defs individually,
  // i.e. first check whether each def is covered (without adding them
  // to the tracking set), and then add all the selected ones.
  //
  // The reason for this is this example:
  // *d1<A>, *d2<B>, ... Assume A and B are aliased (can happen in phi nodes).
  // *d3<C>              If A \incl BuC, and B \incl AuC, then *d2 would be
  //                     covered if we added A first, and A would be covered
  //                     if we added B first.
  // In this example we want both A and B, because we don't want to give
  // either one priority over the other, since they belong to the same
  // statement.

  RegisterAggr RRs(DefRRs);

  auto DefInSet = [&Defs] (NodeAddr<RefNode*> TA) -> bool {
    return TA.Addr->getKind() == NodeAttrs::Def &&
           Defs.count(TA.Id);
  };

  for (NodeId T : TmpInst) {
    if (!FullChain && RRs.hasCoverOf(RefRR))
      break;
    auto TA = DFG.addr<InstrNode*>(T);
    bool IsPhi = DFG.IsCode<NodeAttrs::Phi>(TA);
    NodeList Ds;
    for (NodeAddr<DefNode*> DA : TA.Addr->members_if(DefInSet, DFG)) {
      RegisterRef QR = DA.Addr->getRegRef(DFG);
      // Add phi defs even if they are covered by subsequent defs. This is
      // for cases where the reached use is not covered by any of the defs
      // encountered so far: the phi def is needed to expose the liveness
      // of that use to the entry of the block.
      // Example:
      //   phi d1<R3>(,d2,), ...  Phi def d1 is covered by d2.
      //   d2<R3>(d1,,u3), ...
      //   ..., u3<D1>(d2)        This use needs to be live on entry.
      if (FullChain || IsPhi || !RRs.hasCoverOf(QR))
        Ds.push_back(DA);
    }
    llvm::append_range(RDefs, Ds);
    for (NodeAddr<DefNode*> DA : Ds) {
      // When collecting a full chain of definitions, do not consider phi
      // defs to actually define a register.
      uint16_t Flags = DA.Addr->getFlags();
      if (!FullChain || !(Flags & NodeAttrs::PhiRef))
        if (!(Flags & NodeAttrs::Preserving)) // Don't care about Undef here.
          RRs.insert(DA.Addr->getRegRef(DFG));
    }
  }

  auto DeadP = [](const NodeAddr<DefNode*> DA) -> bool {
    return DA.Addr->getFlags() & NodeAttrs::Dead;
  };
  llvm::erase_if(RDefs, DeadP);

  return RDefs;
}
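
// Collect the set of all defs that reach RefA (for the register RefRR),
// continuing recursively through the uses of any phi defs encountered.
// This is a thin wrapper around the recursive implementation below; the
// recursion depth is bounded by the -rdf-liveness-max-rec option.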
std::pair<NodeSet,bool>
Liveness::getAllReachingDefsRec(RegisterRef RefRR, NodeAddr<RefNode*> RefA,
      NodeSet &Visited, const NodeSet &Defs) {
  return getAllReachingDefsRecImpl(RefRR, RefA, Visited, Defs, 0, MaxRecNest);
}

std::pair<NodeSet,bool>
Liveness::getAllReachingDefsRecImpl(RegisterRef RefRR, NodeAddr<RefNode*> RefA,
      NodeSet &Visited, const NodeSet &Defs, unsigned Nest, unsigned MaxNest) {
  if (Nest > MaxNest)
    return { NodeSet(), false };
  // Collect all defined registers. Do not consider phis to be defining
  // anything, only collect "real" definitions.
  RegisterAggr DefRRs(PRI);
  for (NodeId D : Defs) {
    const auto DA = DFG.addr<const DefNode*>(D);
    if (!(DA.Addr->getFlags() & NodeAttrs::PhiRef))
      DefRRs.insert(DA.Addr->getRegRef(DFG));
  }

  NodeList RDs = getAllReachingDefs(RefRR, RefA, false, true, DefRRs);
  if (RDs.empty())
    return { Defs, true };

  // Make a copy of the preexisting definitions and add the newly found ones.
  NodeSet TmpDefs = Defs;
  for (NodeAddr<NodeBase*> R : RDs)
    TmpDefs.insert(R.Id);

  NodeSet Result = Defs;

  for (NodeAddr<DefNode*> DA : RDs) {
    Result.insert(DA.Id);
    if (!(DA.Addr->getFlags() & NodeAttrs::PhiRef))
      continue;
    NodeAddr<PhiNode*> PA = DA.Addr->getOwner(DFG);
    if (Visited.count(PA.Id))
      continue;
    Visited.insert(PA.Id);
    // Go over all phi uses and get the reaching defs for each use.
    for (auto U : PA.Addr->members_if(DFG.IsRef<NodeAttrs::Use>, DFG)) {
      const auto &T = getAllReachingDefsRecImpl(RefRR, U, Visited, TmpDefs,
                                                Nest+1, MaxNest);
      if (!T.second)
        return { T.first, false };
      Result.insert(T.first.begin(), T.first.end());
    }
  }

  return { Result, true };
}

/// Find the nearest ref node aliased to RefRR, going upwards in the data
/// flow, starting from the instruction immediately preceding Inst.
NodeAddr<RefNode*> Liveness::getNearestAliasedRef(RegisterRef RefRR,
      NodeAddr<InstrNode*> IA) {
  NodeAddr<BlockNode*> BA = IA.Addr->getOwner(DFG);
  NodeList Ins = BA.Addr->members(DFG);
  NodeId FindId = IA.Id;
  auto E = Ins.rend();
  auto B = std::find_if(Ins.rbegin(), E,
                        [FindId] (const NodeAddr<InstrNode*> T) {
                          return T.Id == FindId;
                        });
  // Do not scan IA (which is what B would point to).
  if (B != E)
    ++B;

  do {
    // Process the range of instructions from B to E.
    for (NodeAddr<InstrNode*> I : make_range(B, E)) {
      NodeList Refs = I.Addr->members(DFG);
      NodeAddr<RefNode*> Clob, Use;
      // Scan all the refs in I aliased to RefRR, and return the one that
      // is the closest to the output of I, i.e. def > clobber > use.
      for (NodeAddr<RefNode*> R : Refs) {
        if (!PRI.alias(R.Addr->getRegRef(DFG), RefRR))
          continue;
        if (DFG.IsDef(R)) {
          // If it's a non-clobbering def, just return it.
          if (!(R.Addr->getFlags() & NodeAttrs::Clobbering))
            return R;
          Clob = R;
        } else {
          Use = R;
        }
      }
      if (Clob.Id != 0)
        return Clob;
      if (Use.Id != 0)
        return Use;
    }

    // Go up to the immediate dominator, if any.
    MachineBasicBlock *BB = BA.Addr->getCode();
    BA = NodeAddr<BlockNode*>();
    if (MachineDomTreeNode *N = MDT.getNode(BB)) {
      if ((N = N->getIDom()))
        BA = DFG.findBlock(N->getBlock());
    }
    if (!BA.Id)
      break;

    Ins = BA.Addr->members(DFG);
    B = Ins.rbegin();
    E = Ins.rend();
  } while (true);

  return NodeAddr<RefNode*>();
}
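
// Collect all uses of the register RefRR that can receive the value defined
// by DefA, following reached defs transitively. DefRRs is the set of
// registers already (re)defined along the way; uses covered by it are not
// reachable from DefA and are not included.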
NodeSet Liveness::getAllReachedUses(RegisterRef RefRR,
      NodeAddr<DefNode*> DefA, const RegisterAggr &DefRRs) {
  NodeSet Uses;

  // If the original register is already covered by all the intervening
  // defs, no more uses can be reached.
  if (DefRRs.hasCoverOf(RefRR))
    return Uses;

  // Add all directly reached uses.
  // If the def is dead, it does not provide a value for any use.
  bool IsDead = DefA.Addr->getFlags() & NodeAttrs::Dead;
  NodeId U = !IsDead ? DefA.Addr->getReachedUse() : 0;
  while (U != 0) {
    auto UA = DFG.addr<UseNode*>(U);
    if (!(UA.Addr->getFlags() & NodeAttrs::Undef)) {
      RegisterRef UR = UA.Addr->getRegRef(DFG);
      if (PRI.alias(RefRR, UR) && !DefRRs.hasCoverOf(UR))
        Uses.insert(U);
    }
    U = UA.Addr->getSibling();
  }

  // Traverse all reached defs. This time dead defs cannot be ignored.
  for (NodeId D = DefA.Addr->getReachedDef(), NextD; D != 0; D = NextD) {
    auto DA = DFG.addr<DefNode*>(D);
    NextD = DA.Addr->getSibling();
    RegisterRef DR = DA.Addr->getRegRef(DFG);
    // If this def is already covered, it cannot reach anything new.
    // Similarly, skip it if it is not aliased to the interesting register.
    if (DefRRs.hasCoverOf(DR) || !PRI.alias(RefRR, DR))
      continue;
    NodeSet T;
    if (DFG.IsPreservingDef(DA)) {
      // If it is a preserving def, do not update the set of intervening defs.
      T = getAllReachedUses(RefRR, DA, DefRRs);
    } else {
      RegisterAggr NewDefRRs = DefRRs;
      NewDefRRs.insert(DR);
      T = getAllReachedUses(RefRR, DA, NewDefRRs);
    }
    Uses.insert(T.begin(), T.end());
  }

  return Uses;
}
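
// For each phi node in the function, compute the set of non-phi ("real")
// uses reached by its defs, and record it in RealUseMap. The information
// is then propagated up the chains of connected phi nodes.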
void Liveness::computePhiInfo() {
  RealUseMap.clear();

  NodeList Phis;
  NodeAddr<FuncNode*> FA = DFG.getFunc();
  NodeList Blocks = FA.Addr->members(DFG);
  for (NodeAddr<BlockNode*> BA : Blocks) {
    auto Ps = BA.Addr->members_if(DFG.IsCode<NodeAttrs::Phi>, DFG);
    llvm::append_range(Phis, Ps);
  }

  // phi use -> (map: reaching phi -> set of registers defined in between)
  std::map<NodeId,std::map<NodeId,RegisterAggr>> PhiUp;
  std::vector<NodeId> PhiUQ;  // Work list of phis for upward propagation.
  std::unordered_map<NodeId,RegisterAggr> PhiDRs;  // Phi -> registers defined by it.

  // Go over all phis.
  for (NodeAddr<PhiNode*> PhiA : Phis) {
    // Go over all defs and collect the reached uses that are non-phi uses
    // (i.e. the "real uses").
    RefMap &RealUses = RealUseMap[PhiA.Id];
    NodeList PhiRefs = PhiA.Addr->members(DFG);

    // Have a work queue of defs whose reached uses need to be found.
    // For each def, add to the queue all reached (non-phi) defs.
    SetVector<NodeId> DefQ;
    NodeSet PhiDefs;
    RegisterAggr DRs(PRI);
    for (NodeAddr<RefNode*> R : PhiRefs) {
      if (!DFG.IsRef<NodeAttrs::Def>(R))
        continue;
      DRs.insert(R.Addr->getRegRef(DFG));
      DefQ.insert(R.Id);
      PhiDefs.insert(R.Id);
    }
    PhiDRs.insert(std::make_pair(PhiA.Id, DRs));

    // Collect the super-set of all possible reached uses. This set will
    // contain all uses reached from this phi, either directly from the
    // phi defs, or (recursively) via non-phi defs reached by the phi defs.
    // This set of uses will later be trimmed to only contain those uses that
    // are actually reached by the phi defs.
    for (unsigned i = 0; i < DefQ.size(); ++i) {
      NodeAddr<DefNode*> DA = DFG.addr<DefNode*>(DefQ[i]);
      // Visit all reached uses. Phi defs should not really have the "dead"
      // flag set, but check it anyway for consistency.
      bool IsDead = DA.Addr->getFlags() & NodeAttrs::Dead;
      NodeId UN = !IsDead ? DA.Addr->getReachedUse() : 0;
      while (UN != 0) {
        NodeAddr<UseNode*> A = DFG.addr<UseNode*>(UN);
        uint16_t F = A.Addr->getFlags();
        if ((F & (NodeAttrs::Undef | NodeAttrs::PhiRef)) == 0) {
          RegisterRef R = A.Addr->getRegRef(DFG);
          RealUses[R.Reg].insert({A.Id,R.Mask});
        }
        UN = A.Addr->getSibling();
      }
      // Visit all reached defs, and add them to the queue. These defs may
      // override some of the uses collected here, but that will be handled
      // later.
      NodeId DN = DA.Addr->getReachedDef();
      while (DN != 0) {
        NodeAddr<DefNode*> A = DFG.addr<DefNode*>(DN);
        for (auto T : DFG.getRelatedRefs(A.Addr->getOwner(DFG), A)) {
          uint16_t Flags = NodeAddr<DefNode*>(T).Addr->getFlags();
          // Must traverse the reached-def chain. Consider:
          //   def(D0) -> def(R0) -> def(R0) -> use(D0)
          // The reachable use of D0 passes through a def of R0.
          if (!(Flags & NodeAttrs::PhiRef))
            DefQ.insert(T.Id);
        }
        DN = A.Addr->getSibling();
      }
    }
    // Filter out those uses that appear to be reachable, but really
    // are not. For example:
    //
    //   R1:0 =          d1
    //        = R1:0     u2     Reached by d1.
    //     R0 =          d3
    //        = R1:0     u4     Still reached by d1: indirectly through
    //                          the def d3.
    //     R1 =          d5
    //        = R1:0     u6     Not reached by d1 (covered collectively
    //                          by d3 and d5), but following reached
    //                          defs and uses from d1 will lead here.
    for (auto UI = RealUses.begin(), UE = RealUses.end(); UI != UE; ) {
      // For each reached register UI->first, there is a set UI->second, of
      //   uses of it. For each such use, check if it is reached by this phi,
      //   i.e. check if the set of its reaching defs intersects the set of
      //   this phi's defs.
      NodeRefSet Uses = UI->second;
      UI->second.clear();
      for (std::pair<NodeId,LaneBitmask> I : Uses) {
        auto UA = DFG.addr<UseNode*>(I.first);
        // Undef flag is checked above.
        assert((UA.Addr->getFlags() & NodeAttrs::Undef) == 0);
        RegisterRef R(UI->first, I.second);
        // Calculate the exposed part of the reached use.
        RegisterAggr Covered(PRI);
        for (NodeAddr<DefNode*> DA : getAllReachingDefs(R, UA)) {
          if (PhiDefs.count(DA.Id))
            break;
          Covered.insert(DA.Addr->getRegRef(DFG));
        }
        if (RegisterRef RC = Covered.clearIn(R)) {
          // We are updating the map for register UI->first, so we need
          // to map RC to be expressed in terms of that register.
          RegisterRef S = PRI.mapTo(RC, UI->first);
          UI->second.insert({I.first, S.Mask});
        }
      }
      UI = UI->second.empty() ? RealUses.erase(UI) : std::next(UI);
    }

    // If this phi reaches some "real" uses, add it to the queue for upward
    // propagation.
    if (!RealUses.empty())
      PhiUQ.push_back(PhiA.Id);

    // Go over all phi uses and check if the reaching def is another phi.
    // Collect the phis that are among the reaching defs of these uses.
    // While traversing the list of reaching defs for each phi use, accumulate
    // the set of registers defined between this phi (PhiA) and the owner phi
    // of the reaching def.
    NodeSet SeenUses;

    for (auto I : PhiRefs) {
      if (!DFG.IsRef<NodeAttrs::Use>(I) || SeenUses.count(I.Id))
        continue;
      NodeAddr<PhiUseNode*> PUA = I;
      if (PUA.Addr->getReachingDef() == 0)
        continue;

      RegisterRef UR = PUA.Addr->getRegRef(DFG);
      NodeList Ds = getAllReachingDefs(UR, PUA, true, false, NoRegs);
      RegisterAggr DefRRs(PRI);

      for (NodeAddr<DefNode*> D : Ds) {
        if (D.Addr->getFlags() & NodeAttrs::PhiRef) {
          NodeId RP = D.Addr->getOwner(DFG).Id;
          std::map<NodeId,RegisterAggr> &M = PhiUp[PUA.Id];
          auto F = M.find(RP);
          if (F == M.end())
            M.insert(std::make_pair(RP, DefRRs));
          else
            F->second.insert(DefRRs);
        }
        DefRRs.insert(D.Addr->getRegRef(DFG));
      }

      for (NodeAddr<PhiUseNode*> T : DFG.getRelatedRefs(PhiA, PUA))
        SeenUses.insert(T.Id);
    }
  }

  if (Trace) {
    dbgs() << "Phi-up-to-phi map with intervening defs:\n";
    for (auto I : PhiUp) {
      dbgs() << "phi " << Print<NodeId>(I.first, DFG) << " -> {";
      for (auto R : I.second)
        dbgs() << ' ' << Print<NodeId>(R.first, DFG)
               << Print<RegisterAggr>(R.second, DFG);
      dbgs() << " }\n";
    }
  }

  // Propagate the reached registers up in the phi chain.
  //
  // The following type of situation needs careful handling:
  //
  //   phi d1<R1:0>  (1)
  //        |
  //   ... d2<R1>
  //        |
  //   phi u3<R1:0>  (2)
  //        |
  //   ... u4<R1>
  //
  // The phi node (2) defines a register pair R1:0, and reaches a "real"
  // use u4 of just R1. The same phi node is also known to reach (upwards)
  // the phi node (1). However, the use u4 is not reached by phi (1),
  // because of the intervening definition d2 of R1. The data flow between
  // phis (1) and (2) is restricted to R1:0 minus R1, i.e. R0.
  //
  // When propagating uses up the phi chains, get all the reaching defs
  // for a given phi use, and traverse the list until the propagated ref
  // is covered, or until reaching the final phi. Only assume that the
  // reference reaches the phi in the latter case.

  // The operation "clearIn" can be expensive. For a given set of intervening
  // defs, cache the result of subtracting these defs from a given register
  // ref.
  using SubMap = std::unordered_map<RegisterRef, RegisterRef>;
  std::unordered_map<RegisterAggr, SubMap> Subs;
  auto ClearIn = [] (RegisterRef RR, const RegisterAggr &Mid, SubMap &SM) {
    if (Mid.empty())
      return RR;
    auto F = SM.find(RR);
    if (F != SM.end())
      return F->second;
    RegisterRef S = Mid.clearIn(RR);
    SM.insert({RR, S});
    return S;
  };

  // Go over all phis.
  for (unsigned i = 0; i < PhiUQ.size(); ++i) {
    auto PA = DFG.addr<PhiNode*>(PhiUQ[i]);
    NodeList PUs = PA.Addr->members_if(DFG.IsRef<NodeAttrs::Use>, DFG);
    RefMap &RUM = RealUseMap[PA.Id];

    for (NodeAddr<UseNode*> UA : PUs) {
      std::map<NodeId,RegisterAggr> &PUM = PhiUp[UA.Id];
      RegisterRef UR = UA.Addr->getRegRef(DFG);
      for (const std::pair<const NodeId, RegisterAggr> &P : PUM) {
        bool Changed = false;
        const RegisterAggr &MidDefs = P.second;
        // Collect the set PropUp of uses that are reached by the current
        // phi PA, and are not covered by any intervening def between the
        // currently visited use UA and the upward phi P.

        if (MidDefs.hasCoverOf(UR))
          continue;
        SubMap &SM = Subs[MidDefs];

        // General algorithm:
        //   for each (R,U) : U is use node of R, U is reached by PA
        //     if MidDefs does not cover (R,U)
        //       then add (R-MidDefs,U) to RealUseMap[P]
        //
        for (const std::pair<const RegisterId, NodeRefSet> &T : RUM) {
          RegisterRef R(T.first);
          // The current phi (PA) could be a phi for a regmask. It could
          // reach a whole variety of uses that are not related to the
          // specific upward phi (P.first).
          const RegisterAggr &DRs = PhiDRs.at(P.first);
          if (!DRs.hasAliasOf(R))
            continue;
          R = PRI.mapTo(DRs.intersectWith(R), T.first);
          for (std::pair<NodeId,LaneBitmask> V : T.second) {
            LaneBitmask M = R.Mask & V.second;
            if (M.none())
              continue;
            if (RegisterRef SS = ClearIn(RegisterRef(R.Reg, M), MidDefs, SM)) {
              NodeRefSet &RS = RealUseMap[P.first][SS.Reg];
              Changed |= RS.insert({V.first,SS.Mask}).second;
            }
          }
        }

        if (Changed)
          PhiUQ.push_back(P.first);
      }
    }
  }

  if (Trace) {
    dbgs() << "Real use map:\n";
    for (auto I : RealUseMap) {
      dbgs() << "phi " << Print<NodeId>(I.first, DFG);
      NodeAddr<PhiNode*> PA = DFG.addr<PhiNode*>(I.first);
      NodeList Ds = PA.Addr->members_if(DFG.IsRef<NodeAttrs::Def>, DFG);
      if (!Ds.empty()) {
        RegisterRef RR = NodeAddr<DefNode*>(Ds[0]).Addr->getRegRef(DFG);
        dbgs() << '<' << Print<RegisterRef>(RR, DFG) << '>';
      } else {
        dbgs() << "<noreg>";
      }
      dbgs() << " -> " << Print<RefMap>(I.second, DFG) << '\n';
    }
  }
}
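
// Compute the registers live on entry to each basic block, using the per-phi
// information from computePhiInfo() and a traversal of the dominator tree.
// The results are stored in LiveMap.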
void Liveness::computeLiveIns() {
  // Populate the node-to-block map. This speeds up the calculations
  // significantly.
  NBMap.clear();
  for (NodeAddr<BlockNode*> BA : DFG.getFunc().Addr->members(DFG)) {
    MachineBasicBlock *BB = BA.Addr->getCode();
    for (NodeAddr<InstrNode*> IA : BA.Addr->members(DFG)) {
      for (NodeAddr<RefNode*> RA : IA.Addr->members(DFG))
        NBMap.insert(std::make_pair(RA.Id, BB));
      NBMap.insert(std::make_pair(IA.Id, BB));
    }
  }

  MachineFunction &MF = DFG.getMF();

  // Compute IDF first, then the inverse.
  decltype(IIDF) IDF;
  for (MachineBasicBlock &B : MF) {
    auto F1 = MDF.find(&B);
    if (F1 == MDF.end())
      continue;
    SetVector<MachineBasicBlock*> IDFB(F1->second.begin(), F1->second.end());
    for (unsigned i = 0; i < IDFB.size(); ++i) {
      auto F2 = MDF.find(IDFB[i]);
      if (F2 != MDF.end())
        IDFB.insert(F2->second.begin(), F2->second.end());
    }
    // Add B to the IDF(B). This will put B in the IIDF(B).
    IDFB.insert(&B);
    IDF[&B].insert(IDFB.begin(), IDFB.end());
  }

  for (auto I : IDF)
    for (auto S : I.second)
      IIDF[S].insert(I.first);

  computePhiInfo();

  NodeAddr<FuncNode*> FA = DFG.getFunc();
  NodeList Blocks = FA.Addr->members(DFG);

  // Build the phi live-on-entry map.
  for (NodeAddr<BlockNode*> BA : Blocks) {
    MachineBasicBlock *MB = BA.Addr->getCode();
    RefMap &LON = PhiLON[MB];
    for (auto P : BA.Addr->members_if(DFG.IsCode<NodeAttrs::Phi>, DFG))
      for (const RefMap::value_type &S : RealUseMap[P.Id])
        LON[S.first].insert(S.second.begin(), S.second.end());
  }

  if (Trace) {
    dbgs() << "Phi live-on-entry map:\n";
    for (auto &I : PhiLON)
      dbgs() << "block #" << I.first->getNumber() << " -> "
             << Print<RefMap>(I.second, DFG) << '\n';
  }

  // Build the phi live-on-exit map. Each phi node has some set of reached
  // "real" uses. Propagate this set backwards into the block predecessors
  // through the reaching defs of the corresponding phi uses.
  for (NodeAddr<BlockNode*> BA : Blocks) {
    NodeList Phis = BA.Addr->members_if(DFG.IsCode<NodeAttrs::Phi>, DFG);
    for (NodeAddr<PhiNode*> PA : Phis) {
      RefMap &RUs = RealUseMap[PA.Id];
      if (RUs.empty())
        continue;

      NodeSet SeenUses;
      for (auto U : PA.Addr->members_if(DFG.IsRef<NodeAttrs::Use>, DFG)) {
        if (!SeenUses.insert(U.Id).second)
          continue;
        NodeAddr<PhiUseNode*> PUA = U;
        if (PUA.Addr->getReachingDef() == 0)
          continue;

        // Each phi has some set (possibly empty) of reached "real" uses,
        // that is, uses that are part of the compiled program. Such a use
        // may be located in some farther block, but following a chain of
        // reaching defs will eventually lead to this phi.
        // Any chain of reaching defs may fork at a phi node, but there
        // will be a path upwards that will lead to this phi. Now, this
        // chain will need to fork at this phi, since some of the reached
        // uses may have definitions joining in from multiple predecessors.
        // For each reached "real" use, identify the set of reaching defs
        // coming from each predecessor P, and add them to PhiLOX[P].
        //
        auto PrA = DFG.addr<BlockNode*>(PUA.Addr->getPredecessor());
        RefMap &LOX = PhiLOX[PrA.Addr->getCode()];

        for (const std::pair<const RegisterId, NodeRefSet> &RS : RUs) {
          // We need to visit each individual use.
          for (std::pair<NodeId,LaneBitmask> P : RS.second) {
            // Create a register ref corresponding to the use, and find
            // all reaching defs starting from the phi use, and treating
            // all related shadows as a single use cluster.
            RegisterRef S(RS.first, P.second);
            NodeList Ds = getAllReachingDefs(S, PUA, true, false, NoRegs);
            for (NodeAddr<DefNode*> D : Ds) {
              // Calculate the mask corresponding to the visited def.
              RegisterAggr TA(PRI);
              TA.insert(D.Addr->getRegRef(DFG)).intersect(S);
              LaneBitmask TM = TA.makeRegRef().Mask;
              LOX[S.Reg].insert({D.Id, TM});
            }
          }
        }

        for (NodeAddr<PhiUseNode*> T : DFG.getRelatedRefs(PA, PUA))
          SeenUses.insert(T.Id);
      }  // for U : phi uses
    }  // for P : Phis
  }  // for B : Blocks

  if (Trace) {
    dbgs() << "Phi live-on-exit map:\n";
    for (auto &I : PhiLOX)
      dbgs() << "block #" << I.first->getNumber() << " -> "
             << Print<RefMap>(I.second, DFG) << '\n';
  }

  RefMap LiveIn;
  traverse(&MF.front(), LiveIn);

  // Add function live-ins to the live-in set of the function entry block.
  LiveMap[&MF.front()].insert(DFG.getLiveIns());

  if (Trace) {
    // Dump the liveness map
    for (MachineBasicBlock &B : MF) {
      std::vector<RegisterRef> LV;
      for (const MachineBasicBlock::RegisterMaskPair &LI : B.liveins())
        LV.push_back(RegisterRef(LI.PhysReg, LI.LaneMask));
      llvm::sort(LV);
      dbgs() << printMBBReference(B) << "\t rec = {";
      for (auto I : LV)
        dbgs() << ' ' << Print<RegisterRef>(I, DFG);
      dbgs() << " }\n";
      //dbgs() << "\tcomp = " << Print<RegisterAggr>(LiveMap[&B], DFG) << '\n';

      LV.clear();
      const RegisterAggr &LG = LiveMap[&B];
      for (auto I = LG.rr_begin(), E = LG.rr_end(); I != E; ++I)
        LV.push_back(*I);
      llvm::sort(LV);
      dbgs() << "\tcomp = {";
      for (auto I : LV)
        dbgs() << ' ' << Print<RegisterRef>(I, DFG);
      dbgs() << " }\n";
    }
  }
}
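
// Replace the recorded live-in lists of all blocks in the function with the
// live-in information computed in LiveMap.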
void Liveness::resetLiveIns() {
  for (auto &B : DFG.getMF()) {
    // Remove all live-ins.
    std::vector<unsigned> T;
    for (const MachineBasicBlock::RegisterMaskPair &LI : B.liveins())
      T.push_back(LI.PhysReg);
    for (auto I : T)
      B.removeLiveIn(I);
    // Add the newly computed live-ins.
    const RegisterAggr &LiveIns = LiveMap[&B];
    for (const RegisterRef R : make_range(LiveIns.rr_begin(), LiveIns.rr_end()))
      B.addLiveIn({MCPhysReg(R.Reg), R.Mask});
  }
}
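
// Recompute the kill flags for all blocks in the function.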
void Liveness::resetKills() {
  for (auto &B : DFG.getMF())
    resetKills(&B);
}
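
// Recompute the kill flags within a single block: clear all existing kill
// flags, then walk the block bottom-up, starting from the successors'
// live-ins, and mark each use of a register that is not live past the
// instruction as a kill.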
void Liveness::resetKills(MachineBasicBlock *B) {
  // Set in LV the registers live on entry to B: either the register itself,
  // or, when only some lanes are live, the sub-registers covering those lanes.
  auto CopyLiveIns = [this] (MachineBasicBlock *B, BitVector &LV) -> void {
    for (auto I : B->liveins()) {
      MCSubRegIndexIterator S(I.PhysReg, &TRI);
      if (!S.isValid()) {
        LV.set(I.PhysReg);
        continue;
      }
      do {
        LaneBitmask M = TRI.getSubRegIndexLaneMask(S.getSubRegIndex());
        if ((M & I.LaneMask).any())
          LV.set(S.getSubReg());
        ++S;
      } while (S.isValid());
    }
  };

  BitVector LiveIn(TRI.getNumRegs()), Live(TRI.getNumRegs());
  CopyLiveIns(B, LiveIn);
  for (auto SI : B->successors())
    CopyLiveIns(SI, Live);

  for (MachineInstr &MI : llvm::reverse(*B)) {
    if (MI.isDebugInstr())
      continue;

    MI.clearKillInfo();
    for (auto &Op : MI.operands()) {
      // An implicit def of a super-register may not necessarily start a
      // live range of it, since an implicit use could be used to keep parts
      // of it live. Instead of analyzing the implicit operands, ignore
      // implicit defs.
      if (!Op.isReg() || !Op.isDef() || Op.isImplicit())
        continue;
      Register R = Op.getReg();
      if (!Register::isPhysicalRegister(R))
        continue;
      for (MCSubRegIterator SR(R, &TRI, true); SR.isValid(); ++SR)
        Live.reset(*SR);
    }
    for (auto &Op : MI.operands()) {
      if (!Op.isReg() || !Op.isUse() || Op.isUndef())
        continue;
      Register R = Op.getReg();
      if (!Register::isPhysicalRegister(R))
        continue;
      bool IsLive = false;
      for (MCRegAliasIterator AR(R, &TRI, true); AR.isValid(); ++AR) {
        if (!Live[*AR])
          continue;
        IsLive = true;
        break;
      }
      if (!IsLive)
        Op.setIsKill(true);
      for (MCSubRegIterator SR(R, &TRI, true); SR.isValid(); ++SR)
        Live.set(*SR);
    }
  }
}

// Helper function to obtain the basic block containing the reaching def
// of the given use.
MachineBasicBlock *Liveness::getBlockWithRef(NodeId RN) const {
  auto F = NBMap.find(RN);
  if (F != NBMap.end())
    return F->second;
  llvm_unreachable("Node id not in map");
}

void Liveness::traverse(MachineBasicBlock *B, RefMap &LiveIn) {
  // The LiveIn map, for each (physical) register, contains the set of live
  // reaching defs of that register that are live on entry to the associated
  // block.
  //
  // The summary of the traversal algorithm:
  //
  // R is live-in in B, if there exists a U(R), such that rdef(R) dom B
  // and (U \in IDF(B) or B dom U).
  //
  // for (C : children) {
  //   LU = {}
  //   traverse(C, LU)
  //   LiveUses += LU
  // }
  //
  // LiveUses -= Defs(B);
  // LiveUses += UpwardExposedUses(B);
  // for (C : IIDF[B])
  //   for (U : LiveUses)
  //     if (Rdef(U) dom C)
  //       C.addLiveIn(U)
  //

  // Go up the dominator tree (depth-first).
  MachineDomTreeNode *N = MDT.getNode(B);
  for (auto I : *N) {
    RefMap L;
    MachineBasicBlock *SB = I->getBlock();
    traverse(SB, L);

    for (auto S : L)
      LiveIn[S.first].insert(S.second.begin(), S.second.end());
  }

  if (Trace) {
    dbgs() << "\n-- " << printMBBReference(*B) << ": " << __func__
           << " after recursion into: {";
    for (auto I : *N)
      dbgs() << ' ' << I->getBlock()->getNumber();
    dbgs() << " }\n";
    dbgs() << "  LiveIn: " << Print<RefMap>(LiveIn, DFG) << '\n';
    dbgs() << "  Local:  " << Print<RegisterAggr>(LiveMap[B], DFG) << '\n';
  }

  // Add reaching defs of phi uses that are live on exit from this block.
  RefMap &PUs = PhiLOX[B];
  for (auto &S : PUs)
    LiveIn[S.first].insert(S.second.begin(), S.second.end());

  if (Trace) {
    dbgs() << "after LOX\n";
    dbgs() << "  LiveIn: " << Print<RefMap>(LiveIn, DFG) << '\n';
    dbgs() << "  Local:  " << Print<RegisterAggr>(LiveMap[B], DFG) << '\n';
  }

  // The LiveIn map at this point has all defs that are live-on-exit from B,
  // as if they were live-on-entry to B. First, we need to filter out all
  // defs that are present in this block. Then we will add reaching defs of
  // all upward-exposed uses.
  //
  // To filter out the defs, first make a copy of LiveIn, and then re-populate
  // LiveIn with the defs that should remain.
  RefMap LiveInCopy = LiveIn;
  LiveIn.clear();

  for (const std::pair<const RegisterId, NodeRefSet> &LE : LiveInCopy) {
    RegisterRef LRef(LE.first);
    NodeRefSet &NewDefs = LiveIn[LRef.Reg]; // To be filled.
    const NodeRefSet &OldDefs = LE.second;
    for (NodeRef OR : OldDefs) {
      // R is a def node that was live-on-exit
      auto DA = DFG.addr<DefNode*>(OR.first);
      NodeAddr<InstrNode*> IA = DA.Addr->getOwner(DFG);
      NodeAddr<BlockNode*> BA = IA.Addr->getOwner(DFG);
      if (B != BA.Addr->getCode()) {
        // Defs from a different block need to be preserved. Defs from this
        // block will need to be processed further, except for phi defs, the
        // liveness of which is handled through the PhiLON/PhiLOX maps.
        NewDefs.insert(OR);
        continue;
      }

      // Defs from this block need to stop the liveness from being
      // propagated upwards. This only applies to non-preserving defs,
      // and to the parts of the register actually covered by those defs.
      // (Note that phi defs should always be preserving.)
      RegisterAggr RRs(PRI);
      LRef.Mask = OR.second;

      if (!DFG.IsPreservingDef(DA)) {
        assert(!(IA.Addr->getFlags() & NodeAttrs::Phi));
        // DA is a non-phi def that is live-on-exit from this block, and
        // that is also located in this block. LRef is a register ref
        // whose use this def reaches. If DA covers LRef, then no part
        // of LRef is exposed upwards.
        if (RRs.insert(DA.Addr->getRegRef(DFG)).hasCoverOf(LRef))
          continue;
      }

      // DA itself was not sufficient to cover LRef. In general, it is
      // the last in a chain of aliased defs before the exit from this block.
      // There could be other defs in this block that are a part of that
      // chain. Check that now: accumulate the registers from these defs,
      // and if they all together cover LRef, it is not live-on-entry.
      for (NodeAddr<DefNode*> TA : getAllReachingDefs(DA)) {
        // DefNode -> InstrNode -> BlockNode.
        NodeAddr<InstrNode*> ITA = TA.Addr->getOwner(DFG);
        NodeAddr<BlockNode*> BTA = ITA.Addr->getOwner(DFG);
        // Reaching defs are ordered in the upward direction.
        if (BTA.Addr->getCode() != B) {
          // We have reached past the beginning of B, and the accumulated
          // registers are not covering LRef. The first def from the
          // upward chain will be live.
          // Subtract all accumulated defs (RRs) from LRef.
          RegisterRef T = RRs.clearIn(LRef);
          assert(T);
          NewDefs.insert({TA.Id,T.Mask});
          break;
        }

        // TA is in B. Only add this def to the accumulated cover if it is
        // not preserving.
        if (!(TA.Addr->getFlags() & NodeAttrs::Preserving))
          RRs.insert(TA.Addr->getRegRef(DFG));
        // If this is enough to cover LRef, then stop.
        if (RRs.hasCoverOf(LRef))
          break;
      }
    }
  }

  emptify(LiveIn);

  if (Trace) {
    dbgs() << "after defs in block\n";
    dbgs() << "  LiveIn: " << Print<RefMap>(LiveIn, DFG) << '\n';
    dbgs() << "  Local:  " << Print<RegisterAggr>(LiveMap[B], DFG) << '\n';
  }

  // Scan the block for upward-exposed uses and add them to the tracking set.
  for (auto I : DFG.getFunc().Addr->findBlock(B, DFG).Addr->members(DFG)) {
    NodeAddr<InstrNode*> IA = I;
    if (IA.Addr->getKind() != NodeAttrs::Stmt)
      continue;
    for (NodeAddr<UseNode*> UA : IA.Addr->members_if(DFG.IsUse, DFG)) {
      if (UA.Addr->getFlags() & NodeAttrs::Undef)
        continue;
      RegisterRef RR = UA.Addr->getRegRef(DFG);
      for (NodeAddr<DefNode*> D : getAllReachingDefs(UA))
        if (getBlockWithRef(D.Id) != B)
          LiveIn[RR.Reg].insert({D.Id,RR.Mask});
    }
  }

  if (Trace) {
    dbgs() << "after uses in block\n";
    dbgs() << "  LiveIn: " << Print<RefMap>(LiveIn, DFG) << '\n';
    dbgs() << "  Local:  " << Print<RegisterAggr>(LiveMap[B], DFG) << '\n';
  }

  // Phi uses should not be propagated up the dominator tree, since they
  // are not dominated by their corresponding reaching defs.
  RegisterAggr &Local = LiveMap[B];
  RefMap &LON = PhiLON[B];
  for (auto &R : LON) {
    LaneBitmask M;
    for (auto P : R.second)
      M |= P.second;
    Local.insert(RegisterRef(R.first,M));
  }

  if (Trace) {
    dbgs() << "after phi uses in block\n";
    dbgs() << "  LiveIn: " << Print<RefMap>(LiveIn, DFG) << '\n';
    dbgs() << "  Local:  " << Print<RegisterAggr>(Local, DFG) << '\n';
  }

  for (auto C : IIDF[B]) {
    RegisterAggr &LiveC = LiveMap[C];
    for (const std::pair<const RegisterId, NodeRefSet> &S : LiveIn)
      for (auto R : S.second)
        if (MDT.properlyDominates(getBlockWithRef(R.first), C))
          LiveC.insert(RegisterRef(S.first, R.second));
  }
}
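
// Remove all entries with empty sets of references from the map.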
void Liveness::emptify(RefMap &M) {
  for (auto I = M.begin(), E = M.end(); I != E; )
    I = I->second.empty() ? M.erase(I) : std::next(I);
}