//===--- ScheduleDAGSDNodes.cpp - Implement the ScheduleDAGSDNodes class --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the ScheduleDAG class, which is a base class used by
// scheduling implementation classes.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "pre-RA-sched"
#include "SDNodeDbgValue.h"
#include "ScheduleDAGSDNodes.h"
#include "InstrEmitter.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtarget.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

STATISTIC(LoadsClustered, "Number of loads clustered together");

ScheduleDAGSDNodes::ScheduleDAGSDNodes(MachineFunction &mf)
  : ScheduleDAG(mf),
    InstrItins(mf.getTarget().getInstrItineraryData()) {}

/// Run - perform scheduling.
///
void ScheduleDAGSDNodes::Run(SelectionDAG *dag, MachineBasicBlock *bb,
                             MachineBasicBlock::iterator insertPos) {
  DAG = dag;
  ScheduleDAG::Run(bb, insertPos);
}

/// NewSUnit - Creates a new SUnit and returns a ptr to it.
///
SUnit *ScheduleDAGSDNodes::NewSUnit(SDNode *N) {
#ifndef NDEBUG
  const SUnit *Addr = 0;
  if (!SUnits.empty())
    Addr = &SUnits[0];
#endif
  SUnits.push_back(SUnit(N, (unsigned)SUnits.size()));
  assert((Addr == 0 || Addr == &SUnits[0]) &&
         "SUnits std::vector reallocated on the fly!");
  SUnits.back().OrigNode = &SUnits.back();
  SUnit *SU = &SUnits.back();
  const TargetLowering &TLI = DAG->getTargetLoweringInfo();
  if (!N ||
      (N->isMachineOpcode() &&
       N->getMachineOpcode() == TargetOpcode::IMPLICIT_DEF))
    SU->SchedulingPref = Sched::None;
  else
    SU->SchedulingPref = TLI.getSchedulingPreference(N);
  return SU;
}

SUnit *ScheduleDAGSDNodes::Clone(SUnit *Old) {
  SUnit *SU = NewSUnit(Old->getNode());
  SU->OrigNode = Old->OrigNode;
  SU->Latency = Old->Latency;
  SU->isCall = Old->isCall;
  SU->isTwoAddress = Old->isTwoAddress;
  SU->isCommutable = Old->isCommutable;
  SU->hasPhysRegDefs = Old->hasPhysRegDefs;
  SU->hasPhysRegClobbers = Old->hasPhysRegClobbers;
  SU->SchedulingPref = Old->SchedulingPref;
  Old->isCloned = true;
  return SU;
}

/// CheckForPhysRegDependency - Check if the dependency between def and use of
/// a specified operand is a physical register dependency. If so, returns the
/// register and the cost of copying the register.
static void CheckForPhysRegDependency(SDNode *Def, SDNode *User, unsigned Op,
                                      const TargetRegisterInfo *TRI,
                                      const TargetInstrInfo *TII,
                                      unsigned &PhysReg, int &Cost) {
  if (Op != 2 || User->getOpcode() != ISD::CopyToReg)
    return;

  unsigned Reg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
  if (TargetRegisterInfo::isVirtualRegister(Reg))
    return;

  unsigned ResNo = User->getOperand(2).getResNo();
  if (Def->isMachineOpcode()) {
    const TargetInstrDesc &II = TII->get(Def->getMachineOpcode());
    if (ResNo >= II.getNumDefs() &&
        II.ImplicitDefs[ResNo - II.getNumDefs()] == Reg) {
      PhysReg = Reg;
      const TargetRegisterClass *RC =
        TRI->getMinimalPhysRegClass(Reg, Def->getValueType(ResNo));
      Cost = RC->getCopyCost();
    }
  }
}
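
// Note: ISD::CopyToReg operands are (chain, register, value), so operand 2 is
// the value being copied into the register; only that operand can create a
// physical register dependency here.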

/// AddFlags - Add MVT::Flag results and operands as needed to glue a node
/// into a flagged sequence.
static void AddFlags(SDNode *N, SDValue Flag, bool AddFlag,
                     SelectionDAG *DAG) {
  SmallVector<EVT, 4> VTs;
  SDNode *FlagDestNode = Flag.getNode();

  // Don't add a flag from a node to itself.
  if (FlagDestNode == N) return;

  // Don't add a flag to something which already has a flag.
  if (N->getValueType(N->getNumValues() - 1) == MVT::Flag) return;

  for (unsigned I = 0, E = N->getNumValues(); I != E; ++I)
    VTs.push_back(N->getValueType(I));

  if (AddFlag)
    VTs.push_back(MVT::Flag);

  SmallVector<SDValue, 4> Ops;
  for (unsigned I = 0, E = N->getNumOperands(); I != E; ++I)
    Ops.push_back(N->getOperand(I));

  if (FlagDestNode)
    Ops.push_back(Flag);

  SDVTList VTList = DAG->getVTList(&VTs[0], VTs.size());
  MachineSDNode::mmo_iterator Begin = 0, End = 0;
  MachineSDNode *MN = dyn_cast<MachineSDNode>(N);

  // Store memory references.
  if (MN) {
    Begin = MN->memoperands_begin();
    End = MN->memoperands_end();
  }

  DAG->MorphNodeTo(N, N->getOpcode(), VTList, &Ops[0], Ops.size());

  // Reset the memory references.
  if (MN)
    MN->setMemRefs(Begin, End);
}
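
// ClusterNeighboringLoads below relies on AddFlags to glue loads together:
// the lead load gets a fresh MVT::Flag result, and each following load takes
// the previous load's flag as an extra operand.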

/// ClusterNeighboringLoads - Force nearby loads together by "flagging" them.
/// This function finds loads of the same base and different offsets. If the
/// offsets are not far apart (target specific), it adds MVT::Flag inputs and
/// outputs to ensure they are scheduled together and in order. This
/// optimization may benefit some targets by improving cache locality.
void ScheduleDAGSDNodes::ClusterNeighboringLoads(SDNode *Node) {
  SDNode *Chain = 0;
  unsigned NumOps = Node->getNumOperands();
  if (Node->getOperand(NumOps-1).getValueType() == MVT::Other)
    Chain = Node->getOperand(NumOps-1).getNode();
  if (!Chain)
    return;

  // Look for other loads of the same chain. Find loads that are loading from
  // the same base pointer and different offsets.
  SmallPtrSet<SDNode*, 16> Visited;
  SmallVector<int64_t, 4> Offsets;
  DenseMap<long long, SDNode*> O2SMap;  // Map from offset to SDNode.
  bool Cluster = false;
  SDNode *Base = Node;
  for (SDNode::use_iterator I = Chain->use_begin(), E = Chain->use_end();
       I != E; ++I) {
    SDNode *User = *I;
    if (User == Node || !Visited.insert(User))
      continue;
    int64_t Offset1, Offset2;
    if (!TII->areLoadsFromSameBasePtr(Base, User, Offset1, Offset2) ||
        Offset1 == Offset2)
      // FIXME: Should be ok if their addresses are identical. But earlier
      // optimizations really should have eliminated one of the loads.
      continue;
    if (O2SMap.insert(std::make_pair(Offset1, Base)).second)
      Offsets.push_back(Offset1);
    O2SMap.insert(std::make_pair(Offset2, User));
    Offsets.push_back(Offset2);
    if (Offset2 < Offset1)
      Base = User;
    Cluster = true;
  }

  if (!Cluster)
    return;

  // Sort them in increasing order.
  std::sort(Offsets.begin(), Offsets.end());

  // Check if the loads are close enough.
  SmallVector<SDNode*, 4> Loads;
  unsigned NumLoads = 0;
  int64_t BaseOff = Offsets[0];
  SDNode *BaseLoad = O2SMap[BaseOff];
  Loads.push_back(BaseLoad);
  for (unsigned i = 1, e = Offsets.size(); i != e; ++i) {
    int64_t Offset = Offsets[i];
    SDNode *Load = O2SMap[Offset];
    if (!TII->shouldScheduleLoadsNear(BaseLoad, Load, BaseOff, Offset, NumLoads))
      break; // Stop right here. Ignore loads that are further away.
    Loads.push_back(Load);
    ++NumLoads;
  }

  if (NumLoads == 0)
    return;

  // Cluster loads by adding MVT::Flag outputs and inputs. This also
  // ensures they are scheduled in order of increasing addresses.
  SDNode *Lead = Loads[0];
  AddFlags(Lead, SDValue(0, 0), true, DAG);

  SDValue InFlag = SDValue(Lead, Lead->getNumValues() - 1);
  for (unsigned I = 1, E = Loads.size(); I != E; ++I) {
    bool OutFlag = I < E - 1;
    SDNode *Load = Loads[I];

    AddFlags(Load, InFlag, OutFlag, DAG);

    if (OutFlag)
      InFlag = SDValue(Load, Load->getNumValues() - 1);

    ++LoadsClustered;
  }
}
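
// For example, three loads from the same base at offsets 0, 4 and 8 end up
// flagged base -> +4 -> +8, so the scheduler keeps them adjacent and emits
// them in increasing address order.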

/// ClusterNodes - Cluster certain nodes which should be scheduled together.
///
void ScheduleDAGSDNodes::ClusterNodes() {
  for (SelectionDAG::allnodes_iterator NI = DAG->allnodes_begin(),
       E = DAG->allnodes_end(); NI != E; ++NI) {
    SDNode *Node = &*NI;
    if (!Node || !Node->isMachineOpcode())
      continue;

    unsigned Opc = Node->getMachineOpcode();
    const TargetInstrDesc &TID = TII->get(Opc);
    if (TID.mayLoad())
      // Cluster loads from "near" addresses into combined SUnits.
      ClusterNeighboringLoads(Node);
  }
}

void ScheduleDAGSDNodes::BuildSchedUnits() {
  // During scheduling, the NodeId field of SDNode is used to map SDNodes
  // to their associated SUnits by holding SUnits table indices. A value
  // of -1 means the SDNode does not yet have an associated SUnit.
  unsigned NumNodes = 0;
  for (SelectionDAG::allnodes_iterator NI = DAG->allnodes_begin(),
       E = DAG->allnodes_end(); NI != E; ++NI) {
    NI->setNodeId(-1);
    ++NumNodes;
  }

  // Reserve entries in the vector for each of the SUnits we are creating. This
  // ensures that reallocation of the vector won't happen, so SUnit*'s won't get
  // invalidated.
  // FIXME: Multiply by 2 because we may clone nodes during scheduling.
  // This is a temporary workaround.
  SUnits.reserve(NumNodes * 2);

  // Add all nodes in depth first order.
  SmallVector<SDNode*, 64> Worklist;
  SmallPtrSet<SDNode*, 64> Visited;
  Worklist.push_back(DAG->getRoot().getNode());
  Visited.insert(DAG->getRoot().getNode());

  while (!Worklist.empty()) {
    SDNode *NI = Worklist.pop_back_val();

    // Add all operands to the worklist unless they've already been added.
    for (unsigned i = 0, e = NI->getNumOperands(); i != e; ++i)
      if (Visited.insert(NI->getOperand(i).getNode()))
        Worklist.push_back(NI->getOperand(i).getNode());

    if (isPassiveNode(NI))  // Leaf node, e.g. a TargetImmediate.
      continue;

    // If this node has already been processed, stop now.
    if (NI->getNodeId() != -1) continue;

    SUnit *NodeSUnit = NewSUnit(NI);

    // See if anything is flagged to this node, if so, add them to flagged
    // nodes. Nodes can have at most one flag input and one flag output. Flags
    // are required to be the last operand and result of a node.

    // Scan up to find flagged preds.
    SDNode *N = NI;
    while (N->getNumOperands() &&
           N->getOperand(N->getNumOperands()-1).getValueType() == MVT::Flag) {
      N = N->getOperand(N->getNumOperands()-1).getNode();
      assert(N->getNodeId() == -1 && "Node already inserted!");
      N->setNodeId(NodeSUnit->NodeNum);
      if (N->isMachineOpcode() && TII->get(N->getMachineOpcode()).isCall())
        NodeSUnit->isCall = true;
    }

    // Scan down to find any flagged succs.
    N = NI;
    while (N->getValueType(N->getNumValues()-1) == MVT::Flag) {
      SDValue FlagVal(N, N->getNumValues()-1);

      // There are either zero or one users of the Flag result.
      bool HasFlagUse = false;
      for (SDNode::use_iterator UI = N->use_begin(), E = N->use_end();
           UI != E; ++UI)
        if (FlagVal.isOperandOf(*UI)) {
          HasFlagUse = true;
          assert(N->getNodeId() == -1 && "Node already inserted!");
          N->setNodeId(NodeSUnit->NodeNum);
          N = *UI;
          if (N->isMachineOpcode() && TII->get(N->getMachineOpcode()).isCall())
            NodeSUnit->isCall = true;
          break;
        }
      if (!HasFlagUse) break;
    }

    // If there are flag operands involved, N is now the bottom-most node
    // of the sequence of nodes that are flagged together.
    // Update the SUnit.
    NodeSUnit->setNode(N);
    assert(N->getNodeId() == -1 && "Node already inserted!");
    N->setNodeId(NodeSUnit->NodeNum);

    // Assign the Latency field of NodeSUnit using target-provided information.
    ComputeLatency(NodeSUnit);
  }
}
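
// At this point each SUnit represents a maximal chain of flagged-together
// nodes, with the bottom-most node of the chain as its representative node.
// AddSchedEdges below walks each chain via getFlaggedNode() to add the edges.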

void ScheduleDAGSDNodes::AddSchedEdges() {
  const TargetSubtarget &ST = TM.getSubtarget<TargetSubtarget>();

  // Check to see if the scheduler cares about latencies.
  bool UnitLatencies = ForceUnitLatencies();

  // Pass 2: add the preds, succs, etc.
  for (unsigned su = 0, e = SUnits.size(); su != e; ++su) {
    SUnit *SU = &SUnits[su];
    SDNode *MainNode = SU->getNode();

    if (MainNode->isMachineOpcode()) {
      unsigned Opc = MainNode->getMachineOpcode();
      const TargetInstrDesc &TID = TII->get(Opc);
      for (unsigned i = 0; i != TID.getNumOperands(); ++i) {
        if (TID.getOperandConstraint(i, TOI::TIED_TO) != -1) {
          SU->isTwoAddress = true;
          break;
        }
      }
      if (TID.isCommutable())
        SU->isCommutable = true;
    }

    // Find all predecessors and successors of the group.
    for (SDNode *N = SU->getNode(); N; N = N->getFlaggedNode()) {
      if (N->isMachineOpcode() &&
          TII->get(N->getMachineOpcode()).getImplicitDefs()) {
        SU->hasPhysRegClobbers = true;
        unsigned NumUsed = InstrEmitter::CountResults(N);
        while (NumUsed != 0 && !N->hasAnyUseOfValue(NumUsed - 1))
          --NumUsed;    // Skip over unused values at the end.
        if (NumUsed > TII->get(N->getMachineOpcode()).getNumDefs())
          SU->hasPhysRegDefs = true;
      }

      for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
        SDNode *OpN = N->getOperand(i).getNode();
        if (isPassiveNode(OpN)) continue;   // Not scheduled.
        SUnit *OpSU = &SUnits[OpN->getNodeId()];
        assert(OpSU && "Node has no SUnit!");
        if (OpSU == SU) continue;           // In the same group.

        EVT OpVT = N->getOperand(i).getValueType();
        assert(OpVT != MVT::Flag && "Flagged nodes should be in same sunit!");
        bool isChain = OpVT == MVT::Other;

        unsigned PhysReg = 0;
        int Cost = 1;
        // Determine if this is a physical register dependency.
        CheckForPhysRegDependency(OpN, N, i, TRI, TII, PhysReg, Cost);
        assert((PhysReg == 0 || !isChain) &&
               "Chain dependence via physreg data?");
        // FIXME: See ScheduleDAGSDNodes::EmitCopyFromReg. For now, scheduler
        // emits a copy from the physical register to a virtual register unless
        // it requires a cross class copy (cost < 0). That means we are only
        // treating "expensive to copy" register dependency as physical register
        // dependency. This may change in the future though.
        if (Cost >= 0)
          PhysReg = 0;

        // If this is a ctrl dep, latency is 1.
        unsigned OpLatency = isChain ? 1 : OpSU->Latency;
        const SDep &dep = SDep(OpSU, isChain ? SDep::Order : SDep::Data,
                               OpLatency, PhysReg);
        if (!isChain && !UnitLatencies) {
          ComputeOperandLatency(OpN, N, i, const_cast<SDep &>(dep));
          ST.adjustSchedDependency(OpSU, SU, const_cast<SDep &>(dep));
        }

        SU->addPred(dep);
      }
    }
  }
}
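
// Chain (MVT::Other) operands become SDep::Order edges with unit latency;
// value operands become SDep::Data edges whose latency may be refined by
// ComputeOperandLatency and the subtarget's adjustSchedDependency hook.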

/// BuildSchedGraph - Build the SUnit graph from the selection dag that we
/// are input. This SUnit graph is similar to the SelectionDAG, but
/// excludes nodes that aren't interesting to scheduling, and represents
/// flagged together nodes with a single SUnit.
void ScheduleDAGSDNodes::BuildSchedGraph(AliasAnalysis *AA) {
  // Cluster certain nodes which should be scheduled together.
  ClusterNodes();
  // Populate the SUnits array.
  BuildSchedUnits();
  // Compute all the scheduling dependencies between nodes.
  AddSchedEdges();
}

void ScheduleDAGSDNodes::ComputeLatency(SUnit *SU) {
  // Check to see if the scheduler cares about latencies.
  if (ForceUnitLatencies()) {
    SU->Latency = 1;
    return;
  }

  if (!InstrItins || InstrItins->isEmpty()) {
    SU->Latency = 1;
    return;
  }

  // Compute the latency for the node. We use the sum of the latencies for
  // all nodes flagged together into this SUnit.
  SU->Latency = 0;
  for (SDNode *N = SU->getNode(); N; N = N->getFlaggedNode())
    if (N->isMachineOpcode())
      SU->Latency += TII->getInstrLatency(InstrItins, N);
}

void ScheduleDAGSDNodes::ComputeOperandLatency(SDNode *Def, SDNode *Use,
                                               unsigned OpIdx, SDep &dep) const {
  // Check to see if the scheduler cares about latencies.
  if (ForceUnitLatencies())
    return;

  if (dep.getKind() != SDep::Data)
    return;

  unsigned DefIdx = Use->getOperand(OpIdx).getResNo();
  if (Use->isMachineOpcode())
    // Adjust the use operand index by num of defs.
    OpIdx += TII->get(Use->getMachineOpcode()).getNumDefs();
  int Latency = TII->getOperandLatency(InstrItins, Def, DefIdx, Use, OpIdx);
  if (Latency > 1 && Use->getOpcode() == ISD::CopyToReg &&
      !BB->succ_empty()) {
    unsigned Reg = cast<RegisterSDNode>(Use->getOperand(1))->getReg();
    if (TargetRegisterInfo::isVirtualRegister(Reg))
      // This copy is a liveout value. It is likely coalesced, so reduce the
      // latency so not to penalize the def.
      // FIXME: need target specific adjustment here?
      Latency = (Latency > 1) ? Latency - 1 : 1;
  }
  dep.setLatency(Latency);
}

void ScheduleDAGSDNodes::dumpNode(const SUnit *SU) const {
  if (!SU->getNode()) {
    dbgs() << "PHYS REG COPY\n";
    return;
  }

  SU->getNode()->dump(DAG);
  dbgs() << "\n";
  SmallVector<SDNode *, 4> FlaggedNodes;
  for (SDNode *N = SU->getNode()->getFlaggedNode(); N; N = N->getFlaggedNode())
    FlaggedNodes.push_back(N);
  while (!FlaggedNodes.empty()) {
    dbgs() << "    ";
    FlaggedNodes.back()->dump(DAG);
    dbgs() << "\n";
    FlaggedNodes.pop_back();
  }
}

namespace {
  struct OrderSorter {
    bool operator()(const std::pair<unsigned, MachineInstr*> &A,
                    const std::pair<unsigned, MachineInstr*> &B) {
      return A.first < B.first;
    }
  };
}

// ProcessSourceNode - Process nodes with source order numbers. These are added
// to a vector which EmitSchedule uses to determine how to insert dbg_value
// instructions in the right order.
static void ProcessSourceNode(SDNode *N, SelectionDAG *DAG,
                              InstrEmitter &Emitter,
                              DenseMap<SDValue, unsigned> &VRBaseMap,
                              SmallVector<std::pair<unsigned, MachineInstr*>, 32> &Orders,
                              SmallSet<unsigned, 8> &Seen) {
  unsigned Order = DAG->GetOrdering(N);
  if (!Order || !Seen.insert(Order))
    return;

  MachineBasicBlock *BB = Emitter.getBlock();
  if (Emitter.getInsertPos() == BB->begin() || BB->back().isPHI()) {
    // Did not insert any instruction.
    Orders.push_back(std::make_pair(Order, (MachineInstr*)0));
    return;
  }

  Orders.push_back(std::make_pair(Order, prior(Emitter.getInsertPos())));
  if (!N->getHasDebugValue())
    return;
  // Opportunistically insert immediate dbg_value uses, i.e. those with source
  // order number right after the N.
  MachineBasicBlock::iterator InsertPos = Emitter.getInsertPos();
  SmallVector<SDDbgValue*, 2> &DVs = DAG->GetDbgValues(N);
  for (unsigned i = 0, e = DVs.size(); i != e; ++i) {
    if (DVs[i]->isInvalidated())
      continue;
    unsigned DVOrder = DVs[i]->getOrder();
    if (DVOrder == ++Order) {
      MachineInstr *DbgMI = Emitter.EmitDbgValue(DVs[i], VRBaseMap);
      if (DbgMI) {
        Orders.push_back(std::make_pair(DVOrder, DbgMI));
        BB->insert(InsertPos, DbgMI);
      }
      DVs[i]->setIsInvalidated();
    }
  }
}
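
// EmitSchedule collects the (source order, last emitted instruction) pairs
// recorded above, sorts them, and uses them to place the remaining dbg_value
// instructions at the right points in the emitted code.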

/// EmitSchedule - Emit the machine code in scheduled order.
MachineBasicBlock *ScheduleDAGSDNodes::EmitSchedule() {
  InstrEmitter Emitter(BB, InsertPos);
  DenseMap<SDValue, unsigned> VRBaseMap;
  DenseMap<SUnit*, unsigned> CopyVRBaseMap;
  SmallVector<std::pair<unsigned, MachineInstr*>, 32> Orders;
  SmallSet<unsigned, 8> Seen;
  bool HasDbg = DAG->hasDebugValues();

  // If this is the first BB, emit byval parameter dbg_value's.
  if (HasDbg && BB->getParent()->begin() == MachineFunction::iterator(BB)) {
    SDDbgInfo::DbgIterator PDI = DAG->ByvalParmDbgBegin();
    SDDbgInfo::DbgIterator PDE = DAG->ByvalParmDbgEnd();
    for (; PDI != PDE; ++PDI) {
      MachineInstr *DbgMI = Emitter.EmitDbgValue(*PDI, VRBaseMap);
      if (DbgMI)
        BB->insert(InsertPos, DbgMI);
    }
  }

  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
    SUnit *SU = Sequence[i];
    if (!SU) {
      // Null SUnit* is a noop.
      EmitNoop();
      continue;
    }

    // For pre-regalloc scheduling, create instructions corresponding to the
    // SDNode and any flagged SDNodes and append them to the block.
    if (!SU->getNode()) {
      // Emit a copy.
      EmitPhysRegCopy(SU, CopyVRBaseMap);
      continue;
    }

    SmallVector<SDNode *, 4> FlaggedNodes;
    for (SDNode *N = SU->getNode()->getFlaggedNode(); N;
         N = N->getFlaggedNode())
      FlaggedNodes.push_back(N);
    while (!FlaggedNodes.empty()) {
      SDNode *N = FlaggedNodes.back();
      Emitter.EmitNode(FlaggedNodes.back(), SU->OrigNode != SU, SU->isCloned,
                       VRBaseMap);
      // Remember the source order of the inserted instruction.
      if (HasDbg)
        ProcessSourceNode(N, DAG, Emitter, VRBaseMap, Orders, Seen);
      FlaggedNodes.pop_back();
    }
    Emitter.EmitNode(SU->getNode(), SU->OrigNode != SU, SU->isCloned,
                     VRBaseMap);
    // Remember the source order of the inserted instruction.
    if (HasDbg)
      ProcessSourceNode(SU->getNode(), DAG, Emitter, VRBaseMap, Orders,
                        Seen);
  }

  // Insert all the dbg_values which have not already been inserted in source
  // order sequence.
  if (HasDbg) {
    MachineBasicBlock::iterator BBBegin = BB->getFirstNonPHI();

    // Sort the source order instructions and use the order to insert debug
    // values.
    std::sort(Orders.begin(), Orders.end(), OrderSorter());

    SDDbgInfo::DbgIterator DI = DAG->DbgBegin();
    SDDbgInfo::DbgIterator DE = DAG->DbgEnd();
    // Now emit the rest according to source order.
    unsigned LastOrder = 0;
    for (unsigned i = 0, e = Orders.size(); i != e && DI != DE; ++i) {
      unsigned Order = Orders[i].first;
      MachineInstr *MI = Orders[i].second;
      // Insert all SDDbgValue's whose order(s) are before "Order".
      if (!MI)
        continue;
      unsigned LastDIOrder = 0;
      for (; DI != DE &&
             (*DI)->getOrder() >= LastOrder && (*DI)->getOrder() < Order; ++DI) {
        assert((*DI)->getOrder() >= LastDIOrder &&
               "SDDbgValue nodes must be in source order!");
        LastDIOrder = (*DI)->getOrder();
        if ((*DI)->isInvalidated())
          continue;
        MachineInstr *DbgMI = Emitter.EmitDbgValue(*DI, VRBaseMap);
        if (DbgMI) {
          if (!LastOrder)
            // Insert to start of the BB (after PHIs).
            BB->insert(BBBegin, DbgMI);
          else {
            // Insert at the instruction, which may be in a different
            // block, if the block was split by a custom inserter.
            MachineBasicBlock::iterator Pos = MI;
            MI->getParent()->insert(llvm::next(Pos), DbgMI);
          }
        }
      }
      LastOrder = Order;
    }
    // Add trailing DbgValue's before the terminator. FIXME: May want to add
    // some of them before one or more conditional branches?
    while (DI != DE) {
      MachineBasicBlock *InsertBB = Emitter.getBlock();
      MachineBasicBlock::iterator Pos = Emitter.getBlock()->getFirstTerminator();
      if (!(*DI)->isInvalidated()) {
        MachineInstr *DbgMI = Emitter.EmitDbgValue(*DI, VRBaseMap);
        if (DbgMI)
          InsertBB->insert(Pos, DbgMI);
      }
      ++DI;
    }
  }

  BB = Emitter.getBlock();
  InsertPos = Emitter.getInsertPos();
  return BB;
}