//===---- ScheduleDAGInstrs.cpp - MachineInstr Rescheduling ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the ScheduleDAGInstrs class, which implements re-scheduling
// of MachineInstrs.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "sched-instrs"
#include "ScheduleDAGInstrs.h"
#include "llvm/Operator.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/SmallSet.h"
using namespace llvm;
ScheduleDAGInstrs::ScheduleDAGInstrs(MachineFunction &mf,
                                     const MachineLoopInfo &mli,
                                     const MachineDominatorTree &mdt)
  : ScheduleDAG(mf), MLI(mli), MDT(mdt), MFI(mf.getFrameInfo()),
    InstrItins(mf.getTarget().getInstrItineraryData()),
    Defs(TRI->getNumRegs()), Uses(TRI->getNumRegs()),
    LoopRegs(MLI, MDT), FirstDbgValue(0) {
  DbgValues.clear();
}
/// Run - perform scheduling.
///
void ScheduleDAGInstrs::Run(MachineBasicBlock *bb,
                            MachineBasicBlock::iterator begin,
                            MachineBasicBlock::iterator end,
                            unsigned endcount) {
  BB = bb;
  Begin = begin;
  InsertPosIndex = endcount;

  ScheduleDAG::Run(bb, end);
}
/// getUnderlyingObjectFromInt - This is the function that does the work of
/// looking through basic ptrtoint+arithmetic+inttoptr sequences.
static const Value *getUnderlyingObjectFromInt(const Value *V) {
  do {
    if (const Operator *U = dyn_cast<Operator>(V)) {
      // If we find a ptrtoint, we can transfer control back to the
      // regular getUnderlyingObjectFromInt.
      if (U->getOpcode() == Instruction::PtrToInt)
        return U->getOperand(0);
      // If we find an add of a constant or a multiplied value, it's
      // likely that the other operand will lead us to the base
      // object. We don't have to worry about the case where the
      // object address is somehow being computed by the multiply,
      // because our callers only care when the result is an
      // identifiable object.
      if (U->getOpcode() != Instruction::Add ||
          (!isa<ConstantInt>(U->getOperand(1)) &&
           Operator::getOpcode(U->getOperand(1)) != Instruction::Mul))
        return V;
      V = U->getOperand(0);
    } else {
      return V;
    }
    assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
  } while (1);
}
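// Illustrative only: a minimal sketch of the kind of sequence this walks.
// Starting from %p2, it steps back through the add and stops at the
// ptrtoint, returning %obj:
//   %i  = ptrtoint i8* %obj to i64
//   %i2 = add i64 %i, 32
//   %p2 = inttoptr i64 %i2 to i8*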
/// getUnderlyingObject - This is a wrapper around GetUnderlyingObject
/// and adds support for basic ptrtoint+arithmetic+inttoptr sequences.
static const Value *getUnderlyingObject(const Value *V) {
  // First just call Value::getUnderlyingObject to let it do what it does.
  do {
    V = GetUnderlyingObject(V);
    // If it found an inttoptr, use special code to continue climbing.
    if (Operator::getOpcode(V) != Instruction::IntToPtr)
      break;
    const Value *O = getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
    // If that succeeded in finding a pointer, continue the search.
    if (!O->getType()->isPointerTy())
      break;
    V = O;
  } while (1);
  return V;
}
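// The loop above alternates between GetUnderlyingObject, which strips
// pointer-level operations such as GEPs and bitcasts, and
// getUnderlyingObjectFromInt, which strips the integer arithmetic, so a
// chain like gep -> ptrtoint -> add -> inttoptr -> gep is climbed all the
// way back to the base object.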
/// getUnderlyingObjectForInstr - If this machine instr has memory reference
/// information and it can be tracked to a normal reference to a known
/// object, return the Value for that object. Otherwise return null.
static const Value *getUnderlyingObjectForInstr(const MachineInstr *MI,
                                                const MachineFrameInfo *MFI,
                                                bool &MayAlias) {
  MayAlias = true;
  if (!MI->hasOneMemOperand() ||
      !(*MI->memoperands_begin())->getValue() ||
      (*MI->memoperands_begin())->isVolatile())
    return 0;

  const Value *V = (*MI->memoperands_begin())->getValue();
  if (!V)
    return 0;

  V = getUnderlyingObject(V);
  if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
    // For now, ignore PseudoSourceValues which may alias LLVM IR values
    // because the code that uses this function has no way to cope with
    // such aliases.
    if (PSV->isAliased(MFI))
      return 0;

    MayAlias = PSV->mayAlias(MFI);
    return V;
  }

  if (isIdentifiedObject(V))
    return V;

  return 0;
}
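// Illustrative only: a reload from a spill slot carries a fixed-stack
// PseudoSourceValue; such a slot is known not to alias LLVM IR values, so
// MayAlias is cleared and callers can give the access more precise chain
// dependencies.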
void ScheduleDAGInstrs::StartBlock(MachineBasicBlock *BB) {
  if (MachineLoop *ML = MLI.getLoopFor(BB))
    if (BB == ML->getLoopLatch()) {
      MachineBasicBlock *Header = ML->getHeader();
      for (MachineBasicBlock::livein_iterator I = Header->livein_begin(),
           E = Header->livein_end(); I != E; ++I)
        LoopLiveInRegs.insert(*I);
      LoopRegs.VisitLoop(ML);
    }
}
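// In other words, when about to schedule a loop latch block, record the
// registers live into the loop header: defs of those registers feed the
// next iteration, and BuildSchedGraph consults LoopRegs to backschedule
// them.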
/// AddSchedBarrierDeps - Add dependencies from instructions in the current
/// list of instructions being scheduled to the scheduling barrier by adding
/// the exit SU to the register defs and use list. This is because we want to
/// make sure instructions which define registers that are either used by
/// the terminator or are live-out are properly scheduled. This is
/// especially important when the definition latency of the return value(s)
/// is too high to be hidden by the branch or when the live-out registers
/// are used by instructions in the fallthrough block.
void ScheduleDAGInstrs::AddSchedBarrierDeps() {
  MachineInstr *ExitMI = InsertPos != BB->end() ? &*InsertPos : 0;
  ExitSU.setInstr(ExitMI);
  bool AllDepKnown = ExitMI &&
    (ExitMI->getDesc().isCall() || ExitMI->getDesc().isBarrier());
  if (ExitMI && AllDepKnown) {
    // If it's a call or a barrier, add dependencies on the defs and uses of
    // the instruction.
    for (unsigned i = 0, e = ExitMI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = ExitMI->getOperand(i);
      if (!MO.isReg() || MO.isDef()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;

      assert(TRI->isPhysicalRegister(Reg) && "Virtual register encountered!");
      Uses[Reg].push_back(&ExitSU);
    }
  } else {
    // For others, e.g. fallthrough, conditional branch, assume the exit
    // uses all the registers that are livein to the successor blocks.
    SmallSet<unsigned, 8> Seen;
    for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
           SE = BB->succ_end(); SI != SE; ++SI)
      for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
             E = (*SI)->livein_end(); I != E; ++I) {
        unsigned Reg = *I;
        if (Seen.insert(Reg))
          Uses[Reg].push_back(&ExitSU);
      }
  }
}
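// Illustrative only: if the region ends in a return and, say, %R0 carries
// the return value, the instruction defining %R0 gains a use edge to
// ExitSU, so the scheduler will not hide its latency behind the terminator.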
void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
  // We'll be allocating one SUnit for each instruction, plus one for
  // the region exit node.
  SUnits.reserve(BB->size());

  // We build scheduling units by walking a block's instruction list from bottom
  // to top.

  // Remember where a generic side-effecting instruction is as we proceed.
  SUnit *BarrierChain = 0, *AliasChain = 0;

  // Memory references to specific known memory locations are tracked
  // so that they can be given more precise dependencies. We track
  // separately the known memory locations that may alias and those
  // that are known not to alias.
  std::map<const Value *, SUnit *> AliasMemDefs, NonAliasMemDefs;
  std::map<const Value *, std::vector<SUnit *> > AliasMemUses, NonAliasMemUses;

  // Check to see if the scheduler cares about latencies.
  bool UnitLatencies = ForceUnitLatencies();

  // Ask the target if address-backscheduling is desirable, and if so how much.
  const TargetSubtargetInfo &ST = TM.getSubtarget<TargetSubtargetInfo>();
  unsigned SpecialAddressLatency = ST.getSpecialAddressLatency();

  // Remove any stale debug info; sometimes BuildSchedGraph is called again
  // without emitting the info from the previous call.
  DbgValues.clear();
  FirstDbgValue = NULL;

  // Model data dependencies between instructions being scheduled and the
  // ExitSU.
  AddSchedBarrierDeps();
  for (int i = 0, e = TRI->getNumRegs(); i != e; ++i) {
    assert(Defs[i].empty() && "Only BuildGraph should push/pop Defs");
  }
  // Walk the list of instructions, from bottom moving up.
  MachineInstr *PrevMI = NULL;
  for (MachineBasicBlock::iterator MII = InsertPos, MIE = Begin;
       MII != MIE; --MII) {
    MachineInstr *MI = prior(MII);
    if (MI && PrevMI) {
      DbgValues.push_back(std::make_pair(PrevMI, MI));
      PrevMI = NULL;
    }

    if (MI->isDebugValue()) {
      PrevMI = MI;
      continue;
    }

    const MCInstrDesc &MCID = MI->getDesc();
    assert(!MCID.isTerminator() && !MI->isLabel() &&
           "Cannot schedule terminators or labels!");
    // Create the SUnit for this MI.
    SUnit *SU = NewSUnit(MI);
    SU->isCall = MCID.isCall();
    SU->isCommutable = MCID.isCommutable();
    // Assign the Latency field of SU using target-provided information.
    if (UnitLatencies)
      SU->Latency = 1;
    else
      ComputeLatency(SU);
    // Add register-based dependencies (data, anti, and output).
    for (unsigned j = 0, n = MI->getNumOperands(); j != n; ++j) {
      const MachineOperand &MO = MI->getOperand(j);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;

      assert(TRI->isPhysicalRegister(Reg) && "Virtual register encountered!");

      std::vector<SUnit *> &UseList = Uses[Reg];
      // Defs are pushed in the order they are visited and never reordered.
      std::vector<SUnit *> &DefList = Defs[Reg];
      // Optionally add output and anti dependencies. For anti
      // dependencies we use a latency of 0 because for a multi-issue
      // target we want to allow the defining instruction to issue
      // in the same cycle as the using instruction.
      // TODO: Using a latency of 1 here for output dependencies assumes
      //       there's no cost for reusing registers.
      SDep::Kind Kind = MO.isUse() ? SDep::Anti : SDep::Output;
      unsigned AOLatency = (Kind == SDep::Anti) ? 0 : 1;
      for (unsigned i = 0, e = DefList.size(); i != e; ++i) {
        SUnit *DefSU = DefList[i];
        if (DefSU == &ExitSU)
          continue;
        if (DefSU != SU &&
            (Kind != SDep::Output || !MO.isDead() ||
             !DefSU->getInstr()->registerDefIsDead(Reg)))
          DefSU->addPred(SDep(SU, Kind, AOLatency, /*Reg=*/Reg));
      }
      for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
        std::vector<SUnit *> &MemDefList = Defs[*Alias];
        for (unsigned i = 0, e = MemDefList.size(); i != e; ++i) {
          SUnit *DefSU = MemDefList[i];
          if (DefSU == &ExitSU)
            continue;
          if (DefSU != SU &&
              (Kind != SDep::Output || !MO.isDead() ||
               !DefSU->getInstr()->registerDefIsDead(*Alias)))
            DefSU->addPred(SDep(SU, Kind, AOLatency, /*Reg=*/ *Alias));
        }
      }
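      // Illustrative only: given a pair such as
      //   %r1 = ADD %r2, %r3      ; reads %r2
      //   %r2 = SUB %r4, %r5      ; redefines %r2
      // the SUB gets an anti edge with latency 0, so a multi-issue target
      // may still issue both in the same cycle; two defs of the same
      // register get an output edge with latency 1.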
      if (MO.isDef()) {
        // Add any data dependencies.
        unsigned DataLatency = SU->Latency;
        for (unsigned i = 0, e = UseList.size(); i != e; ++i) {
          SUnit *UseSU = UseList[i];
          if (UseSU == SU)
            continue;
          unsigned LDataLatency = DataLatency;
          // Optionally add in a special extra latency for nodes that
          // feed addresses.
          // TODO: Do this for register aliases too.
          // TODO: Perhaps we should get rid of
          // SpecialAddressLatency and just move this into
          // adjustSchedDependency for the targets that care about it.
          if (SpecialAddressLatency != 0 && !UnitLatencies &&
              UseSU != &ExitSU) {
            MachineInstr *UseMI = UseSU->getInstr();
            const MCInstrDesc &UseMCID = UseMI->getDesc();
            int RegUseIndex = UseMI->findRegisterUseOperandIdx(Reg);
            assert(RegUseIndex >= 0 && "UseMI doesn't use register!");
            if (RegUseIndex >= 0 &&
                (UseMCID.mayLoad() || UseMCID.mayStore()) &&
                (unsigned)RegUseIndex < UseMCID.getNumOperands() &&
                UseMCID.OpInfo[RegUseIndex].isLookupPtrRegClass())
              LDataLatency += SpecialAddressLatency;
          }
          // Adjust the dependence latency using operand def/use
          // information (if any), and then allow the target to
          // perform its own adjustments.
          const SDep& dep = SDep(SU, SDep::Data, LDataLatency, Reg);
          if (!UnitLatencies) {
            ComputeOperandLatency(SU, UseSU, const_cast<SDep &>(dep));
            ST.adjustSchedDependency(SU, UseSU, const_cast<SDep &>(dep));
          }
          UseSU->addPred(dep);
        }
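        // Illustrative only: if the target reports a SpecialAddressLatency
        // of, say, 3, a def feeding a load's lookup-ptr address operand has
        // its data-edge latency raised by 3, encouraging the scheduler to
        // hoist the address computation.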
        for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
          std::vector<SUnit *> &UseList = Uses[*Alias];
          for (unsigned i = 0, e = UseList.size(); i != e; ++i) {
            SUnit *UseSU = UseList[i];
            if (UseSU == SU)
              continue;
            const SDep& dep = SDep(SU, SDep::Data, DataLatency, *Alias);
            if (!UnitLatencies) {
              ComputeOperandLatency(SU, UseSU, const_cast<SDep &>(dep));
              ST.adjustSchedDependency(SU, UseSU, const_cast<SDep &>(dep));
            }
            UseSU->addPred(dep);
          }
        }
        // If a def is going to wrap back around to the top of the loop,
        // backschedule it.
        if (!UnitLatencies && DefList.empty()) {
          LoopDependencies::LoopDeps::iterator I = LoopRegs.Deps.find(Reg);
          if (I != LoopRegs.Deps.end()) {
            const MachineOperand *UseMO = I->second.first;
            unsigned Count = I->second.second;
            const MachineInstr *UseMI = UseMO->getParent();
            unsigned UseMOIdx = UseMO - &UseMI->getOperand(0);
            const MCInstrDesc &UseMCID = UseMI->getDesc();
            // TODO: If we knew the total depth of the region here, we could
            // handle the case where the whole loop is inside the region but
            // is large enough that the isScheduleHigh trick isn't needed.
            if (UseMOIdx < UseMCID.getNumOperands()) {
              // Currently, we only support scheduling regions consisting of
              // single basic blocks. Check to see if the instruction is in
              // the same region by checking to see if it has the same parent.
              if (UseMI->getParent() != MI->getParent()) {
                unsigned Latency = SU->Latency;
                if (UseMCID.OpInfo[UseMOIdx].isLookupPtrRegClass())
                  Latency += SpecialAddressLatency;
                // This is a wild guess as to the portion of the latency which
                // will be overlapped by work done outside the current
                // scheduling region.
                Latency -= std::min(Latency, Count);
                // Add the artificial edge.
                ExitSU.addPred(SDep(SU, SDep::Order, Latency,
                                    /*Reg=*/0, /*isNormalMemory=*/false,
                                    /*isMustAlias=*/false,
                                    /*isArtificial=*/true));
              } else if (SpecialAddressLatency > 0 &&
                         UseMCID.OpInfo[UseMOIdx].isLookupPtrRegClass()) {
                // The entire loop body is within the current scheduling region
                // and the latency of this operation is assumed to be greater
                // than the latency of the loop.
                // TODO: Recursively mark data-edge predecessors as
                //       isScheduleHigh too.
                SU->isScheduleHigh = true;
              }
            }
            LoopRegs.Deps.erase(I);
          }
        }

        UseList.clear();
        if (!MO.isDead())
          DefList.clear();
        // Calls will not be reordered because of chain dependencies (see
        // below). Since call operands are dead, calls may continue to be added
        // to the DefList making dependence checking quadratic in the size of
        // the block. Instead, we leave only one call at the back of the
        // DefList.
        if (SU->isCall) {
          while (!DefList.empty() && DefList.back()->isCall)
            DefList.pop_back();
        }
        DefList.push_back(SU);
      } else {
        UseList.push_back(SU);
      }
    }
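    // A note on the call pruning above (illustrative): without it, a run of
    // N calls would leave N entries in every clobbered register's DefList,
    // making the dependence scans quadratic; keeping one trailing call
    // bounds that work.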
    // Add chain dependencies.
    // Chain dependencies used to enforce memory order should have
    // latency of 0 (except for true dependency of Store followed by
    // aliased Load... we estimate that with a single cycle of latency
    // assuming the hardware will bypass).
    // Note that isStoreToStackSlot and isLoadFromStackSlot are not usable
    // after stack slots are lowered to actual addresses.
    // TODO: Use an AliasAnalysis and do real alias-analysis queries, and
    // produce more precise dependence information.
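    // Illustrative only: for a pair such as
    //   ST [%addr], %r1     ; store
    //   %r2 = LD [%addr]    ; possibly aliased load
    // the store->load chain edge gets STORE_LOAD_LATENCY (one cycle) rather
    // than 0, approximating a store-to-load forwarding bypass.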
#define STORE_LOAD_LATENCY 1
    unsigned TrueMemOrderLatency = 0;
    if (MCID.isCall() || MI->hasUnmodeledSideEffects() ||
        (MI->hasVolatileMemoryRef() &&
         (!MCID.mayLoad() || !MI->isInvariantLoad(AA)))) {
      // Be conservative with these and add dependencies on all memory
      // references, even those that are known to not alias.
      for (std::map<const Value *, SUnit *>::iterator I =
             NonAliasMemDefs.begin(), E = NonAliasMemDefs.end(); I != E; ++I) {
        I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      }
      for (std::map<const Value *, std::vector<SUnit *> >::iterator I =
             NonAliasMemUses.begin(), E = NonAliasMemUses.end(); I != E; ++I) {
        for (unsigned i = 0, e = I->second.size(); i != e; ++i)
          I->second[i]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency));
      }
      NonAliasMemDefs.clear();
      NonAliasMemUses.clear();
      // Add SU to the barrier chain.
      if (BarrierChain)
        BarrierChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      BarrierChain = SU;

      // fall-through
    new_alias_chain:
      // Chain all possibly aliasing memory references through SU.
      if (AliasChain)
        AliasChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      AliasChain = SU;
      for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
        PendingLoads[k]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency));
      for (std::map<const Value *, SUnit *>::iterator I = AliasMemDefs.begin(),
           E = AliasMemDefs.end(); I != E; ++I) {
        I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      }
      for (std::map<const Value *, std::vector<SUnit *> >::iterator I =
             AliasMemUses.begin(), E = AliasMemUses.end(); I != E; ++I) {
        for (unsigned i = 0, e = I->second.size(); i != e; ++i)
          I->second[i]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency));
      }
      PendingLoads.clear();
      AliasMemDefs.clear();
      AliasMemUses.clear();
    } else if (MCID.mayStore()) {
      bool MayAlias = true;
      TrueMemOrderLatency = STORE_LOAD_LATENCY;
      if (const Value *V = getUnderlyingObjectForInstr(MI, MFI, MayAlias)) {
        // A store to a specific PseudoSourceValue. Add precise dependencies.
        // Record the def in MemDefs, first adding a dep if there is
        // an existing def.
        std::map<const Value *, SUnit *>::iterator I =
          ((MayAlias) ? AliasMemDefs.find(V) : NonAliasMemDefs.find(V));
        std::map<const Value *, SUnit *>::iterator IE =
          ((MayAlias) ? AliasMemDefs.end() : NonAliasMemDefs.end());
        if (I != IE) {
          I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0, /*Reg=*/0,
                                  /*isNormalMemory=*/true));
          I->second = SU;
        } else {
          if (MayAlias)
            AliasMemDefs[V] = SU;
          else
            NonAliasMemDefs[V] = SU;
        }
        // Handle the uses in MemUses, if there are any.
        std::map<const Value *, std::vector<SUnit *> >::iterator J =
          ((MayAlias) ? AliasMemUses.find(V) : NonAliasMemUses.find(V));
        std::map<const Value *, std::vector<SUnit *> >::iterator JE =
          ((MayAlias) ? AliasMemUses.end() : NonAliasMemUses.end());
        if (J != JE) {
          for (unsigned i = 0, e = J->second.size(); i != e; ++i)
            J->second[i]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency,
                                       /*Reg=*/0, /*isNormalMemory=*/true));
          J->second.clear();
        }
        if (MayAlias) {
          // Add dependencies from all the PendingLoads, i.e. loads
          // with no underlying object.
          for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
            PendingLoads[k]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency));
          // Add dependence on alias chain, if needed.
          if (AliasChain)
            AliasChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
        }
        // Add dependence on barrier chain, if needed.
        if (BarrierChain)
          BarrierChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      } else {
        // Treat all other stores conservatively.
        goto new_alias_chain;
      }

      if (!ExitSU.isPred(SU))
        // Push stores up a bit to avoid them getting in between cmp
        // and branches.
        ExitSU.addPred(SDep(SU, SDep::Order, 0,
                            /*Reg=*/0, /*isNormalMemory=*/false,
                            /*isMustAlias=*/false,
                            /*isArtificial=*/true));
    } else if (MCID.mayLoad()) {
      bool MayAlias = true;
      TrueMemOrderLatency = 0;
      if (MI->isInvariantLoad(AA)) {
        // Invariant load, no chain dependencies needed!
      } else {
        if (const Value *V =
              getUnderlyingObjectForInstr(MI, MFI, MayAlias)) {
          // A load from a specific PseudoSourceValue. Add precise dependencies.
          std::map<const Value *, SUnit *>::iterator I =
            ((MayAlias) ? AliasMemDefs.find(V) : NonAliasMemDefs.find(V));
          std::map<const Value *, SUnit *>::iterator IE =
            ((MayAlias) ? AliasMemDefs.end() : NonAliasMemDefs.end());
          if (I != IE)
            I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0, /*Reg=*/0,
                                    /*isNormalMemory=*/true));
          if (MayAlias)
            AliasMemUses[V].push_back(SU);
          else
            NonAliasMemUses[V].push_back(SU);
        } else {
          // A load with no underlying object. Depend on all
          // potentially aliasing stores.
          for (std::map<const Value *, SUnit *>::iterator I =
                 AliasMemDefs.begin(), E = AliasMemDefs.end(); I != E; ++I)
            I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0));

          PendingLoads.push_back(SU);
          MayAlias = true;
        }

        // Add dependencies on alias and barrier chains, if needed.
        if (MayAlias && AliasChain)
          AliasChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
        if (BarrierChain)
          BarrierChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      }
    }
  }
  if (PrevMI)
    FirstDbgValue = PrevMI;

  for (int i = 0, e = TRI->getNumRegs(); i != e; ++i) {
    Defs[i].clear();
    Uses[i].clear();
  }
  PendingLoads.clear();
}
void ScheduleDAGInstrs::FinishBlock() {
  // Nothing to do.
}
void ScheduleDAGInstrs::ComputeLatency(SUnit *SU) {
  // Compute the latency for the node.
  if (!InstrItins || InstrItins->isEmpty()) {
    SU->Latency = 1;

    // Simplistic target-independent heuristic: assume that loads take
    // extra time.
    if (SU->getInstr()->getDesc().mayLoad())
      SU->Latency += 2;
  } else {
    SU->Latency = TII->getInstrLatency(InstrItins, SU->getInstr());
  }
}
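// Illustrative only: with no itinerary data, an ordinary instruction gets
// Latency 1 and a load gets 1 + 2 = 3; with itineraries, the target's
// per-opcode latency is used instead.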
void ScheduleDAGInstrs::ComputeOperandLatency(SUnit *Def, SUnit *Use,
                                              SDep& dep) const {
  if (!InstrItins || InstrItins->isEmpty())
    return;

  // For a data dependency with a known register...
  if ((dep.getKind() != SDep::Data) || (dep.getReg() == 0))
    return;

  const unsigned Reg = dep.getReg();

  // ... find the definition of the register in the defining
  // instruction.
  MachineInstr *DefMI = Def->getInstr();
  int DefIdx = DefMI->findRegisterDefOperandIdx(Reg);
  if (DefIdx != -1) {
    const MachineOperand &MO = DefMI->getOperand(DefIdx);
    if (MO.isReg() && MO.isImplicit() &&
        DefIdx >= (int)DefMI->getDesc().getNumOperands()) {
      // This is an implicit def, getOperandLatency() won't return the correct
      // latency. e.g.
      //   %D6<def>, %D7<def> = VLD1q16 %R2<kill>, 0, ..., %Q3<imp-def>
      //   %Q1<def> = VMULv8i16 %Q1<kill>, %Q3<kill>, ...
      // What we want is to compute latency between def of %D6/%D7 and use of
      // %Q3 instead.
      DefIdx = DefMI->findRegisterDefOperandIdx(Reg, false, true, TRI);
    }
    MachineInstr *UseMI = Use->getInstr();
    // For all uses of the register, calculate the maximum latency.
    int Latency = -1;
    if (UseMI) {
      for (unsigned i = 0, e = UseMI->getNumOperands(); i != e; ++i) {
        const MachineOperand &MO = UseMI->getOperand(i);
        if (!MO.isReg() || !MO.isUse())
          continue;
        unsigned MOReg = MO.getReg();
        if (MOReg != Reg)
          continue;

        int UseCycle = TII->getOperandLatency(InstrItins, DefMI, DefIdx,
                                              UseMI, i);
        Latency = std::max(Latency, UseCycle);
      }
    } else {
      // If UseMI is null, then it must be a scheduling barrier.
      if (!InstrItins || InstrItins->isEmpty())
        return;
      unsigned DefClass = DefMI->getDesc().getSchedClass();
      Latency = InstrItins->getOperandCycle(DefClass, DefIdx);
    }

    // If we found a latency, then replace the existing dependence latency.
    if (Latency >= 0)
      dep.setLatency(Latency);
  }
}
void ScheduleDAGInstrs::dumpNode(const SUnit *SU) const {
  SU->getInstr()->dump();
}
std::string ScheduleDAGInstrs::getGraphNodeLabel(const SUnit *SU) const {
  std::string s;
  raw_string_ostream oss(s);
  if (SU == &EntrySU)
    oss << "<entry>";
  else if (SU == &ExitSU)
    oss << "<exit>";
  else
    SU->getInstr()->print(oss);
  return oss.str();
}
// EmitSchedule - Emit the machine code in scheduled order.
MachineBasicBlock *ScheduleDAGInstrs::EmitSchedule() {
  // For MachineInstr-based scheduling, we're rescheduling the instructions in
  // the block, so start by removing them from the block.
  while (Begin != InsertPos) {
    MachineBasicBlock::iterator I = Begin;
    ++Begin;
    if (!I->isDebugValue())
      I->eraseFromParent();
  }

  // If the first instruction was a DBG_VALUE then put it back.
  if (FirstDbgValue)
    BB->insert(InsertPos, FirstDbgValue);

  // Then re-insert them according to the given schedule.
  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
    if (SUnit *SU = Sequence[i])
      BB->insert(InsertPos, SU->getInstr());
    else
      // Null SUnit* is a noop.
      EmitNoop();
  }

  // Update the Begin iterator, as the first instruction in the block
  // may have been scheduled later.
  if (!Sequence.empty())
    Begin = Sequence[0]->getInstr();

  // Reinsert any remaining debug_values.
  for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
         DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
    std::pair<MachineInstr *, MachineInstr *> P = *prior(DI);
    MachineInstr *DbgValue = P.first;
    MachineInstr *OrigPrevMI = P.second;
    BB->insertAfter(OrigPrevMI, DbgValue);
  }
  DbgValues.clear();
  FirstDbgValue = NULL;

  return BB;
}