//===---- ScheduleDAGInstrs.cpp - MachineInstr Rescheduling ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the ScheduleDAGInstrs class, which implements re-scheduling
// of MachineInstrs.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "sched-instrs"
#include "ScheduleDAGInstrs.h"
#include "llvm/Operator.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtarget.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/SmallSet.h"
using namespace llvm;

ScheduleDAGInstrs::ScheduleDAGInstrs(MachineFunction &mf,
                                     const MachineLoopInfo &mli,
                                     const MachineDominatorTree &mdt)
  : ScheduleDAG(mf), MLI(mli), MDT(mdt), MFI(mf.getFrameInfo()),
    InstrItins(mf.getTarget().getInstrItineraryData()),
    Defs(TRI->getNumRegs()), Uses(TRI->getNumRegs()), LoopRegs(MLI, MDT) {
}

/// Run - perform scheduling.
///
void ScheduleDAGInstrs::Run(MachineBasicBlock *bb,
                            MachineBasicBlock::iterator begin,
                            MachineBasicBlock::iterator end,
                            unsigned endcount) {
  BB = bb;
  Begin = begin;
  InsertPosIndex = endcount;

  ScheduleDAG::Run(bb, end);
}

/// getUnderlyingObjectFromInt - This is the function that does the work of
/// looking through basic ptrtoint+arithmetic+inttoptr sequences.
static const Value *getUnderlyingObjectFromInt(const Value *V) {
  do {
    if (const Operator *U = dyn_cast<Operator>(V)) {
      // If we find a ptrtoint, we can transfer control back to the
      // regular getUnderlyingObjectFromInt.
      if (U->getOpcode() == Instruction::PtrToInt)
        return U->getOperand(0);
      // If we find an add of a constant or a multiplied value, it's
      // likely that the other operand will lead us to the base
      // object. We don't have to worry about the case where the
      // object address is somehow being computed by the multiply,
      // because our callers only care when the result is an
      // identifiable object.
      if (U->getOpcode() != Instruction::Add ||
          (!isa<ConstantInt>(U->getOperand(1)) &&
           Operator::getOpcode(U->getOperand(1)) != Instruction::Mul))
        return V;
      V = U->getOperand(0);
    } else {
      return V;
    }
    assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
  } while (1);
}

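// For illustration, a hypothetical IR sequence this routine looks through:
//   %i = ptrtoint i8* %obj to i64
//   %j = add i64 %i, 16
//   %p = inttoptr i64 %j to i8*
// Starting from %j, the add's non-constant operand %i is followed to the
// ptrtoint, whose pointer operand %obj is returned.
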
/// getUnderlyingObject - This is a wrapper around GetUnderlyingObject
/// and adds support for basic ptrtoint+arithmetic+inttoptr sequences.
static const Value *getUnderlyingObject(const Value *V) {
  // First just call GetUnderlyingObject to let it do what it does.
  do {
    V = GetUnderlyingObject(V);
    // If it found an inttoptr, use special code to continue climbing.
    if (Operator::getOpcode(V) != Instruction::IntToPtr)
      break;
    const Value *O = getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
    // If that succeeded in finding a pointer, continue the search.
    if (!O->getType()->isPointerTy())
      break;
    V = O;
  } while (1);
  return V;
}

/// getUnderlyingObjectForInstr - If this machine instr has memory reference
/// information and it can be tracked to a normal reference to a known
/// object, return the Value for that object. Otherwise return null.
static const Value *getUnderlyingObjectForInstr(const MachineInstr *MI,
                                                const MachineFrameInfo *MFI,
                                                bool &MayAlias) {
  MayAlias = true;
  if (!MI->hasOneMemOperand() ||
      !(*MI->memoperands_begin())->getValue() ||
      (*MI->memoperands_begin())->isVolatile())
    return 0;

  const Value *V = (*MI->memoperands_begin())->getValue();
  if (!V)
    return 0;

  V = getUnderlyingObject(V);
  if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
    // For now, ignore PseudoSourceValues which may alias LLVM IR values
    // because the code that uses this function has no way to cope with
    // such aliases.
    if (PSV->isAliased(MFI))
      return 0;

    MayAlias = PSV->mayAlias(MFI);
    return V;
  }

  if (isIdentifiedObject(V))
    return V;

  return 0;
}

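// For example (hypothetical machine code), a spill reload such as
//   %r1 = MOV32rm <fi#3>, ...
// has a single non-volatile memoperand whose value is a stack-slot
// PseudoSourceValue; if that slot is not aliased to LLVM IR values, this
// returns it with MayAlias reflecting PSV->mayAlias(MFI), letting callers
// use the more precise NonAlias maps in BuildSchedGraph.
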
void ScheduleDAGInstrs::StartBlock(MachineBasicBlock *BB) {
  if (MachineLoop *ML = MLI.getLoopFor(BB))
    if (BB == ML->getLoopLatch()) {
      MachineBasicBlock *Header = ML->getHeader();
      for (MachineBasicBlock::livein_iterator I = Header->livein_begin(),
           E = Header->livein_end(); I != E; ++I)
        LoopLiveInRegs.insert(*I);
      LoopRegs.VisitLoop(ML);
    }
}

/// AddSchedBarrierDeps - Add dependencies from instructions in the current
/// list of instructions being scheduled to the scheduling barrier by adding
/// the exit SU to the register defs and use list. This is because we want to
/// make sure instructions which define registers that are either used by
/// the terminator or are live-out are properly scheduled. This is
/// especially important when the definition latency of the return value(s)
/// is too high to be hidden by the branch, or when liveout registers
/// are used by instructions in the fallthrough block.
void ScheduleDAGInstrs::AddSchedBarrierDeps() {
  MachineInstr *ExitMI = InsertPos != BB->end() ? &*InsertPos : 0;
  ExitSU.setInstr(ExitMI);
  bool AllDepKnown = ExitMI &&
    (ExitMI->getDesc().isCall() || ExitMI->getDesc().isBarrier());
  if (ExitMI && AllDepKnown) {
    // If it's a call or a barrier, add dependencies on the defs and uses of
    // the instruction.
    for (unsigned i = 0, e = ExitMI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = ExitMI->getOperand(i);
      if (!MO.isReg() || MO.isDef()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;

      assert(TRI->isPhysicalRegister(Reg) && "Virtual register encountered!");
      Uses[Reg].push_back(&ExitSU);
    }
  } else {
    // For others, e.g. fallthrough, conditional branch, assume the exit
    // uses all the registers that are livein to the successor blocks.
    SmallSet<unsigned, 8> Seen;
    for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
           SE = BB->succ_end(); SI != SE; ++SI)
      for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
             E = (*SI)->livein_end(); I != E; ++I) {
        unsigned Reg = *I;
        if (Seen.insert(Reg))
          Uses[Reg].push_back(&ExitSU);
      }
  }
}

void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
  // We'll be allocating one SUnit for each instruction, plus one for
  // the region exit node.
  SUnits.reserve(BB->size());

  // We build scheduling units by walking a block's instruction list
  // from bottom to top.

  // Remember where a generic side-effecting instruction is as we proceed.
  SUnit *BarrierChain = 0, *AliasChain = 0;

  // Memory references to specific known memory locations are tracked
  // so that they can be given more precise dependencies. We track
  // separately the known memory locations that may alias and those
  // that are known not to alias.
  std::map<const Value *, SUnit *> AliasMemDefs, NonAliasMemDefs;
  std::map<const Value *, std::vector<SUnit *> > AliasMemUses, NonAliasMemUses;

  // Keep track of dangling debug references to registers.
  std::vector<std::pair<MachineInstr*, unsigned> >
    DanglingDebugValue(TRI->getNumRegs(),
                       std::make_pair(static_cast<MachineInstr*>(0), 0));

  // Check to see if the scheduler cares about latencies.
  bool UnitLatencies = ForceUnitLatencies();

  // Ask the target if address-backscheduling is desirable, and if so how much.
  const TargetSubtarget &ST = TM.getSubtarget<TargetSubtarget>();
  unsigned SpecialAddressLatency = ST.getSpecialAddressLatency();

  // Remove any stale debug info; sometimes BuildSchedGraph is called again
  // without emitting the info from the previous call.
  DbgValueVec.clear();

  // Model data dependencies between instructions being scheduled and the
  // ExitSU.
  AddSchedBarrierDeps();

  // Walk the list of instructions, from bottom moving up.
  for (MachineBasicBlock::iterator MII = InsertPos, MIE = Begin;
       MII != MIE; --MII) {
    MachineInstr *MI = prior(MII);
    // DBG_VALUE does not have a SUnit built, so just remember these for
    // later reinsertion.
    if (MI->isDebugValue()) {
      if (MI->getNumOperands() == 3 && MI->getOperand(0).isReg() &&
          MI->getOperand(0).getReg())
        DanglingDebugValue[MI->getOperand(0).getReg()] =
          std::make_pair(MI, DbgValueVec.size());
      DbgValueVec.push_back(MI);
      continue;
    }
    const TargetInstrDesc &TID = MI->getDesc();
    assert(!TID.isTerminator() && !MI->isLabel() &&
           "Cannot schedule terminators or labels!");
    // Create the SUnit for this MI.
    SUnit *SU = NewSUnit(MI);
    SU->isCall = TID.isCall();
    SU->isCommutable = TID.isCommutable();

    // Assign the Latency field of SU using target-provided information.
    if (UnitLatencies)
      SU->Latency = 1;
    else
      ComputeLatency(SU);

    // Add register-based dependencies (data, anti, and output).
    for (unsigned j = 0, n = MI->getNumOperands(); j != n; ++j) {
      const MachineOperand &MO = MI->getOperand(j);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;

      assert(TRI->isPhysicalRegister(Reg) && "Virtual register encountered!");

      if (MO.isDef() && DanglingDebugValue[Reg].first != 0) {
        SU->DbgInstrList.push_back(DanglingDebugValue[Reg].first);
        DbgValueVec[DanglingDebugValue[Reg].second] = 0;
        DanglingDebugValue[Reg] = std::make_pair((MachineInstr*)0, 0);
      }

      std::vector<SUnit *> &UseList = Uses[Reg];
      std::vector<SUnit *> &DefList = Defs[Reg];
      // Optionally add output and anti dependencies. For anti
      // dependencies we use a latency of 0 because for a multi-issue
      // target we want to allow the defining instruction to issue
      // in the same cycle as the using instruction.
      // TODO: Using a latency of 1 here for output dependencies assumes
      //       there's no cost for reusing registers.
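      // Illustrative (hypothetical) operand sequence:
      //   t1: r1 = ADD r2, r3   ; defines r1
      //   t2: r4 = SUB r1, r2   ; uses r1
      //   t3: r1 = MUL r5, r6   ; redefines r1
      // t3 gets an anti edge after t2 (latency 0, so both may issue in the
      // same cycle) and an output edge after t1 (latency 1).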
      SDep::Kind Kind = MO.isUse() ? SDep::Anti : SDep::Output;
      unsigned AOLatency = (Kind == SDep::Anti) ? 0 : 1;
      for (unsigned i = 0, e = DefList.size(); i != e; ++i) {
        SUnit *DefSU = DefList[i];
        if (DefSU == &ExitSU)
          continue;
        if (DefSU != SU &&
            (Kind != SDep::Output || !MO.isDead() ||
             !DefSU->getInstr()->registerDefIsDead(Reg)))
          DefSU->addPred(SDep(SU, Kind, AOLatency, /*Reg=*/Reg));
      }
      for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
        std::vector<SUnit *> &DefList = Defs[*Alias];
        for (unsigned i = 0, e = DefList.size(); i != e; ++i) {
          SUnit *DefSU = DefList[i];
          if (DefSU == &ExitSU)
            continue;
          if (DefSU != SU &&
              (Kind != SDep::Output || !MO.isDead() ||
               !DefSU->getInstr()->registerDefIsDead(*Alias)))
            DefSU->addPred(SDep(SU, Kind, AOLatency, /*Reg=*/ *Alias));
        }
      }

      if (MO.isDef()) {
        // Add any data dependencies.
        unsigned DataLatency = SU->Latency;
        for (unsigned i = 0, e = UseList.size(); i != e; ++i) {
          SUnit *UseSU = UseList[i];
          if (UseSU == SU)
            continue;
          unsigned LDataLatency = DataLatency;
          // Optionally add in a special extra latency for nodes that
          // feed addresses.
          // TODO: Do this for register aliases too.
          // TODO: Perhaps we should get rid of
          // SpecialAddressLatency and just move this into
          // adjustSchedDependency for the targets that care about it.
          if (SpecialAddressLatency != 0 && !UnitLatencies &&
              UseSU != &ExitSU) {
            MachineInstr *UseMI = UseSU->getInstr();
            const TargetInstrDesc &UseTID = UseMI->getDesc();
            int RegUseIndex = UseMI->findRegisterUseOperandIdx(Reg);
            assert(RegUseIndex >= 0 && "UseMI doesn't use register!");
            if (RegUseIndex >= 0 &&
                (UseTID.mayLoad() || UseTID.mayStore()) &&
                (unsigned)RegUseIndex < UseTID.getNumOperands() &&
                UseTID.OpInfo[RegUseIndex].isLookupPtrRegClass())
              LDataLatency += SpecialAddressLatency;
          }
          // Adjust the dependence latency using operand def/use
          // information (if any), and then allow the target to
          // perform its own adjustments.
          const SDep& dep = SDep(SU, SDep::Data, LDataLatency, Reg);
          if (!UnitLatencies) {
            ComputeOperandLatency(SU, UseSU, const_cast<SDep &>(dep));
            ST.adjustSchedDependency(SU, UseSU, const_cast<SDep &>(dep));
          }
          UseSU->addPred(dep);
        }
        for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
          std::vector<SUnit *> &UseList = Uses[*Alias];
          for (unsigned i = 0, e = UseList.size(); i != e; ++i) {
            SUnit *UseSU = UseList[i];
            if (UseSU == SU)
              continue;
            const SDep& dep = SDep(SU, SDep::Data, DataLatency, *Alias);
            if (!UnitLatencies) {
              ComputeOperandLatency(SU, UseSU, const_cast<SDep &>(dep));
              ST.adjustSchedDependency(SU, UseSU, const_cast<SDep &>(dep));
            }
            UseSU->addPred(dep);
          }
        }
        // If a def is going to wrap back around to the top of the loop,
        // backschedule it.
        if (!UnitLatencies && DefList.empty()) {
          LoopDependencies::LoopDeps::iterator I = LoopRegs.Deps.find(Reg);
          if (I != LoopRegs.Deps.end()) {
            const MachineOperand *UseMO = I->second.first;
            unsigned Count = I->second.second;
            const MachineInstr *UseMI = UseMO->getParent();
            unsigned UseMOIdx = UseMO - &UseMI->getOperand(0);
            const TargetInstrDesc &UseTID = UseMI->getDesc();
            // TODO: If we knew the total depth of the region here, we could
            // handle the case where the whole loop is inside the region but
            // is large enough that the isScheduleHigh trick isn't needed.
            if (UseMOIdx < UseTID.getNumOperands()) {
              // Currently, we only support scheduling regions consisting of
              // single basic blocks. Check to see if the instruction is in
              // the same region by checking to see if it has the same parent.
              if (UseMI->getParent() != MI->getParent()) {
                unsigned Latency = SU->Latency;
                if (UseTID.OpInfo[UseMOIdx].isLookupPtrRegClass())
                  Latency += SpecialAddressLatency;
                // This is a wild guess as to the portion of the latency which
                // will be overlapped by work done outside the current
                // scheduling region.
                Latency -= std::min(Latency, Count);
                // Add the artificial edge.
                ExitSU.addPred(SDep(SU, SDep::Order, Latency,
                                    /*Reg=*/0, /*isNormalMemory=*/false,
                                    /*isMustAlias=*/false,
                                    /*isArtificial=*/true));
              } else if (SpecialAddressLatency > 0 &&
                         UseTID.OpInfo[UseMOIdx].isLookupPtrRegClass()) {
                // The entire loop body is within the current scheduling region
                // and the latency of this operation is assumed to be greater
                // than the latency of the loop.
                // TODO: Recursively mark data-edge predecessors as
                //       isScheduleHigh too.
                SU->isScheduleHigh = true;
              }
            }
            LoopRegs.Deps.erase(I);
          }
        }

        UseList.clear();
        if (!MO.isDead())
          DefList.clear();
        DefList.push_back(SU);
      } else {
        UseList.push_back(SU);
      }
    }

    // Add chain dependencies.
    // Chain dependencies used to enforce memory order should have
    // latency of 0 (except for true dependency of Store followed by
    // aliased Load... we estimate that with a single cycle of latency
    // assuming the hardware will bypass).
    // Note that isStoreToStackSlot and isLoadFromStackSlot are not usable
    // after stack slots are lowered to actual addresses.
    // TODO: Use an AliasAnalysis and do real alias-analysis queries, and
    // produce more precise dependence information.
#define STORE_LOAD_LATENCY 1
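    // For example, with STORE_LOAD_LATENCY of 1, a store followed by a load
    // that may read the same address gets an order edge with latency 1
    // (modeling a store-to-load bypass), while the other memory-order edges
    // below use latency 0.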
    unsigned TrueMemOrderLatency = 0;
    if (TID.isCall() || MI->hasUnmodeledSideEffects() ||
        (MI->hasVolatileMemoryRef() &&
         (!TID.mayLoad() || !MI->isInvariantLoad(AA)))) {
      // Be conservative with these and add dependencies on all memory
      // references, even those that are known to not alias.
      for (std::map<const Value *, SUnit *>::iterator I =
             NonAliasMemDefs.begin(), E = NonAliasMemDefs.end(); I != E; ++I) {
        I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      }
      for (std::map<const Value *, std::vector<SUnit *> >::iterator I =
             NonAliasMemUses.begin(), E = NonAliasMemUses.end(); I != E; ++I) {
        for (unsigned i = 0, e = I->second.size(); i != e; ++i)
          I->second[i]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency));
      }
      NonAliasMemDefs.clear();
      NonAliasMemUses.clear();
      // Add SU to the barrier chain.
      if (BarrierChain)
        BarrierChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      BarrierChain = SU;

      // fall-through
    new_alias_chain:
      // Chain all possibly aliasing memory references through SU.
      if (AliasChain)
        AliasChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      AliasChain = SU;
      for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
        PendingLoads[k]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency));
      for (std::map<const Value *, SUnit *>::iterator I = AliasMemDefs.begin(),
           E = AliasMemDefs.end(); I != E; ++I) {
        I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      }
      for (std::map<const Value *, std::vector<SUnit *> >::iterator I =
             AliasMemUses.begin(), E = AliasMemUses.end(); I != E; ++I) {
        for (unsigned i = 0, e = I->second.size(); i != e; ++i)
          I->second[i]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency));
      }
      PendingLoads.clear();
      AliasMemDefs.clear();
      AliasMemUses.clear();
    } else if (TID.mayStore()) {
      bool MayAlias = true;
      TrueMemOrderLatency = STORE_LOAD_LATENCY;
      if (const Value *V = getUnderlyingObjectForInstr(MI, MFI, MayAlias)) {
        // A store to a specific PseudoSourceValue. Add precise dependencies.
        // Record the def in MemDefs, first adding a dep if there is
        // an existing def.
        std::map<const Value *, SUnit *>::iterator I =
          ((MayAlias) ? AliasMemDefs.find(V) : NonAliasMemDefs.find(V));
        std::map<const Value *, SUnit *>::iterator IE =
          ((MayAlias) ? AliasMemDefs.end() : NonAliasMemDefs.end());
        if (I != IE) {
          I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0, /*Reg=*/0,
                                  /*isNormalMemory=*/true));
          I->second = SU;
        } else {
          if (MayAlias)
            AliasMemDefs[V] = SU;
          else
            NonAliasMemDefs[V] = SU;
        }
        // Handle the uses in MemUses, if there are any.
        std::map<const Value *, std::vector<SUnit *> >::iterator J =
          ((MayAlias) ? AliasMemUses.find(V) : NonAliasMemUses.find(V));
        std::map<const Value *, std::vector<SUnit *> >::iterator JE =
          ((MayAlias) ? AliasMemUses.end() : NonAliasMemUses.end());
        if (J != JE) {
          for (unsigned i = 0, e = J->second.size(); i != e; ++i)
            J->second[i]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency,
                                       /*Reg=*/0, /*isNormalMemory=*/true));
          J->second.clear();
        }
        if (MayAlias) {
          // Add dependencies from all the PendingLoads, i.e. loads
          // with no underlying object.
          for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
            PendingLoads[k]->addPred(SDep(SU, SDep::Order,
                                          TrueMemOrderLatency));
          // Add dependence on alias chain, if needed.
          if (AliasChain)
            AliasChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
        }
        // Add dependence on barrier chain, if needed.
        if (BarrierChain)
          BarrierChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      } else {
        // Treat all other stores conservatively.
        goto new_alias_chain;
      }

      if (!ExitSU.isPred(SU))
        // Push stores up a bit to avoid them getting in between cmp
        // and branches.
        ExitSU.addPred(SDep(SU, SDep::Order, 0,
                            /*Reg=*/0, /*isNormalMemory=*/false,
                            /*isMustAlias=*/false,
                            /*isArtificial=*/true));
    } else if (TID.mayLoad()) {
      bool MayAlias = true;
      TrueMemOrderLatency = 0;
      if (MI->isInvariantLoad(AA)) {
        // Invariant load, no chain dependencies needed!
      } else {
        if (const Value *V =
            getUnderlyingObjectForInstr(MI, MFI, MayAlias)) {
          // A load from a specific PseudoSourceValue. Add precise dependencies.
          std::map<const Value *, SUnit *>::iterator I =
            ((MayAlias) ? AliasMemDefs.find(V) : NonAliasMemDefs.find(V));
          std::map<const Value *, SUnit *>::iterator IE =
            ((MayAlias) ? AliasMemDefs.end() : NonAliasMemDefs.end());
          if (I != IE)
            I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0, /*Reg=*/0,
                                    /*isNormalMemory=*/true));
          if (MayAlias)
            AliasMemUses[V].push_back(SU);
          else
            NonAliasMemUses[V].push_back(SU);
        } else {
          // A load with no underlying object. Depend on all
          // potentially aliasing stores.
          for (std::map<const Value *, SUnit *>::iterator I =
                 AliasMemDefs.begin(), E = AliasMemDefs.end(); I != E; ++I)
            I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0));

          PendingLoads.push_back(SU);
          MayAlias = true;
        }

        // Add dependencies on alias and barrier chains, if needed.
        if (MayAlias && AliasChain)
          AliasChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
        if (BarrierChain)
          BarrierChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      }
    }
  }

  for (int i = 0, e = TRI->getNumRegs(); i != e; ++i) {
    Defs[i].clear();
    Uses[i].clear();
  }
  PendingLoads.clear();
}

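// Worked example (hypothetical machine code, unit latencies): for a region
//   t1: r0 = LOAD [a]
//   t2: r1 = ADD r0, r0
//   t3: STORE r1, [a]
// BuildSchedGraph gives t2 a data edge after t1 (r0), t3 a data edge after
// t2 (r1), and t3 an order edge after t1, since the store may alias the
// earlier load.
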
void ScheduleDAGInstrs::FinishBlock() {
  // Nothing to do.
}

void ScheduleDAGInstrs::ComputeLatency(SUnit *SU) {
  // Compute the latency for the node.
  if (!InstrItins || InstrItins->isEmpty()) {
    SU->Latency = 1;

    // Simplistic target-independent heuristic: assume that loads take
    // extra time.
    if (SU->getInstr()->getDesc().mayLoad())
      SU->Latency += 2;
  } else
    SU->Latency = TII->getInstrLatency(InstrItins, SU->getInstr());
}

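// For example, with no itinerary data an ordinary instruction gets
// Latency = 1, while a load gets Latency = 1 + 2 = 3 under the heuristic
// above.
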
void ScheduleDAGInstrs::ComputeOperandLatency(SUnit *Def, SUnit *Use,
                                              SDep& dep) const {
  if (!InstrItins || InstrItins->isEmpty())
    return;

  // For a data dependency with a known register...
  if ((dep.getKind() != SDep::Data) || (dep.getReg() == 0))
    return;

  const unsigned Reg = dep.getReg();

  // ... find the definition of the register in the defining
  // instruction.
  MachineInstr *DefMI = Def->getInstr();
  int DefIdx = DefMI->findRegisterDefOperandIdx(Reg);
  if (DefIdx != -1) {
    const MachineOperand &MO = DefMI->getOperand(DefIdx);
    if (MO.isReg() && MO.isImplicit() &&
        DefIdx >= (int)DefMI->getDesc().getNumOperands()) {
      // This is an implicit def, getOperandLatency() won't return the correct
      // latency. e.g.
      //   %D6<def>, %D7<def> = VLD1q16 %R2<kill>, 0, ..., %Q3<imp-def>
      //   %Q1<def> = VMULv8i16 %Q1<kill>, %Q3<kill>, ...
      // What we want is to compute latency between def of %D6/%D7 and use of
      // %Q3 instead.
      DefIdx = DefMI->findRegisterDefOperandIdx(Reg, false, true, TRI);
    }
    MachineInstr *UseMI = Use->getInstr();
    // For all uses of the register, calculate the maximum latency.
    int Latency = -1;
    if (UseMI) {
      for (unsigned i = 0, e = UseMI->getNumOperands(); i != e; ++i) {
        const MachineOperand &MO = UseMI->getOperand(i);
        if (!MO.isReg() || !MO.isUse())
          continue;
        unsigned MOReg = MO.getReg();
        if (MOReg != Reg)
          continue;

        int UseCycle = TII->getOperandLatency(InstrItins, DefMI, DefIdx,
                                              UseMI, i);
        Latency = std::max(Latency, UseCycle);
      }
    } else {
      // UseMI is null, then it must be a scheduling barrier.
      if (!InstrItins || InstrItins->isEmpty())
        return;
      unsigned DefClass = DefMI->getDesc().getSchedClass();
      Latency = InstrItins->getOperandCycle(DefClass, DefIdx);
    }

    // If we found a latency, then replace the existing dependence latency.
    if (Latency >= 0)
      dep.setLatency(Latency);
  }
}

void ScheduleDAGInstrs::dumpNode(const SUnit *SU) const {
  SU->getInstr()->dump();
}

std::string ScheduleDAGInstrs::getGraphNodeLabel(const SUnit *SU) const {
  std::string s;
  raw_string_ostream oss(s);
  if (SU == &EntrySU)
    oss << "<entry>";
  else if (SU == &ExitSU)
    oss << "<exit>";
  else
    SU->getInstr()->print(oss);
  return oss.str();
}

// EmitSchedule - Emit the machine code in scheduled order.
MachineBasicBlock *ScheduleDAGInstrs::EmitSchedule() {
  // For MachineInstr-based scheduling, we're rescheduling the instructions in
  // the block, so start by removing them from the block.
  while (Begin != InsertPos) {
    MachineBasicBlock::iterator I = Begin;
    ++Begin;
    BB->remove(I);
  }

  // First reinsert any remaining debug_values; these are either constants,
  // or refer to live-in registers. The beginning of the block is the right
  // place for the latter. The former might reasonably be placed elsewhere
  // using some kind of ordering algorithm, but right now it doesn't matter.
  for (int i = DbgValueVec.size()-1; i >= 0; --i)
    if (DbgValueVec[i])
      BB->insert(InsertPos, DbgValueVec[i]);

  // Then re-insert them according to the given schedule.
  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
    SUnit *SU = Sequence[i];
    if (!SU) {
      // Null SUnit* is a noop.
      EmitNoop();
      continue;
    }

    BB->insert(InsertPos, SU->getInstr());
    for (unsigned i = 0, e = SU->DbgInstrList.size(); i < e; ++i)
      BB->insert(InsertPos, SU->DbgInstrList[i]);
  }

  // Update the Begin iterator, as the first instruction in the block
  // may have been scheduled later.
  if (!DbgValueVec.empty()) {
    for (int i = DbgValueVec.size()-1; i >= 0; --i)
      if (DbgValueVec[i] != 0) {
        Begin = DbgValueVec[DbgValueVec.size()-1];
        break;
      }
  } else if (!Sequence.empty())
    Begin = Sequence[0]->getInstr();

  DbgValueVec.clear();
  return BB;
}