//===---- ScheduleDAGInstrs.cpp - MachineInstr Rescheduling ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the ScheduleDAGInstrs class, which implements re-scheduling
// of MachineInstrs.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "sched-instrs"
#include "ScheduleDAGInstrs.h"
#include "llvm/Operator.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtarget.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/SmallSet.h"
using namespace llvm;
ScheduleDAGInstrs::ScheduleDAGInstrs(MachineFunction &mf,
                                     const MachineLoopInfo &mli,
                                     const MachineDominatorTree &mdt)
  : ScheduleDAG(mf), MLI(mli), MDT(mdt), MFI(mf.getFrameInfo()),
    InstrItins(mf.getTarget().getInstrItineraryData()),
    Defs(TRI->getNumRegs()), Uses(TRI->getNumRegs()), LoopRegs(MLI, MDT) {
  DbgValueVec.clear();
}
/// Run - perform scheduling.
///
void ScheduleDAGInstrs::Run(MachineBasicBlock *bb,
                            MachineBasicBlock::iterator begin,
                            MachineBasicBlock::iterator end,
                            unsigned endcount) {
  BB = bb;
  Begin = begin;
  InsertPosIndex = endcount;

  ScheduleDAG::Run(bb, end);
}
/// getUnderlyingObjectFromInt - This is the function that does the work of
/// looking through basic ptrtoint+arithmetic+inttoptr sequences.
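///
/// For example (an illustrative sketch, not taken from a real test case):
///   %int = ptrtoint i8* %obj to i64
///   %sum = add i64 %int, 16
///   %ptr = inttoptr i64 %sum to i8*
/// Starting from %ptr's integer operand and stepping through the add's
/// non-constant operand eventually recovers %obj as the underlying object.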
static const Value *getUnderlyingObjectFromInt(const Value *V) {
  do {
    if (const Operator *U = dyn_cast<Operator>(V)) {
      // If we find a ptrtoint, we can transfer control back to the
      // regular getUnderlyingObjectFromInt.
      if (U->getOpcode() == Instruction::PtrToInt)
        return U->getOperand(0);
      // If we find an add of a constant or a multiplied value, it's
      // likely that the other operand will lead us to the base
      // object. We don't have to worry about the case where the
      // object address is somehow being computed by the multiply,
      // because our callers only care when the result is an
      // identifiable object.
      if (U->getOpcode() != Instruction::Add ||
          (!isa<ConstantInt>(U->getOperand(1)) &&
           Operator::getOpcode(U->getOperand(1)) != Instruction::Mul))
        return V;
      V = U->getOperand(0);
    } else {
      return V;
    }
    assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
  } while (1);
}
/// getUnderlyingObject - This is a wrapper around Value::getUnderlyingObject
/// and adds support for basic ptrtoint+arithmetic+inttoptr sequences.
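///
/// (Value::getUnderlyingObject itself looks through bitcasts and GEPs; the
/// code below additionally handles pointers that round-trip through
/// integers, such as inttoptr(add(ptrtoint %obj, N)) — illustrative note.)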
static const Value *getUnderlyingObject(const Value *V) {
  // First just call Value::getUnderlyingObject to let it do what it does.
  do {
    V = V->getUnderlyingObject();
    // If it found an inttoptr, use special code to continue climbing.
    if (Operator::getOpcode(V) != Instruction::IntToPtr)
      break;
    const Value *O = getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
    // If that succeeded in finding a pointer, continue the search.
    if (!O->getType()->isPointerTy())
      break;
    V = O;
  } while (1);
  return V;
}
/// getUnderlyingObjectForInstr - If this machine instr has memory reference
/// information and it can be tracked to a normal reference to a known
/// object, return the Value for that object. Otherwise return null.
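///
/// For example, a reload from a spill slot typically carries exactly one
/// MachineMemOperand referencing a fixed-stack PseudoSourceValue, which can
/// be tracked precisely. (Illustrative note, not an exhaustive list of the
/// cases handled here.)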
static const Value *getUnderlyingObjectForInstr(const MachineInstr *MI,
                                                const MachineFrameInfo *MFI,
                                                bool &MayAlias) {
  MayAlias = true;
  if (!MI->hasOneMemOperand() ||
      !(*MI->memoperands_begin())->getValue() ||
      (*MI->memoperands_begin())->isVolatile())
    return 0;

  const Value *V = (*MI->memoperands_begin())->getValue();
  if (!V)
    return 0;

  V = getUnderlyingObject(V);
  if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
    // For now, ignore PseudoSourceValues which may alias LLVM IR values
    // because the code that uses this function has no way to cope with
    // such aliases.
    if (PSV->isAliased(MFI))
      return 0;

    MayAlias = PSV->mayAlias(MFI);
    return V;
  }

  if (isIdentifiedObject(V))
    return V;

  return 0;
}
void ScheduleDAGInstrs::StartBlock(MachineBasicBlock *BB) {
  if (MachineLoop *ML = MLI.getLoopFor(BB))
    if (BB == ML->getLoopLatch()) {
      MachineBasicBlock *Header = ML->getHeader();
      for (MachineBasicBlock::livein_iterator I = Header->livein_begin(),
           E = Header->livein_end(); I != E; ++I)
        LoopLiveInRegs.insert(*I);
      LoopRegs.VisitLoop(ML);
    }
}
/// AddSchedBarrierDeps - Add dependencies from instructions in the current
/// list of instructions being scheduled to the scheduling barrier by adding
/// the exit SU to the register defs and uses list. This ensures that
/// instructions which define registers that are either used by the
/// terminator or are live-out are properly scheduled. This is especially
/// important when the definition latency of the return value(s) is too high
/// to be hidden by the branch, or when the live-out registers are used by
/// instructions in the fallthrough block.
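///
/// For example (illustrative): if the block ends in a conditional branch
/// and %R0 is live into a successor, an instruction defining %R0 gets an
/// edge to ExitSU, so it cannot sink below the point where %R0 must be
/// available.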
void ScheduleDAGInstrs::AddSchedBarrierDeps() {
  MachineInstr *ExitMI = InsertPos != BB->end() ? &*InsertPos : 0;
  ExitSU.setInstr(ExitMI);
  bool AllDepKnown = ExitMI &&
    (ExitMI->getDesc().isCall() || ExitMI->getDesc().isBarrier());
  if (ExitMI && AllDepKnown) {
    // If it's a call or a barrier, add dependencies on the defs and uses of
    // the instruction.
    for (unsigned i = 0, e = ExitMI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = ExitMI->getOperand(i);
      if (!MO.isReg() || MO.isDef()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;

      assert(TRI->isPhysicalRegister(Reg) && "Virtual register encountered!");
      Uses[Reg].push_back(&ExitSU);
    }
  } else {
    // For others, e.g. fallthrough, conditional branch, assume the exit
    // uses all the registers that are livein to the successor blocks.
    SmallSet<unsigned, 8> Seen;
    for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
           SE = BB->succ_end(); SI != SE; ++SI)
      for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
           E = (*SI)->livein_end(); I != E; ++I) {
        unsigned Reg = *I;
        if (Seen.insert(Reg))
          Uses[Reg].push_back(&ExitSU);
      }
  }
}
void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
  // We'll be allocating one SUnit for each instruction, plus one for
  // the region exit node.
  SUnits.reserve(BB->size());

  // We build scheduling units by walking a block's instruction list from
  // bottom to top.

  // Remember where a generic side-effecting instruction is as we proceed.
  SUnit *BarrierChain = 0, *AliasChain = 0;
  // Memory references to specific known memory locations are tracked
  // so that they can be given more precise dependencies. We track
  // separately the known memory locations that may alias and those
  // that are known not to alias.
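  // For example (illustrative): stores to two distinct fixed stack slots
  // are recorded in NonAliasMemDefs under different Values and impose no
  // ordering on each other, while stores through arbitrary pointers are
  // serialized via the alias chain built below.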
  std::map<const Value *, SUnit *> AliasMemDefs, NonAliasMemDefs;
  std::map<const Value *, std::vector<SUnit *> > AliasMemUses, NonAliasMemUses;

  // Keep track of dangling debug references to registers.
  std::vector<std::pair<MachineInstr*, unsigned> >
    DanglingDebugValue(TRI->getNumRegs(),
                       std::make_pair(static_cast<MachineInstr*>(0), 0));
  // Check to see if the scheduler cares about latencies.
  bool UnitLatencies = ForceUnitLatencies();

  // Ask the target if address-backscheduling is desirable, and if so how much.
  const TargetSubtarget &ST = TM.getSubtarget<TargetSubtarget>();
  unsigned SpecialAddressLatency = ST.getSpecialAddressLatency();

  // Remove any stale debug info; sometimes BuildSchedGraph is called again
  // without emitting the info from the previous call.
  DbgValueVec.clear();

  // Model data dependencies between instructions being scheduled and the
  // ExitSU.
  AddSchedBarrierDeps();
  // Walk the list of instructions, from bottom moving up.
  for (MachineBasicBlock::iterator MII = InsertPos, MIE = Begin;
       MII != MIE; --MII) {
    MachineInstr *MI = prior(MII);
    // DBG_VALUE does not have SUnit's built, so just remember these for later
    // reinsertion.
    if (MI->isDebugValue()) {
      if (MI->getNumOperands()==3 && MI->getOperand(0).isReg() &&
          MI->getOperand(0).getReg())
        DanglingDebugValue[MI->getOperand(0).getReg()] =
          std::make_pair(MI, DbgValueVec.size());
      DbgValueVec.push_back(MI);
      continue;
    }
    const TargetInstrDesc &TID = MI->getDesc();
    assert(!TID.isTerminator() && !MI->isLabel() &&
           "Cannot schedule terminators or labels!");
    // Create the SUnit for this MI.
    SUnit *SU = NewSUnit(MI);
    SU->isCall = TID.isCall();
    SU->isCommutable = TID.isCommutable();

    // Assign the Latency field of SU using target-provided information.
    if (UnitLatencies)
      SU->Latency = 1;
    else
      ComputeLatency(SU);
    // Add register-based dependencies (data, anti, and output).
    for (unsigned j = 0, n = MI->getNumOperands(); j != n; ++j) {
      const MachineOperand &MO = MI->getOperand(j);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;

      assert(TRI->isPhysicalRegister(Reg) && "Virtual register encountered!");

      if (MO.isDef() && DanglingDebugValue[Reg].first!=0) {
        SU->DbgInstrList.push_back(DanglingDebugValue[Reg].first);
        DbgValueVec[DanglingDebugValue[Reg].second] = 0;
        DanglingDebugValue[Reg] = std::make_pair((MachineInstr*)0, 0);
      }

      std::vector<SUnit *> &UseList = Uses[Reg];
      std::vector<SUnit *> &DefList = Defs[Reg];
      // Optionally add output and anti dependencies. For anti
      // dependencies we use a latency of 0 because for a multi-issue
      // target we want to allow the defining instruction to issue
      // in the same cycle as the using instruction.
      // TODO: Using a latency of 1 here for output dependencies assumes
      //       there's no cost for reusing registers.
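      // For example (illustrative):
      //   %R1 = ADD ...   (A)
      //   ...  = USE %R1  (B)
      //   %R1 = SUB ...   (C)
      // C gets an anti-dependence edge from B and an output-dependence edge
      // from A, so the redefinition of %R1 cannot be scheduled above either.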
      SDep::Kind Kind = MO.isUse() ? SDep::Anti : SDep::Output;
      unsigned AOLatency = (Kind == SDep::Anti) ? 0 : 1;
      for (unsigned i = 0, e = DefList.size(); i != e; ++i) {
        SUnit *DefSU = DefList[i];
        if (DefSU == &ExitSU)
          continue;
        if (DefSU != SU &&
            (Kind != SDep::Output || !MO.isDead() ||
             !DefSU->getInstr()->registerDefIsDead(Reg)))
          DefSU->addPred(SDep(SU, Kind, AOLatency, /*Reg=*/Reg));
      }
      for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
        std::vector<SUnit *> &DefList = Defs[*Alias];
        for (unsigned i = 0, e = DefList.size(); i != e; ++i) {
          SUnit *DefSU = DefList[i];
          if (DefSU == &ExitSU)
            continue;
          if (DefSU != SU &&
              (Kind != SDep::Output || !MO.isDead() ||
               !DefSU->getInstr()->registerDefIsDead(*Alias)))
            DefSU->addPred(SDep(SU, Kind, AOLatency, /*Reg=*/ *Alias));
        }
      }
      if (MO.isDef()) {
        // Add any data dependencies.
        unsigned DataLatency = SU->Latency;
        for (unsigned i = 0, e = UseList.size(); i != e; ++i) {
          SUnit *UseSU = UseList[i];
          if (UseSU == SU)
            continue;
          unsigned LDataLatency = DataLatency;
          // Optionally add in a special extra latency for nodes that
          // reach loads.
          // TODO: Do this for register aliases too.
          // TODO: Perhaps we should get rid of
          // SpecialAddressLatency and just move this into
          // adjustSchedDependency for the targets that care about it.
          if (SpecialAddressLatency != 0 && !UnitLatencies &&
              UseSU != &ExitSU) {
            MachineInstr *UseMI = UseSU->getInstr();
            const TargetInstrDesc &UseTID = UseMI->getDesc();
            int RegUseIndex = UseMI->findRegisterUseOperandIdx(Reg);
            assert(RegUseIndex >= 0 && "UseMI doesn't use register!");
            if (RegUseIndex >= 0 &&
                (UseTID.mayLoad() || UseTID.mayStore()) &&
                (unsigned)RegUseIndex < UseTID.getNumOperands() &&
                UseTID.OpInfo[RegUseIndex].isLookupPtrRegClass())
              LDataLatency += SpecialAddressLatency;
          }
          // Adjust the dependence latency using operand def/use
          // information (if any), and then allow the target to
          // perform its own adjustments.
          const SDep& dep = SDep(SU, SDep::Data, LDataLatency, Reg);
          if (!UnitLatencies) {
            ComputeOperandLatency(SU, UseSU, const_cast<SDep &>(dep));
            ST.adjustSchedDependency(SU, UseSU, const_cast<SDep &>(dep));
          }
          UseSU->addPred(dep);
        }
        for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
          std::vector<SUnit *> &UseList = Uses[*Alias];
          for (unsigned i = 0, e = UseList.size(); i != e; ++i) {
            SUnit *UseSU = UseList[i];
            if (UseSU == SU)
              continue;
            const SDep& dep = SDep(SU, SDep::Data, DataLatency, *Alias);
            if (!UnitLatencies) {
              ComputeOperandLatency(SU, UseSU, const_cast<SDep &>(dep));
              ST.adjustSchedDependency(SU, UseSU, const_cast<SDep &>(dep));
            }
            UseSU->addPred(dep);
          }
        }
        // If a def is going to wrap back around to the top of the loop,
        // backschedule it.
        if (!UnitLatencies && DefList.empty()) {
          LoopDependencies::LoopDeps::iterator I = LoopRegs.Deps.find(Reg);
          if (I != LoopRegs.Deps.end()) {
            const MachineOperand *UseMO = I->second.first;
            unsigned Count = I->second.second;
            const MachineInstr *UseMI = UseMO->getParent();
            unsigned UseMOIdx = UseMO - &UseMI->getOperand(0);
            const TargetInstrDesc &UseTID = UseMI->getDesc();
            // TODO: If we knew the total depth of the region here, we could
            // handle the case where the whole loop is inside the region but
            // is large enough that the isScheduleHigh trick isn't needed.
            if (UseMOIdx < UseTID.getNumOperands()) {
              // Currently, we only support scheduling regions consisting of
              // single basic blocks. Check to see if the instruction is in
              // the same region by checking to see if it has the same parent.
              if (UseMI->getParent() != MI->getParent()) {
                unsigned Latency = SU->Latency;
                if (UseTID.OpInfo[UseMOIdx].isLookupPtrRegClass())
                  Latency += SpecialAddressLatency;
                // This is a wild guess as to the portion of the latency which
                // will be overlapped by work done outside the current
                // scheduling region.
                Latency -= std::min(Latency, Count);
                // Add the artificial edge.
                ExitSU.addPred(SDep(SU, SDep::Order, Latency,
                                    /*Reg=*/0, /*isNormalMemory=*/false,
                                    /*isMustAlias=*/false,
                                    /*isArtificial=*/true));
              } else if (SpecialAddressLatency > 0 &&
                         UseTID.OpInfo[UseMOIdx].isLookupPtrRegClass()) {
                // The entire loop body is within the current scheduling region
                // and the latency of this operation is assumed to be greater
                // than the latency of the loop.
                // TODO: Recursively mark data-edge predecessors as
                //       isScheduleHigh too.
                SU->isScheduleHigh = true;
              }
            }
            LoopRegs.Deps.erase(I);
          }
        }

        UseList.clear();
        if (!MO.isDead())
          DefList.clear();
        DefList.push_back(SU);
      } else {
        UseList.push_back(SU);
      }
    }
    // Add chain dependencies.
    // Chain dependencies used to enforce memory order should have
    // latency of 0 (except for true dependency of Store followed by
    // aliased Load... we estimate that with a single cycle of latency
    // assuming the hardware will bypass).
    // Note that isStoreToStackSlot and isLoadFromStackSlot are not usable
    // after stack slots are lowered to actual addresses.
    // TODO: Use an AliasAnalysis and do real alias-analysis queries, and
    // produce more precise dependence information.
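    // For example (illustrative): a store to a location followed by a load
    // that may read it gets an order edge with STORE_LOAD_LATENCY (one
    // cycle), modeling store->load forwarding; ordering edges between
    // references that merely need to stay in order use latency 0.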
#define STORE_LOAD_LATENCY 1
    unsigned TrueMemOrderLatency = 0;
412 if (TID
.isCall() || TID
.hasUnmodeledSideEffects() ||
413 (MI
->hasVolatileMemoryRef() &&
414 (!TID
.mayLoad() || !MI
->isInvariantLoad(AA
)))) {
415 // Be conservative with these and add dependencies on all memory
416 // references, even those that are known to not alias.
417 for (std::map
<const Value
*, SUnit
*>::iterator I
=
418 NonAliasMemDefs
.begin(), E
= NonAliasMemDefs
.end(); I
!= E
; ++I
) {
419 I
->second
->addPred(SDep(SU
, SDep::Order
, /*Latency=*/0));
421 for (std::map
<const Value
*, std::vector
<SUnit
*> >::iterator I
=
422 NonAliasMemUses
.begin(), E
= NonAliasMemUses
.end(); I
!= E
; ++I
) {
423 for (unsigned i
= 0, e
= I
->second
.size(); i
!= e
; ++i
)
424 I
->second
[i
]->addPred(SDep(SU
, SDep::Order
, TrueMemOrderLatency
));
426 NonAliasMemDefs
.clear();
427 NonAliasMemUses
.clear();
428 // Add SU to the barrier chain.
430 BarrierChain
->addPred(SDep(SU
, SDep::Order
, /*Latency=*/0));
      // fall-through
    new_alias_chain:
      // Chain all possibly aliasing memory references through SU.
      if (AliasChain)
        AliasChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      AliasChain = SU;
      for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
        PendingLoads[k]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency));
      for (std::map<const Value *, SUnit *>::iterator I = AliasMemDefs.begin(),
           E = AliasMemDefs.end(); I != E; ++I) {
        I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      }
      for (std::map<const Value *, std::vector<SUnit *> >::iterator I =
             AliasMemUses.begin(), E = AliasMemUses.end(); I != E; ++I) {
        for (unsigned i = 0, e = I->second.size(); i != e; ++i)
          I->second[i]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency));
      }
      PendingLoads.clear();
      AliasMemDefs.clear();
      AliasMemUses.clear();
453 } else if (TID
.mayStore()) {
454 bool MayAlias
= true;
455 TrueMemOrderLatency
= STORE_LOAD_LATENCY
;
456 if (const Value
*V
= getUnderlyingObjectForInstr(MI
, MFI
, MayAlias
)) {
457 // A store to a specific PseudoSourceValue. Add precise dependencies.
458 // Record the def in MemDefs, first adding a dep if there is
460 std::map
<const Value
*, SUnit
*>::iterator I
=
461 ((MayAlias
) ? AliasMemDefs
.find(V
) : NonAliasMemDefs
.find(V
));
462 std::map
<const Value
*, SUnit
*>::iterator IE
=
463 ((MayAlias
) ? AliasMemDefs
.end() : NonAliasMemDefs
.end());
465 I
->second
->addPred(SDep(SU
, SDep::Order
, /*Latency=*/0, /*Reg=*/0,
466 /*isNormalMemory=*/true));
470 AliasMemDefs
[V
] = SU
;
472 NonAliasMemDefs
[V
] = SU
;
        // Handle the uses in MemUses, if there are any.
        std::map<const Value *, std::vector<SUnit *> >::iterator J =
          ((MayAlias) ? AliasMemUses.find(V) : NonAliasMemUses.find(V));
        std::map<const Value *, std::vector<SUnit *> >::iterator JE =
          ((MayAlias) ? AliasMemUses.end() : NonAliasMemUses.end());
        if (J != JE) {
          for (unsigned i = 0, e = J->second.size(); i != e; ++i)
            J->second[i]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency,
                                       /*Reg=*/0, /*isNormalMemory=*/true));
          J->second.clear();
        }
        if (MayAlias) {
          // Add dependencies from all the PendingLoads, i.e. loads
          // with no underlying object.
          for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
            PendingLoads[k]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency));
          // Add dependence on alias chain, if needed.
          if (AliasChain)
            AliasChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
        }
        // Add dependence on barrier chain, if needed.
        if (BarrierChain)
          BarrierChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      } else {
        // Treat all other stores conservatively.
        goto new_alias_chain;
      }
502 if (!ExitSU
.isPred(SU
))
503 // Push store's up a bit to avoid them getting in between cmp
505 ExitSU
.addPred(SDep(SU
, SDep::Order
, 0,
506 /*Reg=*/0, /*isNormalMemory=*/false,
507 /*isMustAlias=*/false,
508 /*isArtificial=*/true));
509 } else if (TID
.mayLoad()) {
510 bool MayAlias
= true;
511 TrueMemOrderLatency
= 0;
512 if (MI
->isInvariantLoad(AA
)) {
513 // Invariant load, no chain dependencies needed!
516 getUnderlyingObjectForInstr(MI
, MFI
, MayAlias
)) {
        // A load from a specific PseudoSourceValue. Add precise dependencies.
        std::map<const Value *, SUnit *>::iterator I =
          ((MayAlias) ? AliasMemDefs.find(V) : NonAliasMemDefs.find(V));
        std::map<const Value *, SUnit *>::iterator IE =
          ((MayAlias) ? AliasMemDefs.end() : NonAliasMemDefs.end());
        if (I != IE)
          I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0, /*Reg=*/0,
                                  /*isNormalMemory=*/true));
        if (MayAlias)
          AliasMemUses[V].push_back(SU);
        else
          NonAliasMemUses[V].push_back(SU);
      } else {
        // A load with no underlying object. Depend on all
        // potentially aliasing stores.
        for (std::map<const Value *, SUnit *>::iterator I =
               AliasMemDefs.begin(), E = AliasMemDefs.end(); I != E; ++I)
          I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0));

        PendingLoads.push_back(SU);
        MayAlias = true;
      }
      // Add dependencies on alias and barrier chains, if needed.
      if (MayAlias && AliasChain)
        AliasChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      if (BarrierChain)
        BarrierChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
    }
  }
  for (int i = 0, e = TRI->getNumRegs(); i != e; ++i) {
    Defs[i].clear();
    Uses[i].clear();
  }
  PendingLoads.clear();
}
void ScheduleDAGInstrs::FinishBlock() {
  // Nothing to do.
}
void ScheduleDAGInstrs::ComputeLatency(SUnit *SU) {
  // Compute the latency for the node.
  if (!InstrItins || InstrItins->isEmpty()) {
    SU->Latency = 1;

    // Simplistic target-independent heuristic: assume that loads take
    // extra time.
    if (SU->getInstr()->getDesc().mayLoad())
      SU->Latency += 2;
  } else {
    SU->Latency = TII->getInstrLatency(InstrItins, SU->getInstr());
  }
}
void ScheduleDAGInstrs::ComputeOperandLatency(SUnit *Def, SUnit *Use,
                                              SDep& dep) const {
  if (!InstrItins || InstrItins->isEmpty())
    return;

  // For a data dependency with a known register...
  if ((dep.getKind() != SDep::Data) || (dep.getReg() == 0))
    return;

  const unsigned Reg = dep.getReg();

  // ... find the definition of the register in the defining
  // instruction.
  MachineInstr *DefMI = Def->getInstr();
  int DefIdx = DefMI->findRegisterDefOperandIdx(Reg);
  if (DefIdx != -1) {
    const MachineOperand &MO = DefMI->getOperand(DefIdx);
    if (MO.isReg() && MO.isImplicit() &&
        DefIdx >= (int)DefMI->getDesc().getNumOperands()) {
      // This is an implicit def, getOperandLatency() won't return the correct
      // latency. e.g.
      //   %D6<def>, %D7<def> = VLD1q16 %R2<kill>, 0, ..., %Q3<imp-def>
      //   %Q1<def> = VMULv8i16 %Q1<kill>, %Q3<kill>, ...
      // What we want is to compute latency between def of %D6/%D7 and use of
      // %Q3 instead.
      DefIdx = DefMI->findRegisterDefOperandIdx(Reg, false, true, TRI);
    }
    MachineInstr *UseMI = Use->getInstr();
    // For all uses of the register, calculate the maximum latency.
    int Latency = -1;
    if (UseMI) {
      for (unsigned i = 0, e = UseMI->getNumOperands(); i != e; ++i) {
        const MachineOperand &MO = UseMI->getOperand(i);
        if (!MO.isReg() || !MO.isUse())
          continue;
        unsigned MOReg = MO.getReg();
        if (MOReg != Reg)
          continue;

        int UseCycle = TII->getOperandLatency(InstrItins, DefMI, DefIdx,
                                              UseMI, i);
        Latency = std::max(Latency, UseCycle);
      }
    } else {
      // If UseMI is null, it must be a scheduling barrier.
      if (!InstrItins || InstrItins->isEmpty())
        return;
      unsigned DefClass = DefMI->getDesc().getSchedClass();
      Latency = InstrItins->getOperandCycle(DefClass, DefIdx);
    }

    // If we found a latency, then replace the existing dependence latency.
    if (Latency >= 0)
      dep.setLatency(Latency);
  }
}
void ScheduleDAGInstrs::dumpNode(const SUnit *SU) const {
  SU->getInstr()->dump();
}
std::string ScheduleDAGInstrs::getGraphNodeLabel(const SUnit *SU) const {
  std::string s;
  raw_string_ostream oss(s);
  if (SU == &EntrySU)
    oss << "<entry>";
  else if (SU == &ExitSU)
    oss << "<exit>";
  else
    SU->getInstr()->print(oss);
  return oss.str();
}
// EmitSchedule - Emit the machine code in scheduled order.
MachineBasicBlock *ScheduleDAGInstrs::EmitSchedule() {
  // For MachineInstr-based scheduling, we're rescheduling the instructions in
  // the block, so start by removing them from the block.
  while (Begin != InsertPos) {
    MachineBasicBlock::iterator I = Begin;
    ++Begin;
    BB->remove(I);
  }
  // First reinsert any remaining debug_values; these are either constants,
  // or refer to live-in registers. The beginning of the block is the right
  // place for the latter. The former might reasonably be placed elsewhere
  // using some kind of ordering algorithm, but right now it doesn't matter.
  for (int i = DbgValueVec.size()-1; i >= 0; --i)
    if (DbgValueVec[i])
      BB->insert(InsertPos, DbgValueVec[i]);
  // Then re-insert them according to the given schedule.
  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
    SUnit *SU = Sequence[i];
    if (!SU) {
      // Null SUnit* is a noop.
      EmitNoop();
      continue;
    }

    BB->insert(InsertPos, SU->getInstr());
    for (unsigned i = 0, e = SU->DbgInstrList.size(); i < e; ++i)
      BB->insert(InsertPos, SU->DbgInstrList[i]);
  }
  // Update the Begin iterator, as the first instruction in the block
  // may have been scheduled later.
  if (!DbgValueVec.empty()) {
    for (int i = DbgValueVec.size()-1; i >= 0; --i)
      if (DbgValueVec[i] != 0) {
        Begin = DbgValueVec[DbgValueVec.size()-1];
        break;
      }
  } else if (!Sequence.empty())
    Begin = Sequence[0]->getInstr();

  DbgValueVec.clear();
  return BB;
}