//===---- ScheduleDAGInstrs.cpp - MachineInstr Rescheduling ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the ScheduleDAGInstrs class, which implements re-scheduling
// of MachineInstrs.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "sched-instrs"
#include "ScheduleDAGInstrs.h"
#include "llvm/Operator.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtarget.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/SmallSet.h"

using namespace llvm;
ScheduleDAGInstrs::ScheduleDAGInstrs(MachineFunction &mf,
                                     const MachineLoopInfo &mli,
                                     const MachineDominatorTree &mdt)
  : ScheduleDAG(mf), MLI(mli), MDT(mdt), LoopRegs(MLI, MDT) {}
/// Run - perform scheduling.
///
void ScheduleDAGInstrs::Run(MachineBasicBlock *bb,
                            MachineBasicBlock::iterator begin,
                            MachineBasicBlock::iterator end,
                            unsigned endcount) {
  BB = bb;
  Begin = begin;
  InsertPos = end;
  InsertPosIndex = endcount;

  ScheduleDAG::Run(bb, end);
}
/// getUnderlyingObjectFromInt - This is the function that does the work of
/// looking through basic ptrtoint+arithmetic+inttoptr sequences.
static const Value *getUnderlyingObjectFromInt(const Value *V) {
  do {
    if (const Operator *U = dyn_cast<Operator>(V)) {
      // If we find a ptrtoint, we can transfer control back to the
      // regular getUnderlyingObjectFromInt.
      if (U->getOpcode() == Instruction::PtrToInt)
        return U->getOperand(0);
      // If we find an add of a constant or a multiplied value, it's
      // likely that the other operand will lead us to the base
      // object. We don't have to worry about the case where the
      // object address is somehow being computed by the multiply,
      // because our callers only care when the result is an
      // identifiable object.
      if (U->getOpcode() != Instruction::Add ||
          (!isa<ConstantInt>(U->getOperand(1)) &&
           Operator::getOpcode(U->getOperand(1)) != Instruction::Mul))
        return V;
      V = U->getOperand(0);
    } else {
      return V;
    }
    assert(isa<IntegerType>(V->getType()) && "Unexpected operand type!");
  } while (1);
}
/// getUnderlyingObject - This is a wrapper around Value::getUnderlyingObject
/// and adds support for basic ptrtoint+arithmetic+inttoptr sequences.
static const Value *getUnderlyingObject(const Value *V) {
  // First just call Value::getUnderlyingObject to let it do what it does.
  do {
    V = V->getUnderlyingObject();
    // If it found an inttoptr, use special code to continue climbing.
    if (Operator::getOpcode(V) != Instruction::IntToPtr)
      break;
    const Value *O = getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
    // If that succeeded in finding a pointer, continue the search.
    if (!isa<PointerType>(O->getType()))
      break;
    V = O;
  } while (1);
  return V;
}
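// Illustrative example (added commentary, not part of the original source):
// together, the two helpers above let the scheduler see through an IR
// sequence such as
//   %i = ptrtoint i8* %obj to i64
//   %a = add i64 %i, 16
//   %p = inttoptr i64 %a to i8*
// and report %obj as the underlying object of a memory access through %p.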
/// getUnderlyingObjectForInstr - If this machine instr has memory reference
/// information and it can be tracked to a normal reference to a known
/// object, return the Value for that object. Otherwise return null.
static const Value *getUnderlyingObjectForInstr(const MachineInstr *MI) {
  if (!MI->hasOneMemOperand() ||
      !MI->memoperands_begin()->getValue() ||
      MI->memoperands_begin()->isVolatile())
    return 0;

  const Value *V = MI->memoperands_begin()->getValue();
  if (!V)
    return 0;

  V = getUnderlyingObject(V);
  if (!isa<PseudoSourceValue>(V) && !isIdentifiedObject(V))
    return 0;

  return V;
}
void ScheduleDAGInstrs::StartBlock(MachineBasicBlock *BB) {
  if (MachineLoop *ML = MLI.getLoopFor(BB))
    if (BB == ML->getLoopLatch()) {
      MachineBasicBlock *Header = ML->getHeader();
      for (MachineBasicBlock::livein_iterator I = Header->livein_begin(),
           E = Header->livein_end(); I != E; ++I)
        LoopLiveInRegs.insert(*I);
      LoopRegs.VisitLoop(ML);
    }
}
void ScheduleDAGInstrs::BuildSchedGraph() {
  // We'll be allocating one SUnit for each instruction, plus one for
  // the region exit node.
  SUnits.reserve(BB->size());
  // We build scheduling units by walking a block's instruction list from
  // bottom to top.

  // Remember where a generic side-effecting instruction is as we proceed. If
  // ChainMMO is null, this is assumed to have arbitrary side-effects. If
  // ChainMMO is non-null, then Chain makes only a single memory reference.
  SUnit *Chain = 0;
  MachineMemOperand *ChainMMO = 0;
  // Memory references to specific known memory locations are tracked so that
  // they can be given more precise dependencies.
  std::map<const Value *, SUnit *> MemDefs;
  std::map<const Value *, std::vector<SUnit *> > MemUses;
  // Check to see if the scheduler cares about latencies.
  bool UnitLatencies = ForceUnitLatencies();

  // Ask the target if address-backscheduling is desirable, and if so how much.
  const TargetSubtarget &ST = TM.getSubtarget<TargetSubtarget>();
  unsigned SpecialAddressLatency = ST.getSpecialAddressLatency();
  // Walk the list of instructions, from bottom moving up.
  for (MachineBasicBlock::iterator MII = InsertPos, MIE = Begin;
       MII != MIE; --MII) {
    MachineInstr *MI = prior(MII);
    const TargetInstrDesc &TID = MI->getDesc();
    assert(!TID.isTerminator() && !MI->isLabel() &&
           "Cannot schedule terminators or labels!");
    // Create the SUnit for this MI.
    SUnit *SU = NewSUnit(MI);

    // Assign the Latency field of SU using target-provided information.
    if (UnitLatencies)
      SU->Latency = 1;
    else
      ComputeLatency(SU);
    // Add register-based dependencies (data, anti, and output).
    for (unsigned j = 0, n = MI->getNumOperands(); j != n; ++j) {
      const MachineOperand &MO = MI->getOperand(j);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;

      assert(TRI->isPhysicalRegister(Reg) && "Virtual register encountered!");
      std::vector<SUnit *> &UseList = Uses[Reg];
      std::vector<SUnit *> &DefList = Defs[Reg];
      // Optionally add output and anti dependencies. For anti
      // dependencies we use a latency of 0 because for a multi-issue
      // target we want to allow the defining instruction to issue
      // in the same cycle as the using instruction.
      // TODO: Using a latency of 1 here for output dependencies assumes
      //       there's no cost for reusing registers.
      SDep::Kind Kind = MO.isUse() ? SDep::Anti : SDep::Output;
      unsigned AOLatency = (Kind == SDep::Anti) ? 0 : 1;
      for (unsigned i = 0, e = DefList.size(); i != e; ++i) {
        SUnit *DefSU = DefList[i];
        if (DefSU != SU &&
            (Kind != SDep::Output || !MO.isDead() ||
             !DefSU->getInstr()->registerDefIsDead(Reg)))
          DefSU->addPred(SDep(SU, Kind, AOLatency, /*Reg=*/Reg));
      }
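      // Added commentary (illustration, not part of the original source): if
      // an earlier instruction reads r2 and a later instruction writes r2,
      // the edge added above is an anti dependence that keeps the read before
      // the write, with latency 0 so both may issue in the same cycle on a
      // multi-issue target. If both instructions wrote r2, it would instead
      // be an output dependence with latency 1.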
      for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
        std::vector<SUnit *> &DefList = Defs[*Alias];
        for (unsigned i = 0, e = DefList.size(); i != e; ++i) {
          SUnit *DefSU = DefList[i];
          if (DefSU != SU &&
              (Kind != SDep::Output || !MO.isDead() ||
               !DefSU->getInstr()->registerDefIsDead(Reg)))
            DefSU->addPred(SDep(SU, Kind, AOLatency, /*Reg=*/ *Alias));
        }
      }
      if (MO.isDef()) {
        // Add any data dependencies.
        unsigned DataLatency = SU->Latency;
        for (unsigned i = 0, e = UseList.size(); i != e; ++i) {
          SUnit *UseSU = UseList[i];
          if (UseSU != SU) {
            unsigned LDataLatency = DataLatency;
            // Optionally add in a special extra latency for nodes that
            // feed addresses.
            // TODO: Do this for register aliases too.
            // TODO: Perhaps we should get rid of
            // SpecialAddressLatency and just move this into
            // adjustSchedDependency for the targets that care about
            // it.
            if (SpecialAddressLatency != 0 && !UnitLatencies) {
              MachineInstr *UseMI = UseSU->getInstr();
              const TargetInstrDesc &UseTID = UseMI->getDesc();
              int RegUseIndex = UseMI->findRegisterUseOperandIdx(Reg);
              assert(RegUseIndex >= 0 && "UseMI doesn't use register!");
              if ((UseTID.mayLoad() || UseTID.mayStore()) &&
                  (unsigned)RegUseIndex < UseTID.getNumOperands() &&
                  UseTID.OpInfo[RegUseIndex].isLookupPtrRegClass())
                LDataLatency += SpecialAddressLatency;
            }
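            // Added commentary (illustration, not part of the original
            // source): with a SpecialAddressLatency of, say, 6, a def that
            // feeds the pointer operand (a LookupPtrRegClass operand) of a
            // later load or store gets a data edge of SU->Latency + 6 rather
            // than SU->Latency, encouraging the scheduler to compute
            // addresses well ahead of the memory accesses that use them.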
            // Adjust the dependence latency using operand def/use
            // information (if any), and then allow the target to
            // perform its own adjustments.
            const SDep& dep = SDep(SU, SDep::Data, LDataLatency, Reg);
            if (!UnitLatencies) {
              ComputeOperandLatency(SU, UseSU, (SDep &)dep);
              ST.adjustSchedDependency(SU, UseSU, (SDep &)dep);
            }
            UseSU->addPred(dep);
          }
        }
        for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
          std::vector<SUnit *> &UseList = Uses[*Alias];
          for (unsigned i = 0, e = UseList.size(); i != e; ++i) {
            SUnit *UseSU = UseList[i];
            if (UseSU != SU) {
              const SDep& dep = SDep(SU, SDep::Data, DataLatency, *Alias);
              if (!UnitLatencies) {
                ComputeOperandLatency(SU, UseSU, (SDep &)dep);
                ST.adjustSchedDependency(SU, UseSU, (SDep &)dep);
              }
              UseSU->addPred(dep);
            }
          }
        }
        // If a def is going to wrap back around to the top of the loop,
        // backschedule it.
        if (!UnitLatencies && DefList.empty()) {
          LoopDependencies::LoopDeps::iterator I = LoopRegs.Deps.find(Reg);
          if (I != LoopRegs.Deps.end()) {
            const MachineOperand *UseMO = I->second.first;
            unsigned Count = I->second.second;
            const MachineInstr *UseMI = UseMO->getParent();
            unsigned UseMOIdx = UseMO - &UseMI->getOperand(0);
            const TargetInstrDesc &UseTID = UseMI->getDesc();
            // TODO: If we knew the total depth of the region here, we could
            // handle the case where the whole loop is inside the region but
            // is large enough that the isScheduleHigh trick isn't needed.
            if (UseMOIdx < UseTID.getNumOperands()) {
              // Currently, we only support scheduling regions consisting of
              // single basic blocks. Check to see if the instruction is in
              // the same region by checking to see if it has the same parent.
              if (UseMI->getParent() != MI->getParent()) {
                unsigned Latency = SU->Latency;
                if (UseTID.OpInfo[UseMOIdx].isLookupPtrRegClass())
                  Latency += SpecialAddressLatency;
                // This is a wild guess as to the portion of the latency which
                // will be overlapped by work done outside the current
                // scheduling region.
                Latency -= std::min(Latency, Count);
                // Add the artificial edge.
                ExitSU.addPred(SDep(SU, SDep::Order, Latency,
                                    /*Reg=*/0, /*isNormalMemory=*/false,
                                    /*isMustAlias=*/false,
                                    /*isArtificial=*/true));
              } else if (SpecialAddressLatency > 0 &&
                         UseTID.OpInfo[UseMOIdx].isLookupPtrRegClass()) {
                // The entire loop body is within the current scheduling region
                // and the latency of this operation is assumed to be greater
                // than the latency of the loop.
                // TODO: Recursively mark data-edge predecessors as
                //       isScheduleHigh too.
                SU->isScheduleHigh = true;
              }
            }
            LoopRegs.Deps.erase(I);
          }
        }
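        // Added commentary (illustration, not part of the original source):
        // the intent is that a value defined near the bottom of a loop latch
        // and used as an address at the top of the next iteration should be
        // computed as early as possible, either via the artificial edge to
        // ExitSU above or, when the whole loop body is inside the region,
        // via the isScheduleHigh flag.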
        UseList.clear();
        if (!MO.isDead())
          DefList.clear();
        DefList.push_back(SU);
      } else {
        UseList.push_back(SU);
      }
    }
    // Add chain dependencies.
    // Note that isStoreToStackSlot and isLoadFromStackSlot are not usable
    // after stack slots are lowered to actual addresses.
    // TODO: Use an AliasAnalysis and do real alias-analysis queries, and
    // produce more precise dependence information.
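    // Added commentary (overview, not part of the original source): the cases
    // below fall into three groups. A call or an instruction with unmodeled
    // side effects becomes the new Chain, and every memory access already
    // seen below it is ordered after it. A store or load whose underlying
    // object is known gets precise order edges through the MemDefs/MemUses
    // maps keyed on that Value. A plain load with no usable memoperand
    // information is ordered after the Chain and all known stores, and is
    // remembered in PendingLoads.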
    if (TID.isCall() || TID.hasUnmodeledSideEffects()) {
    new_chain:
      // This is the conservative case. Add dependencies on all memory
      // references.
      if (Chain)
        Chain->addPred(SDep(SU, SDep::Order, SU->Latency));
      Chain = SU;
      for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
        PendingLoads[k]->addPred(SDep(SU, SDep::Order, SU->Latency));
      PendingLoads.clear();
      for (std::map<const Value *, SUnit *>::iterator I = MemDefs.begin(),
           E = MemDefs.end(); I != E; ++I) {
        I->second->addPred(SDep(SU, SDep::Order, SU->Latency));
        I->second = SU;
      }
      for (std::map<const Value *, std::vector<SUnit *> >::iterator I =
           MemUses.begin(), E = MemUses.end(); I != E; ++I) {
        for (unsigned i = 0, e = I->second.size(); i != e; ++i)
          I->second[i]->addPred(SDep(SU, SDep::Order, SU->Latency));
        I->second.clear();
      }
      // See if it is known to just have a single memory reference.
      MachineInstr *ChainMI = Chain->getInstr();
      const TargetInstrDesc &ChainTID = ChainMI->getDesc();
      if (!ChainTID.isCall() &&
          !ChainTID.hasUnmodeledSideEffects() &&
          ChainMI->hasOneMemOperand() &&
          !ChainMI->memoperands_begin()->isVolatile() &&
          ChainMI->memoperands_begin()->getValue())
        // We know that the Chain accesses one specific memory location.
        ChainMMO = &*ChainMI->memoperands_begin();
      else
        // Unknown memory accesses. Assume the worst.
        ChainMMO = 0;
    } else if (TID.mayStore()) {
      if (const Value *V = getUnderlyingObjectForInstr(MI)) {
        // A store to a specific PseudoSourceValue. Add precise dependencies.
        // Handle the def in MemDefs, if there is one.
        std::map<const Value *, SUnit *>::iterator I = MemDefs.find(V);
        if (I != MemDefs.end()) {
          I->second->addPred(SDep(SU, SDep::Order, SU->Latency, /*Reg=*/0,
                                  /*isNormalMemory=*/true));
          I->second = SU;
        } else {
          MemDefs[V] = SU;
        }
        // Handle the uses in MemUses, if there are any.
        std::map<const Value *, std::vector<SUnit *> >::iterator J =
          MemUses.find(V);
        if (J != MemUses.end()) {
          for (unsigned i = 0, e = J->second.size(); i != e; ++i)
            J->second[i]->addPred(SDep(SU, SDep::Order, SU->Latency, /*Reg=*/0,
                                       /*isNormalMemory=*/true));
          J->second.clear();
        }
        // Add dependencies from all the PendingLoads, since without
        // memoperands we must assume they alias anything.
        for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
          PendingLoads[k]->addPred(SDep(SU, SDep::Order, SU->Latency));
        // Add a general dependence too, if needed.
        if (Chain)
          Chain->addPred(SDep(SU, SDep::Order, SU->Latency));
      } else
        // Treat all other stores conservatively.
        goto new_chain;
    } else if (TID.mayLoad()) {
      if (TII->isInvariantLoad(MI)) {
        // Invariant load, no chain dependencies needed!
      } else if (const Value *V = getUnderlyingObjectForInstr(MI)) {
        // A load from a specific PseudoSourceValue. Add precise dependencies.
        std::map<const Value *, SUnit *>::iterator I = MemDefs.find(V);
        if (I != MemDefs.end())
          I->second->addPred(SDep(SU, SDep::Order, SU->Latency, /*Reg=*/0,
                                  /*isNormalMemory=*/true));
        MemUses[V].push_back(SU);

        // Add a general dependence too, if needed.
        if (Chain && (!ChainMMO ||
                      (ChainMMO->isStore() || ChainMMO->isVolatile())))
          Chain->addPred(SDep(SU, SDep::Order, SU->Latency));
      } else if (MI->hasVolatileMemoryRef()) {
        // Treat volatile loads conservatively. Note that this includes
        // cases where memoperand information is unavailable.
        goto new_chain;
      } else {
        // A normal load. Depend on the general chain, as well as on
        // all stores. In the absence of MachineMemOperand information,
        // we can't even assume that the load doesn't alias well-behaved
        // memory locations.
        if (Chain)
          Chain->addPred(SDep(SU, SDep::Order, SU->Latency));
        for (std::map<const Value *, SUnit *>::iterator I = MemDefs.begin(),
             E = MemDefs.end(); I != E; ++I)
          I->second->addPred(SDep(SU, SDep::Order, SU->Latency));
        PendingLoads.push_back(SU);
      }
    }
  }

  for (int i = 0, e = TRI->getNumRegs(); i != e; ++i) {
    Defs[i].clear();
    Uses[i].clear();
  }
  PendingLoads.clear();
}
void ScheduleDAGInstrs::FinishBlock() {
  // Nothing to do.
}
void ScheduleDAGInstrs::ComputeLatency(SUnit *SU) {
  const InstrItineraryData &InstrItins = TM.getInstrItineraryData();

  // Compute the latency for the node.
  SU->Latency =
    InstrItins.getStageLatency(SU->getInstr()->getDesc().getSchedClass());

  // Simplistic target-independent heuristic: assume that loads take
  // extra time.
  if (InstrItins.isEmpty())
    if (SU->getInstr()->getDesc().mayLoad())
      SU->Latency += 2;
}
void ScheduleDAGInstrs::ComputeOperandLatency(SUnit *Def, SUnit *Use,
                                              SDep& dep) const {
  const InstrItineraryData &InstrItins = TM.getInstrItineraryData();
  if (InstrItins.isEmpty())
    return;

  // For a data dependency with a known register...
  if ((dep.getKind() != SDep::Data) || (dep.getReg() == 0))
    return;

  const unsigned Reg = dep.getReg();

  // ... find the definition of the register in the defining
  // instruction (DefMI).
  MachineInstr *DefMI = Def->getInstr();
  int DefIdx = DefMI->findRegisterDefOperandIdx(Reg);
  if (DefIdx != -1) {
    int DefCycle = InstrItins.getOperandCycle(DefMI->getDesc().getSchedClass(), DefIdx);
    if (DefCycle >= 0) {
      MachineInstr *UseMI = Use->getInstr();
      const unsigned UseClass = UseMI->getDesc().getSchedClass();

      // For all uses of the register, calculate the maximum latency.
      int Latency = -1;
      for (unsigned i = 0, e = UseMI->getNumOperands(); i != e; ++i) {
        const MachineOperand &MO = UseMI->getOperand(i);
        if (!MO.isReg() || !MO.isUse())
          continue;
        unsigned MOReg = MO.getReg();
        if (MOReg != Reg)
          continue;

        int UseCycle = InstrItins.getOperandCycle(UseClass, i);
        if (UseCycle >= 0)
          Latency = std::max(Latency, DefCycle - UseCycle + 1);
      }

      // If we found a latency, then replace the existing dependence latency.
      if (Latency >= 0)
        dep.setLatency(Latency);
    }
  }
}
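// Added commentary (worked example, not part of the original source): if the
// itinerary reports that the defining instruction produces Reg in cycle 3
// (DefCycle) and the using instruction reads its operand in cycle 1
// (UseCycle), the edge latency becomes DefCycle - UseCycle + 1 = 3 cycles
// (taking the maximum over all operands of UseMI that read Reg), replacing
// the default latency on the data dependence.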
void ScheduleDAGInstrs::dumpNode(const SUnit *SU) const {
  SU->getInstr()->dump();
}
std::string ScheduleDAGInstrs::getGraphNodeLabel(const SUnit *SU) const {
  std::string s;
  raw_string_ostream oss(s);
  if (SU == &EntrySU)
    oss << "<entry>";
  else if (SU == &ExitSU)
    oss << "<exit>";
  else
    SU->getInstr()->print(oss);
  return oss.str();
}
// EmitSchedule - Emit the machine code in scheduled order.
MachineBasicBlock *ScheduleDAGInstrs::EmitSchedule() {
  // For MachineInstr-based scheduling, we're rescheduling the instructions in
  // the block, so start by removing them from the block.
  while (Begin != InsertPos) {
    MachineBasicBlock::iterator I = Begin;
    ++Begin;
    BB->remove(I);
  }

  // Then re-insert them according to the given schedule.
  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
    SUnit *SU = Sequence[i];
    if (!SU) {
      // Null SUnit* is a noop.
      EmitNoop();
      continue;
    }

    BB->insert(InsertPos, SU->getInstr());
  }

  // Update the Begin iterator, as the first instruction in the block
  // may have been scheduled later.
  if (!Sequence.empty())
    Begin = Sequence[0]->getInstr();

  return BB;
}