//===---- ScheduleDAGInstrs.cpp - MachineInstr Rescheduling ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the ScheduleDAGInstrs class, which implements re-scheduling
// of MachineInstrs.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "sched-instrs"
#include "ScheduleDAGInstrs.h"
#include "llvm/Operator.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtarget.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/SmallSet.h"
using namespace llvm;
ScheduleDAGInstrs::ScheduleDAGInstrs(MachineFunction &mf,
                                     const MachineLoopInfo &mli,
                                     const MachineDominatorTree &mdt)
  : ScheduleDAG(mf), MLI(mli), MDT(mdt), LoopRegs(MLI, MDT) {}
/// Run - perform scheduling.
void ScheduleDAGInstrs::Run(MachineBasicBlock *bb,
                            MachineBasicBlock::iterator begin,
                            MachineBasicBlock::iterator end,
                            unsigned endcount) {
  BB = bb;
  Begin = begin;
  InsertPosIndex = endcount;

  ScheduleDAG::Run(bb, end);
}
/// getUnderlyingObjectFromInt - This is the function that does the work of
/// looking through basic ptrtoint+arithmetic+inttoptr sequences.
static const Value *getUnderlyingObjectFromInt(const Value *V) {
  do {
    if (const Operator *U = dyn_cast<Operator>(V)) {
      // If we find a ptrtoint, we can transfer control back to the
      // regular getUnderlyingObjectFromInt.
      if (U->getOpcode() == Instruction::PtrToInt)
        return U->getOperand(0);
      // If we find an add of a constant or a multiplied value, it's
      // likely that the other operand will lead us to the base
      // object. We don't have to worry about the case where the
      // object address is somehow being computed by the multiply,
      // because our callers only care when the result is an
      // identifiable object.
      if (U->getOpcode() != Instruction::Add ||
          (!isa<ConstantInt>(U->getOperand(1)) &&
           Operator::getOpcode(U->getOperand(1)) != Instruction::Mul))
        return V;
      V = U->getOperand(0);
    } else {
      return V;
    }
    assert(isa<IntegerType>(V->getType()) && "Unexpected operand type!");
  } while (1);
}
/// getUnderlyingObject - This is a wrapper around Value::getUnderlyingObject
/// and adds support for basic ptrtoint+arithmetic+inttoptr sequences.
static const Value *getUnderlyingObject(const Value *V) {
  // First just call Value::getUnderlyingObject to let it do what it does.
  do {
    V = V->getUnderlyingObject();
    // If it found an inttoptr, use special code to continue climbing.
    if (Operator::getOpcode(V) != Instruction::IntToPtr)
      break;
    const Value *O = getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
    // If that succeeded in finding a pointer, continue the search.
    if (!isa<PointerType>(O->getType()))
      break;
    V = O;
  } while (1);
  return V;
}
/// getUnderlyingObjectForInstr - If this machine instr has memory reference
/// information and it can be tracked to a normal reference to a known
/// object, return the Value for that object. Otherwise return null.
static const Value *getUnderlyingObjectForInstr(const MachineInstr *MI) {
  if (!MI->hasOneMemOperand() ||
      !MI->memoperands_begin()->getValue() ||
      MI->memoperands_begin()->isVolatile())
    return 0;

  const Value *V = MI->memoperands_begin()->getValue();
  if (!V)
    return 0;

  V = getUnderlyingObject(V);
  if (!isa<PseudoSourceValue>(V) && !isIdentifiedObject(V))
    return 0;

  return V;
}
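
/// StartBlock - Note the start of a new scheduling block. If the block is
/// a loop latch, record the loop header's live-in registers and visit the
/// loop so that register defs which wrap back around the loop can be
/// back-scheduled later.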
void ScheduleDAGInstrs::StartBlock(MachineBasicBlock *BB) {
  if (MachineLoop *ML = MLI.getLoopFor(BB))
    if (BB == ML->getLoopLatch()) {
      MachineBasicBlock *Header = ML->getHeader();
      for (MachineBasicBlock::livein_iterator I = Header->livein_begin(),
           E = Header->livein_end(); I != E; ++I)
        LoopLiveInRegs.insert(*I);
      LoopRegs.VisitLoop(ML);
    }
}
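
/// BuildSchedGraph - Create an SUnit for each instruction in the current
/// scheduling region and add register (data, anti, output) and memory
/// chain dependence edges between them.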
void ScheduleDAGInstrs::BuildSchedGraph() {
  // We'll be allocating one SUnit for each instruction, plus one for
  // the region exit node.
  SUnits.reserve(BB->size());

  // We build scheduling units by walking a block's instruction list from bottom
  // to top.

  // Remember where a generic side-effecting instruction is as we proceed. If
  // ChainMMO is null, this is assumed to have arbitrary side-effects. If
  // ChainMMO is non-null, then Chain makes only a single memory reference.
  SUnit *Chain = 0;
  MachineMemOperand *ChainMMO = 0;

  // Memory references to specific known memory locations are tracked so that
  // they can be given more precise dependencies.
  std::map<const Value *, SUnit *> MemDefs;
  std::map<const Value *, std::vector<SUnit *> > MemUses;

  // Check to see if the scheduler cares about latencies.
  bool UnitLatencies = ForceUnitLatencies();

  // Ask the target if address-backscheduling is desirable, and if so how much.
  const TargetSubtarget &ST = TM.getSubtarget<TargetSubtarget>();
  unsigned SpecialAddressLatency = ST.getSpecialAddressLatency();

  // Walk the list of instructions, from bottom moving up.
  for (MachineBasicBlock::iterator MII = InsertPos, MIE = Begin;
       MII != MIE; --MII) {
    MachineInstr *MI = prior(MII);
    const TargetInstrDesc &TID = MI->getDesc();
    assert(!TID.isTerminator() && !MI->isLabel() &&
           "Cannot schedule terminators or labels!");
    // Create the SUnit for this MI.
    SUnit *SU = NewSUnit(MI);

    // Assign the Latency field of SU using target-provided information.
    if (UnitLatencies)
      SU->Latency = 1;
    else
      ComputeLatency(SU);

    // Add register-based dependencies (data, anti, and output).
    for (unsigned j = 0, n = MI->getNumOperands(); j != n; ++j) {
      const MachineOperand &MO = MI->getOperand(j);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;

      assert(TRI->isPhysicalRegister(Reg) && "Virtual register encountered!");
      std::vector<SUnit *> &UseList = Uses[Reg];
      std::vector<SUnit *> &DefList = Defs[Reg];
      // Optionally add output and anti dependencies. For anti
      // dependencies we use a latency of 0 because for a multi-issue
      // target we want to allow the defining instruction to issue
      // in the same cycle as the using instruction.
      // TODO: Using a latency of 1 here for output dependencies assumes
      //       there's no cost for reusing registers.
      SDep::Kind Kind = MO.isUse() ? SDep::Anti : SDep::Output;
      unsigned AOLatency = (Kind == SDep::Anti) ? 0 : 1;
      for (unsigned i = 0, e = DefList.size(); i != e; ++i) {
        SUnit *DefSU = DefList[i];
        if (DefSU != SU &&
            (Kind != SDep::Output || !MO.isDead() ||
             !DefSU->getInstr()->registerDefIsDead(Reg)))
          DefSU->addPred(SDep(SU, Kind, AOLatency, /*Reg=*/Reg));
      }
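      // Also add output and anti dependencies against earlier defs of
      // registers that alias Reg.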
      for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
        std::vector<SUnit *> &DefList = Defs[*Alias];
        for (unsigned i = 0, e = DefList.size(); i != e; ++i) {
          SUnit *DefSU = DefList[i];
          if (DefSU != SU &&
              (Kind != SDep::Output || !MO.isDead() ||
               !DefSU->getInstr()->registerDefIsDead(Reg)))
            DefSU->addPred(SDep(SU, Kind, AOLatency, /*Reg=*/ *Alias));
        }
      }

      if (MO.isDef()) {
        // Add any data dependencies.
        unsigned DataLatency = SU->Latency;
        for (unsigned i = 0, e = UseList.size(); i != e; ++i) {
          SUnit *UseSU = UseList[i];
          if (UseSU != SU) {
            unsigned LDataLatency = DataLatency;
            // Optionally add in a special extra latency for nodes that
            // feed addresses.
            // TODO: Do this for register aliases too.
            if (SpecialAddressLatency != 0 && !UnitLatencies) {
              MachineInstr *UseMI = UseSU->getInstr();
              const TargetInstrDesc &UseTID = UseMI->getDesc();
              int RegUseIndex = UseMI->findRegisterUseOperandIdx(Reg);
              assert(RegUseIndex >= 0 && "UseMI doesn't use register!");
              if ((UseTID.mayLoad() || UseTID.mayStore()) &&
                  (unsigned)RegUseIndex < UseTID.getNumOperands() &&
                  UseTID.OpInfo[RegUseIndex].isLookupPtrRegClass())
                LDataLatency += SpecialAddressLatency;
            }
            const SDep &dep = SDep(SU, SDep::Data, LDataLatency, Reg);
            ST.adjustSchedDependency((SDep &)dep);
            UseSU->addPred(dep);
          }
        }
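        // Add data dependencies to any earlier uses of registers that
        // alias Reg as well, using the unadjusted latency.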
        for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
          std::vector<SUnit *> &UseList = Uses[*Alias];
          for (unsigned i = 0, e = UseList.size(); i != e; ++i) {
            SUnit *UseSU = UseList[i];
            if (UseSU != SU) {
              const SDep &dep = SDep(SU, SDep::Data, DataLatency, *Alias);
              ST.adjustSchedDependency((SDep &)dep);
              UseSU->addPred(dep);
            }
          }
        }

        // If a def is going to wrap back around to the top of the loop,
        // backschedule it.
        if (!UnitLatencies && DefList.empty()) {
          LoopDependencies::LoopDeps::iterator I = LoopRegs.Deps.find(Reg);
          if (I != LoopRegs.Deps.end()) {
            const MachineOperand *UseMO = I->second.first;
            unsigned Count = I->second.second;
            const MachineInstr *UseMI = UseMO->getParent();
            unsigned UseMOIdx = UseMO - &UseMI->getOperand(0);
            const TargetInstrDesc &UseTID = UseMI->getDesc();
            // TODO: If we knew the total depth of the region here, we could
            // handle the case where the whole loop is inside the region but
            // is large enough that the isScheduleHigh trick isn't needed.
            if (UseMOIdx < UseTID.getNumOperands()) {
              // Currently, we only support scheduling regions consisting of
              // single basic blocks. Check to see if the instruction is in
              // the same region by checking to see if it has the same parent.
              if (UseMI->getParent() != MI->getParent()) {
                unsigned Latency = SU->Latency;
                if (UseTID.OpInfo[UseMOIdx].isLookupPtrRegClass())
                  Latency += SpecialAddressLatency;
                // This is a wild guess as to the portion of the latency which
                // will be overlapped by work done outside the current
                // scheduling region.
                Latency -= std::min(Latency, Count);
                // Add the artificial edge.
                ExitSU.addPred(SDep(SU, SDep::Order, Latency,
                                    /*Reg=*/0, /*isNormalMemory=*/false,
                                    /*isMustAlias=*/false,
                                    /*isArtificial=*/true));
              } else if (SpecialAddressLatency > 0 &&
                         UseTID.OpInfo[UseMOIdx].isLookupPtrRegClass()) {
                // The entire loop body is within the current scheduling region
                // and the latency of this operation is assumed to be greater
                // than the latency of the loop.
                // TODO: Recursively mark data-edge predecessors as
                //       isScheduleHigh too.
                SU->isScheduleHigh = true;
              }
            }
            LoopRegs.Deps.erase(I);
          }
        }

        UseList.clear();
        DefList.push_back(SU);
      } else {
        UseList.push_back(SU);
      }
    }

    // Add chain dependencies.
    // Note that isStoreToStackSlot and isLoadFromStackSlot are not usable
    // after stack slots are lowered to actual addresses.
    // TODO: Use an AliasAnalysis and do real alias-analysis queries, and
    // produce more precise dependence information.
    if (TID.isCall() || TID.hasUnmodeledSideEffects()) {
    new_chain:
      // This is the conservative case. Add dependencies on all memory
      // references.
      if (Chain)
        Chain->addPred(SDep(SU, SDep::Order, SU->Latency));
      Chain = SU;
      for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
        PendingLoads[k]->addPred(SDep(SU, SDep::Order, SU->Latency));
      PendingLoads.clear();
      for (std::map<const Value *, SUnit *>::iterator I = MemDefs.begin(),
           E = MemDefs.end(); I != E; ++I) {
        I->second->addPred(SDep(SU, SDep::Order, SU->Latency));
        I->second = SU;
      }
      for (std::map<const Value *, std::vector<SUnit *> >::iterator I =
           MemUses.begin(), E = MemUses.end(); I != E; ++I) {
        for (unsigned i = 0, e = I->second.size(); i != e; ++i)
          I->second[i]->addPred(SDep(SU, SDep::Order, SU->Latency));
        I->second.clear();
      }
      // See if it is known to just have a single memory reference.
      MachineInstr *ChainMI = Chain->getInstr();
      const TargetInstrDesc &ChainTID = ChainMI->getDesc();
      if (!ChainTID.isCall() &&
          !ChainTID.hasUnmodeledSideEffects() &&
          ChainMI->hasOneMemOperand() &&
          !ChainMI->memoperands_begin()->isVolatile() &&
          ChainMI->memoperands_begin()->getValue())
        // We know that the Chain accesses one specific memory location.
        ChainMMO = &*ChainMI->memoperands_begin();
      else
        // Unknown memory accesses. Assume the worst.
        ChainMMO = 0;
    } else if (TID.mayStore()) {
      if (const Value *V = getUnderlyingObjectForInstr(MI)) {
        // A store to a specific PseudoSourceValue. Add precise dependencies.
        // Handle the def in MemDefs, if there is one.
        std::map<const Value *, SUnit *>::iterator I = MemDefs.find(V);
        if (I != MemDefs.end()) {
          I->second->addPred(SDep(SU, SDep::Order, SU->Latency, /*Reg=*/0,
                                  /*isNormalMemory=*/true));
          I->second = SU;
        } else {
          MemDefs[V] = SU;
        }
        // Handle the uses in MemUses, if there are any.
        std::map<const Value *, std::vector<SUnit *> >::iterator J =
          MemUses.find(V);
        if (J != MemUses.end()) {
          for (unsigned i = 0, e = J->second.size(); i != e; ++i)
            J->second[i]->addPred(SDep(SU, SDep::Order, SU->Latency, /*Reg=*/0,
                                       /*isNormalMemory=*/true));
          J->second.clear();
        }
        // Add dependencies from all the PendingLoads, since without
        // memoperands we must assume they alias anything.
        for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
          PendingLoads[k]->addPred(SDep(SU, SDep::Order, SU->Latency));
        // Add a general dependence too, if needed.
        if (Chain)
          Chain->addPred(SDep(SU, SDep::Order, SU->Latency));
      } else {
        // Treat all other stores conservatively.
        goto new_chain;
      }
    } else if (TID.mayLoad()) {
      if (TII->isInvariantLoad(MI)) {
        // Invariant load, no chain dependencies needed!
      } else if (const Value *V = getUnderlyingObjectForInstr(MI)) {
        // A load from a specific PseudoSourceValue. Add precise dependencies.
        std::map<const Value *, SUnit *>::iterator I = MemDefs.find(V);
        if (I != MemDefs.end())
          I->second->addPred(SDep(SU, SDep::Order, SU->Latency, /*Reg=*/0,
                                  /*isNormalMemory=*/true));
        MemUses[V].push_back(SU);

        // Add a general dependence too, if needed.
        if (Chain && (!ChainMMO ||
                      (ChainMMO->isStore() || ChainMMO->isVolatile())))
          Chain->addPred(SDep(SU, SDep::Order, SU->Latency));
      } else if (MI->hasVolatileMemoryRef()) {
        // Treat volatile loads conservatively. Note that this includes
        // cases where memoperand information is unavailable.
        goto new_chain;
      } else {
        // A normal load. Depend on the general chain, as well as on
        // all stores. In the absence of MachineMemOperand information,
        // we can't even assume that the load doesn't alias well-behaved
        // memory locations.
        if (Chain)
          Chain->addPred(SDep(SU, SDep::Order, SU->Latency));
        for (std::map<const Value *, SUnit *>::iterator I = MemDefs.begin(),
             E = MemDefs.end(); I != E; ++I)
          I->second->addPred(SDep(SU, SDep::Order, SU->Latency));
        PendingLoads.push_back(SU);
      }
    }
  }
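
  // All dependencies for the region have been added; reset the per-register
  // def/use lists and the pending loads so no state carries over into the
  // next scheduling region.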
  for (int i = 0, e = TRI->getNumRegs(); i != e; ++i) {
    Defs[i].clear();
    Uses[i].clear();
  }
  PendingLoads.clear();
}

void ScheduleDAGInstrs::FinishBlock() {
  // Nothing to do.
}
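
/// ComputeLatency - Estimate this node's latency from the target's
/// instruction itinerary, with a simple heuristic for loads when no
/// itinerary data is available.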
void ScheduleDAGInstrs::ComputeLatency(SUnit *SU) {
  const InstrItineraryData &InstrItins = TM.getInstrItineraryData();

  // Compute the latency for the node.
  SU->Latency =
    InstrItins.getLatency(SU->getInstr()->getDesc().getSchedClass());

  // Simplistic target-independent heuristic: assume that loads take
  // extra time.
  if (InstrItins.isEmpty())
    if (SU->getInstr()->getDesc().mayLoad())
      SU->Latency += 2;
}

void ScheduleDAGInstrs::dumpNode(const SUnit *SU) const {
  SU->getInstr()->dump();
}
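
/// getGraphNodeLabel - Return a label for the given SUnit, for use when
/// rendering the scheduling DAG as a graph.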
std::string ScheduleDAGInstrs::getGraphNodeLabel(const SUnit *SU) const {
  std::string s;
  raw_string_ostream oss(s);
  if (SU == &EntrySU)
    oss << "<entry>";
  else if (SU == &ExitSU)
    oss << "<exit>";
  else
    SU->getInstr()->print(oss);
  return oss.str();
}

// EmitSchedule - Emit the machine code in scheduled order.
MachineBasicBlock *ScheduleDAGInstrs::EmitSchedule() {
  // For MachineInstr-based scheduling, we're rescheduling the instructions in
  // the block, so start by removing them from the block.
  while (Begin != InsertPos) {
    MachineBasicBlock::iterator I = Begin;
    ++Begin;
    BB->remove(I);
  }

  // Then re-insert them according to the given schedule.
  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
    SUnit *SU = Sequence[i];
    if (!SU) {
      // Null SUnit* is a noop.
      EmitNoop();
      continue;
    }

    BB->insert(InsertPos, SU->getInstr());
  }

  // Update the Begin iterator, as the first instruction in the block
  // may have been scheduled later.
  if (!Sequence.empty())
    Begin = Sequence[0]->getInstr();

  return BB;
}