//===---- ScheduleDAGInstrs.cpp - MachineInstr Rescheduling ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the ScheduleDAGInstrs class, which implements re-scheduling
// of MachineInstrs.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "sched-instrs"
#include "ScheduleDAGInstrs.h"
#include "llvm/Operator.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtarget.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/SmallSet.h"
using namespace llvm;

ScheduleDAGInstrs::ScheduleDAGInstrs(MachineFunction &mf,
                                     const MachineLoopInfo &mli,
                                     const MachineDominatorTree &mdt)
  : ScheduleDAG(mf), MLI(mli), MDT(mdt), LoopRegs(MLI, MDT) {}

/// Run - perform scheduling.
///
void ScheduleDAGInstrs::Run(MachineBasicBlock *bb,
                            MachineBasicBlock::iterator begin,
                            MachineBasicBlock::iterator end,
                            unsigned endcount) {
  BB = bb;
  Begin = begin;
  InsertPosIndex = endcount;

  ScheduleDAG::Run(bb, end);
}

/// getUnderlyingObjectFromInt - This is the function that does the work of
/// looking through basic ptrtoint+arithmetic+inttoptr sequences.
static const Value *getUnderlyingObjectFromInt(const Value *V) {
  do {
    if (const Operator *U = dyn_cast<Operator>(V)) {
      // If we find a ptrtoint, we can transfer control back to the
      // regular getUnderlyingObjectFromInt.
      if (U->getOpcode() == Instruction::PtrToInt)
        return U->getOperand(0);
      // If we find an add of a constant or a multiplied value, it's
      // likely that the other operand will lead us to the base
      // object. We don't have to worry about the case where the
      // object address is somehow being computed by the multiply,
      // because our callers only care when the result is an
      // identifiable object.
      if (U->getOpcode() != Instruction::Add ||
          (!isa<ConstantInt>(U->getOperand(1)) &&
           Operator::getOpcode(U->getOperand(1)) != Instruction::Mul))
        return V;
      V = U->getOperand(0);
    } else {
      return V;
    }
    assert(isa<IntegerType>(V->getType()) && "Unexpected operand type!");
  } while (1);
}

/// getUnderlyingObject - This is a wrapper around Value::getUnderlyingObject
/// and adds support for basic ptrtoint+arithmetic+inttoptr sequences.
static const Value *getUnderlyingObject(const Value *V) {
  // First just call Value::getUnderlyingObject to let it do what it does.
  do {
    V = V->getUnderlyingObject();
    // If it found an inttoptr, use special code to continue climbing.
    if (Operator::getOpcode(V) != Instruction::IntToPtr)
      break;
    const Value *O = getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
    // If that succeeded in finding a pointer, continue the search.
    if (!isa<PointerType>(O->getType()))
      break;
    V = O;
  } while (1);
  return V;
}

/// getUnderlyingObjectForInstr - If this machine instr has memory reference
/// information and it can be tracked to a normal reference to a known
/// object, return the Value for that object. Otherwise return null.
static const Value *getUnderlyingObjectForInstr(const MachineInstr *MI) {
  if (!MI->hasOneMemOperand() ||
      !MI->memoperands_begin()->getValue() ||
      MI->memoperands_begin()->isVolatile())
    return 0;

  const Value *V = MI->memoperands_begin()->getValue();
  if (!V)
    return 0;

  V = getUnderlyingObject(V);
  if (!isa<PseudoSourceValue>(V) && !isIdentifiedObject(V))
    return 0;

  return V;
}
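
/// StartBlock - If the block being entered is the latch of a loop, record the
/// loop header's live-in registers and visit the loop with LoopRegs so that
/// loop-carried register dependencies can be modeled when the graph is built.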
void ScheduleDAGInstrs::StartBlock(MachineBasicBlock *BB) {
  if (MachineLoop *ML = MLI.getLoopFor(BB))
    if (BB == ML->getLoopLatch()) {
      MachineBasicBlock *Header = ML->getHeader();
      for (MachineBasicBlock::livein_iterator I = Header->livein_begin(),
           E = Header->livein_end(); I != E; ++I)
        LoopLiveInRegs.insert(*I);
      LoopRegs.VisitLoop(ML);
    }
}
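
/// BuildSchedGraph - Walk the scheduling region from bottom to top, creating
/// one SUnit per MachineInstr and adding register (data, anti, output) and
/// memory chain dependence edges between the SUnits.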
void ScheduleDAGInstrs::BuildSchedGraph() {
  // We'll be allocating one SUnit for each instruction, plus one for
  // the region exit node.
  SUnits.reserve(BB->size());

  // We build scheduling units by walking a block's instruction list from bottom
  // to top.

  // Remember where a generic side-effecting instruction is as we proceed. If
  // ChainMMO is null, this is assumed to have arbitrary side-effects. If
  // ChainMMO is non-null, then Chain makes only a single memory reference.
  SUnit *Chain = 0;
  MachineMemOperand *ChainMMO = 0;

  // Memory references to specific known memory locations are tracked so that
  // they can be given more precise dependencies.
  std::map<const Value *, SUnit *> MemDefs;
  std::map<const Value *, std::vector<SUnit *> > MemUses;

  // Check to see if the scheduler cares about latencies.
  bool UnitLatencies = ForceUnitLatencies();

  // Ask the target if address-backscheduling is desirable, and if so how much.
  unsigned SpecialAddressLatency =
    TM.getSubtarget<TargetSubtarget>().getSpecialAddressLatency();

  // Walk the list of instructions, from bottom moving up.
  for (MachineBasicBlock::iterator MII = InsertPos, MIE = Begin;
       MII != MIE; --MII) {
    MachineInstr *MI = prior(MII);
    const TargetInstrDesc &TID = MI->getDesc();
    assert(!TID.isTerminator() && !MI->isLabel() &&
           "Cannot schedule terminators or labels!");
    // Create the SUnit for this MI.
    SUnit *SU = NewSUnit(MI);

    // Assign the Latency field of SU using target-provided information.
    if (UnitLatencies)
      SU->Latency = 1;
    else
      ComputeLatency(SU);

    // Add register-based dependencies (data, anti, and output).
    for (unsigned j = 0, n = MI->getNumOperands(); j != n; ++j) {
      const MachineOperand &MO = MI->getOperand(j);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;

      assert(TRI->isPhysicalRegister(Reg) && "Virtual register encountered!");
      std::vector<SUnit *> &UseList = Uses[Reg];
      std::vector<SUnit *> &DefList = Defs[Reg];
      // Optionally add output and anti dependencies.
      // TODO: Using a latency of 1 here assumes there's no cost for
      // reusing registers.
      SDep::Kind Kind = MO.isUse() ? SDep::Anti : SDep::Output;
      for (unsigned i = 0, e = DefList.size(); i != e; ++i) {
        SUnit *DefSU = DefList[i];
        if (DefSU != SU &&
            (Kind != SDep::Output || !MO.isDead() ||
             !DefSU->getInstr()->registerDefIsDead(Reg)))
          DefSU->addPred(SDep(SU, Kind, /*Latency=*/1, /*Reg=*/Reg));
      }
      for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
        std::vector<SUnit *> &DefList = Defs[*Alias];
        for (unsigned i = 0, e = DefList.size(); i != e; ++i) {
          SUnit *DefSU = DefList[i];
          if (DefSU != SU &&
              (Kind != SDep::Output || !MO.isDead() ||
               !DefSU->getInstr()->registerDefIsDead(Reg)))
            DefSU->addPred(SDep(SU, Kind, /*Latency=*/1, /*Reg=*/ *Alias));
        }
      }

      if (MO.isDef()) {
        // Add any data dependencies.
        unsigned DataLatency = SU->Latency;
        for (unsigned i = 0, e = UseList.size(); i != e; ++i) {
          SUnit *UseSU = UseList[i];
          if (UseSU != SU) {
            unsigned LDataLatency = DataLatency;
            // Optionally add in a special extra latency for nodes that
            // feed addresses.
            // TODO: Do this for register aliases too.
            if (SpecialAddressLatency != 0 && !UnitLatencies) {
              MachineInstr *UseMI = UseSU->getInstr();
              const TargetInstrDesc &UseTID = UseMI->getDesc();
              int RegUseIndex = UseMI->findRegisterUseOperandIdx(Reg);
              assert(RegUseIndex >= 0 && "UseMI doesn't use register!");
              if ((UseTID.mayLoad() || UseTID.mayStore()) &&
                  (unsigned)RegUseIndex < UseTID.getNumOperands() &&
                  UseTID.OpInfo[RegUseIndex].isLookupPtrRegClass())
                LDataLatency += SpecialAddressLatency;
            }
            UseSU->addPred(SDep(SU, SDep::Data, LDataLatency, Reg));
          }
        }
        for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
          std::vector<SUnit *> &UseList = Uses[*Alias];
          for (unsigned i = 0, e = UseList.size(); i != e; ++i) {
            SUnit *UseSU = UseList[i];
            if (UseSU != SU)
              UseSU->addPred(SDep(SU, SDep::Data, DataLatency, *Alias));
          }
        }
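
        // LoopRegs.Deps was filled in by StartBlock (via LoopRegs.VisitLoop)
        // and maps a register to the loop-body operand that will read it on
        // the next iteration, along with a count used below to estimate how
        // much of this def's latency is hidden by other work in the loop.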
        // If a def is going to wrap back around to the top of the loop,
        // backschedule it.
        if (!UnitLatencies && DefList.empty()) {
          LoopDependencies::LoopDeps::iterator I = LoopRegs.Deps.find(Reg);
          if (I != LoopRegs.Deps.end()) {
            const MachineOperand *UseMO = I->second.first;
            unsigned Count = I->second.second;
            const MachineInstr *UseMI = UseMO->getParent();
            unsigned UseMOIdx = UseMO - &UseMI->getOperand(0);
            const TargetInstrDesc &UseTID = UseMI->getDesc();
            // TODO: If we knew the total depth of the region here, we could
            // handle the case where the whole loop is inside the region but
            // is large enough that the isScheduleHigh trick isn't needed.
            if (UseMOIdx < UseTID.getNumOperands()) {
              // Currently, we only support scheduling regions consisting of
              // single basic blocks. Check to see if the instruction is in
              // the same region by checking to see if it has the same parent.
              if (UseMI->getParent() != MI->getParent()) {
                unsigned Latency = SU->Latency;
                if (UseTID.OpInfo[UseMOIdx].isLookupPtrRegClass())
                  Latency += SpecialAddressLatency;
                // This is a wild guess as to the portion of the latency which
                // will be overlapped by work done outside the current
                // scheduling region.
                Latency -= std::min(Latency, Count);
                // Add the artificial edge.
                ExitSU.addPred(SDep(SU, SDep::Order, Latency,
                                    /*Reg=*/0, /*isNormalMemory=*/false,
                                    /*isMustAlias=*/false,
                                    /*isArtificial=*/true));
              } else if (SpecialAddressLatency > 0 &&
                         UseTID.OpInfo[UseMOIdx].isLookupPtrRegClass()) {
                // The entire loop body is within the current scheduling region
                // and the latency of this operation is assumed to be greater
                // than the latency of the loop.
                // TODO: Recursively mark data-edge predecessors as
                // isScheduleHigh too.
                SU->isScheduleHigh = true;
              }
            }
            LoopRegs.Deps.erase(I);
          }
        }
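
        // Update the register def/use lists. Instructions earlier in the block
        // are visited later in this bottom-up walk; they consult Defs[Reg] and
        // Uses[Reg] to find the SUnits recorded here.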
        DefList.push_back(SU);
      } else {
        UseList.push_back(SU);
      }
    }

    // Add chain dependencies.
    // Note that isStoreToStackSlot and isLoadFromStackSlot are not usable
    // after stack slots are lowered to actual addresses.
    // TODO: Use an AliasAnalysis and do real alias-analysis queries, and
    // produce more precise dependence information.
    if (TID.isCall() || TID.hasUnmodeledSideEffects()) {
      // This is the conservative case. Add dependencies on all memory
      // references.
      if (Chain)
        Chain->addPred(SDep(SU, SDep::Order, SU->Latency));
      for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
        PendingLoads[k]->addPred(SDep(SU, SDep::Order, SU->Latency));
      PendingLoads.clear();
      for (std::map<const Value *, SUnit *>::iterator I = MemDefs.begin(),
           E = MemDefs.end(); I != E; ++I) {
        I->second->addPred(SDep(SU, SDep::Order, SU->Latency));
      }
      for (std::map<const Value *, std::vector<SUnit *> >::iterator I =
           MemUses.begin(), E = MemUses.end(); I != E; ++I) {
        for (unsigned i = 0, e = I->second.size(); i != e; ++i)
          I->second[i]->addPred(SDep(SU, SDep::Order, SU->Latency));
      }
      // See if it is known to just have a single memory reference.
      MachineInstr *ChainMI = Chain->getInstr();
      const TargetInstrDesc &ChainTID = ChainMI->getDesc();
      if (!ChainTID.isCall() &&
          !ChainTID.hasUnmodeledSideEffects() &&
          ChainMI->hasOneMemOperand() &&
          !ChainMI->memoperands_begin()->isVolatile() &&
          ChainMI->memoperands_begin()->getValue())
        // We know that the Chain accesses one specific memory location.
        ChainMMO = &*ChainMI->memoperands_begin();
      else
        // Unknown memory accesses. Assume the worst.
        ChainMMO = 0;
    } else if (TID.mayStore()) {
      if (const Value *V = getUnderlyingObjectForInstr(MI)) {
        // A store to a specific PseudoSourceValue. Add precise dependencies.
        // Handle the def in MemDefs, if there is one.
        std::map<const Value *, SUnit *>::iterator I = MemDefs.find(V);
        if (I != MemDefs.end()) {
          I->second->addPred(SDep(SU, SDep::Order, SU->Latency, /*Reg=*/0,
                                  /*isNormalMemory=*/true));
        }
        // Handle the uses in MemUses, if there are any.
        std::map<const Value *, std::vector<SUnit *> >::iterator J =
          MemUses.find(V);
        if (J != MemUses.end()) {
          for (unsigned i = 0, e = J->second.size(); i != e; ++i)
            J->second[i]->addPred(SDep(SU, SDep::Order, SU->Latency, /*Reg=*/0,
                                       /*isNormalMemory=*/true));
        }
        // Add dependencies from all the PendingLoads, since without
        // memoperands we must assume they alias anything.
        for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
          PendingLoads[k]->addPred(SDep(SU, SDep::Order, SU->Latency));
        // Add a general dependence too, if needed.
        if (Chain)
          Chain->addPred(SDep(SU, SDep::Order, SU->Latency));
      }
      // Treat all other stores conservatively.
    } else if (TID.mayLoad()) {
      if (TII->isInvariantLoad(MI)) {
        // Invariant load, no chain dependencies needed!
      } else if (const Value *V = getUnderlyingObjectForInstr(MI)) {
        // A load from a specific PseudoSourceValue. Add precise dependencies.
        std::map<const Value *, SUnit *>::iterator I = MemDefs.find(V);
        if (I != MemDefs.end())
          I->second->addPred(SDep(SU, SDep::Order, SU->Latency, /*Reg=*/0,
                                  /*isNormalMemory=*/true));
        MemUses[V].push_back(SU);

        // Add a general dependence too, if needed.
        if (Chain && (!ChainMMO ||
                      (ChainMMO->isStore() || ChainMMO->isVolatile())))
          Chain->addPred(SDep(SU, SDep::Order, SU->Latency));
      } else if (MI->hasVolatileMemoryRef()) {
        // Treat volatile loads conservatively. Note that this includes
        // cases where memoperand information is unavailable.
      } else {
        // A normal load. Depend on the general chain, as well as on
        // all stores. In the absence of MachineMemOperand information,
        // we can't even assume that the load doesn't alias well-behaved
        // memory locations.
        if (Chain)
          Chain->addPred(SDep(SU, SDep::Order, SU->Latency));
        for (std::map<const Value *, SUnit *>::iterator I = MemDefs.begin(),
             E = MemDefs.end(); I != E; ++I)
          I->second->addPred(SDep(SU, SDep::Order, SU->Latency));
        PendingLoads.push_back(SU);
      }
    }
  }

  for (int i = 0, e = TRI->getNumRegs(); i != e; ++i) {
    Defs[i].clear();
    Uses[i].clear();
  }
  PendingLoads.clear();
}
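
/// FinishBlock - Clean up after scheduling in the given block.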
void ScheduleDAGInstrs::FinishBlock() {
  // Nothing to do.
}
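
/// ComputeLatency - Estimate the latency of SU, using the target's
/// instruction itineraries when available and a simple load heuristic
/// otherwise.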
void ScheduleDAGInstrs::ComputeLatency(SUnit *SU) {
  const InstrItineraryData &InstrItins = TM.getInstrItineraryData();

  // Compute the latency for the node. We use the sum of the latencies for
  // all nodes flagged together into this SUnit.
  SU->Latency =
    InstrItins.getLatency(SU->getInstr()->getDesc().getSchedClass());

  // Simplistic target-independent heuristic: assume that loads take
  // extra time.
  if (InstrItins.isEmpty())
    if (SU->getInstr()->getDesc().mayLoad())
      SU->Latency += 2;
}

void ScheduleDAGInstrs::dumpNode(const SUnit *SU) const {
  SU->getInstr()->dump();
}

std::string ScheduleDAGInstrs::getGraphNodeLabel(const SUnit *SU) const {
  std::string s;
  raw_string_ostream oss(s);
  if (SU == &EntrySU)
    oss << "<entry>";
  else if (SU == &ExitSU)
    oss << "<exit>";
  else
    SU->getInstr()->print(oss);
  return oss.str();
}

// EmitSchedule - Emit the machine code in scheduled order.
MachineBasicBlock *ScheduleDAGInstrs::EmitSchedule() {
  // For MachineInstr-based scheduling, we're rescheduling the instructions in
  // the block, so start by removing them from the block.
  while (Begin != InsertPos) {
    MachineBasicBlock::iterator I = Begin;
    ++Begin;
    BB->remove(I);
  }

  // Then re-insert them according to the given schedule.
  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
    SUnit *SU = Sequence[i];
    if (!SU) {
      // Null SUnit* is a noop.
      continue;
    }

    BB->insert(InsertPos, SU->getInstr());
  }

  // Update the Begin iterator, as the first instruction in the block
  // may have been scheduled later.
  if (!Sequence.empty())
    Begin = Sequence[0]->getInstr();

  return BB;
}