//===---- ScheduleDAGInstrs.cpp - MachineInstr Rescheduling ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the ScheduleDAGInstrs class, which implements re-scheduling
// of MachineInstrs.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "sched-instrs"
#include "ScheduleDAGInstrs.h"
#include "llvm/Operator.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtarget.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/SmallSet.h"
using namespace llvm;

ScheduleDAGInstrs::ScheduleDAGInstrs(MachineFunction &mf,
                                     const MachineLoopInfo &mli,
                                     const MachineDominatorTree &mdt)
  : ScheduleDAG(mf), MLI(mli), MDT(mdt), LoopRegs(MLI, MDT) {}

/// Run - perform scheduling.
///
void ScheduleDAGInstrs::Run(MachineBasicBlock *bb,
                            MachineBasicBlock::iterator begin,
                            MachineBasicBlock::iterator end,
                            unsigned endcount) {
  BB = bb;
  Begin = begin;
  InsertPosIndex = endcount;

  ScheduleDAG::Run(bb, end);
}

/// getUnderlyingObjectFromInt - This is the function that does the work of
/// looking through basic ptrtoint+arithmetic+inttoptr sequences.
static const Value *getUnderlyingObjectFromInt(const Value *V) {
  do {
    if (const Operator *U = dyn_cast<Operator>(V)) {
      // If we find a ptrtoint, we can transfer control back to the
      // regular getUnderlyingObjectFromInt.
      if (U->getOpcode() == Instruction::PtrToInt)
        return U->getOperand(0);
      // If we find an add of a constant or a multiplied value, it's
      // likely that the other operand will lead us to the base
      // object. We don't have to worry about the case where the
      // object address is somehow being computed by the multiply,
      // because our callers only care when the result is an
      // identifiable object.
      if (U->getOpcode() != Instruction::Add ||
          (!isa<ConstantInt>(U->getOperand(1)) &&
           Operator::getOpcode(U->getOperand(1)) != Instruction::Mul))
        return V;
      V = U->getOperand(0);
    } else {
      return V;
    }
    assert(isa<IntegerType>(V->getType()) && "Unexpected operand type!");
  } while (1);
}

/// getUnderlyingObject - This is a wrapper around Value::getUnderlyingObject
/// and adds support for basic ptrtoint+arithmetic+inttoptr sequences.
static const Value *getUnderlyingObject(const Value *V) {
  // First just call Value::getUnderlyingObject to let it do what it does.
  do {
    V = V->getUnderlyingObject();
    // If it found an inttoptr, use special code to continue climbing.
    if (Operator::getOpcode(V) != Instruction::IntToPtr)
      break;
    const Value *O = getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
    // If that succeeded in finding a pointer, continue the search.
    if (!isa<PointerType>(O->getType()))
      break;
    V = O;
  } while (1);
  return V;
}

/// getUnderlyingObjectForInstr - If this machine instr has memory reference
/// information and it can be tracked to a normal reference to a known
/// object, return the Value for that object. Otherwise return null.
static const Value *getUnderlyingObjectForInstr(const MachineInstr *MI) {
  if (!MI->hasOneMemOperand() ||
      !MI->memoperands_begin()->getValue() ||
      MI->memoperands_begin()->isVolatile())
    return 0;

  const Value *V = MI->memoperands_begin()->getValue();
  if (!V)
    return 0;

  V = getUnderlyingObject(V);
  if (!isa<PseudoSourceValue>(V) && !isIdentifiedObject(V))
    return 0;

  return V;
}

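/// StartBlock - If the block is a loop latch, record the loop's live-in
/// registers and visit the loop so that loop-carried register dependencies
/// can be backscheduled when the graph is built.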
void ScheduleDAGInstrs::StartBlock(MachineBasicBlock *BB) {
  if (MachineLoop *ML = MLI.getLoopFor(BB))
    if (BB == ML->getLoopLatch()) {
      MachineBasicBlock *Header = ML->getHeader();
      for (MachineBasicBlock::livein_iterator I = Header->livein_begin(),
           E = Header->livein_end(); I != E; ++I)
        LoopLiveInRegs.insert(*I);
      LoopRegs.VisitLoop(ML);
    }
}

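/// BuildSchedGraph - Build SUnits and dependence edges for the current
/// scheduling region by walking its instruction list from bottom to top.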
void ScheduleDAGInstrs::BuildSchedGraph() {
  // We'll be allocating one SUnit for each instruction, plus one for
  // the region exit node.
  SUnits.reserve(BB->size());

  // We build scheduling units by walking a block's instruction list from bottom
  // to top.

  // Remember where a generic side-effecting instruction is as we proceed. If
  // ChainMMO is null, this is assumed to have arbitrary side-effects. If
  // ChainMMO is non-null, then Chain makes only a single memory reference.
  SUnit *Chain = 0;
  MachineMemOperand *ChainMMO = 0;

  // Memory references to specific known memory locations are tracked so that
  // they can be given more precise dependencies.
  std::map<const Value *, SUnit *> MemDefs;
  std::map<const Value *, std::vector<SUnit *> > MemUses;

  // Check to see if the scheduler cares about latencies.
  bool UnitLatencies = ForceUnitLatencies();

  // Ask the target if address-backscheduling is desirable, and if so how much.
  const TargetSubtarget &ST = TM.getSubtarget<TargetSubtarget>();
  unsigned SpecialAddressLatency = ST.getSpecialAddressLatency();

  // Walk the list of instructions, from bottom moving up.
  for (MachineBasicBlock::iterator MII = InsertPos, MIE = Begin;
       MII != MIE; --MII) {
    MachineInstr *MI = prior(MII);
    const TargetInstrDesc &TID = MI->getDesc();
    assert(!TID.isTerminator() && !MI->isLabel() &&
           "Cannot schedule terminators or labels!");
    // Create the SUnit for this MI.
    SUnit *SU = NewSUnit(MI);

    // Assign the Latency field of SU using target-provided information.
    if (UnitLatencies)
      SU->Latency = 1;
    else
      ComputeLatency(SU);

    // Add register-based dependencies (data, anti, and output).
    for (unsigned j = 0, n = MI->getNumOperands(); j != n; ++j) {
      const MachineOperand &MO = MI->getOperand(j);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;

      assert(TRI->isPhysicalRegister(Reg) && "Virtual register encountered!");
      std::vector<SUnit *> &UseList = Uses[Reg];
      std::vector<SUnit *> &DefList = Defs[Reg];
      // Optionally add output and anti dependencies. For anti
      // dependencies we use a latency of 0 because for a multi-issue
      // target we want to allow the defining instruction to issue
      // in the same cycle as the using instruction.
      // TODO: Using a latency of 1 here for output dependencies assumes
      //       there's no cost for reusing registers.
      SDep::Kind Kind = MO.isUse() ? SDep::Anti : SDep::Output;
      unsigned AOLatency = (Kind == SDep::Anti) ? 0 : 1;
      for (unsigned i = 0, e = DefList.size(); i != e; ++i) {
        SUnit *DefSU = DefList[i];
        if (DefSU != SU &&
            (Kind != SDep::Output || !MO.isDead() ||
             !DefSU->getInstr()->registerDefIsDead(Reg)))
          DefSU->addPred(SDep(SU, Kind, AOLatency, /*Reg=*/Reg));
      }
      // Do the same for each register aliased to Reg.
      for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
        std::vector<SUnit *> &DefList = Defs[*Alias];
        for (unsigned i = 0, e = DefList.size(); i != e; ++i) {
          SUnit *DefSU = DefList[i];
          if (DefSU != SU &&
              (Kind != SDep::Output || !MO.isDead() ||
               !DefSU->getInstr()->registerDefIsDead(Reg)))
            DefSU->addPred(SDep(SU, Kind, AOLatency, /*Reg=*/ *Alias));
        }
      }

      if (MO.isDef()) {
        // Add any data dependencies.
        unsigned DataLatency = SU->Latency;
        for (unsigned i = 0, e = UseList.size(); i != e; ++i) {
          SUnit *UseSU = UseList[i];
          if (UseSU != SU) {
            unsigned LDataLatency = DataLatency;
            // Optionally add in a special extra latency for nodes that
            // feed addresses.
            // TODO: Do this for register aliases too.
            // TODO: Perhaps we should get rid of
            //       SpecialAddressLatency and just move this into
            //       adjustSchedDependency for the targets that care about
            //       it.
            if (SpecialAddressLatency != 0 && !UnitLatencies) {
              MachineInstr *UseMI = UseSU->getInstr();
              const TargetInstrDesc &UseTID = UseMI->getDesc();
              int RegUseIndex = UseMI->findRegisterUseOperandIdx(Reg);
              assert(RegUseIndex >= 0 && "UseMI doesn't use register!");
              if ((UseTID.mayLoad() || UseTID.mayStore()) &&
                  (unsigned)RegUseIndex < UseTID.getNumOperands() &&
                  UseTID.OpInfo[RegUseIndex].isLookupPtrRegClass())
                LDataLatency += SpecialAddressLatency;
            }
            // Adjust the dependence latency using operand def/use
            // information (if any), and then allow the target to
            // perform its own adjustments.
            const SDep& dep = SDep(SU, SDep::Data, LDataLatency, Reg);
            if (!UnitLatencies) {
              ComputeOperandLatency(SU, UseSU, (SDep &)dep);
              ST.adjustSchedDependency(SU, UseSU, (SDep &)dep);
            }
            UseSU->addPred(dep);
          }
        }
        for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
          std::vector<SUnit *> &UseList = Uses[*Alias];
          for (unsigned i = 0, e = UseList.size(); i != e; ++i) {
            SUnit *UseSU = UseList[i];
            if (UseSU != SU) {
              const SDep& dep = SDep(SU, SDep::Data, DataLatency, *Alias);
              if (!UnitLatencies) {
                ComputeOperandLatency(SU, UseSU, (SDep &)dep);
                ST.adjustSchedDependency(SU, UseSU, (SDep &)dep);
              }
              UseSU->addPred(dep);
            }
          }
        }

        // If a def is going to wrap back around to the top of the loop,
        // backschedule it.
        if (!UnitLatencies && DefList.empty()) {
          LoopDependencies::LoopDeps::iterator I = LoopRegs.Deps.find(Reg);
          if (I != LoopRegs.Deps.end()) {
            const MachineOperand *UseMO = I->second.first;
            unsigned Count = I->second.second;
            const MachineInstr *UseMI = UseMO->getParent();
            unsigned UseMOIdx = UseMO - &UseMI->getOperand(0);
            const TargetInstrDesc &UseTID = UseMI->getDesc();
            // TODO: If we knew the total depth of the region here, we could
            // handle the case where the whole loop is inside the region but
            // is large enough that the isScheduleHigh trick isn't needed.
            if (UseMOIdx < UseTID.getNumOperands()) {
              // Currently, we only support scheduling regions consisting of
              // single basic blocks. Check to see if the instruction is in
              // the same region by checking to see if it has the same parent.
              if (UseMI->getParent() != MI->getParent()) {
                unsigned Latency = SU->Latency;
                if (UseTID.OpInfo[UseMOIdx].isLookupPtrRegClass())
                  Latency += SpecialAddressLatency;
                // This is a wild guess as to the portion of the latency which
                // will be overlapped by work done outside the current
                // scheduling region.
                Latency -= std::min(Latency, Count);
                // Add the artificial edge.
                ExitSU.addPred(SDep(SU, SDep::Order, Latency,
                                    /*Reg=*/0, /*isNormalMemory=*/false,
                                    /*isMustAlias=*/false,
                                    /*isArtificial=*/true));
              } else if (SpecialAddressLatency > 0 &&
                         UseTID.OpInfo[UseMOIdx].isLookupPtrRegClass()) {
                // The entire loop body is within the current scheduling region
                // and the latency of this operation is assumed to be greater
                // than the latency of the loop.
                // TODO: Recursively mark data-edge predecessors as
                //       isScheduleHigh too.
                SU->isScheduleHigh = true;
              }
            }
            LoopRegs.Deps.erase(I);
          }
        }

        UseList.clear();
        if (!MO.isDead())
          DefList.clear();
        DefList.push_back(SU);
      } else {
        UseList.push_back(SU);
      }
    }

    // Add chain dependencies.
    // Note that isStoreToStackSlot and isLoadFromStackSlot are not usable
    // after stack slots are lowered to actual addresses.
    // TODO: Use an AliasAnalysis and do real alias-analysis queries, and
    // produce more precise dependence information.
    if (TID.isCall() || TID.hasUnmodeledSideEffects()) {
    new_chain:
      // This is the conservative case. Add dependencies on all memory
      // references.
      if (Chain)
        Chain->addPred(SDep(SU, SDep::Order, SU->Latency));
      Chain = SU;
      for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
        PendingLoads[k]->addPred(SDep(SU, SDep::Order, SU->Latency));
      PendingLoads.clear();
      for (std::map<const Value *, SUnit *>::iterator I = MemDefs.begin(),
           E = MemDefs.end(); I != E; ++I) {
        I->second->addPred(SDep(SU, SDep::Order, SU->Latency));
        I->second = SU;
      }
      for (std::map<const Value *, std::vector<SUnit *> >::iterator I =
           MemUses.begin(), E = MemUses.end(); I != E; ++I) {
        for (unsigned i = 0, e = I->second.size(); i != e; ++i)
          I->second[i]->addPred(SDep(SU, SDep::Order, SU->Latency));
        I->second.clear();
      }
      // See if it is known to just have a single memory reference.
      MachineInstr *ChainMI = Chain->getInstr();
      const TargetInstrDesc &ChainTID = ChainMI->getDesc();
      if (!ChainTID.isCall() &&
          !ChainTID.hasUnmodeledSideEffects() &&
          ChainMI->hasOneMemOperand() &&
          !ChainMI->memoperands_begin()->isVolatile() &&
          ChainMI->memoperands_begin()->getValue())
        // We know that the Chain accesses one specific memory location.
        ChainMMO = &*ChainMI->memoperands_begin();
      else
        // Unknown memory accesses. Assume the worst.
        ChainMMO = 0;
    } else if (TID.mayStore()) {
      if (const Value *V = getUnderlyingObjectForInstr(MI)) {
        // A store to a specific PseudoSourceValue. Add precise dependencies.
        // Handle the def in MemDefs, if there is one.
        std::map<const Value *, SUnit *>::iterator I = MemDefs.find(V);
        if (I != MemDefs.end()) {
          I->second->addPred(SDep(SU, SDep::Order, SU->Latency, /*Reg=*/0,
                                  /*isNormalMemory=*/true));
          I->second = SU;
        } else {
          MemDefs[V] = SU;
        }
        // Handle the uses in MemUses, if there are any.
        std::map<const Value *, std::vector<SUnit *> >::iterator J =
          MemUses.find(V);
        if (J != MemUses.end()) {
          for (unsigned i = 0, e = J->second.size(); i != e; ++i)
            J->second[i]->addPred(SDep(SU, SDep::Order, SU->Latency, /*Reg=*/0,
                                       /*isNormalMemory=*/true));
          J->second.clear();
        }
        // Add dependencies from all the PendingLoads, since without
        // memoperands we must assume they alias anything.
        for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
          PendingLoads[k]->addPred(SDep(SU, SDep::Order, SU->Latency));
        // Add a general dependence too, if needed.
        if (Chain)
          Chain->addPred(SDep(SU, SDep::Order, SU->Latency));
      } else
        // Treat all other stores conservatively.
        goto new_chain;
    } else if (TID.mayLoad()) {
      if (TII->isInvariantLoad(MI)) {
        // Invariant load, no chain dependencies needed!
      } else if (const Value *V = getUnderlyingObjectForInstr(MI)) {
        // A load from a specific PseudoSourceValue. Add precise dependencies.
        std::map<const Value *, SUnit *>::iterator I = MemDefs.find(V);
        if (I != MemDefs.end())
          I->second->addPred(SDep(SU, SDep::Order, SU->Latency, /*Reg=*/0,
                                  /*isNormalMemory=*/true));
        MemUses[V].push_back(SU);

        // Add a general dependence too, if needed.
        if (Chain && (!ChainMMO ||
                      (ChainMMO->isStore() || ChainMMO->isVolatile())))
          Chain->addPred(SDep(SU, SDep::Order, SU->Latency));
      } else if (MI->hasVolatileMemoryRef()) {
        // Treat volatile loads conservatively. Note that this includes
        // cases where memoperand information is unavailable.
        goto new_chain;
      } else {
        // A normal load. Depend on the general chain, as well as on
        // all stores. In the absence of MachineMemOperand information,
        // we can't even assume that the load doesn't alias well-behaved
        // memory locations.
        if (Chain)
          Chain->addPred(SDep(SU, SDep::Order, SU->Latency));
        for (std::map<const Value *, SUnit *>::iterator I = MemDefs.begin(),
             E = MemDefs.end(); I != E; ++I)
          I->second->addPred(SDep(SU, SDep::Order, SU->Latency));
        PendingLoads.push_back(SU);
      }
    }
  }

  // Clear the per-register def/use lists and pending loads for the next
  // region.
  for (int i = 0, e = TRI->getNumRegs(); i != e; ++i) {
    Defs[i].clear();
    Uses[i].clear();
  }
  PendingLoads.clear();
}

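/// FinishBlock - Clean up after scheduling in the given block.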
void ScheduleDAGInstrs::FinishBlock() {
  // Nothing to do.
}

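/// ComputeLatency - Compute node latency from the scheduling itineraries,
/// falling back on a simple heuristic when no itinerary data is available.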
void ScheduleDAGInstrs::ComputeLatency(SUnit *SU) {
  const InstrItineraryData &InstrItins = TM.getInstrItineraryData();

  // Compute the latency for the node.
  SU->Latency =
    InstrItins.getStageLatency(SU->getInstr()->getDesc().getSchedClass());

  // Simplistic target-independent heuristic: assume that loads take
  // extra time.
  if (InstrItins.isEmpty())
    if (SU->getInstr()->getDesc().mayLoad())
      SU->Latency += 2;
}

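/// ComputeOperandLatency - Refine the latency of a data dependence using
/// per-operand def/use cycle information from the itineraries, if any.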
void ScheduleDAGInstrs::ComputeOperandLatency(SUnit *Def, SUnit *Use,
                                              SDep& dep) const {
  const InstrItineraryData &InstrItins = TM.getInstrItineraryData();
  if (InstrItins.isEmpty())
    return;

  // For a data dependency with a known register...
  if ((dep.getKind() != SDep::Data) || (dep.getReg() == 0))
    return;

  const unsigned Reg = dep.getReg();

  // ... find the definition of the register in the defining
  // instruction.
  MachineInstr *DefMI = Def->getInstr();
  int DefIdx = DefMI->findRegisterDefOperandIdx(Reg);
  if (DefIdx != -1) {
    int DefCycle =
      InstrItins.getOperandCycle(DefMI->getDesc().getSchedClass(), DefIdx);
    if (DefCycle >= 0) {
      MachineInstr *UseMI = Use->getInstr();
      const unsigned UseClass = UseMI->getDesc().getSchedClass();

      // For all uses of the register, calculate the maximum latency.
      int Latency = -1;
      for (unsigned i = 0, e = UseMI->getNumOperands(); i != e; ++i) {
        const MachineOperand &MO = UseMI->getOperand(i);
        if (!MO.isReg() || !MO.isUse())
          continue;
        unsigned MOReg = MO.getReg();
        if (MOReg != Reg)
          continue;

        int UseCycle = InstrItins.getOperandCycle(UseClass, i);
        if (UseCycle >= 0)
          Latency = std::max(Latency, DefCycle - UseCycle + 1);
      }

      // If we found a latency, then replace the existing dependence latency.
      if (Latency >= 0)
        dep.setLatency(Latency);
    }
  }
}

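/// dumpNode - Dump the machine instruction associated with this SUnit.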
void ScheduleDAGInstrs::dumpNode(const SUnit *SU) const {
  SU->getInstr()->dump();
}

std::string ScheduleDAGInstrs::getGraphNodeLabel(const SUnit *SU) const {
  std::string s;
  raw_string_ostream oss(s);
  if (SU == &EntrySU)
    oss << "<entry>";
  else if (SU == &ExitSU)
    oss << "<exit>";
  else
    SU->getInstr()->print(oss);
  return oss.str();
}

// EmitSchedule - Emit the machine code in scheduled order.
MachineBasicBlock *ScheduleDAGInstrs::EmitSchedule() {
  // For MachineInstr-based scheduling, we're rescheduling the instructions in
  // the block, so start by removing them from the block.
  while (Begin != InsertPos) {
    MachineBasicBlock::iterator I = Begin;
    ++Begin;
    BB->remove(I);
  }

  // Then re-insert them according to the given schedule.
  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
    SUnit *SU = Sequence[i];
    if (!SU) {
      // Null SUnit* is a noop.
      EmitNoop();
      continue;
    }

    BB->insert(InsertPos, SU->getInstr());
  }

  // Update the Begin iterator, as the first instruction in the block
  // may have been scheduled later.
  if (!Sequence.empty())
    Begin = Sequence[0]->getInstr();

  return BB;
}