//===---- ScheduleDAGInstrs.cpp - MachineInstr Rescheduling ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the ScheduleDAGInstrs class, which implements re-scheduling
// of MachineInstrs.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "sched-instrs"
#include "ScheduleDAGInstrs.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtarget.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/SmallSet.h"
using namespace llvm;

ScheduleDAGInstrs::ScheduleDAGInstrs(MachineFunction &mf,
                                     const MachineLoopInfo &mli,
                                     const MachineDominatorTree &mdt)
  : ScheduleDAG(mf), MLI(mli), MDT(mdt), LoopRegs(MLI, MDT) {}

/// Run - perform scheduling.
///
void ScheduleDAGInstrs::Run(MachineBasicBlock *bb,
                            MachineBasicBlock::iterator begin,
                            MachineBasicBlock::iterator end,
                            unsigned endcount) {
  BB = bb;
  Begin = begin;
  InsertPosIndex = endcount;

  ScheduleDAG::Run(bb, end);
}
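
// Illustrative only: a scheduling pass drives a concrete subclass of this
// class roughly as follows (hypothetical caller, simplified):
//
//   SchedulerImpl S(MF, MLI, MDT);  // some ScheduleDAGInstrs subclass
//   S.StartBlock(MBB);
//   S.Run(MBB, MBB->begin(), MBB->end(), MBB->size());  // build + schedule
//   S.EmitSchedule();               // rewrite the block in scheduled order
//   S.FinishBlock();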

/// getOpcode - If this is an Instruction or a ConstantExpr, return the
/// opcode value. Otherwise return UserOp1.
static unsigned getOpcode(const Value *V) {
  if (const Instruction *I = dyn_cast<Instruction>(V))
    return I->getOpcode();
  if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
    return CE->getOpcode();
  // Use UserOp1 to mean there's no opcode.
  return Instruction::UserOp1;
}
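
// For example, an add instruction and a constant-expression add both yield
// Instruction::Add here, while a plain Argument or GlobalVariable has no
// opcode of its own and comes back as Instruction::UserOp1.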

/// getUnderlyingObjectFromInt - This is the function that does the work of
/// looking through basic ptrtoint+arithmetic+inttoptr sequences.
static const Value *getUnderlyingObjectFromInt(const Value *V) {
  do {
    if (const User *U = dyn_cast<User>(V)) {
      // If we find a ptrtoint, we can transfer control back to the
      // regular getUnderlyingObject.
      if (getOpcode(U) == Instruction::PtrToInt)
        return U->getOperand(0);
      // If we find an add of a constant or a multiplied value, it's
      // likely that the other operand will lead us to the base
      // object. We don't have to worry about the case where the
      // object address is somehow being computed by the multiply,
      // because our callers only care when the result is an
      // identifiable object.
      if (getOpcode(U) != Instruction::Add ||
          (!isa<ConstantInt>(U->getOperand(1)) &&
           getOpcode(U->getOperand(1)) != Instruction::Mul))
        return V;
      V = U->getOperand(0);
    } else {
      return V;
    }
    assert(isa<IntegerType>(V->getType()) && "Unexpected operand type!");
  } while (1);
}
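
// For example, given a hypothetical IR sequence such as:
//
//   %i = ptrtoint i8* %obj to i64
//   %o = add i64 %i, 16
//   %p = inttoptr i64 %o to i8*
//
// starting from %o this walks the add back to %i, then returns the
// ptrtoint operand %obj so the pointer-based search can resume there.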

/// getUnderlyingObject - This is a wrapper around Value::getUnderlyingObject
/// and adds support for basic ptrtoint+arithmetic+inttoptr sequences.
static const Value *getUnderlyingObject(const Value *V) {
  // First just call Value::getUnderlyingObject to let it do what it does.
  do {
    V = V->getUnderlyingObject();
    // If it found an inttoptr, use special code to continue climbing.
    if (getOpcode(V) != Instruction::IntToPtr)
      break;
    const Value *O = getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
    // If that succeeded in finding a pointer, continue the search.
    if (!isa<PointerType>(O->getType()))
      break;
    V = O;
  } while (1);
  return V;
}
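
// The two routines alternate: Value::getUnderlyingObject strips pointer-level
// wrappers (bitcasts, GEPs) until it stalls on an inttoptr, and
// getUnderlyingObjectFromInt then peels the integer arithmetic back to a
// ptrtoint, handing a pointer back so the climb can continue.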

/// getUnderlyingObjectForInstr - If this machine instr has memory reference
/// information and it can be tracked to a normal reference to a known
/// object, return the Value for that object. Otherwise return null.
static const Value *getUnderlyingObjectForInstr(const MachineInstr *MI) {
  if (!MI->hasOneMemOperand() ||
      !MI->memoperands_begin()->getValue() ||
      MI->memoperands_begin()->isVolatile())
    return 0;

  const Value *V = MI->memoperands_begin()->getValue();
  if (!V)
    return 0;

  V = getUnderlyingObject(V);
  if (!isa<PseudoSourceValue>(V) && !isIdentifiedObject(V))
    return 0;

  return V;
}
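
// Illustrative cases: a reload from a spill slot carries a single memoperand
// whose value is the PseudoSourceValue for that stack slot, so it is
// returned; a load through a pointer that was itself loaded from memory has
// no identifiable underlying object, so null is returned and the instruction
// falls back to conservative chain dependencies in BuildSchedGraph.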

void ScheduleDAGInstrs::StartBlock(MachineBasicBlock *BB) {
  if (MachineLoop *ML = MLI.getLoopFor(BB))
    if (BB == ML->getLoopLatch()) {
      MachineBasicBlock *Header = ML->getHeader();
      for (MachineBasicBlock::livein_iterator I = Header->livein_begin(),
           E = Header->livein_end(); I != E; ++I)
        LoopLiveInRegs.insert(*I);
      LoopRegs.VisitLoop(ML);
    }
}
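
// Roughly speaking: when about to schedule a loop latch, this records which
// registers are live into the loop header. A def of such a register in the
// latch feeds a use on the next iteration, and the loop-carried heuristics
// in BuildSchedGraph consult LoopRegs to backschedule that def.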

void ScheduleDAGInstrs::BuildSchedGraph() {
  // We'll be allocating one SUnit for each instruction, plus one for
  // the region exit node.
  SUnits.reserve(BB->size());

  // We build scheduling units by walking a block's instruction list from
  // bottom to top.

  // Remember where a generic side-effecting instruction is as we proceed. If
  // ChainMMO is null, Chain is assumed to have arbitrary side-effects. If
  // ChainMMO is non-null, then Chain makes only a single memory reference.
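  // For example, after a call, Chain is set and ChainMMO is null, so every
  // later memory access gets a dependence on it; after a store or load with
  // a single known, non-volatile memory reference, ChainMMO records that
  // reference, so a later load need only depend on Chain if ChainMMO is a
  // store or is volatile.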
  SUnit *Chain = 0;
  MachineMemOperand *ChainMMO = 0;

  // Memory references to specific known memory locations are tracked so that
  // they can be given more precise dependencies.
  std::map<const Value *, SUnit *> MemDefs;
  std::map<const Value *, std::vector<SUnit *> > MemUses;

  // Check to see if the scheduler cares about latencies.
  bool UnitLatencies = ForceUnitLatencies();

  // Ask the target if address-backscheduling is desirable, and if so how much.
  unsigned SpecialAddressLatency =
    TM.getSubtarget<TargetSubtarget>().getSpecialAddressLatency();
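  // When this is nonzero, a def that feeds the address operand of a load or
  // store is given that much extra latency, encouraging the scheduler to
  // place it earlier. Illustrative only: if SpecialAddressLatency is 3 and
  // SU->Latency is 1, the data edge to an address use gets latency 4.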

  // Walk the list of instructions, from bottom moving up.
  for (MachineBasicBlock::iterator MII = InsertPos, MIE = Begin;
       MII != MIE; --MII) {
    MachineInstr *MI = prior(MII);
    const TargetInstrDesc &TID = MI->getDesc();
    assert(!TID.isTerminator() && !MI->isLabel() &&
           "Cannot schedule terminators or labels!");
    // Create the SUnit for this MI.
    SUnit *SU = NewSUnit(MI);

    // Assign the Latency field of SU using target-provided information.
    if (UnitLatencies)
      SU->Latency = 1;
    else
      ComputeLatency(SU);

    // Add register-based dependencies (data, anti, and output).
    for (unsigned j = 0, n = MI->getNumOperands(); j != n; ++j) {
      const MachineOperand &MO = MI->getOperand(j);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;

      assert(TRI->isPhysicalRegister(Reg) && "Virtual register encountered!");
      std::vector<SUnit *> &UseList = Uses[Reg];
      std::vector<SUnit *> &DefList = Defs[Reg];
      // Optionally add output and anti dependencies.
      // TODO: Using a latency of 1 here assumes there's no cost for
      // reusing registers.
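      // Illustrative example: in "r2 = add r1, 8" followed later by
      // "r1 = sub ...", the sub redefines r1, so walking bottom-up the add's
      // use of r1 becomes a predecessor of the sub via an anti edge; a
      // second def of r1 would instead get an output edge.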
      SDep::Kind Kind = MO.isUse() ? SDep::Anti : SDep::Output;
      for (unsigned i = 0, e = DefList.size(); i != e; ++i) {
        SUnit *DefSU = DefList[i];
        if (DefSU != SU &&
            (Kind != SDep::Output || !MO.isDead() ||
             !DefSU->getInstr()->registerDefIsDead(Reg)))
          DefSU->addPred(SDep(SU, Kind, /*Latency=*/1, /*Reg=*/Reg));
      }
      for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
        std::vector<SUnit *> &DefList = Defs[*Alias];
        for (unsigned i = 0, e = DefList.size(); i != e; ++i) {
          SUnit *DefSU = DefList[i];
          if (DefSU != SU &&
              (Kind != SDep::Output || !MO.isDead() ||
               !DefSU->getInstr()->registerDefIsDead(Reg)))
            DefSU->addPred(SDep(SU, Kind, /*Latency=*/1, /*Reg=*/ *Alias));
        }
      }

      if (MO.isDef()) {
        // Add any data dependencies.
        unsigned DataLatency = SU->Latency;
        for (unsigned i = 0, e = UseList.size(); i != e; ++i) {
          SUnit *UseSU = UseList[i];
          if (UseSU != SU) {
            unsigned LDataLatency = DataLatency;
            // Optionally add in a special extra latency for nodes that
            // feed addresses.
            // TODO: Do this for register aliases too.
            if (SpecialAddressLatency != 0 && !UnitLatencies) {
              MachineInstr *UseMI = UseSU->getInstr();
              const TargetInstrDesc &UseTID = UseMI->getDesc();
              int RegUseIndex = UseMI->findRegisterUseOperandIdx(Reg);
              assert(RegUseIndex >= 0 && "UseMI doesn't use register!");
              if ((UseTID.mayLoad() || UseTID.mayStore()) &&
                  (unsigned)RegUseIndex < UseTID.getNumOperands() &&
                  UseTID.OpInfo[RegUseIndex].isLookupPtrRegClass())
                LDataLatency += SpecialAddressLatency;
            }
            UseSU->addPred(SDep(SU, SDep::Data, LDataLatency, Reg));
          }
        }
        for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
          std::vector<SUnit *> &UseList = Uses[*Alias];
          for (unsigned i = 0, e = UseList.size(); i != e; ++i) {
            SUnit *UseSU = UseList[i];
            if (UseSU != SU)
              UseSU->addPred(SDep(SU, SDep::Data, DataLatency, *Alias));
          }
        }

        // If a def is going to wrap back around to the top of the loop,
        // backschedule it.
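        // Illustrative example: in a single-block loop, a def of a register
        // that is live into the loop header feeds a use on the next
        // iteration. If that use sits Count instructions from the loop top,
        // roughly Count cycles of the def's latency can be hidden by work
        // outside the region, so the artificial edge to ExitSU below uses
        // max(Latency - Count, 0).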
        if (!UnitLatencies && DefList.empty()) {
          LoopDependencies::LoopDeps::iterator I = LoopRegs.Deps.find(Reg);
          if (I != LoopRegs.Deps.end()) {
            const MachineOperand *UseMO = I->second.first;
            unsigned Count = I->second.second;
            const MachineInstr *UseMI = UseMO->getParent();
            unsigned UseMOIdx = UseMO - &UseMI->getOperand(0);
            const TargetInstrDesc &UseTID = UseMI->getDesc();
            // TODO: If we knew the total depth of the region here, we could
            // handle the case where the whole loop is inside the region but
            // is large enough that the isScheduleHigh trick isn't needed.
            if (UseMOIdx < UseTID.getNumOperands()) {
              // Currently, we only support scheduling regions consisting of
              // single basic blocks. Check to see if the instruction is in
              // the same region by checking to see if it has the same parent.
              if (UseMI->getParent() != MI->getParent()) {
                unsigned Latency = SU->Latency;
                if (UseTID.OpInfo[UseMOIdx].isLookupPtrRegClass())
                  Latency += SpecialAddressLatency;
                // This is a wild guess as to the portion of the latency which
                // will be overlapped by work done outside the current
                // scheduling region.
                Latency -= std::min(Latency, Count);
                // Add the artificial edge.
                ExitSU.addPred(SDep(SU, SDep::Order, Latency,
                                    /*Reg=*/0, /*isNormalMemory=*/false,
                                    /*isMustAlias=*/false,
                                    /*isArtificial=*/true));
              } else if (SpecialAddressLatency > 0 &&
                         UseTID.OpInfo[UseMOIdx].isLookupPtrRegClass()) {
                // The entire loop body is within the current scheduling region
                // and the latency of this operation is assumed to be greater
                // than the latency of the loop.
                // TODO: Recursively mark data-edge predecessors as
                // isScheduleHigh too.
                SU->isScheduleHigh = true;
              }
            }
            LoopRegs.Deps.erase(I);
          }
        }

        UseList.clear();
        if (!MO.isDead())
          DefList.clear();
        DefList.push_back(SU);
      } else {
        UseList.push_back(SU);
      }
    }

    // Add chain dependencies.
    // Note that isStoreToStackSlot and isLoadFromStackSlot are not usable
    // after stack slots are lowered to actual addresses.
    // TODO: Use an AliasAnalysis and do real alias-analysis queries, and
    // produce more precise dependence information.
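    // Illustrative classification (see the branches below): a call or an
    // instruction with unmodeled side effects starts a new conservative
    // chain; a store or load with a single identifiable underlying object
    // gets precise edges via MemDefs/MemUses; everything else falls back to
    // the conservative chain.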
    if (TID.isCall() || TID.hasUnmodeledSideEffects()) {
    new_chain:
      // This is the conservative case. Add dependencies on all memory
      // references.
      if (Chain)
        Chain->addPred(SDep(SU, SDep::Order, SU->Latency));
      Chain = SU;
      for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
        PendingLoads[k]->addPred(SDep(SU, SDep::Order, SU->Latency));
      PendingLoads.clear();
      for (std::map<const Value *, SUnit *>::iterator I = MemDefs.begin(),
           E = MemDefs.end(); I != E; ++I) {
        I->second->addPred(SDep(SU, SDep::Order, SU->Latency));
        I->second = SU;
      }
      for (std::map<const Value *, std::vector<SUnit *> >::iterator I =
           MemUses.begin(), E = MemUses.end(); I != E; ++I) {
        for (unsigned i = 0, e = I->second.size(); i != e; ++i)
          I->second[i]->addPred(SDep(SU, SDep::Order, SU->Latency));
        I->second.clear();
      }
      // See if it is known to just have a single memory reference.
      MachineInstr *ChainMI = Chain->getInstr();
      const TargetInstrDesc &ChainTID = ChainMI->getDesc();
      if (!ChainTID.isCall() &&
          !ChainTID.hasUnmodeledSideEffects() &&
          ChainMI->hasOneMemOperand() &&
          !ChainMI->memoperands_begin()->isVolatile() &&
          ChainMI->memoperands_begin()->getValue())
        // We know that the Chain accesses one specific memory location.
        ChainMMO = &*ChainMI->memoperands_begin();
      else
        // Unknown memory accesses. Assume the worst.
        ChainMMO = 0;
    } else if (TID.mayStore()) {
      if (const Value *V = getUnderlyingObjectForInstr(MI)) {
        // A store to a specific PseudoSourceValue. Add precise dependencies.
        // Handle the def in MemDefs, if there is one.
        std::map<const Value *, SUnit *>::iterator I = MemDefs.find(V);
        if (I != MemDefs.end()) {
          I->second->addPred(SDep(SU, SDep::Order, SU->Latency, /*Reg=*/0,
                                  /*isNormalMemory=*/true));
          I->second = SU;
        } else {
          MemDefs[V] = SU;
        }
        // Handle the uses in MemUses, if there are any.
        std::map<const Value *, std::vector<SUnit *> >::iterator J =
          MemUses.find(V);
        if (J != MemUses.end()) {
          for (unsigned i = 0, e = J->second.size(); i != e; ++i)
            J->second[i]->addPred(SDep(SU, SDep::Order, SU->Latency, /*Reg=*/0,
                                       /*isNormalMemory=*/true));
          J->second.clear();
        }
        // Add dependencies from all the PendingLoads, since without
        // memoperands we must assume they alias anything.
        for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
          PendingLoads[k]->addPred(SDep(SU, SDep::Order, SU->Latency));
        // Add a general dependence too, if needed.
        if (Chain)
          Chain->addPred(SDep(SU, SDep::Order, SU->Latency));
      } else
        // Treat all other stores conservatively.
        goto new_chain;
    } else if (TID.mayLoad()) {
      if (TII->isInvariantLoad(MI)) {
        // Invariant load, no chain dependencies needed!
      } else if (const Value *V = getUnderlyingObjectForInstr(MI)) {
        // A load from a specific PseudoSourceValue. Add precise dependencies.
        std::map<const Value *, SUnit *>::iterator I = MemDefs.find(V);
        if (I != MemDefs.end())
          I->second->addPred(SDep(SU, SDep::Order, SU->Latency, /*Reg=*/0,
                                  /*isNormalMemory=*/true));
        MemUses[V].push_back(SU);

        // Add a general dependence too, if needed.
        if (Chain && (!ChainMMO ||
                      (ChainMMO->isStore() || ChainMMO->isVolatile())))
          Chain->addPred(SDep(SU, SDep::Order, SU->Latency));
      } else if (MI->hasVolatileMemoryRef()) {
        // Treat volatile loads conservatively. Note that this includes
        // cases where memoperand information is unavailable.
        goto new_chain;
      } else {
        // A normal load. Depend on the general chain, as well as on
        // all stores. In the absence of MachineMemOperand information,
        // we can't even assume that the load doesn't alias well-behaved
        // memory locations.
        if (Chain)
          Chain->addPred(SDep(SU, SDep::Order, SU->Latency));
        for (std::map<const Value *, SUnit *>::iterator I = MemDefs.begin(),
             E = MemDefs.end(); I != E; ++I)
          I->second->addPred(SDep(SU, SDep::Order, SU->Latency));
        PendingLoads.push_back(SU);
      }
    }
  }

  for (int i = 0, e = TRI->getNumRegs(); i != e; ++i) {
    Defs[i].clear();
    Uses[i].clear();
  }
  PendingLoads.clear();
}

void ScheduleDAGInstrs::FinishBlock() {
  // Nothing to do.
}

void ScheduleDAGInstrs::ComputeLatency(SUnit *SU) {
  const InstrItineraryData &InstrItins = TM.getInstrItineraryData();

  // Compute the latency for the node. We use the sum of the latencies for
  // all nodes flagged together into this SUnit.
  SU->Latency =
    InstrItins.getLatency(SU->getInstr()->getDesc().getSchedClass());

  // Simplistic target-independent heuristic: assume that loads take
  // extra time.
  if (InstrItins.isEmpty())
    if (SU->getInstr()->getDesc().mayLoad())
      SU->Latency += 2;
}
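
// Illustrative only: if the target's itinerary maps this instruction's
// scheduling class to, say, 3 cycles, SU->Latency becomes 3; with no
// itinerary data at all, loads get a crude +2 bump as a stand-in for
// their extra memory-access time.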

void ScheduleDAGInstrs::dumpNode(const SUnit *SU) const {
  SU->getInstr()->dump();
}

std::string ScheduleDAGInstrs::getGraphNodeLabel(const SUnit *SU) const {
  std::string s;
  raw_string_ostream oss(s);
  if (SU == &EntrySU)
    oss << "<entry>";
  else if (SU == &ExitSU)
    oss << "<exit>";
  else
    SU->getInstr()->print(oss);
  return oss.str();
}

// EmitSchedule - Emit the machine code in scheduled order.
MachineBasicBlock *ScheduleDAGInstrs::EmitSchedule() {
  // For MachineInstr-based scheduling, we're rescheduling the instructions in
  // the block, so start by removing them from the block.
  while (Begin != InsertPos) {
    MachineBasicBlock::iterator I = Begin;
    ++Begin;
    BB->remove(I);
  }

  // Then re-insert them according to the given schedule.
  for (unsigned i = 0, e = Sequence.size(); i != e; ++i) {
    SUnit *SU = Sequence[i];
    if (!SU) {
      // Null SUnit* is a noop.
      EmitNoop();
      continue;
    }

    BB->insert(InsertPos, SU->getInstr());
  }

  // Update the Begin iterator, as the first instruction in the block
  // may have been scheduled later.
  if (!Sequence.empty())
    Begin = Sequence[0]->getInstr();

  return BB;
}
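
// Illustrative only: if the region originally held [A, B, C] and the
// scheduler produced the sequence [B, A, C], the loop above removes all
// three instructions and re-inserts them before InsertPos in the new order;
// Begin is then reset to B, the new first instruction of the region.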