//===---- ScheduleDAGInstrs.cpp - MachineInstr Rescheduling ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file This implements the ScheduleDAGInstrs class, which implements
/// re-scheduling of MachineInstrs.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/ADT/IntEqClasses.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SparseSet.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/RegisterPressure.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/ScheduleDFS.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "machine-scheduler"
static cl::opt<bool> EnableAASchedMI("enable-aa-sched-mi", cl::Hidden,
    cl::ZeroOrMore, cl::init(false),
    cl::desc("Enable use of AA during MI DAG construction"));

static cl::opt<bool> UseTBAA("use-tbaa-in-sched-mi", cl::Hidden,
    cl::init(true), cl::desc("Enable use of TBAA during MI DAG construction"));
// Note: the two options below might be used in tuning compile time vs
// output quality. Setting HugeRegion so large that it will never be
// reached means best-effort, but may be slow.

// When Stores and Loads maps (or NonAliasStores and NonAliasLoads)
// together hold this many SUs, a reduction of maps will be done.
static cl::opt<unsigned> HugeRegion("dag-maps-huge-region", cl::Hidden,
    cl::init(1000), cl::desc("The limit to use while constructing the DAG "
                             "prior to scheduling, at which point a trade-off "
                             "is made to avoid excessive compile time."));

static cl::opt<unsigned> ReductionSize(
    "dag-maps-reduction-size", cl::Hidden,
    cl::desc("A huge scheduling region will have maps reduced by this many "
             "nodes at a time. Defaults to HugeRegion / 2."));
static unsigned getReductionSize() {
  // Always reduce a huge region with half of the elements, except
  // when user sets this number explicitly.
  if (ReductionSize.getNumOccurrences() == 0)
    return HugeRegion / 2;
  return ReductionSize;
}
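
// For example, with the default -dag-maps-huge-region=1000 and no explicit
// -dag-maps-reduction-size, every reduceHugeMemNodeMaps() call triggered while
// building the graph removes getReductionSize() == 500 nodes from the maps.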
static void dumpSUList(ScheduleDAGInstrs::SUList &L) {
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  dbgs() << "{ ";
  for (const SUnit *su : L) {
    dbgs() << "SU(" << su->NodeNum << ")";
    if (su != L.back())
      dbgs() << ", ";
  }
  dbgs() << "}\n";
#endif
}
ScheduleDAGInstrs::ScheduleDAGInstrs(MachineFunction &mf,
                                     const MachineLoopInfo *mli,
                                     bool RemoveKillFlags)
    : ScheduleDAG(mf), MLI(mli), MFI(mf.getFrameInfo()),
      RemoveKillFlags(RemoveKillFlags),
      UnknownValue(UndefValue::get(
          Type::getVoidTy(mf.getFunction().getContext()))),
      Topo(SUnits, &ExitSU) {
  const TargetSubtargetInfo &ST = mf.getSubtarget();
  SchedModel.init(&ST);
}
/// If this machine instr has memory reference information and it can be
/// tracked to a normal reference to a known object, return the Value
/// for that object. This function returns false if the memory location is
/// unknown or may alias anything.
static bool getUnderlyingObjectsForInstr(const MachineInstr *MI,
                                         const MachineFrameInfo &MFI,
                                         UnderlyingObjectsVector &Objects,
                                         const DataLayout &DL) {
  auto allMMOsOkay = [&]() {
    for (const MachineMemOperand *MMO : MI->memoperands()) {
      // TODO: Figure out whether isAtomic is really necessary (see D57601).
      if (MMO->isVolatile() || MMO->isAtomic())
        return false;

      if (const PseudoSourceValue *PSV = MMO->getPseudoValue()) {
        // Functions that contain tail calls don't have unique PseudoSourceValue
        // objects. Two PseudoSourceValues might refer to the same or
        // overlapping locations. The client code calling this function assumes
        // this is not the case. So return a conservative answer of no known
        // location.
        if (MFI.hasTailCall())
          return false;

        // For now, ignore PseudoSourceValues which may alias LLVM IR values
        // because the code that uses this function has no way to cope with
        // such aliases.
        if (PSV->isAliased(&MFI))
          return false;

        bool MayAlias = PSV->mayAlias(&MFI);
        Objects.push_back(UnderlyingObjectsVector::value_type(PSV, MayAlias));
      } else if (const Value *V = MMO->getValue()) {
        SmallVector<Value *, 4> Objs;
        if (!getUnderlyingObjectsForCodeGen(V, Objs, DL))
          return false;

        for (Value *V : Objs) {
          assert(isIdentifiedObject(V));
          Objects.push_back(UnderlyingObjectsVector::value_type(V, true));
        }
      } else
        return false;
    }
    return true;
  };

  if (!allMMOsOkay()) {
    Objects.clear();
    return false;
  }

  return true;
}
void ScheduleDAGInstrs::startBlock(MachineBasicBlock *bb) {
  BB = bb;
}
void ScheduleDAGInstrs::finishBlock() {
  // Subclasses should no longer refer to the old block.
  BB = nullptr;
}
void ScheduleDAGInstrs::enterRegion(MachineBasicBlock *bb,
                                    MachineBasicBlock::iterator begin,
                                    MachineBasicBlock::iterator end,
                                    unsigned regioninstrs) {
  assert(bb == BB && "startBlock should set BB");
  RegionBegin = begin;
  RegionEnd = end;
  NumRegionInstrs = regioninstrs;
}
void ScheduleDAGInstrs::exitRegion() {
  // Nothing to do.
}
void ScheduleDAGInstrs::addSchedBarrierDeps() {
  MachineInstr *ExitMI = RegionEnd != BB->end() ? &*RegionEnd : nullptr;
  ExitSU.setInstr(ExitMI);
  // Add dependencies on the defs and uses of the instruction.
  if (ExitMI) {
    for (const MachineOperand &MO : ExitMI->operands()) {
      if (!MO.isReg() || MO.isDef()) continue;
      Register Reg = MO.getReg();
      if (Register::isPhysicalRegister(Reg)) {
        Uses.insert(PhysRegSUOper(&ExitSU, -1, Reg));
      } else if (Register::isVirtualRegister(Reg) && MO.readsReg()) {
        addVRegUseDeps(&ExitSU, ExitMI->getOperandNo(&MO));
      }
    }
  }
  if (!ExitMI || (!ExitMI->isCall() && !ExitMI->isBarrier())) {
    // For others, e.g. fallthrough, conditional branch, assume the exit
    // uses all the registers that are livein to the successor blocks.
    for (const MachineBasicBlock *Succ : BB->successors()) {
      for (const auto &LI : Succ->liveins()) {
        if (!Uses.contains(LI.PhysReg))
          Uses.insert(PhysRegSUOper(&ExitSU, -1, LI.PhysReg));
      }
    }
  }
}
/// MO is an operand of SU's instruction that defines a physical register. Adds
/// data dependencies from SU to any uses of the physical register.
void ScheduleDAGInstrs::addPhysRegDataDeps(SUnit *SU, unsigned OperIdx) {
  const MachineOperand &MO = SU->getInstr()->getOperand(OperIdx);
  assert(MO.isDef() && "expect physreg def");

  // Ask the target if address-backscheduling is desirable, and if so how much.
  const TargetSubtargetInfo &ST = MF.getSubtarget();

  // Only use any non-zero latency for real defs/uses, in contrast to
  // "fake" operands added by regalloc.
  const MCInstrDesc *DefMIDesc = &SU->getInstr()->getDesc();
  bool ImplicitPseudoDef = (OperIdx >= DefMIDesc->getNumOperands() &&
                            !DefMIDesc->hasImplicitDefOfPhysReg(MO.getReg()));
  for (MCRegAliasIterator Alias(MO.getReg(), TRI, true);
       Alias.isValid(); ++Alias) {
    if (!Uses.contains(*Alias))
      continue;
    for (Reg2SUnitsMap::iterator I = Uses.find(*Alias); I != Uses.end(); ++I) {
      SUnit *UseSU = I->SU;
      if (UseSU == SU)
        continue;

      // Adjust the dependence latency using operand def/use information,
      // then allow the target to perform its own adjustments.
      int UseOp = I->OpIdx;
      MachineInstr *RegUse = nullptr;
      SDep Dep;
      if (UseOp < 0)
        Dep = SDep(SU, SDep::Artificial);
      else {
        // Set the hasPhysRegDefs only for physreg defs that have a use within
        // the scheduling region.
        SU->hasPhysRegDefs = true;
        Dep = SDep(SU, SDep::Data, *Alias);
        RegUse = UseSU->getInstr();
      }
      const MCInstrDesc *UseMIDesc =
          (RegUse ? &UseSU->getInstr()->getDesc() : nullptr);
      bool ImplicitPseudoUse =
          (UseMIDesc && UseOp >= ((int)UseMIDesc->getNumOperands()) &&
           !UseMIDesc->hasImplicitUseOfPhysReg(*Alias));
      if (!ImplicitPseudoDef && !ImplicitPseudoUse) {
        Dep.setLatency(SchedModel.computeOperandLatency(SU->getInstr(), OperIdx,
                                                        RegUse, UseOp));
        ST.adjustSchedDependency(SU, UseSU, Dep);
      } else {
        Dep.setLatency(0);
      }
      UseSU->addPred(Dep);
    }
  }
}
/// Adds register dependencies (data, anti, and output) from this SUnit
/// to following instructions in the same scheduling region that depend on the
/// physical register referenced at OperIdx.
void ScheduleDAGInstrs::addPhysRegDeps(SUnit *SU, unsigned OperIdx) {
  MachineInstr *MI = SU->getInstr();
  MachineOperand &MO = MI->getOperand(OperIdx);
  Register Reg = MO.getReg();
  // We do not need to track any dependencies for constant registers.
  if (MRI.isConstantPhysReg(Reg))
    return;

  // Optionally add output and anti dependencies. For anti
  // dependencies we use a latency of 0 because for a multi-issue
  // target we want to allow the defining instruction to issue
  // in the same cycle as the using instruction.
  // TODO: Using a latency of 1 here for output dependencies assumes
  //       there's no cost for reusing registers.
  SDep::Kind Kind = MO.isUse() ? SDep::Anti : SDep::Output;
  for (MCRegAliasIterator Alias(Reg, TRI, true); Alias.isValid(); ++Alias) {
    if (!Defs.contains(*Alias))
      continue;
    for (Reg2SUnitsMap::iterator I = Defs.find(*Alias); I != Defs.end(); ++I) {
      SUnit *DefSU = I->SU;
      if (DefSU == &ExitSU)
        continue;
      if (DefSU != SU &&
          (Kind != SDep::Output || !MO.isDead() ||
           !DefSU->getInstr()->registerDefIsDead(*Alias))) {
        if (Kind == SDep::Anti)
          DefSU->addPred(SDep(SU, Kind, /*Reg=*/*Alias));
        else {
          SDep Dep(SU, Kind, /*Reg=*/*Alias);
          Dep.setLatency(
              SchedModel.computeOutputLatency(MI, OperIdx, DefSU->getInstr()));
          DefSU->addPred(Dep);
        }
      }
    }
  }

  if (!MO.isDef()) {
    SU->hasPhysRegUses = true;
    // Either insert a new Reg2SUnits entry with an empty SUnits list, or
    // retrieve the existing SUnits list for this register's uses.
    // Push this SUnit on the use list.
    Uses.insert(PhysRegSUOper(SU, OperIdx, Reg));
  } else {
    addPhysRegDataDeps(SU, OperIdx);

    // Clear previous uses and defs of this register and its subregisters.
    for (MCSubRegIterator SubReg(Reg, TRI, true); SubReg.isValid(); ++SubReg) {
      if (Uses.contains(*SubReg))
        Uses.eraseAll(*SubReg);
      if (!MO.isDead())
        Defs.eraseAll(*SubReg);
    }

    if (MO.isDead() && SU->isCall) {
      // Calls will not be reordered because of chain dependencies (see
      // below). Since call operands are dead, calls may continue to be added
      // to the DefList making dependence checking quadratic in the size of
      // the block. Instead, we leave only one call at the back of the
      // DefList.
      Reg2SUnitsMap::RangePair P = Defs.equal_range(Reg);
      Reg2SUnitsMap::iterator B = P.first;
      Reg2SUnitsMap::iterator I = P.second;
      for (bool isBegin = I == B; !isBegin; /* empty */) {
        isBegin = (--I) == B;
        if (!I->SU->isCall)
          break;
        I = Defs.erase(I);
      }
    }

    // Defs are pushed in the order they are visited and never reordered.
    Defs.insert(PhysRegSUOper(SU, OperIdx, Reg));
  }
}
LaneBitmask ScheduleDAGInstrs::getLaneMaskForMO(const MachineOperand &MO) const
{
  Register Reg = MO.getReg();
  // No point in tracking lanemasks if we don't have interesting subregisters.
  const TargetRegisterClass &RC = *MRI.getRegClass(Reg);
  if (!RC.HasDisjunctSubRegs)
    return LaneBitmask::getAll();

  unsigned SubReg = MO.getSubReg();
  if (SubReg == 0)
    return RC.getLaneMask();
  return TRI->getSubRegIndexLaneMask(SubReg);
}
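
// Illustrative sketch (hypothetical sub-register indices, not names from this
// file): on a register class with two disjoint 32-bit halves, the masks
// returned above do not overlap, which is what lets lane tracking prove that
// operands touching different halves are independent:
//   LaneBitmask LoMask = TRI->getSubRegIndexLaneMask(SubLoIdx);
//   LaneBitmask HiMask = TRI->getSubRegIndexLaneMask(SubHiIdx);
//   assert((LoMask & HiMask).none() && "disjoint halves share no lanes");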
/// Adds register output and data dependencies from this SUnit to instructions
/// that occur later in the same scheduling region if they read from or write to
/// the virtual register defined at OperIdx.
///
/// TODO: Hoist loop induction variable increments. This has to be
/// reevaluated. Generally, IV scheduling should be done before coalescing.
void ScheduleDAGInstrs::addVRegDefDeps(SUnit *SU, unsigned OperIdx) {
  MachineInstr *MI = SU->getInstr();
  MachineOperand &MO = MI->getOperand(OperIdx);
  Register Reg = MO.getReg();

  LaneBitmask DefLaneMask;
  LaneBitmask KillLaneMask;
  if (TrackLaneMasks) {
    bool IsKill = MO.getSubReg() == 0 || MO.isUndef();
    DefLaneMask = getLaneMaskForMO(MO);
    // If we have a <read-undef> flag, none of the lane values comes from an
    // earlier instruction.
    KillLaneMask = IsKill ? LaneBitmask::getAll() : DefLaneMask;

    // Clear undef flag, we'll re-add it later once we know which subregister
    // Def first.
    MO.setIsUndef(false);
  } else {
    DefLaneMask = LaneBitmask::getAll();
    KillLaneMask = LaneBitmask::getAll();
  }

  if (MO.isDead()) {
    assert(CurrentVRegUses.find(Reg) == CurrentVRegUses.end() &&
           "Dead defs should have no uses");
  } else {
    // Add data dependence to all uses we found so far.
    const TargetSubtargetInfo &ST = MF.getSubtarget();
    for (VReg2SUnitOperIdxMultiMap::iterator I = CurrentVRegUses.find(Reg),
         E = CurrentVRegUses.end(); I != E; /*empty*/) {
      LaneBitmask LaneMask = I->LaneMask;
      // Ignore uses of other lanes.
      if ((LaneMask & KillLaneMask).none()) {
        ++I;
        continue;
      }

      if ((LaneMask & DefLaneMask).any()) {
        SUnit *UseSU = I->SU;
        MachineInstr *Use = UseSU->getInstr();
        SDep Dep(SU, SDep::Data, Reg);
        Dep.setLatency(SchedModel.computeOperandLatency(MI, OperIdx, Use,
                                                        I->OperandIndex));
        ST.adjustSchedDependency(SU, UseSU, Dep);
        UseSU->addPred(Dep);
      }

      LaneMask &= ~KillLaneMask;
      // If we found a Def for all lanes of this use, remove it from the list.
      if (LaneMask.any()) {
        I->LaneMask = LaneMask;
        ++I;
      } else
        I = CurrentVRegUses.erase(I);
    }
  }

  // Shortcut: Singly defined vregs do not have output/anti dependencies.
  if (MRI.hasOneDef(Reg))
    return;

  // Add output dependence to the next nearest defs of this vreg.
  //
  // Unless this definition is dead, the output dependence should be
  // transitively redundant with antidependencies from this definition's
  // uses. We're conservative for now until we have a way to guarantee the uses
  // are not eliminated sometime during scheduling. The output dependence edge
  // is also useful if output latency exceeds def-use latency.
  LaneBitmask LaneMask = DefLaneMask;
  for (VReg2SUnit &V2SU : make_range(CurrentVRegDefs.find(Reg),
                                     CurrentVRegDefs.end())) {
    // Ignore defs for other lanes.
    if ((V2SU.LaneMask & LaneMask).none())
      continue;
    // Add an output dependence.
    SUnit *DefSU = V2SU.SU;
    // Ignore additional defs of the same lanes in one instruction. This can
    // happen because lanemasks are shared for targets with too many
    // subregisters. We also use some representation tricks/hacks where we
    // add super-register defs/uses, to imply that although we only access parts
    // of the reg we care about the full one.
    if (DefSU == SU)
      continue;
    SDep Dep(SU, SDep::Output, Reg);
    Dep.setLatency(
        SchedModel.computeOutputLatency(MI, OperIdx, DefSU->getInstr()));
    DefSU->addPred(Dep);

    // Update current definition. This can get tricky if the def was about a
    // bigger lanemask before. We then have to shrink it and create a new
    // VReg2SUnit for the non-overlapping part.
    LaneBitmask OverlapMask = V2SU.LaneMask & LaneMask;
    LaneBitmask NonOverlapMask = V2SU.LaneMask & ~LaneMask;
    V2SU.SU = SU;
    V2SU.LaneMask = OverlapMask;
    if (NonOverlapMask.any())
      CurrentVRegDefs.insert(VReg2SUnit(Reg, NonOverlapMask, DefSU));
  }
  // If there was no CurrentVRegDefs entry for some lanes yet, create one.
  if (LaneMask.any())
    CurrentVRegDefs.insert(VReg2SUnit(Reg, LaneMask, SU));
}
/// Adds a register data dependency if the instruction that defines the
/// virtual register used at OperIdx is mapped to an SUnit. Add a register
/// antidependency from this SUnit to instructions that occur later in the same
/// scheduling region if they write the virtual register.
///
/// TODO: Handle ExitSU "uses" properly.
void ScheduleDAGInstrs::addVRegUseDeps(SUnit *SU, unsigned OperIdx) {
  const MachineInstr *MI = SU->getInstr();
  const MachineOperand &MO = MI->getOperand(OperIdx);
  Register Reg = MO.getReg();

  // Remember the use. Data dependencies will be added when we find the def.
  LaneBitmask LaneMask = TrackLaneMasks ? getLaneMaskForMO(MO)
                                        : LaneBitmask::getAll();
  CurrentVRegUses.insert(VReg2SUnitOperIdx(Reg, LaneMask, OperIdx, SU));

  // Add antidependences to the following defs of the vreg.
  for (VReg2SUnit &V2SU : make_range(CurrentVRegDefs.find(Reg),
                                     CurrentVRegDefs.end())) {
    // Ignore defs for unrelated lanes.
    LaneBitmask PrevDefLaneMask = V2SU.LaneMask;
    if ((PrevDefLaneMask & LaneMask).none())
      continue;
    if (V2SU.SU == SU)
      continue;

    V2SU.SU->addPred(SDep(SU, SDep::Anti, Reg));
  }
}
/// Returns true if MI is an instruction we are unable to reason about
/// (like a call or something with unmodeled side effects).
static inline bool isGlobalMemoryObject(AliasAnalysis *AA, MachineInstr *MI) {
  return MI->isCall() || MI->hasUnmodeledSideEffects() ||
         (MI->hasOrderedMemoryRef() && !MI->isDereferenceableInvariantLoad(AA));
}
void ScheduleDAGInstrs::addChainDependency (SUnit *SUa, SUnit *SUb,
                                            unsigned Latency) {
  if (SUa->getInstr()->mayAlias(AAForDep, *SUb->getInstr(), UseTBAA)) {
    SDep Dep(SUa, SDep::MayAliasMem);
    Dep.setLatency(Latency);
    SUb->addPred(Dep);
  }
}
/// Creates an SUnit for each real instruction, numbered in top-down
/// topological order. The instruction order A < B implies that no edge exists
/// from B to A.
///
/// Map each real instruction to its SUnit.
///
/// After initSUnits, the SUnits vector cannot be resized and the scheduler may
/// hang onto SUnit pointers. We may relax this in the future by using SUnit IDs
/// instead of pointers.
///
/// MachineScheduler relies on initSUnits numbering the nodes by their order in
/// the original instruction list.
void ScheduleDAGInstrs::initSUnits() {
  // We'll be allocating one SUnit for each real instruction in the region,
  // which is contained within a basic block.
  SUnits.reserve(NumRegionInstrs);

  for (MachineInstr &MI : make_range(RegionBegin, RegionEnd)) {
    if (MI.isDebugInstr())
      continue;

    SUnit *SU = newSUnit(&MI);
    MISUnitMap[&MI] = SU;

    SU->isCall = MI.isCall();
    SU->isCommutable = MI.isCommutable();

    // Assign the Latency field of SU using target-provided information.
    SU->Latency = SchedModel.computeInstrLatency(SU->getInstr());

    // If this SUnit uses a reserved or unbuffered resource, mark it as such.
    //
    // Reserved resources block an instruction from issuing and stall the
    // entire pipeline. These are identified by BufferSize=0.
    //
    // Unbuffered resources prevent execution of subsequent instructions that
    // require the same resources. This is used for in-order execution pipelines
    // within an out-of-order core. These are identified by BufferSize=1.
    if (SchedModel.hasInstrSchedModel()) {
      const MCSchedClassDesc *SC = getSchedClass(SU);
      for (const MCWriteProcResEntry &PRE :
           make_range(SchedModel.getWriteProcResBegin(SC),
                      SchedModel.getWriteProcResEnd(SC))) {
        switch (SchedModel.getProcResource(PRE.ProcResourceIdx)->BufferSize) {
        case 0:
          SU->hasReservedResource = true;
          break;
        case 1:
          SU->isUnbuffered = true;
          break;
        default:
          break;
        }
      }
    }
  }
}
class ScheduleDAGInstrs::Value2SUsMap : public MapVector<ValueType, SUList> {
  /// Current total number of SUs in map.
  unsigned NumNodes = 0;

  /// 1 for loads, 0 for stores. (see comment in SUList)
  unsigned TrueMemOrderLatency;

public:
  Value2SUsMap(unsigned lat = 0) : TrueMemOrderLatency(lat) {}

  /// To keep NumNodes up to date, insert() is used instead of
  /// this operator w/ push_back().
  ValueType &operator[](const SUList &Key) {
    llvm_unreachable("Don't use. Use insert() instead."); };

  /// Adds SU to the SUList of V. If Map grows huge, reduce its size by calling
  /// reduce().
  void inline insert(SUnit *SU, ValueType V) {
    MapVector::operator[](V).push_back(SU);
    NumNodes++;
  }

  /// Clears the list of SUs mapped to V.
  void inline clearList(ValueType V) {
    iterator Itr = find(V);
    if (Itr != end()) {
      assert(NumNodes >= Itr->second.size());
      NumNodes -= Itr->second.size();

      Itr->second.clear();
    }
  }

  /// Clears map from all contents.
  void clear() {
    MapVector<ValueType, SUList>::clear();
    NumNodes = 0;
  }

  unsigned inline size() const { return NumNodes; }

  /// Counts the number of SUs in this map after a reduction.
  void reComputeSize() {
    NumNodes = 0;
    for (auto &I : *this)
      NumNodes += I.second.size();
  }

  unsigned inline getTrueMemOrderLatency() const {
    return TrueMemOrderLatency;
  }

  void dump();
};
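
// Illustrative usage sketch (mirrors how buildSchedGraph uses this map; the
// names V and SU below are placeholders): each underlying Value keeps its own
// list of memory SUs, and NumNodes stays in sync through insert()/clearList():
//   Value2SUsMap Loads(1 /*TrueMemOrderLatency*/);
//   Loads.insert(SU, V);              // appends SU to the SUList for V
//   unsigned InFlight = Loads.size(); // total SUs across all lists
//   Loads.clearList(V);               // drops the whole list for V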
void ScheduleDAGInstrs::addChainDependencies(SUnit *SU,
                                             Value2SUsMap &Val2SUsMap) {
  for (auto &I : Val2SUsMap)
    addChainDependencies(SU, I.second,
                         Val2SUsMap.getTrueMemOrderLatency());
}

void ScheduleDAGInstrs::addChainDependencies(SUnit *SU,
                                             Value2SUsMap &Val2SUsMap,
                                             ValueType V) {
  Value2SUsMap::iterator Itr = Val2SUsMap.find(V);
  if (Itr != Val2SUsMap.end())
    addChainDependencies(SU, Itr->second,
                         Val2SUsMap.getTrueMemOrderLatency());
}
void ScheduleDAGInstrs::addBarrierChain(Value2SUsMap &map) {
  assert(BarrierChain != nullptr);

  for (auto &I : map) {
    SUList &sus = I.second;
    for (auto *SU : sus)
      SU->addPredBarrier(BarrierChain);
  }
  map.clear();
}
void ScheduleDAGInstrs::insertBarrierChain(Value2SUsMap &map) {
  assert(BarrierChain != nullptr);

  // Go through all lists of SUs.
  for (Value2SUsMap::iterator I = map.begin(), EE = map.end(); I != EE;) {
    Value2SUsMap::iterator CurrItr = I++;
    SUList &sus = CurrItr->second;
    SUList::iterator SUItr = sus.begin(), SUEE = sus.end();
    for (; SUItr != SUEE; ++SUItr) {
      // Stop on BarrierChain or any instruction above it.
      if ((*SUItr)->NodeNum <= BarrierChain->NodeNum)
        break;

      (*SUItr)->addPredBarrier(BarrierChain);
    }

    // Also remove the BarrierChain from the list if present.
    if (SUItr != SUEE && *SUItr == BarrierChain)
      SUItr++;

    // Remove all SUs that are now successors of BarrierChain.
    if (SUItr != sus.begin())
      sus.erase(sus.begin(), SUItr);
  }

  // Remove all entries with empty su lists.
  map.remove_if([&](std::pair<ValueType, SUList> &mapEntry) {
    return (mapEntry.second.empty()); });

  // Recompute the size of the map (NumNodes).
  map.reComputeSize();
}
void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA,
                                        RegPressureTracker *RPTracker,
                                        PressureDiffs *PDiffs,
                                        LiveIntervals *LIS,
                                        bool TrackLaneMasks) {
  const TargetSubtargetInfo &ST = MF.getSubtarget();
  bool UseAA = EnableAASchedMI.getNumOccurrences() > 0 ? EnableAASchedMI
                                                       : ST.useAA();
  AAForDep = UseAA ? AA : nullptr;

  BarrierChain = nullptr;

  this->TrackLaneMasks = TrackLaneMasks;
  ScheduleDAG::clearDAG();

  // Create an SUnit for each real instruction.
  initSUnits();

  if (PDiffs)
    PDiffs->init(SUnits.size());
  // We build scheduling units by walking a block's instruction list
  // from bottom to top.

  // Each MI's memory operand(s) is analyzed to a list of underlying
  // objects. The SU is then inserted in the SUList(s) mapped from the
  // Value(s). Each Value thus gets mapped to lists of SUs depending
  // on it, stores and loads kept separately. Two SUs are trivially
  // non-aliasing if they both depend on only identified Values and do
  // not share any common Value.
  Value2SUsMap Stores, Loads(1 /*TrueMemOrderLatency*/);

  // Certain memory accesses are known to not alias any SU in Stores
  // or Loads, and have therefore their own 'NonAlias' domain. E.g.
  // spill / reload instructions never alias LLVM IR values. It would
  // be nice to assume that this type of memory accesses always have a
  // proper memory operand modelling, and are therefore never
  // unanalyzable, but this is conservatively not done.
  Value2SUsMap NonAliasStores, NonAliasLoads(1 /*TrueMemOrderLatency*/);

  // Track all instructions that may raise floating-point exceptions.
  // These do not depend on one another (or normal loads or stores), but
  // must not be rescheduled across global barriers. Note that we don't
  // really need a "map" here since we don't track those MIs by value;
  // using the same Value2SUsMap data type here is simply a matter of
  // convenience.
  Value2SUsMap FPExceptions;
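
  // For example, under this scheme a spill store to a stack slot and a load
  // from an IR-visible global land in different domains (NonAliasStores vs.
  // Loads), so no chain edge is ever tried between them, while two accesses
  // that getUnderlyingObjectsForInstr maps to the same Value get a precise
  // edge via addChainDependencies(SU, ..., V) below.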
  // Remove any stale debug info; sometimes BuildSchedGraph is called again
  // without emitting the info from the previous call.
  DbgValues.clear();
  FirstDbgValue = nullptr;

  assert(Defs.empty() && Uses.empty() &&
         "Only BuildGraph should update Defs/Uses");
  Defs.setUniverse(TRI->getNumRegs());
  Uses.setUniverse(TRI->getNumRegs());

  assert(CurrentVRegDefs.empty() && "nobody else should use CurrentVRegDefs");
  assert(CurrentVRegUses.empty() && "nobody else should use CurrentVRegUses");
  unsigned NumVirtRegs = MRI.getNumVirtRegs();
  CurrentVRegDefs.setUniverse(NumVirtRegs);
  CurrentVRegUses.setUniverse(NumVirtRegs);
  // Model data dependencies between instructions being scheduled and the
  // ExitSU.
  addSchedBarrierDeps();
  // Walk the list of instructions, from bottom moving up.
  MachineInstr *DbgMI = nullptr;
  for (MachineBasicBlock::iterator MII = RegionEnd, MIE = RegionBegin;
       MII != MIE; --MII) {
    MachineInstr &MI = *std::prev(MII);
    if (DbgMI) {
      DbgValues.push_back(std::make_pair(DbgMI, &MI));
      DbgMI = nullptr;
    }

    if (MI.isDebugValue()) {
      DbgMI = &MI;
      continue;
    }
    if (MI.isDebugLabel())
      continue;

    SUnit *SU = MISUnitMap[&MI];
    assert(SU && "No SUnit mapped to this MI");

    if (RPTracker) {
      RegisterOperands RegOpers;
      RegOpers.collect(MI, *TRI, MRI, TrackLaneMasks, false);
      if (TrackLaneMasks) {
        SlotIndex SlotIdx = LIS->getInstructionIndex(MI);
        RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx);
      }
      if (PDiffs != nullptr)
        PDiffs->addInstruction(SU->NodeNum, RegOpers, MRI);

      if (RPTracker->getPos() == RegionEnd || &*RPTracker->getPos() != &MI)
        RPTracker->recedeSkipDebugValues();
      assert(&*RPTracker->getPos() == &MI && "RPTracker in sync");
      RPTracker->recede(RegOpers);
    }

    assert(
        (CanHandleTerminators || (!MI.isTerminator() && !MI.isPosition())) &&
        "Cannot schedule terminators or labels!");
    // Add register-based dependencies (data, anti, and output).
    // For some instructions (calls, returns, inline-asm, etc.) there can
    // be explicit uses and implicit defs, in which case the use will appear
    // on the operand list before the def. Do two passes over the operand
    // list to make sure that defs are processed before any uses.
    bool HasVRegDef = false;
    for (unsigned j = 0, n = MI.getNumOperands(); j != n; ++j) {
      const MachineOperand &MO = MI.getOperand(j);
      if (!MO.isReg() || !MO.isDef())
        continue;
      Register Reg = MO.getReg();
      if (Register::isPhysicalRegister(Reg)) {
        addPhysRegDeps(SU, j);
      } else if (Register::isVirtualRegister(Reg)) {
        HasVRegDef = true;
        addVRegDefDeps(SU, j);
      }
    }
    // Now process all uses.
    for (unsigned j = 0, n = MI.getNumOperands(); j != n; ++j) {
      const MachineOperand &MO = MI.getOperand(j);
      // Only look at use operands.
      // We do not need to check for MO.readsReg() here because subsequent
      // subregister defs will get output dependence edges and need no
      // additional use dependencies.
      if (!MO.isReg() || !MO.isUse())
        continue;
      Register Reg = MO.getReg();
      if (Register::isPhysicalRegister(Reg)) {
        addPhysRegDeps(SU, j);
      } else if (Register::isVirtualRegister(Reg) && MO.readsReg()) {
        addVRegUseDeps(SU, j);
      }
    }

    // If we haven't seen any uses in this scheduling region, create a
    // dependence edge to ExitSU to model the live-out latency. This is required
    // for vreg defs with no in-region use, and prefetches with no vreg def.
    //
    // FIXME: NumDataSuccs would be more precise than NumSuccs here. This
    // check currently relies on being called before adding chain deps.
    if (SU->NumSuccs == 0 && SU->Latency > 1 && (HasVRegDef || MI.mayLoad())) {
      SDep Dep(SU, SDep::Artificial);
      Dep.setLatency(SU->Latency - 1);
      ExitSU.addPred(Dep);
    }
    // Add memory dependencies (Note: isStoreToStackSlot and
    // isLoadFromStackSlot are not usable after stack slots are lowered to
    // actual addresses).

    // This is a barrier event that acts as a pivotal node in the DAG.
    if (isGlobalMemoryObject(AA, &MI)) {

      // Become the barrier chain.
      if (BarrierChain)
        BarrierChain->addPredBarrier(SU);
      BarrierChain = SU;

      LLVM_DEBUG(dbgs() << "Global memory object and new barrier chain: SU("
                        << BarrierChain->NodeNum << ").\n";);

      // Add dependencies against everything below it and clear maps.
      addBarrierChain(Stores);
      addBarrierChain(Loads);
      addBarrierChain(NonAliasStores);
      addBarrierChain(NonAliasLoads);
      addBarrierChain(FPExceptions);

      continue;
    }
    // Instructions that may raise FP exceptions may not be moved
    // across any global barriers.
    if (MI.mayRaiseFPException()) {
      if (BarrierChain)
        BarrierChain->addPredBarrier(SU);

      FPExceptions.insert(SU, UnknownValue);

      if (FPExceptions.size() >= HugeRegion) {
        LLVM_DEBUG(dbgs() << "Reducing FPExceptions map.\n";);
        Value2SUsMap empty;
        reduceHugeMemNodeMaps(FPExceptions, empty, getReductionSize());
      }
    }
    // If it's not a store or a variant load, we're done.
    if (!MI.mayStore() &&
        !(MI.mayLoad() && !MI.isDereferenceableInvariantLoad(AA)))
      continue;

    // Always add a dependency edge to BarrierChain if present.
    if (BarrierChain)
      BarrierChain->addPredBarrier(SU);

    // Find the underlying objects for MI. The Objs vector is either
    // empty, or filled with the Values of memory locations which this
    // SU depends on.
    UnderlyingObjectsVector Objs;
    bool ObjsFound = getUnderlyingObjectsForInstr(&MI, MFI, Objs,
                                                  MF.getDataLayout());
    if (MI.mayStore()) {
      if (!ObjsFound) {
        // An unknown store depends on all stores and loads.
        addChainDependencies(SU, Stores);
        addChainDependencies(SU, NonAliasStores);
        addChainDependencies(SU, Loads);
        addChainDependencies(SU, NonAliasLoads);

        // Map this store to 'UnknownValue'.
        Stores.insert(SU, UnknownValue);
      } else {
        // Add precise dependencies against all previously seen memory
        // accesses mapped to the same Value(s).
        for (const UnderlyingObject &UnderlObj : Objs) {
          ValueType V = UnderlObj.getValue();
          bool ThisMayAlias = UnderlObj.mayAlias();

          // Add dependencies to previous stores and loads mapped to V.
          addChainDependencies(SU, (ThisMayAlias ? Stores : NonAliasStores), V);
          addChainDependencies(SU, (ThisMayAlias ? Loads : NonAliasLoads), V);
        }
        // Update the store map after all chains have been added to avoid adding
        // self-loop edge if multiple underlying objects are present.
        for (const UnderlyingObject &UnderlObj : Objs) {
          ValueType V = UnderlObj.getValue();
          bool ThisMayAlias = UnderlObj.mayAlias();

          // Map this store to V.
          (ThisMayAlias ? Stores : NonAliasStores).insert(SU, V);
        }
        // The store may have dependencies to unanalyzable loads and
        // stores.
        addChainDependencies(SU, Loads, UnknownValue);
        addChainDependencies(SU, Stores, UnknownValue);
      }
    } else { // SU is a load.
      if (!ObjsFound) {
        // An unknown load depends on all stores.
        addChainDependencies(SU, Stores);
        addChainDependencies(SU, NonAliasStores);

        Loads.insert(SU, UnknownValue);
      } else {
        for (const UnderlyingObject &UnderlObj : Objs) {
          ValueType V = UnderlObj.getValue();
          bool ThisMayAlias = UnderlObj.mayAlias();

          // Add precise dependencies against all previously seen stores
          // mapping to the same Value(s).
          addChainDependencies(SU, (ThisMayAlias ? Stores : NonAliasStores), V);

          // Map this load to V.
          (ThisMayAlias ? Loads : NonAliasLoads).insert(SU, V);
        }
        // The load may have dependencies to unanalyzable stores.
        addChainDependencies(SU, Stores, UnknownValue);
      }
    }
    // Reduce maps if they grow huge.
    if (Stores.size() + Loads.size() >= HugeRegion) {
      LLVM_DEBUG(dbgs() << "Reducing Stores and Loads maps.\n";);
      reduceHugeMemNodeMaps(Stores, Loads, getReductionSize());
    }
    if (NonAliasStores.size() + NonAliasLoads.size() >= HugeRegion) {
      LLVM_DEBUG(
          dbgs() << "Reducing NonAliasStores and NonAliasLoads maps.\n";);
      reduceHugeMemNodeMaps(NonAliasStores, NonAliasLoads, getReductionSize());
    }
  }

  if (DbgMI)
    FirstDbgValue = DbgMI;

  Defs.clear();
  Uses.clear();
  CurrentVRegDefs.clear();
  CurrentVRegUses.clear();
}
raw_ostream &llvm::operator<<(raw_ostream &OS, const PseudoSourceValue* PSV) {
  PSV->printCustom(OS);
  return OS;
}
void ScheduleDAGInstrs::Value2SUsMap::dump() {
  for (auto &Itr : *this) {
    if (Itr.first.is<const Value*>()) {
      const Value *V = Itr.first.get<const Value*>();
      if (isa<UndefValue>(V))
        dbgs() << "Unknown";
      else
        V->printAsOperand(dbgs());
    }
    else if (Itr.first.is<const PseudoSourceValue*>())
      dbgs() << Itr.first.get<const PseudoSourceValue*>();
    else
      llvm_unreachable("Unknown Value type.");

    dbgs() << " : ";
    dumpSUList(Itr.second);
  }
}
void ScheduleDAGInstrs::reduceHugeMemNodeMaps(Value2SUsMap &stores,
                                              Value2SUsMap &loads, unsigned N) {
  LLVM_DEBUG(dbgs() << "Before reduction:\nStoring SUnits:\n"; stores.dump();
             dbgs() << "Loading SUnits:\n"; loads.dump());

  // Insert all SU's NodeNums into a vector and sort it.
  std::vector<unsigned> NodeNums;
  NodeNums.reserve(stores.size() + loads.size());
  for (auto &I : stores)
    for (auto *SU : I.second)
      NodeNums.push_back(SU->NodeNum);
  for (auto &I : loads)
    for (auto *SU : I.second)
      NodeNums.push_back(SU->NodeNum);
  llvm::sort(NodeNums);

  // The N last elements in NodeNums will be removed, and the SU with
  // the lowest NodeNum of them will become the new BarrierChain to
  // let the not yet seen SUs have a dependency to the removed SUs.
  assert(N <= NodeNums.size());
  SUnit *newBarrierChain = &SUnits[*(NodeNums.end() - N)];
  if (BarrierChain) {
    // The aliasing and non-aliasing maps reduce independently of each
    // other, but share a common BarrierChain. Check if the
    // newBarrierChain is above the former one. If it is not, it may
    // introduce a loop to use newBarrierChain, so keep the old one.
    if (newBarrierChain->NodeNum < BarrierChain->NodeNum) {
      BarrierChain->addPredBarrier(newBarrierChain);
      BarrierChain = newBarrierChain;
      LLVM_DEBUG(dbgs() << "Inserting new barrier chain: SU("
                        << BarrierChain->NodeNum << ").\n";);
    }
    else
      LLVM_DEBUG(dbgs() << "Keeping old barrier chain: SU("
                        << BarrierChain->NodeNum << ").\n";);
  }
  else
    BarrierChain = newBarrierChain;

  insertBarrierChain(stores);
  insertBarrierChain(loads);

  LLVM_DEBUG(dbgs() << "After reduction:\nStoring SUnits:\n"; stores.dump();
             dbgs() << "Loading SUnits:\n"; loads.dump());
}
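
// Worked example (illustrative numbers only): if stores and loads together
// hold SUs whose sorted NodeNums end in {..., 612, 613, ..., 998} and N is
// 500, the 500 highest NodeNums are dropped from the maps and the SU owning
// the lowest of those dropped NodeNums becomes the (possibly new)
// BarrierChain, so SUs visited later get their memory ordering through that
// single barrier edge instead of through the removed map entries.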
static void toggleKills(const MachineRegisterInfo &MRI, LivePhysRegs &LiveRegs,
                        MachineInstr &MI, bool addToLiveRegs) {
  for (MachineOperand &MO : MI.operands()) {
    if (!MO.isReg() || !MO.readsReg())
      continue;
    Register Reg = MO.getReg();

    // Things that are available after the instruction are killed by it.
    bool IsKill = LiveRegs.available(MRI, Reg);
    MO.setIsKill(IsKill);
    if (addToLiveRegs)
      LiveRegs.addReg(Reg);
  }
}
void ScheduleDAGInstrs::fixupKills(MachineBasicBlock &MBB) {
  LLVM_DEBUG(dbgs() << "Fixup kills for " << printMBBReference(MBB) << '\n');

  LiveRegs.init(*TRI);
  LiveRegs.addLiveOuts(MBB);

  // Examine block from end to start...
  for (MachineInstr &MI : make_range(MBB.rbegin(), MBB.rend())) {
    if (MI.isDebugInstr())
      continue;

    // Update liveness. Registers that are defined but not used in this
    // instruction are now dead. Mark register and all subregs as they
    // are completely defined.
    for (ConstMIBundleOperands O(MI); O.isValid(); ++O) {
      const MachineOperand &MO = *O;
      if (MO.isReg()) {
        if (!MO.isDef())
          continue;
        Register Reg = MO.getReg();
        LiveRegs.removeReg(Reg);
      } else if (MO.isRegMask()) {
        LiveRegs.removeRegsInMask(MO);
      }
    }

    // If there is a bundle header fix it up first.
    if (!MI.isBundled()) {
      toggleKills(MRI, LiveRegs, MI, true);
    } else {
      MachineBasicBlock::instr_iterator Bundle = MI.getIterator();
      if (MI.isBundle())
        toggleKills(MRI, LiveRegs, MI, false);

      // Some targets make the (questionable) assumption that the instructions
      // inside the bundle are ordered and consequently only the last use of
      // a register inside the bundle can kill it.
      MachineBasicBlock::instr_iterator I = std::next(Bundle);
      while (I->isBundledWithSucc())
        ++I;
      do {
        if (!I->isDebugInstr())
          toggleKills(MRI, LiveRegs, *I, true);
        --I;
      } while (I != Bundle);
    }
  }
}
void ScheduleDAGInstrs::dumpNode(const SUnit &SU) const {
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  dumpNodeName(SU);
  dbgs() << ": ";
  SU.getInstr()->dump();
#endif
}
void ScheduleDAGInstrs::dump() const {
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  if (EntrySU.getInstr() != nullptr)
    dumpNodeAll(EntrySU);
  for (const SUnit &SU : SUnits)
    dumpNodeAll(SU);
  if (ExitSU.getInstr() != nullptr)
    dumpNodeAll(ExitSU);
#endif
}
std::string ScheduleDAGInstrs::getGraphNodeLabel(const SUnit *SU) const {
  std::string s;
  raw_string_ostream oss(s);
  if (SU == &EntrySU)
    oss << "<entry>";
  else if (SU == &ExitSU)
    oss << "<exit>";
  else
    SU->getInstr()->print(oss, /*SkipOpers=*/true);
  return oss.str();
}
/// Return the basic block label. It is not necessarily unique because a block
/// contains multiple scheduling regions. But it is fine for visualization.
std::string ScheduleDAGInstrs::getDAGName() const {
  return "dag." + BB->getFullName();
}
bool ScheduleDAGInstrs::canAddEdge(SUnit *SuccSU, SUnit *PredSU) {
  return SuccSU == &ExitSU || !Topo.IsReachable(PredSU, SuccSU);
}
bool ScheduleDAGInstrs::addEdge(SUnit *SuccSU, const SDep &PredDep) {
  if (SuccSU != &ExitSU) {
    // Do not use WillCreateCycle, it assumes SD scheduling.
    // If Pred is reachable from Succ, then the edge creates a cycle.
    if (Topo.IsReachable(PredDep.getSUnit(), SuccSU))
      return false;
    Topo.AddPredQueued(SuccSU, PredDep.getSUnit());
  }
  SuccSU->addPred(PredDep, /*Required=*/!PredDep.isArtificial());
  // Return true regardless of whether a new edge needed to be inserted.
  return true;
}
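
// Illustrative usage sketch (the SUnit pointers First and Second are
// placeholders, not names used in this file): check reachability first so an
// added artificial ordering edge can never create a cycle in the DAG.
//   if (canAddEdge(Second, First))
//     addEdge(Second, SDep(First, SDep::Artificial));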
//===----------------------------------------------------------------------===//
// SchedDFSResult Implementation
//===----------------------------------------------------------------------===//
namespace llvm {

/// Internal state used to compute SchedDFSResult.
class SchedDFSImpl {
  SchedDFSResult &R;

  /// Join DAG nodes into equivalence classes by their subtree.
  IntEqClasses SubtreeClasses;
  /// List PredSU, SuccSU pairs that represent data edges between subtrees.
  std::vector<std::pair<const SUnit *, const SUnit *>> ConnectionPairs;

  struct RootData {
    unsigned NodeID;
    unsigned ParentNodeID;      ///< Parent node (member of the parent subtree).
    unsigned SubInstrCount = 0; ///< Instr count in this tree only, not
                                ///  children.

    RootData(unsigned id): NodeID(id),
                           ParentNodeID(SchedDFSResult::InvalidSubtreeID) {}

    unsigned getSparseSetIndex() const { return NodeID; }
  };

  SparseSet<RootData> RootSet;

public:
  SchedDFSImpl(SchedDFSResult &r): R(r), SubtreeClasses(R.DFSNodeData.size()) {
    RootSet.setUniverse(R.DFSNodeData.size());
  }
  /// Returns true if this node has been visited by the DFS traversal.
  ///
  /// During visitPostorderNode the Node's SubtreeID is assigned to the Node
  /// ID. Later, SubtreeID is updated but remains valid.
  bool isVisited(const SUnit *SU) const {
    return R.DFSNodeData[SU->NodeNum].SubtreeID
      != SchedDFSResult::InvalidSubtreeID;
  }
  /// Initializes this node's instruction count. We don't need to flag the node
  /// visited until visitPostorder because the DAG cannot have cycles.
  void visitPreorder(const SUnit *SU) {
    R.DFSNodeData[SU->NodeNum].InstrCount =
        SU->getInstr()->isTransient() ? 0 : 1;
  }
  /// Called once for each node after all predecessors are visited. Revisit this
  /// node's predecessors and potentially join them now that we know the ILP of
  /// the other predecessors.
  void visitPostorderNode(const SUnit *SU) {
    // Mark this node as the root of a subtree. It may be joined with its
    // successors later.
    R.DFSNodeData[SU->NodeNum].SubtreeID = SU->NodeNum;
    RootData RData(SU->NodeNum);
    RData.SubInstrCount = SU->getInstr()->isTransient() ? 0 : 1;

    // If any predecessors are still in their own subtree, they either cannot be
    // joined or are large enough to remain separate. If this parent node's
    // total instruction count is not greater than a child subtree by at least
    // the subtree limit, then try to join it now since splitting subtrees is
    // only useful if multiple high-pressure paths are possible.
    unsigned InstrCount = R.DFSNodeData[SU->NodeNum].InstrCount;
    for (const SDep &PredDep : SU->Preds) {
      if (PredDep.getKind() != SDep::Data)
        continue;
      unsigned PredNum = PredDep.getSUnit()->NodeNum;
      if ((InstrCount - R.DFSNodeData[PredNum].InstrCount) < R.SubtreeLimit)
        joinPredSubtree(PredDep, SU, /*CheckLimit=*/false);

      // Either link or merge the TreeData entry from the child to the parent.
      if (R.DFSNodeData[PredNum].SubtreeID == PredNum) {
        // If the predecessor's parent is invalid, this is a tree edge and the
        // current node is the parent.
        if (RootSet[PredNum].ParentNodeID == SchedDFSResult::InvalidSubtreeID)
          RootSet[PredNum].ParentNodeID = SU->NodeNum;
      }
      else if (RootSet.count(PredNum)) {
        // The predecessor is not a root, but is still in the root set. This
        // must be the new parent that it was just joined to. Note that
        // RootSet[PredNum].ParentNodeID may either be invalid or may still be
        // set to the original parent.
        RData.SubInstrCount += RootSet[PredNum].SubInstrCount;
        RootSet.erase(PredNum);
      }
    }
    RootSet[SU->NodeNum] = RData;
  }
  /// Called once for each tree edge after calling visitPostOrderNode on
  /// the predecessor. Increment the parent node's instruction count and
  /// preemptively join this subtree to its parent's if it is small enough.
  void visitPostorderEdge(const SDep &PredDep, const SUnit *Succ) {
    R.DFSNodeData[Succ->NodeNum].InstrCount
      += R.DFSNodeData[PredDep.getSUnit()->NodeNum].InstrCount;
    joinPredSubtree(PredDep, Succ);
  }
  /// Adds a connection for cross edges.
  void visitCrossEdge(const SDep &PredDep, const SUnit *Succ) {
    ConnectionPairs.push_back(std::make_pair(PredDep.getSUnit(), Succ));
  }
  /// Sets each node's subtree ID to the representative ID and records
  /// connections between trees.
  void finalize() {
    SubtreeClasses.compress();
    R.DFSTreeData.resize(SubtreeClasses.getNumClasses());
    assert(SubtreeClasses.getNumClasses() == RootSet.size()
           && "number of roots should match trees");
    for (const RootData &Root : RootSet) {
      unsigned TreeID = SubtreeClasses[Root.NodeID];
      if (Root.ParentNodeID != SchedDFSResult::InvalidSubtreeID)
        R.DFSTreeData[TreeID].ParentTreeID = SubtreeClasses[Root.ParentNodeID];
      R.DFSTreeData[TreeID].SubInstrCount = Root.SubInstrCount;
      // Note that SubInstrCount may be greater than InstrCount if we joined
      // subtrees across a cross edge. InstrCount will be attributed to the
      // original parent, while SubInstrCount will be attributed to the joined
      // parent.
    }
    R.SubtreeConnections.resize(SubtreeClasses.getNumClasses());
    R.SubtreeConnectLevels.resize(SubtreeClasses.getNumClasses());
    LLVM_DEBUG(dbgs() << R.getNumSubtrees() << " subtrees:\n");
    for (unsigned Idx = 0, End = R.DFSNodeData.size(); Idx != End; ++Idx) {
      R.DFSNodeData[Idx].SubtreeID = SubtreeClasses[Idx];
      LLVM_DEBUG(dbgs() << "  SU(" << Idx << ") in tree "
                        << R.DFSNodeData[Idx].SubtreeID << '\n');
    }
    for (const std::pair<const SUnit *, const SUnit *> &P : ConnectionPairs) {
      unsigned PredTree = SubtreeClasses[P.first->NodeNum];
      unsigned SuccTree = SubtreeClasses[P.second->NodeNum];
      if (PredTree == SuccTree)
        continue;
      unsigned Depth = P.first->getDepth();
      addConnection(PredTree, SuccTree, Depth);
      addConnection(SuccTree, PredTree, Depth);
    }
  }
  /// Joins the predecessor subtree with the successor that is its DFS parent.
  /// Applies some heuristics before joining.
  bool joinPredSubtree(const SDep &PredDep, const SUnit *Succ,
                       bool CheckLimit = true) {
    assert(PredDep.getKind() == SDep::Data && "Subtrees are for data edges");

    // Check if the predecessor is already joined.
    const SUnit *PredSU = PredDep.getSUnit();
    unsigned PredNum = PredSU->NodeNum;
    if (R.DFSNodeData[PredNum].SubtreeID != PredNum)
      return false;

    // Four is the magic number of successors before a node is considered a
    // pinch point.
    unsigned NumDataSucs = 0;
    for (const SDep &SuccDep : PredSU->Succs) {
      if (SuccDep.getKind() == SDep::Data) {
        if (++NumDataSucs >= 4)
          return false;
      }
    }
    if (CheckLimit && R.DFSNodeData[PredNum].InstrCount > R.SubtreeLimit)
      return false;

    R.DFSNodeData[PredNum].SubtreeID = Succ->NodeNum;
    SubtreeClasses.join(Succ->NodeNum, PredNum);
    return true;
  }
  /// Called by finalize() to record a connection between trees.
  void addConnection(unsigned FromTree, unsigned ToTree, unsigned Depth) {
    do {
      SmallVectorImpl<SchedDFSResult::Connection> &Connections =
          R.SubtreeConnections[FromTree];
      for (SchedDFSResult::Connection &C : Connections) {
        if (C.TreeID == ToTree) {
          C.Level = std::max(C.Level, Depth);
          return;
        }
      }
      Connections.push_back(SchedDFSResult::Connection(ToTree, Depth));
      FromTree = R.DFSTreeData[FromTree].ParentTreeID;
    } while (FromTree != SchedDFSResult::InvalidSubtreeID);
  }
};

} // end namespace llvm
namespace {

/// Manage the stack used by a reverse depth-first search over the DAG.
class SchedDAGReverseDFS {
  std::vector<std::pair<const SUnit *, SUnit::const_pred_iterator>> DFSStack;

public:
  bool isComplete() const { return DFSStack.empty(); }

  void follow(const SUnit *SU) {
    DFSStack.push_back(std::make_pair(SU, SU->Preds.begin()));
  }
  void advance() { ++DFSStack.back().second; }

  const SDep *backtrack() {
    DFSStack.pop_back();
    return DFSStack.empty() ? nullptr : std::prev(DFSStack.back().second);
  }

  const SUnit *getCurr() const { return DFSStack.back().first; }

  SUnit::const_pred_iterator getPred() const { return DFSStack.back().second; }

  SUnit::const_pred_iterator getPredEnd() const {
    return getCurr()->Preds.end();
  }
};

} // end anonymous namespace
static bool hasDataSucc(const SUnit *SU) {
  for (const SDep &SuccDep : SU->Succs) {
    if (SuccDep.getKind() == SDep::Data &&
        !SuccDep.getSUnit()->isBoundaryNode())
      return true;
  }
  return false;
}
/// Computes an ILP metric for all nodes in the subDAG reachable via depth-first
/// search from this root.
void SchedDFSResult::compute(ArrayRef<SUnit> SUnits) {
  if (!IsBottomUp)
    llvm_unreachable("Top-down ILP metric is unimplemented");

  SchedDFSImpl Impl(*this);
  for (const SUnit &SU : SUnits) {
    if (Impl.isVisited(&SU) || hasDataSucc(&SU))
      continue;

    SchedDAGReverseDFS DFS;
    Impl.visitPreorder(&SU);
    DFS.follow(&SU);
    while (true) {
      // Traverse the leftmost path as far as possible.
      while (DFS.getPred() != DFS.getPredEnd()) {
        const SDep &PredDep = *DFS.getPred();
        DFS.advance();
        // Ignore non-data edges.
        if (PredDep.getKind() != SDep::Data
            || PredDep.getSUnit()->isBoundaryNode()) {
          continue;
        }
        // An already visited edge is a cross edge, assuming an acyclic DAG.
        if (Impl.isVisited(PredDep.getSUnit())) {
          Impl.visitCrossEdge(PredDep, DFS.getCurr());
          continue;
        }
        Impl.visitPreorder(PredDep.getSUnit());
        DFS.follow(PredDep.getSUnit());
      }
      // Visit the top of the stack in postorder and backtrack.
      const SUnit *Child = DFS.getCurr();
      const SDep *PredDep = DFS.backtrack();
      Impl.visitPostorderNode(Child);
      if (PredDep)
        Impl.visitPostorderEdge(*PredDep, DFS.getCurr());
      if (DFS.isComplete())
        break;
    }
  }
  Impl.finalize();
}
/// The root of the given SubtreeID was just scheduled. For all subtrees
/// connected to this tree, record the depth of the connection so that the
/// nearest connected subtrees can be prioritized.
void SchedDFSResult::scheduleTree(unsigned SubtreeID) {
  for (const Connection &C : SubtreeConnections[SubtreeID]) {
    SubtreeConnectLevels[C.TreeID] =
      std::max(SubtreeConnectLevels[C.TreeID], C.Level);
    LLVM_DEBUG(dbgs() << "  Tree: " << C.TreeID << " @"
                      << SubtreeConnectLevels[C.TreeID] << '\n');
  }
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void ILPValue::print(raw_ostream &OS) const {
  OS << InstrCount << " / " << Length << " = ";
  if (!Length)
    OS << "BADILP";
  else
    OS << format("%g", ((double)InstrCount / Length));
}

LLVM_DUMP_METHOD void ILPValue::dump() const {
  dbgs() << *this << '\n';
}
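
// For example, a subtree containing 8 non-transient instructions on a
// critical path of length 4 prints as "8 / 4 = 2", i.e. an ILP value of 2;
// a zero Length prints the fallback string instead of dividing by zero.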
namespace llvm {

raw_ostream &operator<<(raw_ostream &OS, const ILPValue &Val) {
  Val.print(OS);
  return OS;
}

} // end namespace llvm

#endif