//===---- ScheduleDAGInstrs.cpp - MachineInstr Rescheduling ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file This implements the ScheduleDAGInstrs class, which implements
/// re-scheduling of MachineInstrs.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/ScheduleDAGInstrs.h"

#include "llvm/ADT/IntEqClasses.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SparseSet.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/RegisterPressure.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/ScheduleDFS.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "machine-scheduler"
static cl::opt<bool>
    EnableAASchedMI("enable-aa-sched-mi", cl::Hidden,
                    cl::desc("Enable use of AA during MI DAG construction"));
static cl::opt<bool> UseTBAA("use-tbaa-in-sched-mi", cl::Hidden, cl::init(true),
                             cl::desc("Enable use of TBAA during MI DAG construction"));
// Note: the two options below might be used in tuning compile time vs
// output quality. Setting HugeRegion so large that it will never be
// reached means best-effort, but may be slow.

// When Stores and Loads maps (or NonAliasStores and NonAliasLoads)
// together hold this many SUs, a reduction of maps will be done.
static cl::opt<unsigned> HugeRegion("dag-maps-huge-region", cl::Hidden,
    cl::init(1000), cl::desc("The limit to use while constructing the DAG "
                             "prior to scheduling, at which point a trade-off "
                             "is made to avoid excessive compile time."));
static cl::opt<unsigned> ReductionSize(
    "dag-maps-reduction-size", cl::Hidden,
    cl::desc("A huge scheduling region will have maps reduced by this many "
             "nodes at a time. Defaults to HugeRegion / 2."));
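// Usage note: both limits are ordinary cl::opts, so they can be tuned when
// experimenting with compile time vs. scheduling quality, e.g. (assuming an
// llc invocation) "-dag-maps-huge-region=50" to reduce the maps aggressively,
// or a very large value to make the reduction effectively never trigger; when
// driving codegen through clang such internal options go through -mllvm.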
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
static cl::opt<bool> SchedPrintCycles(
    "sched-print-cycles", cl::Hidden, cl::init(false),
    cl::desc("Report top/bottom cycles when dumping SUnit instances"));
#endif
static unsigned getReductionSize() {
  // Always reduce a huge region with half of the elements, except
  // when user sets this number explicitly.
  if (ReductionSize.getNumOccurrences() == 0)
    return HugeRegion / 2;
  return ReductionSize;
}
static void dumpSUList(const ScheduleDAGInstrs::SUList &L) {
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  for (const SUnit *SU : L) {
    dbgs() << "SU(" << SU->NodeNum << ")";
ScheduleDAGInstrs::ScheduleDAGInstrs(MachineFunction &mf,
                                     const MachineLoopInfo *mli,
                                     bool RemoveKillFlags)
    : ScheduleDAG(mf), MLI(mli), MFI(mf.getFrameInfo()),
      RemoveKillFlags(RemoveKillFlags),
      UnknownValue(UndefValue::get(
          Type::getVoidTy(mf.getFunction().getContext()))),
      Topo(SUnits, &ExitSU) {
  const TargetSubtargetInfo &ST = mf.getSubtarget();
  SchedModel.init(&ST);
/// If this machine instr has memory reference information and it can be
/// tracked to a normal reference to a known object, return the Value
/// for that object. This function returns false if the memory location is
/// unknown or may alias anything.
static bool getUnderlyingObjectsForInstr(const MachineInstr *MI,
                                         const MachineFrameInfo &MFI,
                                         UnderlyingObjectsVector &Objects,
                                         const DataLayout &DL) {
  auto AllMMOsOkay = [&]() {
    for (const MachineMemOperand *MMO : MI->memoperands()) {
      // TODO: Figure out whether isAtomic is really necessary (see D57601).
      if (MMO->isVolatile() || MMO->isAtomic())

      if (const PseudoSourceValue *PSV = MMO->getPseudoValue()) {
        // Functions that contain tail calls don't have unique PseudoSourceValue
        // objects. Two PseudoSourceValues might refer to the same or
        // overlapping locations. The client code calling this function assumes
        // this is not the case. So return a conservative answer of no known
        if (MFI.hasTailCall())

        // For now, ignore PseudoSourceValues which may alias LLVM IR values
        // because the code that uses this function has no way to cope with
        if (PSV->isAliased(&MFI))

        bool MayAlias = PSV->mayAlias(&MFI);
        Objects.emplace_back(PSV, MayAlias);
      } else if (const Value *V = MMO->getValue()) {
        SmallVector<Value *, 4> Objs;
        if (!getUnderlyingObjectsForCodeGen(V, Objs))

        for (Value *V : Objs) {
          assert(isIdentifiedObject(V));
          Objects.emplace_back(V, true);

  if (!AllMMOsOkay()) {
void ScheduleDAGInstrs::startBlock(MachineBasicBlock *bb) {

void ScheduleDAGInstrs::finishBlock() {
  // Subclasses should no longer refer to the old block.

void ScheduleDAGInstrs::enterRegion(MachineBasicBlock *bb,
                                    MachineBasicBlock::iterator begin,
                                    MachineBasicBlock::iterator end,
                                    unsigned regioninstrs) {
  assert(bb == BB && "startBlock should set BB");
  NumRegionInstrs = regioninstrs;

void ScheduleDAGInstrs::exitRegion() {

void ScheduleDAGInstrs::addSchedBarrierDeps() {
  MachineInstr *ExitMI =
      RegionEnd != BB->end()
          ? &*skipDebugInstructionsBackward(RegionEnd, RegionBegin)
  ExitSU.setInstr(ExitMI);
  // Add dependencies on the defs and uses of the instruction.
  for (const MachineOperand &MO : ExitMI->all_uses()) {
    Register Reg = MO.getReg();
    if (Reg.isPhysical()) {
      Uses.insert(PhysRegSUOper(&ExitSU, -1, Reg));
    } else if (Reg.isVirtual() && MO.readsReg()) {
      addVRegUseDeps(&ExitSU, MO.getOperandNo());

  if (!ExitMI || (!ExitMI->isCall() && !ExitMI->isBarrier())) {
    // For others, e.g. fallthrough, conditional branch, assume the exit
    // uses all the registers that are livein to the successor blocks.
    for (const MachineBasicBlock *Succ : BB->successors()) {
      for (const auto &LI : Succ->liveins()) {
        if (!Uses.contains(LI.PhysReg))
          Uses.insert(PhysRegSUOper(&ExitSU, -1, LI.PhysReg));
/// MO is an operand of SU's instruction that defines a physical register. Adds
/// data dependencies from SU to any uses of the physical register.
void ScheduleDAGInstrs::addPhysRegDataDeps(SUnit *SU, unsigned OperIdx) {
  const MachineOperand &MO = SU->getInstr()->getOperand(OperIdx);
  assert(MO.isDef() && "expect physreg def");

  // Ask the target if address-backscheduling is desirable, and if so how much.
  const TargetSubtargetInfo &ST = MF.getSubtarget();

  // Only use any non-zero latency for real defs/uses, in contrast to
  // "fake" operands added by regalloc.
  const MCInstrDesc *DefMIDesc = &SU->getInstr()->getDesc();
  bool ImplicitPseudoDef = (OperIdx >= DefMIDesc->getNumOperands() &&
                            !DefMIDesc->hasImplicitDefOfPhysReg(MO.getReg()));
  for (MCRegAliasIterator Alias(MO.getReg(), TRI, true);
       Alias.isValid(); ++Alias) {
    for (Reg2SUnitsMap::iterator I = Uses.find(*Alias); I != Uses.end(); ++I) {
      SUnit *UseSU = I->SU;

      // Adjust the dependence latency using operand def/use information,
      // then allow the target to perform its own adjustments.
      int UseOp = I->OpIdx;
      MachineInstr *RegUse = nullptr;
        Dep = SDep(SU, SDep::Artificial);
        // Set the hasPhysRegDefs only for physreg defs that have a use within
        // the scheduling region.
        SU->hasPhysRegDefs = true;
        Dep = SDep(SU, SDep::Data, *Alias);
        RegUse = UseSU->getInstr();
      const MCInstrDesc *UseMIDesc =
          (RegUse ? &UseSU->getInstr()->getDesc() : nullptr);
      bool ImplicitPseudoUse =
          (UseMIDesc && UseOp >= ((int)UseMIDesc->getNumOperands()) &&
           !UseMIDesc->hasImplicitUseOfPhysReg(*Alias));
      if (!ImplicitPseudoDef && !ImplicitPseudoUse) {
        Dep.setLatency(SchedModel.computeOperandLatency(SU->getInstr(), OperIdx,
      ST.adjustSchedDependency(SU, OperIdx, UseSU, UseOp, Dep);
/// Adds register dependencies (data, anti, and output) from this SUnit
/// to following instructions in the same scheduling region that depend on the
/// physical register referenced at OperIdx.
void ScheduleDAGInstrs::addPhysRegDeps(SUnit *SU, unsigned OperIdx) {
  MachineInstr *MI = SU->getInstr();
  MachineOperand &MO = MI->getOperand(OperIdx);
  Register Reg = MO.getReg();
  // We do not need to track any dependencies for constant registers.
  if (MRI.isConstantPhysReg(Reg))

  const TargetSubtargetInfo &ST = MF.getSubtarget();

  // Optionally add output and anti dependencies. For anti
  // dependencies we use a latency of 0 because for a multi-issue
  // target we want to allow the defining instruction to issue
  // in the same cycle as the using instruction.
  // TODO: Using a latency of 1 here for output dependencies assumes
  // there's no cost for reusing registers.
  SDep::Kind Kind = MO.isUse() ? SDep::Anti : SDep::Output;
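  // Illustration: if an instruction reads R0 and a later instruction in the
  // region redefines R0, the read is ordered before the redefinition with an
  // anti edge (latency 0, per the comment above); if both instructions define
  // R0, the earlier def is ordered before the later def with an output edge.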
  for (MCRegAliasIterator Alias(Reg, TRI, true); Alias.isValid(); ++Alias) {
    if (!Defs.contains(*Alias))
    for (Reg2SUnitsMap::iterator I = Defs.find(*Alias); I != Defs.end(); ++I) {
      SUnit *DefSU = I->SU;
      if (DefSU == &ExitSU)
          (Kind != SDep::Output || !MO.isDead() ||
           !DefSU->getInstr()->registerDefIsDead(*Alias))) {
        SDep Dep(SU, Kind, /*Reg=*/*Alias);
        if (Kind != SDep::Anti)
              SchedModel.computeOutputLatency(MI, OperIdx, DefSU->getInstr()));
        ST.adjustSchedDependency(SU, OperIdx, DefSU, I->OpIdx, Dep);

    SU->hasPhysRegUses = true;
    // Either insert a new Reg2SUnits entry with an empty SUnits list, or
    // retrieve the existing SUnits list for this register's uses.
    // Push this SUnit on the use list.
    Uses.insert(PhysRegSUOper(SU, OperIdx, Reg));

    addPhysRegDataDeps(SU, OperIdx);
    // Clear previous uses and defs of this register and its subregisters.
    for (MCPhysReg SubReg : TRI->subregs_inclusive(Reg)) {
      if (Uses.contains(SubReg))
        Uses.eraseAll(SubReg);
      Defs.eraseAll(SubReg);

    if (MO.isDead() && SU->isCall) {
      // Calls will not be reordered because of chain dependencies (see
      // below). Since call operands are dead, calls may continue to be added
      // to the DefList making dependence checking quadratic in the size of
      // the block. Instead, we leave only one call at the back of the
      Reg2SUnitsMap::RangePair P = Defs.equal_range(Reg);
      Reg2SUnitsMap::iterator B = P.first;
      Reg2SUnitsMap::iterator I = P.second;
      for (bool isBegin = I == B; !isBegin; /* empty */) {
        isBegin = (--I) == B;

    // Defs are pushed in the order they are visited and never reordered.
    Defs.insert(PhysRegSUOper(SU, OperIdx, Reg));
LaneBitmask ScheduleDAGInstrs::getLaneMaskForMO(const MachineOperand &MO) const {
  Register Reg = MO.getReg();
  // No point in tracking lanemasks if we don't have interesting subregisters.
  const TargetRegisterClass &RC = *MRI.getRegClass(Reg);
  if (!RC.HasDisjunctSubRegs)
    return LaneBitmask::getAll();

  unsigned SubReg = MO.getSubReg();
    return RC.getLaneMask();
  return TRI->getSubRegIndexLaneMask(SubReg);
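// Illustrative example (hypothetical target): for a register class with
// disjoint sub_lo/sub_hi subregisters, an operand accessed via sub_lo yields a
// lane mask covering only the low lanes, so it can be tracked independently of
// a later access to sub_hi; an operand with no subregister index covers the
// register class's full lane mask.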
bool ScheduleDAGInstrs::deadDefHasNoUse(const MachineOperand &MO) {
  auto RegUse = CurrentVRegUses.find(MO.getReg());
  if (RegUse == CurrentVRegUses.end())
  return (RegUse->LaneMask & getLaneMaskForMO(MO)).none();
/// Adds register output and data dependencies from this SUnit to instructions
/// that occur later in the same scheduling region if they read from or write to
/// the virtual register defined at OperIdx.
///
/// TODO: Hoist loop induction variable increments. This has to be
/// reevaluated. Generally, IV scheduling should be done before coalescing.
void ScheduleDAGInstrs::addVRegDefDeps(SUnit *SU, unsigned OperIdx) {
  MachineInstr *MI = SU->getInstr();
  MachineOperand &MO = MI->getOperand(OperIdx);
  Register Reg = MO.getReg();

  LaneBitmask DefLaneMask;
  LaneBitmask KillLaneMask;
  if (TrackLaneMasks) {
    bool IsKill = MO.getSubReg() == 0 || MO.isUndef();
    DefLaneMask = getLaneMaskForMO(MO);
    // If we have a <read-undef> flag, none of the lane values comes from an
    // earlier instruction.
    KillLaneMask = IsKill ? LaneBitmask::getAll() : DefLaneMask;

    if (MO.getSubReg() != 0 && MO.isUndef()) {
      // There may be other subregister defs on the same instruction of the same
      // register in later operands. The lanes of other defs will now be live
      // after this instruction, so these should not be treated as killed by the
      // instruction even though they appear to be killed in this one operand.
      for (const MachineOperand &OtherMO :
           llvm::drop_begin(MI->operands(), OperIdx + 1))
        if (OtherMO.isReg() && OtherMO.isDef() && OtherMO.getReg() == Reg)
          KillLaneMask &= ~getLaneMaskForMO(OtherMO);

    // Clear undef flag, we'll re-add it later once we know which subregister
    MO.setIsUndef(false);
    DefLaneMask = LaneBitmask::getAll();
    KillLaneMask = LaneBitmask::getAll();

    assert(deadDefHasNoUse(MO) && "Dead defs should have no uses");
    // Add data dependence to all uses we found so far.
    const TargetSubtargetInfo &ST = MF.getSubtarget();
    for (VReg2SUnitOperIdxMultiMap::iterator I = CurrentVRegUses.find(Reg),
         E = CurrentVRegUses.end(); I != E; /*empty*/) {
      LaneBitmask LaneMask = I->LaneMask;
      // Ignore uses of other lanes.
      if ((LaneMask & KillLaneMask).none()) {
      if ((LaneMask & DefLaneMask).any()) {
        SUnit *UseSU = I->SU;
        MachineInstr *Use = UseSU->getInstr();
        SDep Dep(SU, SDep::Data, Reg);
        Dep.setLatency(SchedModel.computeOperandLatency(MI, OperIdx, Use,
        ST.adjustSchedDependency(SU, OperIdx, UseSU, I->OperandIndex, Dep);

      LaneMask &= ~KillLaneMask;
      // If we found a Def for all lanes of this use, remove it from the list.
      if (LaneMask.any()) {
        I->LaneMask = LaneMask;
        I = CurrentVRegUses.erase(I);

  // Shortcut: Singly defined vregs do not have output/anti dependencies.
  if (MRI.hasOneDef(Reg))

  // Add output dependence to the next nearest defs of this vreg.
  //
  // Unless this definition is dead, the output dependence should be
  // transitively redundant with antidependencies from this definition's
  // uses. We're conservative for now until we have a way to guarantee the uses
  // are not eliminated sometime during scheduling. The output dependence edge
  // is also useful if output latency exceeds def-use latency.
  LaneBitmask LaneMask = DefLaneMask;
  for (VReg2SUnit &V2SU : make_range(CurrentVRegDefs.find(Reg),
                                     CurrentVRegDefs.end())) {
    // Ignore defs for other lanes.
    if ((V2SU.LaneMask & LaneMask).none())
    // Add an output dependence.
    SUnit *DefSU = V2SU.SU;
    // Ignore additional defs of the same lanes in one instruction. This can
    // happen because lanemasks are shared for targets with too many
    // subregisters. We also use some representation tricks/hacks where we
    // add super-register defs/uses, to imply that although we only access parts
    // of the reg we care about the full one.
    SDep Dep(SU, SDep::Output, Reg);
        SchedModel.computeOutputLatency(MI, OperIdx, DefSU->getInstr()));

    // Update current definition. This can get tricky if the def was about a
    // bigger lanemask before. We then have to shrink it and create a new
    // VReg2SUnit for the non-overlapping part.
    LaneBitmask OverlapMask = V2SU.LaneMask & LaneMask;
    LaneBitmask NonOverlapMask = V2SU.LaneMask & ~LaneMask;
    V2SU.LaneMask = OverlapMask;
    if (NonOverlapMask.any())
      CurrentVRegDefs.insert(VReg2SUnit(Reg, NonOverlapMask, DefSU));

  // If there was no CurrentVRegDefs entry for some lanes yet, create one.
  CurrentVRegDefs.insert(VReg2SUnit(Reg, LaneMask, SU));
/// Adds a register data dependency if the instruction that defines the
/// virtual register used at OperIdx is mapped to an SUnit. Add a register
/// antidependency from this SUnit to instructions that occur later in the same
/// scheduling region if they write the virtual register.
///
/// TODO: Handle ExitSU "uses" properly.
void ScheduleDAGInstrs::addVRegUseDeps(SUnit *SU, unsigned OperIdx) {
  const MachineInstr *MI = SU->getInstr();
  assert(!MI->isDebugOrPseudoInstr());

  const MachineOperand &MO = MI->getOperand(OperIdx);
  Register Reg = MO.getReg();

  // Remember the use. Data dependencies will be added when we find the def.
  LaneBitmask LaneMask = TrackLaneMasks ? getLaneMaskForMO(MO)
                                        : LaneBitmask::getAll();
  CurrentVRegUses.insert(VReg2SUnitOperIdx(Reg, LaneMask, OperIdx, SU));

  // Add antidependences to the following defs of the vreg.
  for (VReg2SUnit &V2SU : make_range(CurrentVRegDefs.find(Reg),
                                     CurrentVRegDefs.end())) {
    // Ignore defs for unrelated lanes.
    LaneBitmask PrevDefLaneMask = V2SU.LaneMask;
    if ((PrevDefLaneMask & LaneMask).none())
    V2SU.SU->addPred(SDep(SU, SDep::Anti, Reg));
/// Returns true if MI is an instruction we are unable to reason about
/// (like a call or something with unmodeled side effects).
static inline bool isGlobalMemoryObject(MachineInstr *MI) {
  return MI->isCall() || MI->hasUnmodeledSideEffects() ||
         (MI->hasOrderedMemoryRef() && !MI->isDereferenceableInvariantLoad());
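// Per the expression above, this covers e.g. calls, instructions with
// unmodeled side effects, and ordered memory references (such as volatile or
// atomic accesses) that are not simple invariant loads; buildSchedGraph()
// turns such an instruction into the barrier chain for the region.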
void ScheduleDAGInstrs::addChainDependency (SUnit *SUa, SUnit *SUb,
  if (SUa->getInstr()->mayAlias(AAForDep, *SUb->getInstr(), UseTBAA)) {
    SDep Dep(SUa, SDep::MayAliasMem);
    Dep.setLatency(Latency);
/// Creates an SUnit for each real instruction, numbered in top-down
/// topological order. The instruction order A < B implies that no edge exists
///
/// Map each real instruction to its SUnit.
///
/// After initSUnits, the SUnits vector cannot be resized and the scheduler may
/// hang onto SUnit pointers. We may relax this in the future by using SUnit IDs
/// instead of pointers.
///
/// MachineScheduler relies on initSUnits numbering the nodes by their order in
/// the original instruction list.
void ScheduleDAGInstrs::initSUnits() {
  // We'll be allocating one SUnit for each real instruction in the region,
  // which is contained within a basic block.
  SUnits.reserve(NumRegionInstrs);

  for (MachineInstr &MI : make_range(RegionBegin, RegionEnd)) {
    if (MI.isDebugOrPseudoInstr())

    SUnit *SU = newSUnit(&MI);
    MISUnitMap[&MI] = SU;

    SU->isCall = MI.isCall();
    SU->isCommutable = MI.isCommutable();

    // Assign the Latency field of SU using target-provided information.
    SU->Latency = SchedModel.computeInstrLatency(SU->getInstr());

    // If this SUnit uses a reserved or unbuffered resource, mark it as such.
    //
    // Reserved resources block an instruction from issuing and stall the
    // entire pipeline. These are identified by BufferSize=0.
    //
    // Unbuffered resources prevent execution of subsequent instructions that
    // require the same resources. This is used for in-order execution pipelines
    // within an out-of-order core. These are identified by BufferSize=1.
    if (SchedModel.hasInstrSchedModel()) {
      const MCSchedClassDesc *SC = getSchedClass(SU);
      for (const MCWriteProcResEntry &PRE :
           make_range(SchedModel.getWriteProcResBegin(SC),
                      SchedModel.getWriteProcResEnd(SC))) {
        switch (SchedModel.getProcResource(PRE.ProcResourceIdx)->BufferSize) {
          SU->hasReservedResource = true;
          SU->isUnbuffered = true;
class ScheduleDAGInstrs::Value2SUsMap : public MapVector<ValueType, SUList> {
  /// Current total number of SUs in map.
  unsigned NumNodes = 0;

  /// 1 for loads, 0 for stores. (see comment in SUList)
  unsigned TrueMemOrderLatency;

  Value2SUsMap(unsigned lat = 0) : TrueMemOrderLatency(lat) {}

  /// To keep NumNodes up to date, insert() is used instead of
  /// this operator w/ push_back().
  ValueType &operator[](const SUList &Key) {
    llvm_unreachable("Don't use. Use insert() instead."); };

  /// Adds SU to the SUList of V. If Map grows huge, reduce its size by calling
  void inline insert(SUnit *SU, ValueType V) {
    MapVector::operator[](V).push_back(SU);

  /// Clears the list of SUs mapped to V.
  void inline clearList(ValueType V) {
    iterator Itr = find(V);
      assert(NumNodes >= Itr->second.size());
      NumNodes -= Itr->second.size();

  /// Clears map from all contents.
    MapVector<ValueType, SUList>::clear();

  unsigned inline size() const { return NumNodes; }

  /// Counts the number of SUs in this map after a reduction.
  void reComputeSize() {
    for (auto &I : *this)
      NumNodes += I.second.size();

  unsigned inline getTrueMemOrderLatency() const {
    return TrueMemOrderLatency;
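  // Rough illustration: each underlying Value (or PseudoSourceValue) maps to
  // the list of memory SUs already visited that access it, so a newly visited
  // access to %a only needs chain edges against the SUs listed under %a rather
  // than against every memory access in the region. The map's
  // TrueMemOrderLatency (1 for the Loads map, 0 for the Stores map, as noted
  // above) is the latency used on those chain edges.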
void ScheduleDAGInstrs::addChainDependencies(SUnit *SU,
                                             Value2SUsMap &Val2SUsMap) {
  for (auto &I : Val2SUsMap)
    addChainDependencies(SU, I.second,
                         Val2SUsMap.getTrueMemOrderLatency());

void ScheduleDAGInstrs::addChainDependencies(SUnit *SU,
                                             Value2SUsMap &Val2SUsMap,
  Value2SUsMap::iterator Itr = Val2SUsMap.find(V);
  if (Itr != Val2SUsMap.end())
    addChainDependencies(SU, Itr->second,
                         Val2SUsMap.getTrueMemOrderLatency());
void ScheduleDAGInstrs::addBarrierChain(Value2SUsMap &map) {
  assert(BarrierChain != nullptr);

  for (auto &[V, SUs] : map) {
      SU->addPredBarrier(BarrierChain);
void ScheduleDAGInstrs::insertBarrierChain(Value2SUsMap &map) {
  assert(BarrierChain != nullptr);

  // Go through all lists of SUs.
  for (Value2SUsMap::iterator I = map.begin(), EE = map.end(); I != EE;) {
    Value2SUsMap::iterator CurrItr = I++;
    SUList &sus = CurrItr->second;
    SUList::iterator SUItr = sus.begin(), SUEE = sus.end();
    for (; SUItr != SUEE; ++SUItr) {
      // Stop on BarrierChain or any instruction above it.
      if ((*SUItr)->NodeNum <= BarrierChain->NodeNum)
      (*SUItr)->addPredBarrier(BarrierChain);

    // Remove also the BarrierChain from list if present.
    if (SUItr != SUEE && *SUItr == BarrierChain)

    // Remove all SUs that are now successors of BarrierChain.
    if (SUItr != sus.begin())
      sus.erase(sus.begin(), SUItr);

  // Remove all entries with empty su lists.
  map.remove_if([&](std::pair<ValueType, SUList> &mapEntry) {
    return (mapEntry.second.empty()); });

  // Recompute the size of the map (NumNodes).
void ScheduleDAGInstrs::buildSchedGraph(AAResults *AA,
                                        RegPressureTracker *RPTracker,
                                        PressureDiffs *PDiffs,
                                        bool TrackLaneMasks) {
  const TargetSubtargetInfo &ST = MF.getSubtarget();
  bool UseAA = EnableAASchedMI.getNumOccurrences() > 0 ? EnableAASchedMI
  AAForDep = UseAA ? AA : nullptr;

  BarrierChain = nullptr;

  this->TrackLaneMasks = TrackLaneMasks;
  ScheduleDAG::clearDAG();

  // Create an SUnit for each real instruction.
    PDiffs->init(SUnits.size());

  // We build scheduling units by walking a block's instruction list
  // from bottom to top.
  //
  // Each MI's memory operand(s) is analyzed to a list of underlying
  // objects. The SU is then inserted in the SUList(s) mapped from the
  // Value(s). Each Value thus gets mapped to lists of SUs depending
  // on it, stores and loads kept separately. Two SUs are trivially
  // non-aliasing if they both depend on only identified Values and do
  // not share any common Value.
  Value2SUsMap Stores, Loads(1 /*TrueMemOrderLatency*/);
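  // For example, a store whose only underlying object is the alloca %a and a
  // store whose only underlying object is a different identified value %b land
  // in separate SULists, so another access to %a gets chain edges only against
  // the %a list; an access whose underlying objects cannot be determined is
  // instead chained against everything via UnknownValue below.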
  // Certain memory accesses are known to not alias any SU in Stores
  // or Loads, and have therefore their own 'NonAlias'
  // domain. E.g. spill / reload instructions never alias LLVM IR
  // Values. It would be nice to assume that this type of memory
  // accesses always have a proper memory operand modelling, and are
  // therefore never unanalyzable, but this is conservatively not
  Value2SUsMap NonAliasStores, NonAliasLoads(1 /*TrueMemOrderLatency*/);

  // Track all instructions that may raise floating-point exceptions.
  // These do not depend on one another (or normal loads or stores), but
  // must not be rescheduled across global barriers. Note that we don't
  // really need a "map" here since we don't track those MIs by value;
  // using the same Value2SUsMap data type here is simply a matter of
  Value2SUsMap FPExceptions;
  // Remove any stale debug info; sometimes BuildSchedGraph is called again
  // without emitting the info from the previous call.
  FirstDbgValue = nullptr;

  assert(Defs.empty() && Uses.empty() &&
         "Only BuildGraph should update Defs/Uses");
  Defs.setUniverse(TRI->getNumRegs());
  Uses.setUniverse(TRI->getNumRegs());

  assert(CurrentVRegDefs.empty() && "nobody else should use CurrentVRegDefs");
  assert(CurrentVRegUses.empty() && "nobody else should use CurrentVRegUses");
  unsigned NumVirtRegs = MRI.getNumVirtRegs();
  CurrentVRegDefs.setUniverse(NumVirtRegs);
  CurrentVRegUses.setUniverse(NumVirtRegs);

  // Model data dependencies between instructions being scheduled and the
  addSchedBarrierDeps();
  // Walk the list of instructions, from bottom moving up.
  MachineInstr *DbgMI = nullptr;
  for (MachineBasicBlock::iterator MII = RegionEnd, MIE = RegionBegin;
    MachineInstr &MI = *std::prev(MII);
      DbgValues.emplace_back(DbgMI, &MI);

    if (MI.isDebugValue() || MI.isDebugPHI()) {

    if (MI.isDebugLabel() || MI.isDebugRef() || MI.isPseudoProbe())

    SUnit *SU = MISUnitMap[&MI];
    assert(SU && "No SUnit mapped to this MI");

      RegisterOperands RegOpers;
      RegOpers.collect(MI, *TRI, MRI, TrackLaneMasks, false);
      if (TrackLaneMasks) {
        SlotIndex SlotIdx = LIS->getInstructionIndex(MI);
        RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx);
      if (PDiffs != nullptr)
        PDiffs->addInstruction(SU->NodeNum, RegOpers, MRI);

      if (RPTracker->getPos() == RegionEnd || &*RPTracker->getPos() != &MI)
        RPTracker->recedeSkipDebugValues();
      assert(&*RPTracker->getPos() == &MI && "RPTracker in sync");
      RPTracker->recede(RegOpers);
           (CanHandleTerminators || (!MI.isTerminator() && !MI.isPosition())) &&
           "Cannot schedule terminators or labels!");

    // Add register-based dependencies (data, anti, and output).
    // For some instructions (calls, returns, inline-asm, etc.) there can
    // be explicit uses and implicit defs, in which case the use will appear
    // on the operand list before the def. Do two passes over the operand
    // list to make sure that defs are processed before any uses.
    bool HasVRegDef = false;
    for (unsigned j = 0, n = MI.getNumOperands(); j != n; ++j) {
      const MachineOperand &MO = MI.getOperand(j);
      if (!MO.isReg() || !MO.isDef())
      Register Reg = MO.getReg();
      if (Reg.isPhysical()) {
        addPhysRegDeps(SU, j);
      } else if (Reg.isVirtual()) {
        addVRegDefDeps(SU, j);

    // Now process all uses.
    for (unsigned j = 0, n = MI.getNumOperands(); j != n; ++j) {
      const MachineOperand &MO = MI.getOperand(j);
      // Only look at use operands.
      // We do not need to check for MO.readsReg() here because subsequent
      // subregister defs will get output dependence edges and need no
      // additional use dependencies.
      if (!MO.isReg() || !MO.isUse())
      Register Reg = MO.getReg();
      if (Reg.isPhysical()) {
        addPhysRegDeps(SU, j);
      } else if (Reg.isVirtual() && MO.readsReg()) {
        addVRegUseDeps(SU, j);

    // If we haven't seen any uses in this scheduling region, create a
    // dependence edge to ExitSU to model the live-out latency. This is required
    // for vreg defs with no in-region use, and prefetches with no vreg def.
    //
    // FIXME: NumDataSuccs would be more precise than NumSuccs here. This
    // check currently relies on being called before adding chain deps.
    if (SU->NumSuccs == 0 && SU->Latency > 1 && (HasVRegDef || MI.mayLoad())) {
      SDep Dep(SU, SDep::Artificial);
      Dep.setLatency(SU->Latency - 1);
    // Add memory dependencies (Note: isStoreToStackSlot and
    // isLoadFromStackSlot are not usable after stack slots are lowered to
    // actual addresses).
    //
    // This is a barrier event that acts as a pivotal node in the DAG.
    if (isGlobalMemoryObject(&MI)) {

      // Become the barrier chain.
        BarrierChain->addPredBarrier(SU);

      LLVM_DEBUG(dbgs() << "Global memory object and new barrier chain: SU("
                        << BarrierChain->NodeNum << ").\n";);

      // Add dependencies against everything below it and clear maps.
      addBarrierChain(Stores);
      addBarrierChain(Loads);
      addBarrierChain(NonAliasStores);
      addBarrierChain(NonAliasLoads);
      addBarrierChain(FPExceptions);

    // Instructions that may raise FP exceptions may not be moved
    // across any global barriers.
    if (MI.mayRaiseFPException()) {
        BarrierChain->addPredBarrier(SU);

      FPExceptions.insert(SU, UnknownValue);

      if (FPExceptions.size() >= HugeRegion) {
        LLVM_DEBUG(dbgs() << "Reducing FPExceptions map.\n";);
        reduceHugeMemNodeMaps(FPExceptions, empty, getReductionSize());
    // If it's not a store or a variant load, we're done.
    if (!MI.mayStore() &&
        !(MI.mayLoad() && !MI.isDereferenceableInvariantLoad()))

    // Always add dependency edge to BarrierChain if present.
      BarrierChain->addPredBarrier(SU);
    // Find the underlying objects for MI. The Objs vector is either
    // empty, or filled with the Values of memory locations which this
    UnderlyingObjectsVector Objs;
    bool ObjsFound = getUnderlyingObjectsForInstr(&MI, MFI, Objs,

        // An unknown store depends on all stores and loads.
        addChainDependencies(SU, Stores);
        addChainDependencies(SU, NonAliasStores);
        addChainDependencies(SU, Loads);
        addChainDependencies(SU, NonAliasLoads);

        // Map this store to 'UnknownValue'.
        Stores.insert(SU, UnknownValue);
        // Add precise dependencies against all previously seen memory
        // accesses mapped to the same Value(s).
        for (const UnderlyingObject &UnderlObj : Objs) {
          ValueType V = UnderlObj.getValue();
          bool ThisMayAlias = UnderlObj.mayAlias();

          // Add dependencies to previous stores and loads mapped to V.
          addChainDependencies(SU, (ThisMayAlias ? Stores : NonAliasStores), V);
          addChainDependencies(SU, (ThisMayAlias ? Loads : NonAliasLoads), V);

        // Update the store map after all chains have been added to avoid adding
        // self-loop edge if multiple underlying objects are present.
        for (const UnderlyingObject &UnderlObj : Objs) {
          ValueType V = UnderlObj.getValue();
          bool ThisMayAlias = UnderlObj.mayAlias();

          // Map this store to V.
          (ThisMayAlias ? Stores : NonAliasStores).insert(SU, V);

        // The store may have dependencies to unanalyzable loads and
        addChainDependencies(SU, Loads, UnknownValue);
        addChainDependencies(SU, Stores, UnknownValue);
    } else { // SU is a load.
        // An unknown load depends on all stores.
        addChainDependencies(SU, Stores);
        addChainDependencies(SU, NonAliasStores);

        Loads.insert(SU, UnknownValue);
        for (const UnderlyingObject &UnderlObj : Objs) {
          ValueType V = UnderlObj.getValue();
          bool ThisMayAlias = UnderlObj.mayAlias();

          // Add precise dependencies against all previously seen stores
          // mapping to the same Value(s).
          addChainDependencies(SU, (ThisMayAlias ? Stores : NonAliasStores), V);

          // Map this load to V.
          (ThisMayAlias ? Loads : NonAliasLoads).insert(SU, V);

        // The load may have dependencies to unanalyzable stores.
        addChainDependencies(SU, Stores, UnknownValue);
    // Reduce maps if they grow huge.
    if (Stores.size() + Loads.size() >= HugeRegion) {
      LLVM_DEBUG(dbgs() << "Reducing Stores and Loads maps.\n";);
      reduceHugeMemNodeMaps(Stores, Loads, getReductionSize());
    if (NonAliasStores.size() + NonAliasLoads.size() >= HugeRegion) {
          dbgs() << "Reducing NonAliasStores and NonAliasLoads maps.\n";);
      reduceHugeMemNodeMaps(NonAliasStores, NonAliasLoads, getReductionSize());

  FirstDbgValue = DbgMI;

  CurrentVRegDefs.clear();
  CurrentVRegUses.clear();
raw_ostream &llvm::operator<<(raw_ostream &OS, const PseudoSourceValue *PSV) {
  PSV->printCustom(OS);
void ScheduleDAGInstrs::Value2SUsMap::dump() {
  for (const auto &[ValType, SUs] : *this) {
    if (isa<const Value *>(ValType)) {
      const Value *V = cast<const Value *>(ValType);
      if (isa<UndefValue>(V))
        dbgs() << "Unknown";
        V->printAsOperand(dbgs());
    } else if (isa<const PseudoSourceValue *>(ValType))
      dbgs() << cast<const PseudoSourceValue *>(ValType);
      llvm_unreachable("Unknown Value type.");
void ScheduleDAGInstrs::reduceHugeMemNodeMaps(Value2SUsMap &stores,
                                              Value2SUsMap &loads, unsigned N) {
  LLVM_DEBUG(dbgs() << "Before reduction:\nStoring SUnits:\n"; stores.dump();
             dbgs() << "Loading SUnits:\n"; loads.dump());

  // Insert all SU's NodeNums into a vector and sort it.
  std::vector<unsigned> NodeNums;
  NodeNums.reserve(stores.size() + loads.size());
  for (const auto &[V, SUs] : stores) {
    for (const auto *SU : SUs)
      NodeNums.push_back(SU->NodeNum);
  for (const auto &[V, SUs] : loads) {
    for (const auto *SU : SUs)
      NodeNums.push_back(SU->NodeNum);
  llvm::sort(NodeNums);

  // The N last elements in NodeNums will be removed, and the SU with
  // the lowest NodeNum of them will become the new BarrierChain to
  // let the not yet seen SUs have a dependency to the removed SUs.
  assert(N <= NodeNums.size());
  SUnit *newBarrierChain = &SUnits[*(NodeNums.end() - N)];
    // The aliasing and non-aliasing maps reduce independently of each
    // other, but share a common BarrierChain. Check if the
    // newBarrierChain is above the former one. If it is not, it may
    // introduce a loop to use newBarrierChain, so keep the old one.
    if (newBarrierChain->NodeNum < BarrierChain->NodeNum) {
      BarrierChain->addPredBarrier(newBarrierChain);
      BarrierChain = newBarrierChain;
      LLVM_DEBUG(dbgs() << "Inserting new barrier chain: SU("
                        << BarrierChain->NodeNum << ").\n";);
      LLVM_DEBUG(dbgs() << "Keeping old barrier chain: SU("
                        << BarrierChain->NodeNum << ").\n";);
    BarrierChain = newBarrierChain;

  insertBarrierChain(stores);
  insertBarrierChain(loads);

  LLVM_DEBUG(dbgs() << "After reduction:\nStoring SUnits:\n"; stores.dump();
             dbgs() << "Loading SUnits:\n"; loads.dump());
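  // Worked example (with the default limits): once Stores and Loads together
  // hold HugeRegion = 1000 SUs, buildSchedGraph() calls this with N = 500
  // (HugeRegion / 2). The 500 largest NodeNums currently in the maps are
  // removed, and the smallest of those removed becomes the BarrierChain
  // (unless an older barrier higher up must be kept), so instructions visited
  // later in the bottom-up walk only need a single edge to the BarrierChain
  // instead of edges to each removed SU.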
static void toggleKills(const MachineRegisterInfo &MRI, LivePhysRegs &LiveRegs,
                        MachineInstr &MI, bool addToLiveRegs) {
  for (MachineOperand &MO : MI.operands()) {
    if (!MO.isReg() || !MO.readsReg())
    Register Reg = MO.getReg();

    // Things that are available after the instruction are killed by it.
    bool IsKill = LiveRegs.available(MRI, Reg);
    MO.setIsKill(IsKill);
      LiveRegs.addReg(Reg);
void ScheduleDAGInstrs::fixupKills(MachineBasicBlock &MBB) {
  LLVM_DEBUG(dbgs() << "Fixup kills for " << printMBBReference(MBB) << '\n');

  LiveRegs.init(*TRI);
  LiveRegs.addLiveOuts(MBB);

  // Examine block from end to start...
  for (MachineInstr &MI : llvm::reverse(MBB)) {
    if (MI.isDebugOrPseudoInstr())

    // Update liveness. Registers that are defed but not used in this
    // instruction are now dead. Mark register and all subregs as they
    // are completely defined.
    for (ConstMIBundleOperands O(MI); O.isValid(); ++O) {
      const MachineOperand &MO = *O;
        Register Reg = MO.getReg();
        LiveRegs.removeReg(Reg);
      } else if (MO.isRegMask()) {
        LiveRegs.removeRegsInMask(MO);

    // If there is a bundle header fix it up first.
    if (!MI.isBundled()) {
      toggleKills(MRI, LiveRegs, MI, true);
      MachineBasicBlock::instr_iterator Bundle = MI.getIterator();
        toggleKills(MRI, LiveRegs, MI, false);

      // Some targets make the (questionable) assumption that the instructions
      // inside the bundle are ordered and consequently only the last use of
      // a register inside the bundle can kill it.
      MachineBasicBlock::instr_iterator I = std::next(Bundle);
      while (I->isBundledWithSucc())
        if (!I->isDebugOrPseudoInstr())
          toggleKills(MRI, LiveRegs, *I, true);
      } while (I != Bundle);
void ScheduleDAGInstrs::dumpNode(const SUnit &SU) const {
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  if (SchedPrintCycles)
    dbgs() << " [TopReadyCycle = " << SU.TopReadyCycle
           << ", BottomReadyCycle = " << SU.BotReadyCycle << "]";
  SU.getInstr()->dump();
void ScheduleDAGInstrs::dump() const {
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  if (EntrySU.getInstr() != nullptr)
    dumpNodeAll(EntrySU);
  for (const SUnit &SU : SUnits)
  if (ExitSU.getInstr() != nullptr)
    dumpNodeAll(ExitSU);
std::string ScheduleDAGInstrs::getGraphNodeLabel(const SUnit *SU) const {
  raw_string_ostream oss(s);
  else if (SU == &ExitSU)
    SU->getInstr()->print(oss, /*IsStandalone=*/true);
/// Return the basic block label. It is not necessarily unique because a block
/// contains multiple scheduling regions. But it is fine for visualization.
std::string ScheduleDAGInstrs::getDAGName() const {
  return "dag." + BB->getFullName();
bool ScheduleDAGInstrs::canAddEdge(SUnit *SuccSU, SUnit *PredSU) {
  return SuccSU == &ExitSU || !Topo.IsReachable(PredSU, SuccSU);
bool ScheduleDAGInstrs::addEdge(SUnit *SuccSU, const SDep &PredDep) {
  if (SuccSU != &ExitSU) {
    // Do not use WillCreateCycle, it assumes SD scheduling.
    // If Pred is reachable from Succ, then the edge creates a cycle.
    if (Topo.IsReachable(PredDep.getSUnit(), SuccSU))
    Topo.AddPredQueued(SuccSU, PredDep.getSUnit());
  SuccSU->addPred(PredDep, /*Required=*/!PredDep.isArtificial());
  // Return true regardless of whether a new edge needed to be inserted.
//===----------------------------------------------------------------------===//
// SchedDFSResult Implementation
//===----------------------------------------------------------------------===//
/// Internal state used to compute SchedDFSResult.
class SchedDFSImpl {
  /// Join DAG nodes into equivalence classes by their subtree.
  IntEqClasses SubtreeClasses;
  /// List PredSU, SuccSU pairs that represent data edges between subtrees.
  std::vector<std::pair<const SUnit *, const SUnit *>> ConnectionPairs;

    unsigned ParentNodeID;      ///< Parent node (member of the parent subtree).
    unsigned SubInstrCount = 0; ///< Instr count in this tree only, not

    RootData(unsigned id): NodeID(id),
                           ParentNodeID(SchedDFSResult::InvalidSubtreeID) {}

    unsigned getSparseSetIndex() const { return NodeID; }

  SparseSet<RootData> RootSet;

  SchedDFSImpl(SchedDFSResult &r): R(r), SubtreeClasses(R.DFSNodeData.size()) {
    RootSet.setUniverse(R.DFSNodeData.size());
  /// Returns true if this node has been visited by the DFS traversal.
  ///
  /// During visitPostorderNode the Node's SubtreeID is assigned to the Node
  /// ID. Later, SubtreeID is updated but remains valid.
  bool isVisited(const SUnit *SU) const {
    return R.DFSNodeData[SU->NodeNum].SubtreeID
      != SchedDFSResult::InvalidSubtreeID;

  /// Initializes this node's instruction count. We don't need to flag the node
  /// visited until visitPostorder because the DAG cannot have cycles.
  void visitPreorder(const SUnit *SU) {
    R.DFSNodeData[SU->NodeNum].InstrCount =
        SU->getInstr()->isTransient() ? 0 : 1;
  /// Called once for each node after all predecessors are visited. Revisit this
  /// node's predecessors and potentially join them now that we know the ILP of
  /// the other predecessors.
  void visitPostorderNode(const SUnit *SU) {
    // Mark this node as the root of a subtree. It may be joined with its
    // successors later.
    R.DFSNodeData[SU->NodeNum].SubtreeID = SU->NodeNum;
    RootData RData(SU->NodeNum);
    RData.SubInstrCount = SU->getInstr()->isTransient() ? 0 : 1;

    // If any predecessors are still in their own subtree, they either cannot be
    // joined or are large enough to remain separate. If this parent node's
    // total instruction count is not greater than a child subtree by at least
    // the subtree limit, then try to join it now since splitting subtrees is
    // only useful if multiple high-pressure paths are possible.
    unsigned InstrCount = R.DFSNodeData[SU->NodeNum].InstrCount;
    for (const SDep &PredDep : SU->Preds) {
      if (PredDep.getKind() != SDep::Data)
      unsigned PredNum = PredDep.getSUnit()->NodeNum;
      if ((InstrCount - R.DFSNodeData[PredNum].InstrCount) < R.SubtreeLimit)
        joinPredSubtree(PredDep, SU, /*CheckLimit=*/false);

      // Either link or merge the TreeData entry from the child to the parent.
      if (R.DFSNodeData[PredNum].SubtreeID == PredNum) {
        // If the predecessor's parent is invalid, this is a tree edge and the
        // current node is the parent.
        if (RootSet[PredNum].ParentNodeID == SchedDFSResult::InvalidSubtreeID)
          RootSet[PredNum].ParentNodeID = SU->NodeNum;
      else if (RootSet.count(PredNum)) {
        // The predecessor is not a root, but is still in the root set. This
        // must be the new parent that it was just joined to. Note that
        // RootSet[PredNum].ParentNodeID may either be invalid or may still be
        // set to the original parent.
        RData.SubInstrCount += RootSet[PredNum].SubInstrCount;
        RootSet.erase(PredNum);

    RootSet[SU->NodeNum] = RData;
  /// Called once for each tree edge after calling visitPostOrderNode on
  /// the predecessor. Increment the parent node's instruction count and
  /// preemptively join this subtree to its parent's if it is small enough.
  void visitPostorderEdge(const SDep &PredDep, const SUnit *Succ) {
    R.DFSNodeData[Succ->NodeNum].InstrCount
      += R.DFSNodeData[PredDep.getSUnit()->NodeNum].InstrCount;
    joinPredSubtree(PredDep, Succ);

  /// Adds a connection for cross edges.
  void visitCrossEdge(const SDep &PredDep, const SUnit *Succ) {
    ConnectionPairs.emplace_back(PredDep.getSUnit(), Succ);
  /// Sets each node's subtree ID to the representative ID and record
  /// connections between trees.
    SubtreeClasses.compress();
    R.DFSTreeData.resize(SubtreeClasses.getNumClasses());
    assert(SubtreeClasses.getNumClasses() == RootSet.size()
           && "number of roots should match trees");
    for (const RootData &Root : RootSet) {
      unsigned TreeID = SubtreeClasses[Root.NodeID];
      if (Root.ParentNodeID != SchedDFSResult::InvalidSubtreeID)
        R.DFSTreeData[TreeID].ParentTreeID = SubtreeClasses[Root.ParentNodeID];
      R.DFSTreeData[TreeID].SubInstrCount = Root.SubInstrCount;
      // Note that SubInstrCount may be greater than InstrCount if we joined
      // subtrees across a cross edge. InstrCount will be attributed to the
      // original parent, while SubInstrCount will be attributed to the joined
    R.SubtreeConnections.resize(SubtreeClasses.getNumClasses());
    R.SubtreeConnectLevels.resize(SubtreeClasses.getNumClasses());
    LLVM_DEBUG(dbgs() << R.getNumSubtrees() << " subtrees:\n");
    for (unsigned Idx = 0, End = R.DFSNodeData.size(); Idx != End; ++Idx) {
      R.DFSNodeData[Idx].SubtreeID = SubtreeClasses[Idx];
      LLVM_DEBUG(dbgs() << " SU(" << Idx << ") in tree "
                        << R.DFSNodeData[Idx].SubtreeID << '\n');
    for (const auto &[Pred, Succ] : ConnectionPairs) {
      unsigned PredTree = SubtreeClasses[Pred->NodeNum];
      unsigned SuccTree = SubtreeClasses[Succ->NodeNum];
      if (PredTree == SuccTree)
      unsigned Depth = Pred->getDepth();
      addConnection(PredTree, SuccTree, Depth);
      addConnection(SuccTree, PredTree, Depth);
  /// Joins the predecessor subtree with the successor that is its DFS parent.
  /// Applies some heuristics before joining.
  bool joinPredSubtree(const SDep &PredDep, const SUnit *Succ,
                       bool CheckLimit = true) {
    assert(PredDep.getKind() == SDep::Data && "Subtrees are for data edges");

    // Check if the predecessor is already joined.
    const SUnit *PredSU = PredDep.getSUnit();
    unsigned PredNum = PredSU->NodeNum;
    if (R.DFSNodeData[PredNum].SubtreeID != PredNum)

    // Four is the magic number of successors before a node is considered a
    unsigned NumDataSucs = 0;
    for (const SDep &SuccDep : PredSU->Succs) {
      if (SuccDep.getKind() == SDep::Data) {
        if (++NumDataSucs >= 4)
    if (CheckLimit && R.DFSNodeData[PredNum].InstrCount > R.SubtreeLimit)
    R.DFSNodeData[PredNum].SubtreeID = Succ->NodeNum;
    SubtreeClasses.join(Succ->NodeNum, PredNum);
  /// Called by finalize() to record a connection between trees.
  void addConnection(unsigned FromTree, unsigned ToTree, unsigned Depth) {
      SmallVectorImpl<SchedDFSResult::Connection> &Connections =
          R.SubtreeConnections[FromTree];
      for (SchedDFSResult::Connection &C : Connections) {
        if (C.TreeID == ToTree) {
          C.Level = std::max(C.Level, Depth);
      Connections.push_back(SchedDFSResult::Connection(ToTree, Depth));
      FromTree = R.DFSTreeData[FromTree].ParentTreeID;
    } while (FromTree != SchedDFSResult::InvalidSubtreeID);

} // end namespace llvm
/// Manage the stack used by a reverse depth-first search over the DAG.
class SchedDAGReverseDFS {
  std::vector<std::pair<const SUnit *, SUnit::const_pred_iterator>> DFSStack;

  bool isComplete() const { return DFSStack.empty(); }

  void follow(const SUnit *SU) {
    DFSStack.emplace_back(SU, SU->Preds.begin());
  void advance() { ++DFSStack.back().second; }

  const SDep *backtrack() {
    DFSStack.pop_back();
    return DFSStack.empty() ? nullptr : std::prev(DFSStack.back().second);

  const SUnit *getCurr() const { return DFSStack.back().first; }

  SUnit::const_pred_iterator getPred() const { return DFSStack.back().second; }

  SUnit::const_pred_iterator getPredEnd() const {
    return getCurr()->Preds.end();

} // end anonymous namespace
static bool hasDataSucc(const SUnit *SU) {
  for (const SDep &SuccDep : SU->Succs) {
    if (SuccDep.getKind() == SDep::Data &&
        !SuccDep.getSUnit()->isBoundaryNode())
/// Computes an ILP metric for all nodes in the subDAG reachable via depth-first
/// search from this root.
void SchedDFSResult::compute(ArrayRef<SUnit> SUnits) {
    llvm_unreachable("Top-down ILP metric is unimplemented");

  SchedDFSImpl Impl(*this);
  for (const SUnit &SU : SUnits) {
    if (Impl.isVisited(&SU) || hasDataSucc(&SU))

    SchedDAGReverseDFS DFS;
    Impl.visitPreorder(&SU);
      // Traverse the leftmost path as far as possible.
      while (DFS.getPred() != DFS.getPredEnd()) {
        const SDep &PredDep = *DFS.getPred();

        // Ignore non-data edges.
        if (PredDep.getKind() != SDep::Data
            || PredDep.getSUnit()->isBoundaryNode()) {
        // An already visited edge is a cross edge, assuming an acyclic DAG.
        if (Impl.isVisited(PredDep.getSUnit())) {
          Impl.visitCrossEdge(PredDep, DFS.getCurr());
        Impl.visitPreorder(PredDep.getSUnit());
        DFS.follow(PredDep.getSUnit());
      // Visit the top of the stack in postorder and backtrack.
      const SUnit *Child = DFS.getCurr();
      const SDep *PredDep = DFS.backtrack();
      Impl.visitPostorderNode(Child);
        Impl.visitPostorderEdge(*PredDep, DFS.getCurr());
      if (DFS.isComplete())
/// The root of the given SubtreeID was just scheduled. For all subtrees
/// connected to this tree, record the depth of the connection so that the
/// nearest connected subtrees can be prioritized.
void SchedDFSResult::scheduleTree(unsigned SubtreeID) {
  for (const Connection &C : SubtreeConnections[SubtreeID]) {
    SubtreeConnectLevels[C.TreeID] =
      std::max(SubtreeConnectLevels[C.TreeID], C.Level);
    LLVM_DEBUG(dbgs() << " Tree: " << C.TreeID << " @"
                      << SubtreeConnectLevels[C.TreeID] << '\n');
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void ILPValue::print(raw_ostream &OS) const {
  OS << InstrCount << " / " << Length << " = ";
    OS << format("%g", ((double)InstrCount / Length));
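  // Example of the printed form: InstrCount = 6 and Length = 2 is printed as
  // "6 / 2 = 3", i.e. roughly three instructions available per step of the
  // critical path.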
LLVM_DUMP_METHOD void ILPValue::dump() const {
  dbgs() << *this << '\n';

LLVM_ATTRIBUTE_UNUSED
raw_ostream &operator<<(raw_ostream &OS, const ILPValue &Val) {

} // end namespace llvm