//===---- ScheduleDAGInstrs.cpp - MachineInstr Rescheduling ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file This implements the ScheduleDAGInstrs class, which implements
/// re-scheduling of MachineInstrs.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/ScheduleDAGInstrs.h"

#include "llvm/ADT/IntEqClasses.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SparseSet.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/RegisterPressure.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/ScheduleDFS.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

#define DEBUG_TYPE "machine-scheduler"

static cl::opt<bool>
    EnableAASchedMI("enable-aa-sched-mi", cl::Hidden,
                    cl::desc("Enable use of AA during MI DAG construction"));

static cl::opt<bool> UseTBAA("use-tbaa-in-sched-mi", cl::Hidden, cl::init(true),
    cl::desc("Enable use of TBAA during MI DAG construction"));

// Note: the two options below might be used in tuning compile time vs
// output quality. Setting HugeRegion so large that it will never be
// reached means best-effort, but may be slow.

// When Stores and Loads maps (or NonAliasStores and NonAliasLoads)
// together hold this many SUs, a reduction of maps will be done.
static cl::opt<unsigned> HugeRegion(
    "dag-maps-huge-region", cl::Hidden, cl::init(1000),
    cl::desc("The limit to use while constructing the DAG "
             "prior to scheduling, at which point a trade-off "
             "is made to avoid excessive compile time."));

static cl::opt<unsigned> ReductionSize(
    "dag-maps-reduction-size", cl::Hidden,
    cl::desc("A huge scheduling region will have maps reduced by this many "
             "nodes at a time. Defaults to HugeRegion / 2."));
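// Both knobs above are ordinary cl::opt flags, so (purely as an illustration,
// with arbitrary values rather than recommended settings) a tool built with
// command-line option parsing enabled can override them directly, e.g.:
//   llc -dag-maps-huge-region=50000 -dag-maps-reduction-size=500 foo.mir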
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
static cl::opt<bool> SchedPrintCycles(
    "sched-print-cycles", cl::Hidden, cl::init(false),
    cl::desc("Report top/bottom cycles when dumping SUnit instances"));
#endif
static unsigned getReductionSize() {
  // Always reduce a huge region with half of the elements, except
  // when user sets this number explicitly.
  if (ReductionSize.getNumOccurrences() == 0)
    return HugeRegion / 2;
  return ReductionSize;
}
static void dumpSUList(const ScheduleDAGInstrs::SUList &L) {
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  dbgs() << "{ ";
  for (const SUnit *SU : L) {
    dbgs() << "SU(" << SU->NodeNum << ")";
    if (SU != L.back())
      dbgs() << ", ";
  }
  dbgs() << "}\n";
#endif
}
ScheduleDAGInstrs::ScheduleDAGInstrs(MachineFunction &mf,
                                     const MachineLoopInfo *mli,
                                     bool RemoveKillFlags)
    : ScheduleDAG(mf), MLI(mli), MFI(mf.getFrameInfo()),
      RemoveKillFlags(RemoveKillFlags),
      UnknownValue(UndefValue::get(
          Type::getVoidTy(mf.getFunction().getContext()))),
      Topo(SUnits, &ExitSU) {
  DbgValues.clear();

  const TargetSubtargetInfo &ST = mf.getSubtarget();
  SchedModel.init(&ST);
}
/// If this machine instr has memory reference information and it can be
/// tracked to a normal reference to a known object, return the Value
/// for that object. This function returns false if the memory location is
/// unknown or may alias anything.
static bool getUnderlyingObjectsForInstr(const MachineInstr *MI,
                                         const MachineFrameInfo &MFI,
                                         UnderlyingObjectsVector &Objects,
                                         const DataLayout &DL) {
  auto AllMMOsOkay = [&]() {
    for (const MachineMemOperand *MMO : MI->memoperands()) {
      // TODO: Figure out whether isAtomic is really necessary (see D57601).
      if (MMO->isVolatile() || MMO->isAtomic())
        return false;

      if (const PseudoSourceValue *PSV = MMO->getPseudoValue()) {
        // Functions that contain tail calls don't have unique PseudoSourceValue
        // objects. Two PseudoSourceValues might refer to the same or
        // overlapping locations. The client code calling this function assumes
        // this is not the case. So return a conservative answer of no known
        // object.
        if (MFI.hasTailCall())
          return false;

        // For now, ignore PseudoSourceValues which may alias LLVM IR values
        // because the code that uses this function has no way to cope with
        // such aliases.
        if (PSV->isAliased(&MFI))
          return false;

        bool MayAlias = PSV->mayAlias(&MFI);
        Objects.emplace_back(PSV, MayAlias);
      } else if (const Value *V = MMO->getValue()) {
        SmallVector<Value *, 4> Objs;
        if (!getUnderlyingObjectsForCodeGen(V, Objs))
          return false;

        for (Value *V : Objs) {
          assert(isIdentifiedObject(V));
          Objects.emplace_back(V, true);
        }
      } else
        return false;
    }
    return true;
  };

  if (!AllMMOsOkay()) {
    Objects.clear();
    return false;
  }

  return true;
}
void ScheduleDAGInstrs::startBlock(MachineBasicBlock *bb) {
  BB = bb;
}

void ScheduleDAGInstrs::finishBlock() {
  // Subclasses should no longer refer to the old block.
  BB = nullptr;
}

void ScheduleDAGInstrs::enterRegion(MachineBasicBlock *bb,
                                    MachineBasicBlock::iterator begin,
                                    MachineBasicBlock::iterator end,
                                    unsigned regioninstrs) {
  assert(bb == BB && "startBlock should set BB");
  RegionBegin = begin;
  RegionEnd = end;
  NumRegionInstrs = regioninstrs;
}

void ScheduleDAGInstrs::exitRegion() {
  // Nothing to do.
}
void ScheduleDAGInstrs::addSchedBarrierDeps() {
  MachineInstr *ExitMI =
      RegionEnd != BB->end()
          ? &*skipDebugInstructionsBackward(RegionEnd, RegionBegin)
          : nullptr;
  ExitSU.setInstr(ExitMI);
  // Add dependencies on the defs and uses of the instruction.
  if (ExitMI) {
    const MCInstrDesc &MIDesc = ExitMI->getDesc();
    for (const MachineOperand &MO : ExitMI->all_uses()) {
      unsigned OpIdx = MO.getOperandNo();
      Register Reg = MO.getReg();
      if (Reg.isPhysical()) {
        // addPhysRegDataDeps uses the provided operand index to retrieve
        // the operand use cycle from the scheduling model. If the operand
        // is "fake" (e.g., an operand of a call instruction used to pass
        // an argument to the called function), the scheduling model may not
        // have an entry for it. If this is the case, pass -1 as operand index,
        // which will cause addPhysRegDataDeps to add an artificial dependency.
        // FIXME: Using hasImplicitUseOfPhysReg here is inaccurate as it misses
        // aliases. When fixing, make sure to update addPhysRegDataDeps, too.
        bool IsRealUse = OpIdx < MIDesc.getNumOperands() ||
                         MIDesc.hasImplicitUseOfPhysReg(Reg);
        for (MCRegUnit Unit : TRI->regunits(Reg))
          Uses.insert(PhysRegSUOper(&ExitSU, IsRealUse ? OpIdx : -1, Unit));
      } else if (Reg.isVirtual() && MO.readsReg()) {
        addVRegUseDeps(&ExitSU, OpIdx);
      }
    }
  }

  if (!ExitMI || (!ExitMI->isCall() && !ExitMI->isBarrier())) {
    // For others, e.g. fallthrough, conditional branch, assume the exit
    // uses all the registers that are livein to the successor blocks.
    for (const MachineBasicBlock *Succ : BB->successors()) {
      for (const auto &LI : Succ->liveins()) {
        for (MCRegUnitMaskIterator U(LI.PhysReg, TRI); U.isValid(); ++U) {
          auto [Unit, Mask] = *U;
          if ((Mask & LI.LaneMask).any() && !Uses.contains(Unit))
            Uses.insert(PhysRegSUOper(&ExitSU, -1, Unit));
        }
      }
    }
  }
}
/// MO is an operand of SU's instruction that defines a physical register. Adds
/// data dependencies from SU to any uses of the physical register.
void ScheduleDAGInstrs::addPhysRegDataDeps(SUnit *SU, unsigned OperIdx) {
  const MachineOperand &MO = SU->getInstr()->getOperand(OperIdx);
  assert(MO.isDef() && "expect physreg def");
  Register Reg = MO.getReg();

  // Ask the target if address-backscheduling is desirable, and if so how much.
  const TargetSubtargetInfo &ST = MF.getSubtarget();

  // Only use any non-zero latency for real defs/uses, in contrast to
  // "fake" operands added by regalloc.
  const MCInstrDesc &DefMIDesc = SU->getInstr()->getDesc();
  bool ImplicitPseudoDef = (OperIdx >= DefMIDesc.getNumOperands() &&
                            !DefMIDesc.hasImplicitDefOfPhysReg(Reg));
  for (MCRegUnit Unit : TRI->regunits(Reg)) {
    for (RegUnit2SUnitsMap::iterator I = Uses.find(Unit); I != Uses.end();
         ++I) {
      SUnit *UseSU = I->SU;
      if (UseSU == SU)
        continue;

      // Adjust the dependence latency using operand def/use information,
      // then allow the target to perform its own adjustments.
      MachineInstr *UseInstr = nullptr;
      int UseOpIdx = I->OpIdx;
      bool ImplicitPseudoUse = false;
      SDep Dep;
      if (UseOpIdx < 0) {
        Dep = SDep(SU, SDep::Artificial);
      } else {
        // Set the hasPhysRegDefs only for physreg defs that have a use within
        // the scheduling region.
        SU->hasPhysRegDefs = true;

        UseInstr = UseSU->getInstr();
        Register UseReg = UseInstr->getOperand(UseOpIdx).getReg();
        const MCInstrDesc &UseMIDesc = UseInstr->getDesc();
        ImplicitPseudoUse = UseOpIdx >= ((int)UseMIDesc.getNumOperands()) &&
                            !UseMIDesc.hasImplicitUseOfPhysReg(UseReg);

        Dep = SDep(SU, SDep::Data, UseReg);
      }
      if (!ImplicitPseudoDef && !ImplicitPseudoUse) {
        Dep.setLatency(SchedModel.computeOperandLatency(SU->getInstr(), OperIdx,
                                                        UseInstr, UseOpIdx));
      } else {
        Dep.setLatency(0);
      }
      ST.adjustSchedDependency(SU, OperIdx, UseSU, UseOpIdx, Dep, &SchedModel);
      UseSU->addPred(Dep);
    }
  }
}
/// Adds register dependencies (data, anti, and output) from this SUnit
/// to following instructions in the same scheduling region that depend on the
/// physical register referenced at OperIdx.
void ScheduleDAGInstrs::addPhysRegDeps(SUnit *SU, unsigned OperIdx) {
  MachineInstr *MI = SU->getInstr();
  MachineOperand &MO = MI->getOperand(OperIdx);
  Register Reg = MO.getReg();
  // We do not need to track any dependencies for constant registers.
  if (MRI.isConstantPhysReg(Reg))
    return;

  const TargetSubtargetInfo &ST = MF.getSubtarget();

  // Optionally add output and anti dependencies. For anti
  // dependencies we use a latency of 0 because for a multi-issue
  // target we want to allow the defining instruction to issue
  // in the same cycle as the using instruction.
  // TODO: Using a latency of 1 here for output dependencies assumes
  //       there's no cost for reusing registers.
  SDep::Kind Kind = MO.isUse() ? SDep::Anti : SDep::Output;
  for (MCRegUnit Unit : TRI->regunits(Reg)) {
    for (RegUnit2SUnitsMap::iterator I = Defs.find(Unit); I != Defs.end();
         ++I) {
      SUnit *DefSU = I->SU;
      if (DefSU == &ExitSU)
        continue;
      MachineInstr *DefInstr = DefSU->getInstr();
      MachineOperand &DefMO = DefInstr->getOperand(I->OpIdx);
      if (DefSU != SU &&
          (Kind != SDep::Output || !MO.isDead() || !DefMO.isDead())) {
        SDep Dep(SU, Kind, DefMO.getReg());
        if (Kind != SDep::Anti) {
          Dep.setLatency(
              SchedModel.computeOutputLatency(MI, OperIdx, DefInstr));
        }
        ST.adjustSchedDependency(SU, OperIdx, DefSU, I->OpIdx, Dep,
                                 &SchedModel);
        DefSU->addPred(Dep);
      }
    }
  }

  if (!MO.isDef()) {
    SU->hasPhysRegUses = true;
    // Either insert a new Reg2SUnits entry with an empty SUnits list, or
    // retrieve the existing SUnits list for this register's uses.
    // Push this SUnit on the use list.
    for (MCRegUnit Unit : TRI->regunits(Reg))
      Uses.insert(PhysRegSUOper(SU, OperIdx, Unit));
    if (RemoveKillFlags)
      MO.setIsKill(false);
  } else {
    addPhysRegDataDeps(SU, OperIdx);

    // Clear previous uses and defs of this register and its subregisters.
    for (MCRegUnit Unit : TRI->regunits(Reg)) {
      Uses.eraseAll(Unit);
      if (!MO.isDead())
        Defs.eraseAll(Unit);
    }

    if (MO.isDead() && SU->isCall) {
      // Calls will not be reordered because of chain dependencies (see
      // below). Since call operands are dead, calls may continue to be added
      // to the DefList making dependence checking quadratic in the size of
      // the block. Instead, we leave only one call at the back of the
      // DefList.
      for (MCRegUnit Unit : TRI->regunits(Reg)) {
        RegUnit2SUnitsMap::RangePair P = Defs.equal_range(Unit);
        RegUnit2SUnitsMap::iterator B = P.first;
        RegUnit2SUnitsMap::iterator I = P.second;
        for (bool isBegin = I == B; !isBegin; /* empty */) {
          isBegin = (--I) == B;
          if (!I->SU->isCall)
            break;
          I = Defs.erase(I);
        }
      }
    }

    // Defs are pushed in the order they are visited and never reordered.
    for (MCRegUnit Unit : TRI->regunits(Reg))
      Defs.insert(PhysRegSUOper(SU, OperIdx, Unit));
  }
}
LaneBitmask ScheduleDAGInstrs::getLaneMaskForMO(const MachineOperand &MO) const {
  Register Reg = MO.getReg();
  // No point in tracking lanemasks if we don't have interesting subregisters.
  const TargetRegisterClass &RC = *MRI.getRegClass(Reg);
  if (!RC.HasDisjunctSubRegs)
    return LaneBitmask::getAll();

  unsigned SubReg = MO.getSubReg();
  if (SubReg == 0)
    return RC.getLaneMask();
  return TRI->getSubRegIndexLaneMask(SubReg);
}
bool ScheduleDAGInstrs::deadDefHasNoUse(const MachineOperand &MO) {
  auto RegUse = CurrentVRegUses.find(MO.getReg());
  if (RegUse == CurrentVRegUses.end())
    return true;
  return (RegUse->LaneMask & getLaneMaskForMO(MO)).none();
}
/// Adds register output and data dependencies from this SUnit to instructions
/// that occur later in the same scheduling region if they read from or write to
/// the virtual register defined at OperIdx.
///
/// TODO: Hoist loop induction variable increments. This has to be
/// reevaluated. Generally, IV scheduling should be done before coalescing.
void ScheduleDAGInstrs::addVRegDefDeps(SUnit *SU, unsigned OperIdx) {
  MachineInstr *MI = SU->getInstr();
  MachineOperand &MO = MI->getOperand(OperIdx);
  Register Reg = MO.getReg();

  LaneBitmask DefLaneMask;
  LaneBitmask KillLaneMask;
  if (TrackLaneMasks) {
    bool IsKill = MO.getSubReg() == 0 || MO.isUndef();
    DefLaneMask = getLaneMaskForMO(MO);
    // If we have a <read-undef> flag, none of the lane values comes from an
    // earlier instruction.
    KillLaneMask = IsKill ? LaneBitmask::getAll() : DefLaneMask;

    if (MO.getSubReg() != 0 && MO.isUndef()) {
      // There may be other subregister defs on the same instruction of the same
      // register in later operands. The lanes of other defs will now be live
      // after this instruction, so these should not be treated as killed by the
      // instruction even though they appear to be killed in this one operand.
      for (const MachineOperand &OtherMO :
           llvm::drop_begin(MI->operands(), OperIdx + 1))
        if (OtherMO.isReg() && OtherMO.isDef() && OtherMO.getReg() == Reg)
          KillLaneMask &= ~getLaneMaskForMO(OtherMO);
    }

    // Clear undef flag, we'll re-add it later once we know which subregister
    // Def first occurs.
    MO.setIsUndef(false);
  } else {
    DefLaneMask = LaneBitmask::getAll();
    KillLaneMask = LaneBitmask::getAll();
  }

  if (MO.isDead()) {
    assert(deadDefHasNoUse(MO) && "Dead defs should have no uses");
  } else {
    // Add data dependence to all uses we found so far.
    const TargetSubtargetInfo &ST = MF.getSubtarget();
    for (VReg2SUnitOperIdxMultiMap::iterator I = CurrentVRegUses.find(Reg),
         E = CurrentVRegUses.end(); I != E; /*empty*/) {
      LaneBitmask LaneMask = I->LaneMask;
      // Ignore uses of other lanes.
      if ((LaneMask & KillLaneMask).none()) {
        ++I;
        continue;
      }

      if ((LaneMask & DefLaneMask).any()) {
        SUnit *UseSU = I->SU;
        MachineInstr *Use = UseSU->getInstr();
        SDep Dep(SU, SDep::Data, Reg);
        Dep.setLatency(SchedModel.computeOperandLatency(MI, OperIdx, Use,
                                                        I->OperandIndex));
        ST.adjustSchedDependency(SU, OperIdx, UseSU, I->OperandIndex, Dep,
                                 &SchedModel);
        UseSU->addPred(Dep);
      }

      LaneMask &= ~KillLaneMask;
      // If we found a Def for all lanes of this use, remove it from the list.
      if (LaneMask.any()) {
        I->LaneMask = LaneMask;
        ++I;
      } else
        I = CurrentVRegUses.erase(I);
    }
  }

  // Shortcut: Singly defined vregs do not have output/anti dependencies.
  if (MRI.hasOneDef(Reg))
    return;

  // Add output dependence to the next nearest defs of this vreg.
  //
  // Unless this definition is dead, the output dependence should be
  // transitively redundant with antidependencies from this definition's
  // uses. We're conservative for now until we have a way to guarantee the uses
  // are not eliminated sometime during scheduling. The output dependence edge
  // is also useful if output latency exceeds def-use latency.
  LaneBitmask LaneMask = DefLaneMask;
  for (VReg2SUnit &V2SU : make_range(CurrentVRegDefs.find(Reg),
                                     CurrentVRegDefs.end())) {
    // Ignore defs for other lanes.
    if ((V2SU.LaneMask & LaneMask).none())
      continue;
    // Add an output dependence.
    SUnit *DefSU = V2SU.SU;
    // Ignore additional defs of the same lanes in one instruction. This can
    // happen because lanemasks are shared for targets with too many
    // subregisters. We also use some representration tricks/hacks where we
    // add super-register defs/uses, to imply that although we only access parts
    // of the reg we care about the full one.
    if (DefSU == SU)
      continue;
    SDep Dep(SU, SDep::Output, Reg);
    Dep.setLatency(
        SchedModel.computeOutputLatency(MI, OperIdx, DefSU->getInstr()));
    DefSU->addPred(Dep);

    // Update current definition. This can get tricky if the def was about a
    // bigger lanemask before. We then have to shrink it and create a new
    // VReg2SUnit for the non-overlapping part.
    LaneBitmask OverlapMask = V2SU.LaneMask & LaneMask;
    LaneBitmask NonOverlapMask = V2SU.LaneMask & ~LaneMask;
    V2SU.SU = SU;
    V2SU.LaneMask = OverlapMask;
    if (NonOverlapMask.any())
      CurrentVRegDefs.insert(VReg2SUnit(Reg, NonOverlapMask, DefSU));
  }
  // If there was no CurrentVRegDefs entry for some lanes yet, create one.
  if (LaneMask.any())
    CurrentVRegDefs.insert(VReg2SUnit(Reg, LaneMask, SU));
}
/// Adds a register data dependency if the instruction that defines the
/// virtual register used at OperIdx is mapped to an SUnit. Add a register
/// antidependency from this SUnit to instructions that occur later in the same
/// scheduling region if they write the virtual register.
///
/// TODO: Handle ExitSU "uses" properly.
void ScheduleDAGInstrs::addVRegUseDeps(SUnit *SU, unsigned OperIdx) {
  const MachineInstr *MI = SU->getInstr();
  assert(!MI->isDebugOrPseudoInstr());

  const MachineOperand &MO = MI->getOperand(OperIdx);
  Register Reg = MO.getReg();

  // Remember the use. Data dependencies will be added when we find the def.
  LaneBitmask LaneMask = TrackLaneMasks ? getLaneMaskForMO(MO)
                                        : LaneBitmask::getAll();
  CurrentVRegUses.insert(VReg2SUnitOperIdx(Reg, LaneMask, OperIdx, SU));

  // Add antidependences to the following defs of the vreg.
  for (VReg2SUnit &V2SU : make_range(CurrentVRegDefs.find(Reg),
                                     CurrentVRegDefs.end())) {
    // Ignore defs for unrelated lanes.
    LaneBitmask PrevDefLaneMask = V2SU.LaneMask;
    if ((PrevDefLaneMask & LaneMask).none())
      continue;
    if (V2SU.SU == SU)
      continue;

    V2SU.SU->addPred(SDep(SU, SDep::Anti, Reg));
  }
}
void ScheduleDAGInstrs::addChainDependency(SUnit *SUa, SUnit *SUb,
                                           unsigned Latency) {
  if (SUa->getInstr()->mayAlias(getAAForDep(), *SUb->getInstr(), UseTBAA)) {
    SDep Dep(SUa, SDep::MayAliasMem);
    Dep.setLatency(Latency);
    SUb->addPred(Dep);
  }
}
/// Creates an SUnit for each real instruction, numbered in top-down
/// topological order. The instruction order A < B implies that no edge exists
/// from B to A.
///
/// Map each real instruction to its SUnit.
///
/// After initSUnits, the SUnits vector cannot be resized and the scheduler may
/// hang onto SUnit pointers. We may relax this in the future by using SUnit IDs
/// instead of pointers.
///
/// MachineScheduler relies on initSUnits numbering the nodes by their order in
/// the original instruction list.
void ScheduleDAGInstrs::initSUnits() {
  // We'll be allocating one SUnit for each real instruction in the region,
  // which is contained within a basic block.
  SUnits.reserve(NumRegionInstrs);

  for (MachineInstr &MI : make_range(RegionBegin, RegionEnd)) {
    if (MI.isDebugOrPseudoInstr())
      continue;

    SUnit *SU = newSUnit(&MI);
    MISUnitMap[&MI] = SU;

    SU->isCall = MI.isCall();
    SU->isCommutable = MI.isCommutable();

    // Assign the Latency field of SU using target-provided information.
    SU->Latency = SchedModel.computeInstrLatency(SU->getInstr());

    // If this SUnit uses a reserved or unbuffered resource, mark it as such.
    //
    // Reserved resources block an instruction from issuing and stall the
    // entire pipeline. These are identified by BufferSize=0.
    //
    // Unbuffered resources prevent execution of subsequent instructions that
    // require the same resources. This is used for in-order execution pipelines
    // within an out-of-order core. These are identified by BufferSize=1.
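    // As a hypothetical illustration (not taken from any in-tree target), a
    // scheduling model could declare
    //   def MyDivUnit : ProcResource<1> { let BufferSize = 0; }
    // in TableGen; SUs writing such a resource would then get
    // hasReservedResource set below, while BufferSize = 1 would instead set
    // isUnbuffered.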
    if (SchedModel.hasInstrSchedModel()) {
      const MCSchedClassDesc *SC = getSchedClass(SU);
      for (const MCWriteProcResEntry &PRE :
           make_range(SchedModel.getWriteProcResBegin(SC),
                      SchedModel.getWriteProcResEnd(SC))) {
        switch (SchedModel.getProcResource(PRE.ProcResourceIdx)->BufferSize) {
        case 0:
          SU->hasReservedResource = true;
          break;
        case 1:
          SU->isUnbuffered = true;
          break;
        default:
          break;
        }
      }
    }
  }
}
class ScheduleDAGInstrs::Value2SUsMap
    : public SmallMapVector<ValueType, SUList, 4> {
  /// Current total number of SUs in map.
  unsigned NumNodes = 0;

  /// 1 for loads, 0 for stores. (see comment in SUList)
  unsigned TrueMemOrderLatency;

public:
  Value2SUsMap(unsigned lat = 0) : TrueMemOrderLatency(lat) {}

  /// To keep NumNodes up to date, insert() is used instead of
  /// this operator w/ push_back().
  ValueType &operator[](const SUList &Key) {
    llvm_unreachable("Don't use. Use insert() instead.");
  }

  /// Adds SU to the SUList of V. If Map grows huge, reduce its size by calling
  /// reduce().
  void inline insert(SUnit *SU, ValueType V) {
    MapVector::operator[](V).push_back(SU);
    NumNodes++;
  }

  /// Clears the list of SUs mapped to V.
  void inline clearList(ValueType V) {
    iterator Itr = find(V);
    if (Itr != end()) {
      assert(NumNodes >= Itr->second.size());
      NumNodes -= Itr->second.size();

      Itr->second.clear();
    }
  }

  /// Clears map from all contents.
  void clear() {
    SmallMapVector<ValueType, SUList, 4>::clear();
    NumNodes = 0;
  }

  unsigned inline size() const { return NumNodes; }

  /// Counts the number of SUs in this map after a reduction.
  void reComputeSize() {
    NumNodes = 0;
    for (auto &I : *this)
      NumNodes += I.second.size();
  }

  unsigned inline getTrueMemOrderLatency() const {
    return TrueMemOrderLatency;
  }

  void dump();
};
void ScheduleDAGInstrs::addChainDependencies(SUnit *SU,
                                             Value2SUsMap &Val2SUsMap) {
  for (auto &I : Val2SUsMap)
    addChainDependencies(SU, I.second,
                         Val2SUsMap.getTrueMemOrderLatency());
}

void ScheduleDAGInstrs::addChainDependencies(SUnit *SU,
                                             Value2SUsMap &Val2SUsMap,
                                             ValueType V) {
  Value2SUsMap::iterator Itr = Val2SUsMap.find(V);
  if (Itr != Val2SUsMap.end())
    addChainDependencies(SU, Itr->second,
                         Val2SUsMap.getTrueMemOrderLatency());
}
void ScheduleDAGInstrs::addBarrierChain(Value2SUsMap &map) {
  assert(BarrierChain != nullptr);

  for (auto &[V, SUs] : map) {
    (void)V;
    for (auto *SU : SUs)
      SU->addPredBarrier(BarrierChain);
  }
  map.clear();
}
void ScheduleDAGInstrs::insertBarrierChain(Value2SUsMap &map) {
  assert(BarrierChain != nullptr);

  // Go through all lists of SUs.
  for (Value2SUsMap::iterator I = map.begin(), EE = map.end(); I != EE;) {
    Value2SUsMap::iterator CurrItr = I++;
    SUList &sus = CurrItr->second;
    SUList::iterator SUItr = sus.begin(), SUEE = sus.end();
    for (; SUItr != SUEE; ++SUItr) {
      // Stop on BarrierChain or any instruction above it.
      if ((*SUItr)->NodeNum <= BarrierChain->NodeNum)
        break;

      (*SUItr)->addPredBarrier(BarrierChain);
    }

    // Remove also the BarrierChain from list if present.
    if (SUItr != SUEE && *SUItr == BarrierChain)
      ++SUItr;

    // Remove all SUs that are now successors of BarrierChain.
    if (SUItr != sus.begin())
      sus.erase(sus.begin(), SUItr);
  }

  // Remove all entries with empty su lists.
  map.remove_if([&](std::pair<ValueType, SUList> &mapEntry) {
    return (mapEntry.second.empty()); });

  // Recompute the size of the map (NumNodes).
  map.reComputeSize();
}
void ScheduleDAGInstrs::buildSchedGraph(AAResults *AA,
                                        RegPressureTracker *RPTracker,
                                        PressureDiffs *PDiffs,
                                        LiveIntervals *LIS,
                                        bool TrackLaneMasks) {
  const TargetSubtargetInfo &ST = MF.getSubtarget();
  bool UseAA = EnableAASchedMI.getNumOccurrences() > 0 ? EnableAASchedMI
                                                       : ST.useAA();
  if (UseAA && AA)
    AAForDep.emplace(*AA);

  BarrierChain = nullptr;

  this->TrackLaneMasks = TrackLaneMasks;
  MISUnitMap.clear();
  ScheduleDAG::clearDAG();

  // Create an SUnit for each real instruction.
  initSUnits();

  if (PDiffs)
    PDiffs->init(SUnits.size());

  // We build scheduling units by walking a block's instruction list
  // from bottom to top.

  // Each MI's memory operand(s) is analyzed to a list of underlying
  // objects. The SU is then inserted in the SUList(s) mapped from the
  // Value(s). Each Value thus gets mapped to lists of SUs depending
  // on it, stores and loads kept separately. Two SUs are trivially
  // non-aliasing if they both depend on only identified Values and do
  // not share any common Value.
  Value2SUsMap Stores, Loads(1 /*TrueMemOrderLatency*/);

  // Certain memory accesses are known to not alias any SU in Stores
  // or Loads, and have therefore their own 'NonAlias'
  // domain. E.g. spill / reload instructions never alias LLVM IR
  // Values. It would be nice to assume that this type of memory
  // accesses always have a proper memory operand modelling, and are
  // therefore never unanalyzable, but this is conservatively not
  // done.
  Value2SUsMap NonAliasStores, NonAliasLoads(1 /*TrueMemOrderLatency*/);
  // Track all instructions that may raise floating-point exceptions.
  // These do not depend on one another (or normal loads or stores), but
  // must not be rescheduled across global barriers. Note that we don't
  // really need a "map" here since we don't track those MIs by value;
  // using the same Value2SUsMap data type here is simply a matter of
  // convenience.
  Value2SUsMap FPExceptions;

  // Remove any stale debug info; sometimes BuildSchedGraph is called again
  // without emitting the info from the previous call.
  DbgValues.clear();
  FirstDbgValue = nullptr;

  assert(Defs.empty() && Uses.empty() &&
         "Only BuildGraph should update Defs/Uses");
  Defs.setUniverse(TRI->getNumRegs());
  Uses.setUniverse(TRI->getNumRegs());

  assert(CurrentVRegDefs.empty() && "nobody else should use CurrentVRegDefs");
  assert(CurrentVRegUses.empty() && "nobody else should use CurrentVRegUses");
  unsigned NumVirtRegs = MRI.getNumVirtRegs();
  CurrentVRegDefs.setUniverse(NumVirtRegs);
  CurrentVRegUses.setUniverse(NumVirtRegs);

  // Model data dependencies between instructions being scheduled and the
  // ExitSU.
  addSchedBarrierDeps();
  // Walk the list of instructions, from bottom moving up.
  MachineInstr *DbgMI = nullptr;
  for (MachineBasicBlock::iterator MII = RegionEnd, MIE = RegionBegin;
       MII != MIE; --MII) {
    MachineInstr &MI = *std::prev(MII);
    if (DbgMI) {
      DbgValues.emplace_back(DbgMI, &MI);
      DbgMI = nullptr;
    }

    if (MI.isDebugValue() || MI.isDebugPHI()) {
      DbgMI = &MI;
      continue;
    }

    if (MI.isDebugLabel() || MI.isDebugRef() || MI.isPseudoProbe())
      continue;

    SUnit *SU = MISUnitMap[&MI];
    assert(SU && "No SUnit mapped to this MI");

    if (RPTracker) {
      RegisterOperands RegOpers;
      RegOpers.collect(MI, *TRI, MRI, TrackLaneMasks, false);
      if (TrackLaneMasks) {
        SlotIndex SlotIdx = LIS->getInstructionIndex(MI);
        RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx);
      }
      if (PDiffs != nullptr)
        PDiffs->addInstruction(SU->NodeNum, RegOpers, MRI);

      if (RPTracker->getPos() == RegionEnd || &*RPTracker->getPos() != &MI)
        RPTracker->recedeSkipDebugValues();
      assert(&*RPTracker->getPos() == &MI && "RPTracker in sync");
      RPTracker->recede(RegOpers);
    }

    assert(
        (CanHandleTerminators || (!MI.isTerminator() && !MI.isPosition())) &&
        "Cannot schedule terminators or labels!");
    // Add register-based dependencies (data, anti, and output).
    // For some instructions (calls, returns, inline-asm, etc.) there can
    // be explicit uses and implicit defs, in which case the use will appear
    // on the operand list before the def. Do two passes over the operand
    // list to make sure that defs are processed before any uses.
    bool HasVRegDef = false;
    for (unsigned j = 0, n = MI.getNumOperands(); j != n; ++j) {
      const MachineOperand &MO = MI.getOperand(j);
      if (!MO.isReg() || !MO.isDef())
        continue;
      Register Reg = MO.getReg();
      if (Reg.isPhysical()) {
        addPhysRegDeps(SU, j);
      } else if (Reg.isVirtual()) {
        HasVRegDef = true;
        addVRegDefDeps(SU, j);
      }
    }
    // Now process all uses.
    for (unsigned j = 0, n = MI.getNumOperands(); j != n; ++j) {
      const MachineOperand &MO = MI.getOperand(j);
      // Only look at use operands.
      // We do not need to check for MO.readsReg() here because subsequent
      // subregister defs will get output dependence edges and need no
      // additional use dependencies.
      if (!MO.isReg() || !MO.isUse())
        continue;
      Register Reg = MO.getReg();
      if (Reg.isPhysical()) {
        addPhysRegDeps(SU, j);
      } else if (Reg.isVirtual() && MO.readsReg()) {
        addVRegUseDeps(SU, j);
      }
    }

    // If we haven't seen any uses in this scheduling region, create a
    // dependence edge to ExitSU to model the live-out latency. This is required
    // for vreg defs with no in-region use, and prefetches with no vreg def.
    //
    // FIXME: NumDataSuccs would be more precise than NumSuccs here. This
    // check currently relies on being called before adding chain deps.
    if (SU->NumSuccs == 0 && SU->Latency > 1 && (HasVRegDef || MI.mayLoad())) {
      SDep Dep(SU, SDep::Artificial);
      Dep.setLatency(SU->Latency - 1);
      ExitSU.addPred(Dep);
    }
    // Add memory dependencies (Note: isStoreToStackSlot and
    // isLoadFromStackSLot are not usable after stack slots are lowered to
    // actual addresses).

    const TargetInstrInfo *TII = ST.getInstrInfo();
    // This is a barrier event that acts as a pivotal node in the DAG.
    if (TII->isGlobalMemoryObject(&MI)) {
      // Become the barrier chain.
      if (BarrierChain)
        BarrierChain->addPredBarrier(SU);
      BarrierChain = SU;

      LLVM_DEBUG(dbgs() << "Global memory object and new barrier chain: SU("
                        << BarrierChain->NodeNum << ").\n");

      // Add dependencies against everything below it and clear maps.
      addBarrierChain(Stores);
      addBarrierChain(Loads);
      addBarrierChain(NonAliasStores);
      addBarrierChain(NonAliasLoads);
      addBarrierChain(FPExceptions);

      continue;
    }

    // Instructions that may raise FP exceptions may not be moved
    // across any global barriers.
    if (MI.mayRaiseFPException()) {
      if (BarrierChain)
        BarrierChain->addPredBarrier(SU);

      FPExceptions.insert(SU, UnknownValue);

      if (FPExceptions.size() >= HugeRegion) {
        LLVM_DEBUG(dbgs() << "Reducing FPExceptions map.\n");
        Value2SUsMap empty;
        reduceHugeMemNodeMaps(FPExceptions, empty, getReductionSize());
      }
    }
    // If it's not a store or a variant load, we're done.
    if (!MI.mayStore() &&
        !(MI.mayLoad() && !MI.isDereferenceableInvariantLoad()))
      continue;

    // Always add dependency edge to BarrierChain if present.
    if (BarrierChain)
      BarrierChain->addPredBarrier(SU);

    // Find the underlying objects for MI. The Objs vector is either
    // empty, or filled with the Values of memory locations which this
    // SU depends on.
    UnderlyingObjectsVector Objs;
    bool ObjsFound = getUnderlyingObjectsForInstr(&MI, MFI, Objs,
                                                  MF.getDataLayout());

    if (MI.mayStore()) {
      if (!ObjsFound) {
        // An unknown store depends on all stores and loads.
        addChainDependencies(SU, Stores);
        addChainDependencies(SU, NonAliasStores);
        addChainDependencies(SU, Loads);
        addChainDependencies(SU, NonAliasLoads);

        // Map this store to 'UnknownValue'.
        Stores.insert(SU, UnknownValue);
      } else {
        // Add precise dependencies against all previously seen memory
        // accesses mapped to the same Value(s).
        for (const UnderlyingObject &UnderlObj : Objs) {
          ValueType V = UnderlObj.getValue();
          bool ThisMayAlias = UnderlObj.mayAlias();

          // Add dependencies to previous stores and loads mapped to V.
          addChainDependencies(SU, (ThisMayAlias ? Stores : NonAliasStores), V);
          addChainDependencies(SU, (ThisMayAlias ? Loads : NonAliasLoads), V);
        }
        // Update the store map after all chains have been added to avoid adding
        // self-loop edge if multiple underlying objects are present.
        for (const UnderlyingObject &UnderlObj : Objs) {
          ValueType V = UnderlObj.getValue();
          bool ThisMayAlias = UnderlObj.mayAlias();

          // Map this store to V.
          (ThisMayAlias ? Stores : NonAliasStores).insert(SU, V);
        }
        // The store may have dependencies to unanalyzable loads and
        // stores.
        addChainDependencies(SU, Loads, UnknownValue);
        addChainDependencies(SU, Stores, UnknownValue);
      }
    } else { // SU is a load.
      if (!ObjsFound) {
        // An unknown load depends on all stores.
        addChainDependencies(SU, Stores);
        addChainDependencies(SU, NonAliasStores);

        Loads.insert(SU, UnknownValue);
      } else {
        for (const UnderlyingObject &UnderlObj : Objs) {
          ValueType V = UnderlObj.getValue();
          bool ThisMayAlias = UnderlObj.mayAlias();

          // Add precise dependencies against all previously seen stores
          // mapping to the same Value(s).
          addChainDependencies(SU, (ThisMayAlias ? Stores : NonAliasStores), V);

          // Map this load to V.
          (ThisMayAlias ? Loads : NonAliasLoads).insert(SU, V);
        }
        // The load may have dependencies to unanalyzable stores.
        addChainDependencies(SU, Stores, UnknownValue);
      }
    }
1023 if (Stores
.size() + Loads
.size() >= HugeRegion
) {
1024 LLVM_DEBUG(dbgs() << "Reducing Stores and Loads maps.\n");
1025 reduceHugeMemNodeMaps(Stores
, Loads
, getReductionSize());
1027 if (NonAliasStores
.size() + NonAliasLoads
.size() >= HugeRegion
) {
1028 LLVM_DEBUG(dbgs() << "Reducing NonAliasStores and NonAliasLoads maps.\n");
1029 reduceHugeMemNodeMaps(NonAliasStores
, NonAliasLoads
, getReductionSize());
1034 FirstDbgValue
= DbgMI
;
1038 CurrentVRegDefs
.clear();
1039 CurrentVRegUses
.clear();
raw_ostream &llvm::operator<<(raw_ostream &OS, const PseudoSourceValue *PSV) {
  PSV->printCustom(OS);
  return OS;
}
void ScheduleDAGInstrs::Value2SUsMap::dump() {
  for (const auto &[ValType, SUs] : *this) {
    if (isa<const Value *>(ValType)) {
      const Value *V = cast<const Value *>(ValType);
      if (isa<UndefValue>(V))
        dbgs() << "Unknown";
      else
        V->printAsOperand(dbgs());
    } else if (isa<const PseudoSourceValue *>(ValType))
      dbgs() << cast<const PseudoSourceValue *>(ValType);
    else
      llvm_unreachable("Unknown Value type.");

    dbgs() << " : ";
    dumpSUList(SUs);
  }
}
void ScheduleDAGInstrs::reduceHugeMemNodeMaps(Value2SUsMap &stores,
                                              Value2SUsMap &loads, unsigned N) {
  LLVM_DEBUG(dbgs() << "Before reduction:\nStoring SUnits:\n"; stores.dump();
             dbgs() << "Loading SUnits:\n"; loads.dump());

  // Insert all SU's NodeNums into a vector and sort it.
  std::vector<unsigned> NodeNums;
  NodeNums.reserve(stores.size() + loads.size());
  for (const auto &[V, SUs] : stores) {
    (void)V;
    for (const auto *SU : SUs)
      NodeNums.push_back(SU->NodeNum);
  }
  for (const auto &[V, SUs] : loads) {
    (void)V;
    for (const auto *SU : SUs)
      NodeNums.push_back(SU->NodeNum);
  }
  llvm::sort(NodeNums);

  // The N last elements in NodeNums will be removed, and the SU with
  // the lowest NodeNum of them will become the new BarrierChain to
  // let the not yet seen SUs have a dependency to the removed SUs.
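  // Illustrative example (not part of the original comment): with
  // NodeNums = {3, 5, 8, 12, 20} and N = 2, the two highest-numbered SUs
  // (12 and 20) are pruned from the maps below, and SU(12), the lowest
  // NodeNum among them, becomes the candidate for the new BarrierChain.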
  assert(N <= NodeNums.size());
  SUnit *newBarrierChain = &SUnits[*(NodeNums.end() - N)];
  if (BarrierChain) {
    // The aliasing and non-aliasing maps reduce independently of each
    // other, but share a common BarrierChain. Check if the
    // newBarrierChain is above the former one. If it is not, it may
    // introduce a loop to use newBarrierChain, so keep the old one.
    if (newBarrierChain->NodeNum < BarrierChain->NodeNum) {
      BarrierChain->addPredBarrier(newBarrierChain);
      BarrierChain = newBarrierChain;
      LLVM_DEBUG(dbgs() << "Inserting new barrier chain: SU("
                        << BarrierChain->NodeNum << ").\n");
    } else
      LLVM_DEBUG(dbgs() << "Keeping old barrier chain: SU("
                        << BarrierChain->NodeNum << ").\n");
  } else
    BarrierChain = newBarrierChain;

  insertBarrierChain(stores);
  insertBarrierChain(loads);

  LLVM_DEBUG(dbgs() << "After reduction:\nStoring SUnits:\n"; stores.dump();
             dbgs() << "Loading SUnits:\n"; loads.dump());
}
static void toggleKills(const MachineRegisterInfo &MRI, LiveRegUnits &LiveRegs,
                        MachineInstr &MI, bool addToLiveRegs) {
  for (MachineOperand &MO : MI.operands()) {
    if (!MO.isReg() || !MO.readsReg())
      continue;
    Register Reg = MO.getReg();
    if (!Reg)
      continue;

    // Things that are available after the instruction are killed by it.
    bool IsKill = LiveRegs.available(Reg);

    // Exception: Do not kill reserved registers
    MO.setIsKill(IsKill && !MRI.isReserved(Reg));
    if (addToLiveRegs)
      LiveRegs.addReg(Reg);
  }
}
void ScheduleDAGInstrs::fixupKills(MachineBasicBlock &MBB) {
  LLVM_DEBUG(dbgs() << "Fixup kills for " << printMBBReference(MBB) << '\n');

  LiveRegs.init(*TRI);
  LiveRegs.addLiveOuts(MBB);

  // Examine block from end to start...
  for (MachineInstr &MI : llvm::reverse(MBB)) {
    if (MI.isDebugOrPseudoInstr())
      continue;

    // Update liveness. Registers that are defed but not used in this
    // instruction are now dead. Mark register and all subregs as they
    // are completely defined.
    for (ConstMIBundleOperands O(MI); O.isValid(); ++O) {
      const MachineOperand &MO = *O;
      if (MO.isReg()) {
        if (!MO.isDef())
          continue;
        Register Reg = MO.getReg();
        if (!Reg)
          continue;
        LiveRegs.removeReg(Reg);
      } else if (MO.isRegMask()) {
        LiveRegs.removeRegsNotPreserved(MO.getRegMask());
      }
    }

    // If there is a bundle header fix it up first.
    if (!MI.isBundled()) {
      toggleKills(MRI, LiveRegs, MI, true);
    } else {
      MachineBasicBlock::instr_iterator Bundle = MI.getIterator();
      if (MI.isBundle())
        toggleKills(MRI, LiveRegs, MI, false);

      // Some targets make the (questionable) assumption that the instructions
      // inside the bundle are ordered and consequently only the last use of
      // a register inside the bundle can kill it.
      MachineBasicBlock::instr_iterator I = std::next(Bundle);
      while (I->isBundledWithSucc())
        ++I;
      do {
        if (!I->isDebugOrPseudoInstr())
          toggleKills(MRI, LiveRegs, *I, true);
        --I;
      } while (I != Bundle);
    }
  }
}
void ScheduleDAGInstrs::dumpNode(const SUnit &SU) const {
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  dumpNodeName(SU);
  if (SchedPrintCycles)
    dbgs() << " [TopReadyCycle = " << SU.TopReadyCycle
           << ", BottomReadyCycle = " << SU.BotReadyCycle << "]";
  dbgs() << ": ";
  SU.getInstr()->dump();
#endif
}
void ScheduleDAGInstrs::dump() const {
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  if (EntrySU.getInstr() != nullptr)
    dumpNodeAll(EntrySU);
  for (const SUnit &SU : SUnits)
    dumpNodeAll(SU);
  if (ExitSU.getInstr() != nullptr)
    dumpNodeAll(ExitSU);
#endif
}
std::string ScheduleDAGInstrs::getGraphNodeLabel(const SUnit *SU) const {
  std::string s;
  raw_string_ostream oss(s);
  if (SU == &EntrySU)
    oss << "<entry>";
  else if (SU == &ExitSU)
    oss << "<exit>";
  else
    SU->getInstr()->print(oss, /*IsStandalone=*/true);
  return s;
}

/// Return the basic block label. It is not necessarily unique because a block
/// contains multiple scheduling regions. But it is fine for visualization.
std::string ScheduleDAGInstrs::getDAGName() const {
  return "dag." + BB->getFullName();
}
*SuccSU
, SUnit
*PredSU
) {
1228 return SuccSU
== &ExitSU
|| !Topo
.IsReachable(PredSU
, SuccSU
);
1231 bool ScheduleDAGInstrs::addEdge(SUnit
*SuccSU
, const SDep
&PredDep
) {
1232 if (SuccSU
!= &ExitSU
) {
1233 // Do not use WillCreateCycle, it assumes SD scheduling.
1234 // If Pred is reachable from Succ, then the edge creates a cycle.
1235 if (Topo
.IsReachable(PredDep
.getSUnit(), SuccSU
))
1237 Topo
.AddPredQueued(SuccSU
, PredDep
.getSUnit());
1239 SuccSU
->addPred(PredDep
, /*Required=*/!PredDep
.isArtificial());
1240 // Return true regardless of whether a new edge needed to be inserted.
//===----------------------------------------------------------------------===//
// SchedDFSResult Implementation
//===----------------------------------------------------------------------===//
namespace llvm {

/// Internal state used to compute SchedDFSResult.
class SchedDFSImpl {
  SchedDFSResult &R;

  /// Join DAG nodes into equivalence classes by their subtree.
  IntEqClasses SubtreeClasses;
  /// List PredSU, SuccSU pairs that represent data edges between subtrees.
  std::vector<std::pair<const SUnit *, const SUnit *>> ConnectionPairs;

  struct RootData {
    unsigned NodeID;
    unsigned ParentNodeID;      ///< Parent node (member of the parent subtree).
    unsigned SubInstrCount = 0; ///< Instr count in this tree only, not
                                ///< children.

    RootData(unsigned id): NodeID(id),
                           ParentNodeID(SchedDFSResult::InvalidSubtreeID) {}

    unsigned getSparseSetIndex() const { return NodeID; }
  };

  SparseSet<RootData> RootSet;
public:
  SchedDFSImpl(SchedDFSResult &r): R(r), SubtreeClasses(R.DFSNodeData.size()) {
    RootSet.setUniverse(R.DFSNodeData.size());
  }

  /// Returns true if this node has been visited by the DFS traversal.
  ///
  /// During visitPostorderNode the Node's SubtreeID is assigned to the Node
  /// ID. Later, SubtreeID is updated but remains valid.
  bool isVisited(const SUnit *SU) const {
    return R.DFSNodeData[SU->NodeNum].SubtreeID
      != SchedDFSResult::InvalidSubtreeID;
  }

  /// Initializes this node's instruction count. We don't need to flag the node
  /// visited until visitPostorder because the DAG cannot have cycles.
  void visitPreorder(const SUnit *SU) {
    R.DFSNodeData[SU->NodeNum].InstrCount =
      SU->getInstr()->isTransient() ? 0 : 1;
  }

  /// Called once for each node after all predecessors are visited. Revisit this
  /// node's predecessors and potentially join them now that we know the ILP of
  /// the other predecessors.
  void visitPostorderNode(const SUnit *SU) {
    // Mark this node as the root of a subtree. It may be joined with its
    // successors later.
    R.DFSNodeData[SU->NodeNum].SubtreeID = SU->NodeNum;
    RootData RData(SU->NodeNum);
    RData.SubInstrCount = SU->getInstr()->isTransient() ? 0 : 1;

    // If any predecessors are still in their own subtree, they either cannot be
    // joined or are large enough to remain separate. If this parent node's
    // total instruction count is not greater than a child subtree by at least
    // the subtree limit, then try to join it now since splitting subtrees is
    // only useful if multiple high-pressure paths are possible.
    unsigned InstrCount = R.DFSNodeData[SU->NodeNum].InstrCount;
    for (const SDep &PredDep : SU->Preds) {
      if (PredDep.getKind() != SDep::Data)
        continue;
      unsigned PredNum = PredDep.getSUnit()->NodeNum;
      if ((InstrCount - R.DFSNodeData[PredNum].InstrCount) < R.SubtreeLimit)
        joinPredSubtree(PredDep, SU, /*CheckLimit=*/false);

      // Either link or merge the TreeData entry from the child to the parent.
      if (R.DFSNodeData[PredNum].SubtreeID == PredNum) {
        // If the predecessor's parent is invalid, this is a tree edge and the
        // current node is the parent.
        if (RootSet[PredNum].ParentNodeID == SchedDFSResult::InvalidSubtreeID)
          RootSet[PredNum].ParentNodeID = SU->NodeNum;
      }
      else if (RootSet.count(PredNum)) {
        // The predecessor is not a root, but is still in the root set. This
        // must be the new parent that it was just joined to. Note that
        // RootSet[PredNum].ParentNodeID may either be invalid or may still be
        // set to the original parent.
        RData.SubInstrCount += RootSet[PredNum].SubInstrCount;
        RootSet.erase(PredNum);
      }
    }
    RootSet[SU->NodeNum] = RData;
  }
  /// Called once for each tree edge after calling visitPostOrderNode on
  /// the predecessor. Increment the parent node's instruction count and
  /// preemptively join this subtree to its parent's if it is small enough.
  void visitPostorderEdge(const SDep &PredDep, const SUnit *Succ) {
    R.DFSNodeData[Succ->NodeNum].InstrCount
      += R.DFSNodeData[PredDep.getSUnit()->NodeNum].InstrCount;
    joinPredSubtree(PredDep, Succ);
  }

  /// Adds a connection for cross edges.
  void visitCrossEdge(const SDep &PredDep, const SUnit *Succ) {
    ConnectionPairs.emplace_back(PredDep.getSUnit(), Succ);
  }

  /// Sets each node's subtree ID to the representative ID and records
  /// connections between trees.
  void finalize() {
    SubtreeClasses.compress();
    R.DFSTreeData.resize(SubtreeClasses.getNumClasses());
    assert(SubtreeClasses.getNumClasses() == RootSet.size()
           && "number of roots should match trees");
    for (const RootData &Root : RootSet) {
      unsigned TreeID = SubtreeClasses[Root.NodeID];
      if (Root.ParentNodeID != SchedDFSResult::InvalidSubtreeID)
        R.DFSTreeData[TreeID].ParentTreeID = SubtreeClasses[Root.ParentNodeID];
      R.DFSTreeData[TreeID].SubInstrCount = Root.SubInstrCount;
      // Note that SubInstrCount may be greater than InstrCount if we joined
      // subtrees across a cross edge. InstrCount will be attributed to the
      // original parent, while SubInstrCount will be attributed to the joined
      // parent.
    }
    R.SubtreeConnections.resize(SubtreeClasses.getNumClasses());
    R.SubtreeConnectLevels.resize(SubtreeClasses.getNumClasses());
    LLVM_DEBUG(dbgs() << R.getNumSubtrees() << " subtrees:\n");
    for (unsigned Idx = 0, End = R.DFSNodeData.size(); Idx != End; ++Idx) {
      R.DFSNodeData[Idx].SubtreeID = SubtreeClasses[Idx];
      LLVM_DEBUG(dbgs() << "  SU(" << Idx << ") in tree "
                        << R.DFSNodeData[Idx].SubtreeID << '\n');
    }
    for (const auto &[Pred, Succ] : ConnectionPairs) {
      unsigned PredTree = SubtreeClasses[Pred->NodeNum];
      unsigned SuccTree = SubtreeClasses[Succ->NodeNum];
      if (PredTree == SuccTree)
        continue;
      unsigned Depth = Pred->getDepth();
      addConnection(PredTree, SuccTree, Depth);
      addConnection(SuccTree, PredTree, Depth);
    }
  }
protected:
  /// Joins the predecessor subtree with the successor that is its DFS parent.
  /// Applies some heuristics before joining.
  bool joinPredSubtree(const SDep &PredDep, const SUnit *Succ,
                       bool CheckLimit = true) {
    assert(PredDep.getKind() == SDep::Data && "Subtrees are for data edges");

    // Check if the predecessor is already joined.
    const SUnit *PredSU = PredDep.getSUnit();
    unsigned PredNum = PredSU->NodeNum;
    if (R.DFSNodeData[PredNum].SubtreeID != PredNum)
      return false;

    // Four is the magic number of successors before a node is considered a
    // hub.
    unsigned NumDataSucs = 0;
    for (const SDep &SuccDep : PredSU->Succs) {
      if (SuccDep.getKind() == SDep::Data) {
        if (++NumDataSucs >= 4)
          return false;
      }
    }
    if (CheckLimit && R.DFSNodeData[PredNum].InstrCount > R.SubtreeLimit)
      return false;
    R.DFSNodeData[PredNum].SubtreeID = Succ->NodeNum;
    SubtreeClasses.join(Succ->NodeNum, PredNum);
    return true;
  }

  /// Called by finalize() to record a connection between trees.
  void addConnection(unsigned FromTree, unsigned ToTree, unsigned Depth) {
    if (!Depth)
      return;

    do {
      SmallVectorImpl<SchedDFSResult::Connection> &Connections =
        R.SubtreeConnections[FromTree];
      for (SchedDFSResult::Connection &C : Connections) {
        if (C.TreeID == ToTree) {
          C.Level = std::max(C.Level, Depth);
          return;
        }
      }
      Connections.push_back(SchedDFSResult::Connection(ToTree, Depth));
      FromTree = R.DFSTreeData[FromTree].ParentTreeID;
    } while (FromTree != SchedDFSResult::InvalidSubtreeID);
  }
};

} // end namespace llvm
namespace {

/// Manage the stack used by a reverse depth-first search over the DAG.
class SchedDAGReverseDFS {
  std::vector<std::pair<const SUnit *, SUnit::const_pred_iterator>> DFSStack;

public:
  bool isComplete() const { return DFSStack.empty(); }

  void follow(const SUnit *SU) {
    DFSStack.emplace_back(SU, SU->Preds.begin());
  }
  void advance() { ++DFSStack.back().second; }

  const SDep *backtrack() {
    DFSStack.pop_back();
    return DFSStack.empty() ? nullptr : std::prev(DFSStack.back().second);
  }

  const SUnit *getCurr() const { return DFSStack.back().first; }

  SUnit::const_pred_iterator getPred() const { return DFSStack.back().second; }

  SUnit::const_pred_iterator getPredEnd() const {
    return getCurr()->Preds.end();
  }
};

} // end anonymous namespace
static bool hasDataSucc(const SUnit *SU) {
  for (const SDep &SuccDep : SU->Succs) {
    if (SuccDep.getKind() == SDep::Data &&
        !SuccDep.getSUnit()->isBoundaryNode())
      return true;
  }
  return false;
}
/// Computes an ILP metric for all nodes in the subDAG reachable via depth-first
/// search from this root.
void SchedDFSResult::compute(ArrayRef<SUnit> SUnits) {
  if (!IsBottomUp)
    llvm_unreachable("Top-down ILP metric is unimplemented");

  SchedDFSImpl Impl(*this);
  for (const SUnit &SU : SUnits) {
    if (Impl.isVisited(&SU) || hasDataSucc(&SU))
      continue;

    SchedDAGReverseDFS DFS;
    Impl.visitPreorder(&SU);
    DFS.follow(&SU);
    while (true) {
      // Traverse the leftmost path as far as possible.
      while (DFS.getPred() != DFS.getPredEnd()) {
        const SDep &PredDep = *DFS.getPred();
        DFS.advance();
        // Ignore non-data edges.
        if (PredDep.getKind() != SDep::Data
            || PredDep.getSUnit()->isBoundaryNode()) {
          continue;
        }
        // An already visited edge is a cross edge, assuming an acyclic DAG.
        if (Impl.isVisited(PredDep.getSUnit())) {
          Impl.visitCrossEdge(PredDep, DFS.getCurr());
          continue;
        }
        Impl.visitPreorder(PredDep.getSUnit());
        DFS.follow(PredDep.getSUnit());
      }
      // Visit the top of the stack in postorder and backtrack.
      const SUnit *Child = DFS.getCurr();
      const SDep *PredDep = DFS.backtrack();
      Impl.visitPostorderNode(Child);
      if (PredDep)
        Impl.visitPostorderEdge(*PredDep, DFS.getCurr());
      if (DFS.isComplete())
        break;
    }
  }
  Impl.finalize();
}
/// The root of the given SubtreeID was just scheduled. For all subtrees
/// connected to this tree, record the depth of the connection so that the
/// nearest connected subtrees can be prioritized.
void SchedDFSResult::scheduleTree(unsigned SubtreeID) {
  for (const Connection &C : SubtreeConnections[SubtreeID]) {
    SubtreeConnectLevels[C.TreeID] =
      std::max(SubtreeConnectLevels[C.TreeID], C.Level);
    LLVM_DEBUG(dbgs() << "  Tree: " << C.TreeID << " @"
                      << SubtreeConnectLevels[C.TreeID] << '\n');
  }
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void ILPValue::print(raw_ostream &OS) const {
  OS << InstrCount << " / " << Length << " = ";
  if (!Length)
    OS << "BADILP";
  else
    OS << format("%g", ((double)InstrCount / Length));
}

LLVM_DUMP_METHOD void ILPValue::dump() const {
  dbgs() << *this << '\n';
}

namespace llvm {

LLVM_ATTRIBUTE_UNUSED
raw_ostream &operator<<(raw_ostream &OS, const ILPValue &Val) {
  Val.print(OS);
  return OS;
}

} // end namespace llvm

#endif