//===- llvm/Target/TargetSchedule.cpp - Sched Machine Model ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a wrapper around MCSchedModel that allows the interface
// to benefit from information currently only available in TargetInstrInfo.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/MC/MCSchedule.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>

using namespace llvm;

static cl::opt<bool> EnableSchedModel("schedmodel", cl::Hidden, cl::init(true),
  cl::desc("Use TargetSchedModel for latency lookup"));

static cl::opt<bool> EnableSchedItins("scheditins", cl::Hidden, cl::init(true),
  cl::desc("Use InstrItineraryData for latency lookup"));
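
// Both flags default to true and are hidden cl::opt options, so a tool that
// links this file (e.g. llc) can disable either model for debugging, as in
// -schedmodel=false or -scheditins=false.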

bool TargetSchedModel::hasInstrSchedModel() const {
  return EnableSchedModel && SchedModel.hasInstrSchedModel();
}

bool TargetSchedModel::hasInstrItineraries() const {
  return EnableSchedItins && !InstrItins.isEmpty();
}

static unsigned gcd(unsigned Dividend, unsigned Divisor) {
  // Dividend and Divisor will be naturally swapped as needed.
  while (Divisor) {
    unsigned Rem = Dividend % Divisor;
    Dividend = Divisor;
    Divisor = Rem;
  }
  return Dividend;
}

static unsigned lcm(unsigned A, unsigned B) {
  unsigned LCM = (uint64_t(A) * B) / gcd(A, B);
  assert((LCM >= A && LCM >= B) && "LCM overflow");
  return LCM;
}
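
// A quick worked example of the two helpers above, with illustrative values:
//   gcd(6, 4): 6 % 4 = 2, then 4 % 2 = 0, so gcd = 2.
//   lcm(6, 4) = (6 * 4) / gcd(6, 4) = 24 / 2 = 12.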

void TargetSchedModel::init(const TargetSubtargetInfo *TSInfo) {
  STI = TSInfo;
  SchedModel = TSInfo->getSchedModel();
  TII = TSInfo->getInstrInfo();
  STI->initInstrItins(InstrItins);

  unsigned NumRes = SchedModel.getNumProcResourceKinds();
  ResourceFactors.resize(NumRes);
  ResourceLCM = SchedModel.IssueWidth;
  for (unsigned Idx = 0; Idx < NumRes; ++Idx) {
    unsigned NumUnits = SchedModel.getProcResource(Idx)->NumUnits;
    if (NumUnits > 0)
      ResourceLCM = lcm(ResourceLCM, NumUnits);
  }
  MicroOpFactor = ResourceLCM / SchedModel.IssueWidth;
  for (unsigned Idx = 0; Idx < NumRes; ++Idx) {
    unsigned NumUnits = SchedModel.getProcResource(Idx)->NumUnits;
    ResourceFactors[Idx] = NumUnits ? (ResourceLCM / NumUnits) : 0;
  }
}
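
// The factors computed in init() normalize per-resource cycle counts to a
// common scale. For a hypothetical machine with IssueWidth = 4 and two
// resources with 2 and 3 units:
//   ResourceLCM     = lcm(lcm(4, 2), 3) = 12
//   MicroOpFactor   = 12 / 4 = 3
//   ResourceFactors = {12 / 2, 12 / 3} = {6, 4}
// so one cycle on the 2-unit resource costs 6 normalized units, one cycle on
// the 3-unit resource costs 4, and one micro-op of issue width costs 3.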

/// Returns true only if instruction is specified as single issue.
bool TargetSchedModel::mustBeginGroup(const MachineInstr *MI,
                                      const MCSchedClassDesc *SC) const {
  if (hasInstrSchedModel()) {
    if (!SC)
      SC = resolveSchedClass(MI);
    if (SC->isValid())
      return SC->BeginGroup;
  }
  return false;
}

bool TargetSchedModel::mustEndGroup(const MachineInstr *MI,
                                    const MCSchedClassDesc *SC) const {
  if (hasInstrSchedModel()) {
    if (!SC)
      SC = resolveSchedClass(MI);
    if (SC->isValid())
      return SC->EndGroup;
  }
  return false;
}

unsigned TargetSchedModel::getNumMicroOps(const MachineInstr *MI,
                                          const MCSchedClassDesc *SC) const {
  if (hasInstrItineraries()) {
    int UOps = InstrItins.getNumMicroOps(MI->getDesc().getSchedClass());
    return (UOps >= 0) ? UOps : TII->getNumMicroOps(&InstrItins, *MI);
  }
  if (hasInstrSchedModel()) {
    if (!SC)
      SC = resolveSchedClass(MI);
    if (SC->isValid())
      return SC->NumMicroOps;
  }
  return MI->isTransient() ? 0 : 1;
}

// The machine model may explicitly specify an invalid latency, which
// effectively means infinite latency. Since users of the TargetSchedule API
// don't know how to handle this, we convert it to a very large latency that is
// easy to distinguish when debugging the DAG but won't induce overflow.
static unsigned capLatency(int Cycles) {
  return Cycles >= 0 ? Cycles : 1000;
}
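
// For instance, capLatency(4) == 4, while an "invalid" latency such as
// capLatency(-1) becomes 1000: large enough to stand out in a DAG dump, small
// enough that summing latencies will not overflow unsigned arithmetic.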

/// Return the MCSchedClassDesc for this instruction. Some SchedClasses require
/// evaluation of predicates that depend on instruction operands or flags.
const MCSchedClassDesc *TargetSchedModel::
resolveSchedClass(const MachineInstr *MI) const {
  // Get the definition's scheduling class descriptor from this machine model.
  unsigned SchedClass = MI->getDesc().getSchedClass();
  const MCSchedClassDesc *SCDesc = SchedModel.getSchedClassDesc(SchedClass);
  if (!SCDesc->isValid())
    return SCDesc;

#ifndef NDEBUG
  unsigned NIter = 0;
#endif
  while (SCDesc->isVariant()) {
    assert(++NIter < 6 && "Variants are nested deeper than the magic number");
    SchedClass = STI->resolveSchedClass(SchedClass, MI, this);
    SCDesc = SchedModel.getSchedClassDesc(SchedClass);
  }
  return SCDesc;
}
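
// A variant sched class is one whose TableGen definition attaches predicates
// (SchedVar alternatives); a target might, for example, resolve an opcode to
// different concrete classes depending on an operand's immediate value. The
// loop above keeps re-resolving until it reaches a non-variant descriptor,
// and the NIter assert guards against runaway predicate nesting.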

/// Find the def index of this operand. This index maps to the machine model
/// and is independent of use operands. Def operands may be reordered with
/// uses or merged with uses without affecting the def index (e.g. before/after
/// regalloc). However, an instruction's def operands must never be reordered
/// with respect to each other.
static unsigned findDefIdx(const MachineInstr *MI, unsigned DefOperIdx) {
  unsigned DefIdx = 0;
  for (unsigned i = 0; i != DefOperIdx; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.isDef())
      ++DefIdx;
  }
  return DefIdx;
}

/// Find the use index of this operand. This is independent of the
/// instruction's def operands.
///
/// Note that uses are not determined by the operand's isUse property, which
/// is simply the inverse of isDef. Here we consider any readsReg operand to be
/// a "use". The machine model allows an operand to be both a Def and Use.
static unsigned findUseIdx(const MachineInstr *MI, unsigned UseOperIdx) {
  unsigned UseIdx = 0;
  for (unsigned i = 0; i != UseOperIdx; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.readsReg() && !MO.isDef())
      ++UseIdx;
  }
  return UseIdx;
}
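
// To illustrate both helpers with a hypothetical operand list
//   operand 0: %d0 (def), operand 1: %d1 (def),
//   operand 2: %u0 (use), operand 3: %u1 (use)
// findDefIdx(MI, 1) counts one def before operand 1 and returns 1, while
// findUseIdx(MI, 3) skips the two defs, counts one register-reading operand
// before operand 3, and also returns 1. The two index spaces are independent.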

// Top-level API for clients that know the operand indices.
unsigned TargetSchedModel::computeOperandLatency(
  const MachineInstr *DefMI, unsigned DefOperIdx,
  const MachineInstr *UseMI, unsigned UseOperIdx) const {

  if (!hasInstrSchedModel() && !hasInstrItineraries())
    return TII->defaultDefLatency(SchedModel, *DefMI);

  if (hasInstrItineraries()) {
    int OperLatency = 0;
    if (UseMI) {
      OperLatency = TII->getOperandLatency(&InstrItins, *DefMI, DefOperIdx,
                                           *UseMI, UseOperIdx);
    } else {
      unsigned DefClass = DefMI->getDesc().getSchedClass();
      OperLatency = InstrItins.getOperandCycle(DefClass, DefOperIdx);
    }
    if (OperLatency >= 0)
      return OperLatency;

    // No operand latency was found.
    unsigned InstrLatency = TII->getInstrLatency(&InstrItins, *DefMI);

    // Expected latency is the max of the stage latency and itinerary props.
    // Rather than directly querying InstrItins stage latency, we call a TII
    // hook to allow subtargets to specialize latency. This hook is only
    // applicable to the InstrItins model. InstrSchedModel should model all
    // special cases without TII hooks.
    InstrLatency =
        std::max(InstrLatency, TII->defaultDefLatency(SchedModel, *DefMI));
    return InstrLatency;
  }

  // hasInstrSchedModel()
  const MCSchedClassDesc *SCDesc = resolveSchedClass(DefMI);
  unsigned DefIdx = findDefIdx(DefMI, DefOperIdx);
  if (DefIdx < SCDesc->NumWriteLatencyEntries) {
    // Lookup the definition's write latency in SubtargetInfo.
    const MCWriteLatencyEntry *WLEntry =
        STI->getWriteLatencyEntry(SCDesc, DefIdx);
    unsigned WriteID = WLEntry->WriteResourceID;
    unsigned Latency = capLatency(WLEntry->Cycles);
    if (!UseMI)
      return Latency;

    // Lookup the use's latency adjustment in SubtargetInfo.
    const MCSchedClassDesc *UseDesc = resolveSchedClass(UseMI);
    if (UseDesc->NumReadAdvanceEntries == 0)
      return Latency;
    unsigned UseIdx = findUseIdx(UseMI, UseOperIdx);
    int Advance = STI->getReadAdvanceCycles(UseDesc, UseIdx, WriteID);
    if (Advance > 0 && (unsigned)Advance > Latency) // unsigned wrap
      return 0;
    return Latency - Advance;
  }

  // If DefIdx does not exist in the model (e.g. implicit defs), then return
  // unit latency (defaultDefLatency may be too conservative).
#ifndef NDEBUG
  if (SCDesc->isValid() && !DefMI->getOperand(DefOperIdx).isImplicit()
      && !DefMI->getDesc().OpInfo[DefOperIdx].isOptionalDef()
      && SchedModel.isComplete()) {
    errs() << "DefIdx " << DefIdx << " exceeds machine model writes for "
           << *DefMI << " (Try with MCSchedModel.CompleteModel set to false)";
    llvm_unreachable("incomplete machine model");
  }
#endif
  // FIXME: Automatically giving all implicit defs defaultDefLatency is
  // undesirable. We should only do it for defs that are known to the MC
  // desc like flags. Truly implicit defs should get 1 cycle latency.
  return DefMI->isTransient() ? 0 : TII->defaultDefLatency(SchedModel, *DefMI);
}
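
// The ReadAdvance lookup above models forwarding: for example, if a write has
// Latency = 4 and the consuming sched class declares a ReadAdvance of 1 for
// that WriteResourceID, the effective operand latency is 4 - 1 = 3. A
// negative Advance lengthens the dependency, and an Advance larger than the
// latency clamps the result to 0 rather than wrapping unsigned. A sketched
// client call (hypothetical MIs and operand indices):
//   unsigned Lat = SchedModel.computeOperandLatency(DefMI, /*DefOperIdx=*/0,
//                                                   UseMI, /*UseOperIdx=*/2);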

unsigned
TargetSchedModel::computeInstrLatency(const MCSchedClassDesc &SCDesc) const {
  return capLatency(MCSchedModel::computeInstrLatency(*STI, SCDesc));
}

unsigned TargetSchedModel::computeInstrLatency(unsigned Opcode) const {
  assert(hasInstrSchedModel() && "Only call this function with a SchedModel");
  unsigned SCIdx = TII->get(Opcode).getSchedClass();
  return capLatency(SchedModel.computeInstrLatency(*STI, SCIdx));
}

unsigned TargetSchedModel::computeInstrLatency(const MCInst &Inst) const {
  if (hasInstrSchedModel())
    return capLatency(SchedModel.computeInstrLatency(*STI, *TII, Inst));
  return computeInstrLatency(Inst.getOpcode());
}

unsigned
TargetSchedModel::computeInstrLatency(const MachineInstr *MI,
                                      bool UseDefaultDefLatency) const {
  // For the itinerary model, fall back to the old subtarget hook.
  // Allow subtargets to compute Bundle latencies outside the machine model.
  if (hasInstrItineraries() || MI->isBundle() ||
      (!hasInstrSchedModel() && !UseDefaultDefLatency))
    return TII->getInstrLatency(&InstrItins, *MI);

  if (hasInstrSchedModel()) {
    const MCSchedClassDesc *SCDesc = resolveSchedClass(MI);
    if (SCDesc->isValid())
      return computeInstrLatency(*SCDesc);
  }
  return TII->defaultDefLatency(SchedModel, *MI);
}

unsigned TargetSchedModel::
computeOutputLatency(const MachineInstr *DefMI, unsigned DefOperIdx,
                     const MachineInstr *DepMI) const {
  if (!SchedModel.isOutOfOrder())
    return 1;

  // An out-of-order processor can dispatch WAW dependencies in the same cycle.

  // Treat predication as a data dependency for out-of-order cpus. In-order
  // cpus do not need to treat predicated writes specially.
  //
  // TODO: The following hack exists because predication passes do not
  // correctly append imp-use operands, and readsReg() strangely returns false
  // for predicated defs.
  Register Reg = DefMI->getOperand(DefOperIdx).getReg();
  const MachineFunction &MF = *DefMI->getMF();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  if (!DepMI->readsRegister(Reg, TRI) && TII->isPredicated(*DepMI))
    return computeInstrLatency(DefMI);

  // If we have a per operand scheduling model, check if this def is writing
  // an unbuffered resource. If so, it is treated like an in-order cpu.
  if (hasInstrSchedModel()) {
    const MCSchedClassDesc *SCDesc = resolveSchedClass(DefMI);
    if (SCDesc->isValid()) {
      for (const MCWriteProcResEntry *PRI = STI->getWriteProcResBegin(SCDesc),
             *PRE = STI->getWriteProcResEnd(SCDesc); PRI != PRE; ++PRI) {
        if (!SchedModel.getProcResource(PRI->ProcResourceIdx)->BufferSize)
          return 1;
      }
    }
  }
  return 0;
}
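
// In short: a WAW (output) dependency costs 0 cycles on an out-of-order core
// with renaming, 1 cycle when the core is in-order or the def writes an
// unbuffered (BufferSize == 0) resource, and the earlier def's full latency
// when the later writer is predicated and may leave the register unchanged,
// which makes the output dependency behave like a data dependency.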

double
TargetSchedModel::computeReciprocalThroughput(const MachineInstr *MI) const {
  if (hasInstrItineraries()) {
    unsigned SchedClass = MI->getDesc().getSchedClass();
    return MCSchedModel::getReciprocalThroughput(SchedClass,
                                                 *getInstrItineraries());
  }

  if (hasInstrSchedModel())
    return MCSchedModel::getReciprocalThroughput(*STI, *resolveSchedClass(MI));

  return 0.0;
}
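
// Reciprocal throughput is the average gap, in cycles, between issues of
// independent copies of the same instruction: for example, an instruction
// that occupies one of two identical pipelines for one cycle has a reciprocal
// throughput of 0.5. A return value of 0.0 from these overloads means the
// model has no throughput information for the instruction.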

double
TargetSchedModel::computeReciprocalThroughput(unsigned Opcode) const {
  unsigned SchedClass = TII->get(Opcode).getSchedClass();
  if (hasInstrItineraries())
    return MCSchedModel::getReciprocalThroughput(SchedClass,
                                                 *getInstrItineraries());
  if (hasInstrSchedModel()) {
    const MCSchedClassDesc &SCDesc = *SchedModel.getSchedClassDesc(SchedClass);
    if (SCDesc.isValid() && !SCDesc.isVariant())
      return MCSchedModel::getReciprocalThroughput(*STI, SCDesc);
  }
  return 0.0;
}

double
TargetSchedModel::computeReciprocalThroughput(const MCInst &MI) const {
  if (hasInstrSchedModel())
    return SchedModel.getReciprocalThroughput(*STI, *TII, MI);
  return computeReciprocalThroughput(MI.getOpcode());
}