//===- llvm/CodeGen/TargetSchedule.h - Sched Machine Model ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines a wrapper around MCSchedModel that allows the interface to
// benefit from information currently only available in TargetInstrInfo.
// Ideally, the scheduling interface would be fully defined in the MC layer.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_TARGETSCHEDULE_H
#define LLVM_CODEGEN_TARGETSCHEDULE_H

#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/MC/MCSchedule.h"

namespace llvm {

class MachineInstr;
class TargetInstrInfo;

/// Provide an instruction scheduling machine model to CodeGen passes.
class TargetSchedModel {
  // For efficiency, hold a copy of the statically defined MCSchedModel for
  // this processor.
  MCSchedModel SchedModel;
  InstrItineraryData InstrItins;
  const TargetSubtargetInfo *STI = nullptr;
  const TargetInstrInfo *TII = nullptr;

  SmallVector<unsigned, 16> ResourceFactors;
  unsigned MicroOpFactor; // Multiply to normalize microops to resource units.
  unsigned ResourceLCM;   // Resource units per cycle. Latency normalization factor.

  unsigned computeInstrLatency(const MCSchedClassDesc &SCDesc) const;

public:
  TargetSchedModel() : SchedModel(MCSchedModel::GetDefaultSchedModel()) {}

  /// Initialize the machine model for instruction scheduling.
  ///
  /// The machine model API keeps a copy of the top-level MCSchedModel table
  /// indices and may query TargetSubtargetInfo and TargetInstrInfo to resolve
  /// dynamic properties.
  void init(const TargetSubtargetInfo *TSInfo);
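
  // Usage sketch (illustrative only, not part of this interface): a CodeGen
  // pass holding a MachineFunction `MF` would typically initialize the model
  // from the function's subtarget; `MF` is assumed to exist in the caller.
  //
  //   TargetSchedModel Model;
  //   Model.init(&MF.getSubtarget());
  //   if (Model.hasInstrSchedModelOrItineraries()) {
  //     // Latency/throughput queries below are backed by real target data.
  //   }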

  /// Return the MCSchedClassDesc for this instruction.
  const MCSchedClassDesc *resolveSchedClass(const MachineInstr *MI) const;

  /// TargetSubtargetInfo getter.
  const TargetSubtargetInfo *getSubtargetInfo() const { return STI; }

  /// TargetInstrInfo getter.
  const TargetInstrInfo *getInstrInfo() const { return TII; }

  /// Return true if this machine model includes an instruction-level
  /// scheduling model.
  ///
  /// This is more detailed than the coarse-grained IssueWidth and default
  /// latency properties, but separate from the per-cycle itinerary data.
  bool hasInstrSchedModel() const;

  const MCSchedModel *getMCSchedModel() const { return &SchedModel; }

  /// Return true if this machine model includes cycle-to-cycle itinerary
  /// data.
  ///
  /// This models scheduling at each stage in the processor pipeline.
  bool hasInstrItineraries() const;

  const InstrItineraryData *getInstrItineraries() const {
    if (hasInstrItineraries())
      return &InstrItins;
    return nullptr;
  }

  /// Return true if this machine model includes an instruction-level
  /// scheduling model or cycle-to-cycle itinerary data.
  bool hasInstrSchedModelOrItineraries() const {
    return hasInstrSchedModel() || hasInstrItineraries();
  }

  /// Identify the processor corresponding to the current subtarget.
  unsigned getProcessorID() const { return SchedModel.getProcessorID(); }

  /// Maximum number of micro-ops that may be scheduled per cycle.
  unsigned getIssueWidth() const { return SchedModel.IssueWidth; }

  /// Return true if a new issue group must begin with this instruction.
  bool mustBeginGroup(const MachineInstr *MI,
                      const MCSchedClassDesc *SC = nullptr) const;

  /// Return true if the current issue group must end after this instruction.
  bool mustEndGroup(const MachineInstr *MI,
                    const MCSchedClassDesc *SC = nullptr) const;

  /// Return the number of issue slots required for this MI.
  unsigned getNumMicroOps(const MachineInstr *MI,
                          const MCSchedClassDesc *SC = nullptr) const;
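
  // Usage sketch (illustrative): a scheduling heuristic might compare an
  // instruction's micro-op count against the slots still free in the current
  // cycle; `MI` and `SlotsLeft` are hypothetical locals of the caller.
  //
  //   unsigned UOps = Model.getNumMicroOps(&MI);
  //   if (UOps > SlotsLeft || Model.mustBeginGroup(&MI)) {
  //     // Start a new issue group (and thus a new cycle) before MI.
  //   }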

  /// Get the number of kinds of resources for this target.
  unsigned getNumProcResourceKinds() const {
    return SchedModel.getNumProcResourceKinds();
  }

  /// Get a processor resource by ID for convenience.
  const MCProcResourceDesc *getProcResource(unsigned PIdx) const {
    return SchedModel.getProcResource(PIdx);
  }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  const char *getResourceName(unsigned PIdx) const {
    if (!PIdx)
      return "MOps";
    return SchedModel.getProcResource(PIdx)->Name;
  }
#endif

  using ProcResIter = const MCWriteProcResEntry *;

  // Get an iterator into the processor resources consumed by this
  // scheduling class.
  ProcResIter getWriteProcResBegin(const MCSchedClassDesc *SC) const {
    // The subtarget holds a single resource table for all processors.
    return STI->getWriteProcResBegin(SC);
  }
  ProcResIter getWriteProcResEnd(const MCSchedClassDesc *SC) const {
    return STI->getWriteProcResEnd(SC);
  }
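
  // Usage sketch (illustrative): walking the resources written by an
  // instruction's scheduling class. `MI` is assumed to be a MachineInstr the
  // caller already has; getResourceName() exists only in builds where the
  // #if block above is compiled in.
  //
  //   const MCSchedClassDesc *SC = Model.resolveSchedClass(&MI);
  //   if (SC && SC->isValid()) {
  //     TargetSchedModel::ProcResIter PI = Model.getWriteProcResBegin(SC);
  //     TargetSchedModel::ProcResIter PE = Model.getWriteProcResEnd(SC);
  //     for (; PI != PE; ++PI)
  //       dbgs() << Model.getResourceName(PI->ProcResourceIdx) << " for "
  //              << PI->Cycles << " cycle(s)\n";
  //   }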

  /// Multiply the number of units consumed for a resource by this factor
  /// to normalize it relative to other resources.
  unsigned getResourceFactor(unsigned ResIdx) const {
    return ResourceFactors[ResIdx];
  }

  /// Multiply the number of micro-ops by this factor to normalize it
  /// relative to other resources.
  unsigned getMicroOpFactor() const {
    return MicroOpFactor;
  }

  /// Multiply a cycle count by this factor to normalize it relative to
  /// other resources. This is the number of resource units per cycle.
  unsigned getLatencyFactor() const {
    return ResourceLCM;
  }
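
  // Illustrative sketch of how these factors put different quantities on a
  // common scale. The variable names are hypothetical; `PI` would come from
  // the getWriteProcResBegin()/getWriteProcResEnd() iteration above and
  // `NumCycles` from the caller.
  //
  //   // Cycles spent on one resource kind, normalized:
  //   unsigned ResUnits =
  //       PI->Cycles * Model.getResourceFactor(PI->ProcResourceIdx);
  //   // Micro-ops issued, normalized onto the same scale:
  //   unsigned UOpUnits = Model.getNumMicroOps(&MI) * Model.getMicroOpFactor();
  //   // A latency/cycle count, normalized onto the same scale:
  //   unsigned CycleUnits = NumCycles * Model.getLatencyFactor();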

  /// Number of micro-ops that may be buffered for OOO execution.
  unsigned getMicroOpBufferSize() const { return SchedModel.MicroOpBufferSize; }

  /// Number of resource units that may be buffered for OOO execution.
  /// \return The buffer size in resource units, or -1 for unlimited.
  int getResourceBufferSize(unsigned PIdx) const {
    return SchedModel.getProcResource(PIdx)->BufferSize;
  }

  /// Compute operand latency based on the available machine model.
  ///
  /// Compute and return the latency of the given data-dependent def and use
  /// when the operand indices are already known. UseMI may be NULL for an
  /// unknown user.
  unsigned computeOperandLatency(const MachineInstr *DefMI, unsigned DefOperIdx,
                                 const MachineInstr *UseMI, unsigned UseOperIdx)
    const;
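
  // Usage sketch (illustrative): latency of a known def-use dependence;
  // `DefMI`, `DefIdx`, `UseMI`, and `UseIdx` are assumed to come from the
  // caller's dependence analysis.
  //
  //   unsigned Lat = Model.computeOperandLatency(DefMI, DefIdx, UseMI, UseIdx);
  //   // UseMI may be NULL when the user is unknown:
  //   unsigned DefLat = Model.computeOperandLatency(DefMI, DefIdx, nullptr, 0);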

  /// Compute the instruction latency based on the available machine
  /// model.
  ///
  /// Compute and return the expected latency of this instruction independent
  /// of a particular use. computeOperandLatency is the preferred API, but this
  /// is occasionally useful to help estimate instruction cost.
  ///
  /// If UseDefaultDefLatency is false and no new machine sched model is
  /// present, this method falls back to TII->getInstrLatency with an empty
  /// instruction itinerary (this preserves the previous behavior of the
  /// if-converter after its move to TargetSchedModel).
  unsigned computeInstrLatency(const MachineInstr *MI,
                               bool UseDefaultDefLatency = true) const;
  unsigned computeInstrLatency(const MCInst &Inst) const;
  unsigned computeInstrLatency(unsigned Opcode) const;
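
  // Usage sketch (illustrative): a per-instruction cost estimate when no
  // particular user is in view (computeOperandLatency is preferred when one
  // is); `MI` is assumed to be a MachineInstr visible to the caller.
  //
  //   unsigned Cycles = Model.computeInstrLatency(&MI);
  //   unsigned ByOpcode = Model.computeInstrLatency(MI.getOpcode());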

  /// Output dependency latency of a pair of defs of the same register.
  ///
  /// This is typically one cycle.
  unsigned computeOutputLatency(const MachineInstr *DefMI, unsigned DefOperIdx,
                                const MachineInstr *DepMI) const;

  /// Compute the reciprocal throughput of the given instruction.
  double computeReciprocalThroughput(const MachineInstr *MI) const;
  double computeReciprocalThroughput(const MCInst &MI) const;
  double computeReciprocalThroughput(unsigned Opcode) const;
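
  // Usage sketch (illustrative): reciprocal throughput is the average number
  // of cycles between issues of independent instances of an instruction, so
  // smaller is better; e.g. a value of 0.5 means up to two such instructions
  // can start per cycle. `MI` is assumed to be a MachineInstr of the caller.
  //
  //   double RThroughput = Model.computeReciprocalThroughput(&MI);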
};

} // end namespace llvm

#endif // LLVM_CODEGEN_TARGETSCHEDULE_H