//===-- SchedClassResolution.cpp --------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "SchedClassResolution.h"
#include "BenchmarkResult.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MCA/Support.h"
#include "llvm/Support/FormatVariadic.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <utility>
#include <vector>

namespace llvm {
namespace exegesis {

// Return the non-redundant list of WriteProcRes used by the given sched class.
// The scheduling model for LLVM is such that each instruction has a certain
// number of uops which consume resources which are described by WriteProcRes
// entries. Each entry describes how many cycles are spent on a specific ProcRes
// kind.
// For example, an instruction might have 3 uOps, one dispatching on P0
// (ProcResIdx=1) and two on P06 (ProcResIdx = 7).
// Note that LLVM additionally denormalizes resource consumption to include
// usage of super resources by subresources. So in practice if there exists a
// P016 (ProcResIdx=10), then the cycles consumed by P0 are also consumed by
// P06 (ProcResIdx = 7) and P016 (ProcResIdx = 10), and the resources consumed
// by P06 are also consumed by P016. In the figure below, parenthesized cycles
// denote implied usage of superresources by subresources:
//            P0      P06    P016
//   uOp1      1      (1)     (1)
//   uOp2              1      (1)
//   uOp3              1      (1)
//  =============================
//             1       3       3
// Eventually we end up with three entries for the WriteProcRes of the
// instruction:
//    {ProcResIdx=1,  Cycles=1}  // P0
//    {ProcResIdx=7,  Cycles=3}  // P06
//    {ProcResIdx=10, Cycles=3}  // P016
//
// Note that in this case, P016 does not contribute any cycles, so it would
// be removed by this function.
// FIXME: Merge this with the equivalent in llvm-mca.
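// Tracing this function on the example above (an illustration, not part of
// the scheduling model itself): P0 keeps {ProcResIdx=1, Cycles=1}, P06 is
// reduced to its own contribution {ProcResIdx=7, Cycles=2}, and P016 is
// dropped entirely.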
static SmallVector<MCWriteProcResEntry, 8>
getNonRedundantWriteProcRes(const MCSchedClassDesc &SCDesc,
                            const MCSubtargetInfo &STI) {
  SmallVector<MCWriteProcResEntry, 8> Result;
  const auto &SM = STI.getSchedModel();
  const unsigned NumProcRes = SM.getNumProcResourceKinds();

  // Collect resource masks.
  SmallVector<uint64_t> ProcResourceMasks(NumProcRes);
  mca::computeProcResourceMasks(SM, ProcResourceMasks);

  // Sort entries by smaller resources for (basic) topological ordering.
  using ResourceMaskAndEntry = std::pair<uint64_t, const MCWriteProcResEntry *>;
  SmallVector<ResourceMaskAndEntry, 8> ResourceMaskAndEntries;
  for (const auto *WPR = STI.getWriteProcResBegin(&SCDesc),
                  *const WPREnd = STI.getWriteProcResEnd(&SCDesc);
       WPR != WPREnd; ++WPR) {
    uint64_t Mask = ProcResourceMasks[WPR->ProcResourceIdx];
    ResourceMaskAndEntries.push_back({Mask, WPR});
  }
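  // Sorting by increasing mask popcount (then by mask value) visits plain
  // units before the groups that contain them, and smaller groups before
  // their super-groups, so each group's own cycle contribution can be found
  // by subtracting what its subunits already account for.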
  sort(ResourceMaskAndEntries,
       [](const ResourceMaskAndEntry &A, const ResourceMaskAndEntry &B) {
         unsigned popcntA = popcount(A.first);
         unsigned popcntB = popcount(B.first);
         if (popcntA < popcntB)
           return true;
         if (popcntA > popcntB)
           return false;
         return A.first < B.first;
       });

  SmallVector<float, 32> ProcResUnitUsage(NumProcRes);
  for (const ResourceMaskAndEntry &Entry : ResourceMaskAndEntries) {
    const MCWriteProcResEntry *WPR = Entry.second;
    const MCProcResourceDesc *const ProcResDesc =
        SM.getProcResource(WPR->ProcResourceIdx);
    // TODO: Handle AcquireAtCycle in llvm-exegesis and llvm-mca. See
    // https://github.com/llvm/llvm-project/issues/62680 and
    // https://github.com/llvm/llvm-project/issues/62681
    assert(WPR->AcquireAtCycle == 0 &&
           "`llvm-exegesis` does not handle AcquireAtCycle > 0");
    if (ProcResDesc->SubUnitsIdxBegin == nullptr) {
      // This is a ProcResUnit.
      Result.push_back(
          {WPR->ProcResourceIdx, WPR->ReleaseAtCycle, WPR->AcquireAtCycle});
      ProcResUnitUsage[WPR->ProcResourceIdx] += WPR->ReleaseAtCycle;
    } else {
      // This is a ProcResGroup. First see if it contributes any cycles or if
      // it has cycles just from subunits.
      float RemainingCycles = WPR->ReleaseAtCycle;
      for (const auto *SubResIdx = ProcResDesc->SubUnitsIdxBegin;
           SubResIdx != ProcResDesc->SubUnitsIdxBegin + ProcResDesc->NumUnits;
           ++SubResIdx) {
        RemainingCycles -= ProcResUnitUsage[*SubResIdx];
      }
      if (RemainingCycles < 0.01f) {
        // The ProcResGroup contributes no cycles of its own.
        continue;
      }
      // The ProcResGroup contributes `RemainingCycles` cycles of its own.
      Result.push_back({WPR->ProcResourceIdx,
                        static_cast<uint16_t>(std::round(RemainingCycles)),
                        WPR->AcquireAtCycle});
      // Spread the remaining cycles over all subunits.
      for (const auto *SubResIdx = ProcResDesc->SubUnitsIdxBegin;
           SubResIdx != ProcResDesc->SubUnitsIdxBegin + ProcResDesc->NumUnits;
           ++SubResIdx) {
        ProcResUnitUsage[*SubResIdx] += RemainingCycles / ProcResDesc->NumUnits;
      }
    }
  }
  return Result;
}

// Distributes a pressure budget as evenly as possible on the provided subunits
// given the already existing port pressure distribution.
//
// The algorithm is as follows: while there is remaining pressure to
// distribute, find the subunits with minimal pressure, and distribute
// remaining pressure equally up to the pressure of the unit with
// second-to-minimal pressure.
// For example, let's assume we want to distribute 2*P1256
// (Subunits = [P1,P2,P5,P6]), and the starting DensePressure is:
//     DensePressure =   P0   P1   P2   P3   P4   P5   P6   P7
//                      0.1  0.3  0.2  0.0  0.0  0.5  0.5  0.5
//     RemainingPressure = 2.0
// We sort the subunits by pressure:
//     Subunits = [(P2,p=0.2), (P1,p=0.3), (P5,p=0.5), (P6,p=0.5)]
// We'll first start by the subunits with minimal pressure, which are at
// the beginning of the sorted array. In this example there is one (P2).
// The subunit with second-to-minimal pressure is the next one in the
// array (P1). So we distribute 0.1 pressure to P2, and remove 0.1 cycles
// from the budget:
//     Subunits = [(P2,p=0.3), (P1,p=0.3), (P5,p=0.5), (P6,p=0.5)]
//     RemainingPressure = 1.9
// We repeat this process: distribute 0.2 pressure on each of the minimal
// P2 and P1, decrease budget by 2*0.2:
//     Subunits = [(P2,p=0.5), (P1,p=0.5), (P5,p=0.5), (P6,p=0.5)]
//     RemainingPressure = 1.5
// There are no second-to-minimal subunits so we just share the remaining
// budget (1.5 cycles) equally:
//     Subunits = [(P2,p=0.875), (P1,p=0.875), (P5,p=0.875), (P6,p=0.875)]
//     RemainingPressure = 0.0
// We stop as there is no remaining budget to distribute.
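// Note that `Subunits` is taken by value and sorted locally, leaving the
// caller's copy untouched; `DensePressure` is indexed by ProcResourceIdx and
// updated in place.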
static void distributePressure(float RemainingPressure,
                               SmallVector<uint16_t, 32> Subunits,
                               SmallVector<float, 32> &DensePressure) {
  // Find the number of subunits with minimal pressure (they are at the
  // front).
  sort(Subunits, [&DensePressure](const uint16_t A, const uint16_t B) {
    return DensePressure[A] < DensePressure[B];
  });
  const auto getPressureForSubunit = [&DensePressure,
                                      &Subunits](size_t I) -> float & {
    return DensePressure[Subunits[I]];
  };
  size_t NumMinimalSU = 1;
  while (NumMinimalSU < Subunits.size() &&
         getPressureForSubunit(NumMinimalSU) == getPressureForSubunit(0)) {
    ++NumMinimalSU;
  }
  while (RemainingPressure > 0.0f) {
    if (NumMinimalSU == Subunits.size()) {
      // All units are minimal, just distribute evenly and be done.
      for (size_t I = 0; I < NumMinimalSU; ++I) {
        getPressureForSubunit(I) += RemainingPressure / NumMinimalSU;
      }
      return;
    }
    // Distribute the remaining pressure equally.
    const float MinimalPressure = getPressureForSubunit(NumMinimalSU - 1);
    const float SecondToMinimalPressure = getPressureForSubunit(NumMinimalSU);
    assert(MinimalPressure < SecondToMinimalPressure);
    const float Increment = SecondToMinimalPressure - MinimalPressure;
    if (RemainingPressure <= NumMinimalSU * Increment) {
      // There is not enough remaining pressure.
      for (size_t I = 0; I < NumMinimalSU; ++I) {
        getPressureForSubunit(I) += RemainingPressure / NumMinimalSU;
      }
      return;
    }
    // Bump all minimal pressure subunits to `SecondToMinimalPressure`.
    for (size_t I = 0; I < NumMinimalSU; ++I) {
      getPressureForSubunit(I) = SecondToMinimalPressure;
      RemainingPressure -= SecondToMinimalPressure;
    }
    while (NumMinimalSU < Subunits.size() &&
           getPressureForSubunit(NumMinimalSU) == SecondToMinimalPressure) {
      ++NumMinimalSU;
    }
  }
}

std::vector<std::pair<uint16_t, float>>
computeIdealizedProcResPressure(const MCSchedModel &SM,
                                SmallVector<MCWriteProcResEntry, 8> WPRS) {
  // DensePressure[I] is the port pressure for Proc Resource I.
  SmallVector<float, 32> DensePressure(SM.getNumProcResourceKinds());
  sort(WPRS, [](const MCWriteProcResEntry &A, const MCWriteProcResEntry &B) {
    return A.ProcResourceIdx < B.ProcResourceIdx;
  });
  for (const MCWriteProcResEntry &WPR : WPRS) {
    // Get units for the entry.
    const MCProcResourceDesc *const ProcResDesc =
        SM.getProcResource(WPR.ProcResourceIdx);
    if (ProcResDesc->SubUnitsIdxBegin == nullptr) {
      // This is a ProcResUnit.
      DensePressure[WPR.ProcResourceIdx] += WPR.ReleaseAtCycle;
    } else {
      // This is a ProcResGroup.
      SmallVector<uint16_t, 32> Subunits(ProcResDesc->SubUnitsIdxBegin,
                                         ProcResDesc->SubUnitsIdxBegin +
                                             ProcResDesc->NumUnits);
      distributePressure(WPR.ReleaseAtCycle, Subunits, DensePressure);
    }
  }
  // Turn dense pressure into sparse pressure by removing zero entries.
  std::vector<std::pair<uint16_t, float>> Pressure;
  for (unsigned I = 0, E = SM.getNumProcResourceKinds(); I < E; ++I) {
    if (DensePressure[I] > 0.0f)
      Pressure.emplace_back(I, DensePressure[I]);
  }
  return Pressure;
}

ResolvedSchedClass::ResolvedSchedClass(const MCSubtargetInfo &STI,
                                       unsigned ResolvedSchedClassId,
                                       bool WasVariant)
    : SchedClassId(ResolvedSchedClassId),
      SCDesc(STI.getSchedModel().getSchedClassDesc(ResolvedSchedClassId)),
      WasVariant(WasVariant),
      NonRedundantWriteProcRes(getNonRedundantWriteProcRes(*SCDesc, STI)),
      IdealizedProcResPressure(computeIdealizedProcResPressure(
          STI.getSchedModel(), NonRedundantWriteProcRes)) {
  assert((SCDesc == nullptr || !SCDesc->isVariant()) &&
         "ResolvedSchedClass should never be variant");
}

static unsigned ResolveVariantSchedClassId(const MCSubtargetInfo &STI,
                                           const MCInstrInfo &InstrInfo,
                                           unsigned SchedClassId,
                                           const MCInst &MCI) {
  const auto &SM = STI.getSchedModel();
  while (SchedClassId && SM.getSchedClassDesc(SchedClassId)->isVariant()) {
    SchedClassId = STI.resolveVariantSchedClass(SchedClassId, &MCI, &InstrInfo,
                                                SM.getProcessorID());
  }
  return SchedClassId;
}

std::pair<unsigned /*SchedClassId*/, bool /*WasVariant*/>
ResolvedSchedClass::resolveSchedClassId(const MCSubtargetInfo &SubtargetInfo,
                                        const MCInstrInfo &InstrInfo,
                                        const MCInst &MCI) {
  unsigned SchedClassId = InstrInfo.get(MCI.getOpcode()).getSchedClass();
  const bool WasVariant = SchedClassId && SubtargetInfo.getSchedModel()
                                              .getSchedClassDesc(SchedClassId)
                                              ->isVariant();
  SchedClassId =
      ResolveVariantSchedClassId(SubtargetInfo, InstrInfo, SchedClassId, MCI);
  return std::make_pair(SchedClassId, WasVariant);
}

// Returns a ProcResIdx by id or name.
static unsigned findProcResIdx(const MCSubtargetInfo &STI,
                               const StringRef NameOrId) {
  // Interpret the key as a ProcResIdx.
  unsigned ProcResIdx = 0;
  if (to_integer(NameOrId, ProcResIdx, 10))
    return ProcResIdx;
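  // A result of 0 below means "not found": callers (see getAsPoint) only
  // treat indices > 0 as valid ProcResIdx values.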
  // Interpret the key as a ProcRes name.
  const auto &SchedModel = STI.getSchedModel();
  for (int I = 0, E = SchedModel.getNumProcResourceKinds(); I < E; ++I) {
    if (NameOrId == SchedModel.getProcResource(I)->Name)
      return I;
  }
  return 0;
}

std::vector<BenchmarkMeasure> ResolvedSchedClass::getAsPoint(
    Benchmark::ModeE Mode, const MCSubtargetInfo &STI,
    ArrayRef<PerInstructionStats> Representative) const {
  const size_t NumMeasurements = Representative.size();

  std::vector<BenchmarkMeasure> SchedClassPoint(NumMeasurements);

  if (Mode == Benchmark::Latency) {
    assert(NumMeasurements == 1 && "Latency is a single measure.");
    BenchmarkMeasure &LatencyMeasure = SchedClassPoint[0];

    // Find the latency.
    LatencyMeasure.PerInstructionValue = 0.0;

    for (unsigned I = 0; I < SCDesc->NumWriteLatencyEntries; ++I) {
      const MCWriteLatencyEntry *const WLE =
          STI.getWriteLatencyEntry(SCDesc, I);
      LatencyMeasure.PerInstructionValue =
          std::max<double>(LatencyMeasure.PerInstructionValue, WLE->Cycles);
    }
  } else if (Mode == Benchmark::Uops) {
    for (auto I : zip(SchedClassPoint, Representative)) {
      BenchmarkMeasure &Measure = std::get<0>(I);
      const PerInstructionStats &Stats = std::get<1>(I);

      StringRef Key = Stats.key();
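      // The stat key identifies what was measured: either a proc resource
      // (given by index or by name) or the special "NumMicroOps" counter.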
      uint16_t ProcResIdx = findProcResIdx(STI, Key);
      if (ProcResIdx > 0) {
        // Find the pressure on ProcResIdx `Key`.
        const auto ProcResPressureIt =
            find_if(IdealizedProcResPressure,
                    [ProcResIdx](const std::pair<uint16_t, float> &WPR) {
                      return WPR.first == ProcResIdx;
                    });
        Measure.PerInstructionValue =
            ProcResPressureIt == IdealizedProcResPressure.end()
                ? 0.0
                : ProcResPressureIt->second;
      } else if (Key == "NumMicroOps") {
        Measure.PerInstructionValue = SCDesc->NumMicroOps;
      } else {
        errs() << "expected `key` to be either a ProcResIdx or a ProcRes "
                  "name, got "
               << Key << "\n";
        return {};
      }
    }
  } else if (Mode == Benchmark::InverseThroughput) {
    assert(NumMeasurements == 1 && "Inverse Throughput is a single measure.");
    BenchmarkMeasure &RThroughputMeasure = SchedClassPoint[0];

    RThroughputMeasure.PerInstructionValue =
        MCSchedModel::getReciprocalThroughput(STI, *SCDesc);
  } else {
    llvm_unreachable("unimplemented measurement matching mode");
  }

  return SchedClassPoint;
}

} // namespace exegesis
} // namespace llvm