//===--------------------- SchedulerStatistics.cpp -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file implements the SchedulerStatistics interface.
///
//===----------------------------------------------------------------------===//

#include "Views/SchedulerStatistics.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/FormattedStream.h"

namespace llvm {
namespace mca {
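
// The load/store queue resource IDs stay 0 (i.e. invalid) unless the
// scheduling model provides extra processor information; when they are 0,
// LQ/SQ occupancy is simply not tracked by this view.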
SchedulerStatistics::SchedulerStatistics(const llvm::MCSubtargetInfo &STI)
    : SM(STI.getSchedModel()), LQResourceID(0), SQResourceID(0), NumIssued(0),
      NumCycles(0), MostRecentLoadDispatched(~0U),
      MostRecentStoreDispatched(~0U),
      Usage(STI.getSchedModel().NumProcResourceKinds, {0, 0, 0}) {
  if (SM.hasExtraProcessorInfo()) {
    const MCExtraProcessorInfo &EPI = SM.getExtraProcessorInfo();
    LQResourceID = EPI.LoadQueueID;
    SQResourceID = EPI.StoreQueueID;
  }
}

// FIXME: This implementation works under the assumption that load/store queue
// entries are reserved at 'instruction dispatched' stage, and released at
// 'instruction executed' stage. This currently matches the behavior of LSUnit.
//
// The current design minimizes the number of events generated by the
// Dispatch/Execute stages, at the cost of doing extra bookkeeping in method
// `onEvent`. However, it introduces a subtle dependency between this view and
// how the LSUnit works.
//
// In future we should add a new "memory queue" event type, so that we stop
// making assumptions on how LSUnit internally works (See PR39828).
void SchedulerStatistics::onEvent(const HWInstructionEvent &Event) {
  if (Event.Type == HWInstructionEvent::Issued) {
    const Instruction &Inst = *Event.IR.getInstruction();
    NumIssued += Inst.getDesc().NumMicroOps;
  } else if (Event.Type == HWInstructionEvent::Dispatched) {
    const Instruction &Inst = *Event.IR.getInstruction();
    const unsigned Index = Event.IR.getSourceIndex();
    if (LQResourceID && Inst.getDesc().MayLoad &&
        MostRecentLoadDispatched != Index) {
      Usage[LQResourceID].SlotsInUse++;
      MostRecentLoadDispatched = Index;
    }
    if (SQResourceID && Inst.getDesc().MayStore &&
        MostRecentStoreDispatched != Index) {
      Usage[SQResourceID].SlotsInUse++;
      MostRecentStoreDispatched = Index;
    }
  } else if (Event.Type == HWInstructionEvent::Executed) {
    const Instruction &Inst = *Event.IR.getInstruction();
    if (LQResourceID && Inst.getDesc().MayLoad) {
      assert(Usage[LQResourceID].SlotsInUse);
      Usage[LQResourceID].SlotsInUse--;
    }
    if (SQResourceID && Inst.getDesc().MayStore) {
      assert(Usage[SQResourceID].SlotsInUse);
      Usage[SQResourceID].SlotsInUse--;
    }
  }
}
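
// Buffer reservations for the load and store queues are deliberately ignored
// here: their occupancy is already tracked by onEvent() at the 'Dispatched'
// and 'Executed' events (see the FIXME above).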
void SchedulerStatistics::onReservedBuffers(const InstRef & /* unused */,
                                            ArrayRef<unsigned> Buffers) {
  for (const unsigned Buffer : Buffers) {
    if (Buffer == LQResourceID || Buffer == SQResourceID)
      continue;
    Usage[Buffer].SlotsInUse++;
  }
}
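
// Likewise, released buffer entries for the load/store queues are handled by
// onEvent() rather than here.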
void SchedulerStatistics::onReleasedBuffers(const InstRef & /* unused */,
                                            ArrayRef<unsigned> Buffers) {
  for (const unsigned Buffer : Buffers) {
    if (Buffer == LQResourceID || Buffer == SQResourceID)
      continue;
    Usage[Buffer].SlotsInUse--;
  }
}
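
// Called once per simulated cycle: accumulate the current buffer occupancy
// into the average/maximum statistics, record how many micro opcodes were
// issued during this cycle, and reset the per-cycle counter.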
void SchedulerStatistics::updateHistograms() {
  for (BufferUsage &BU : Usage) {
    BU.CumulativeNumUsedSlots += BU.SlotsInUse;
    BU.MaxUsedSlots = std::max(BU.MaxUsedSlots, BU.SlotsInUse);
  }

  IssueWidthPerCycle[NumIssued]++;
  NumIssued = 0;
}
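
// Prints, for each observed issue width N, the number of cycles where exactly
// N micro opcodes were issued, together with the percentage of total cycles.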
void SchedulerStatistics::printSchedulerStats(raw_ostream &OS) const {
  OS << "\n\nSchedulers - "
     << "number of cycles where we saw N micro opcodes issued:\n";
  OS << "[# issued], [# cycles]\n";

  bool HasColors = OS.has_colors();
  const auto It =
      std::max_element(IssueWidthPerCycle.begin(), IssueWidthPerCycle.end());
  for (const std::pair<const unsigned, unsigned> &Entry : IssueWidthPerCycle) {
    unsigned NumIssued = Entry.first;
    if (NumIssued == It->first && HasColors)
      OS.changeColor(raw_ostream::SAVEDCOLOR, true, false);

    unsigned IPC = Entry.second;
    OS << " " << NumIssued << ", " << IPC << " ("
       << format("%.1f", ((double)IPC / NumCycles) * 100) << "%)\n";
    if (HasColors)
      OS.resetColor();
  }
}
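
// Prints one row per buffered scheduler resource: its average and maximum
// number of used entries over the simulation, and its total size. The average
// is highlighted in yellow when it reaches roughly 80% of the buffer size, and
// the maximum in red when the buffer was completely full at some point (both
// only when the output stream supports colors).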
void SchedulerStatistics::printSchedulerUsage(raw_ostream &OS) const {
  assert(NumCycles && "Unexpected number of cycles!");

  OS << "\nScheduler's queue usage:\n";
  if (all_of(Usage, [](const BufferUsage &BU) { return !BU.MaxUsedSlots; })) {
    OS << "No scheduler resources used.\n";
    return;
  }

  OS << "[1] Resource name.\n"
     << "[2] Average number of used buffer entries.\n"
     << "[3] Maximum number of used buffer entries.\n"
     << "[4] Total number of buffer entries.\n\n"
     << " [1]            [2]        [3]        [4]\n";

  formatted_raw_ostream FOS(OS);
  bool HasColors = FOS.has_colors();
  for (unsigned I = 0, E = SM.getNumProcResourceKinds(); I < E; ++I) {
    const MCProcResourceDesc &ProcResource = *SM.getProcResource(I);
    if (ProcResource.BufferSize <= 0)
      continue;

    const BufferUsage &BU = Usage[I];
    double AvgUsage = (double)BU.CumulativeNumUsedSlots / NumCycles;
    double AlmostFullThreshold = (double)(ProcResource.BufferSize * 4) / 5;
    unsigned NormalizedAvg = floor((AvgUsage * 10) + 0.5) / 10;
    unsigned NormalizedThreshold = floor((AlmostFullThreshold * 10) + 0.5) / 10;

    FOS << ProcResource.Name;
    FOS.PadToColumn(17);
    if (HasColors && NormalizedAvg >= NormalizedThreshold)
      FOS.changeColor(raw_ostream::YELLOW, true, false);
    FOS << NormalizedAvg;
    if (HasColors)
      FOS.resetColor();

    FOS.PadToColumn(28);
    if (HasColors &&
        BU.MaxUsedSlots == static_cast<unsigned>(ProcResource.BufferSize))
      FOS.changeColor(raw_ostream::RED, true, false);
    FOS << BU.MaxUsedSlots;
    if (HasColors)
      FOS.resetColor();

    FOS.PadToColumn(39);
    FOS << ProcResource.BufferSize << '\n';
  }

  FOS.flush();
}

void SchedulerStatistics::printView(raw_ostream &OS) const {
  printSchedulerStats(OS);
  printSchedulerUsage(OS);
}
} // namespace mca
} // namespace llvm