//===---------------------- InOrderIssueStage.cpp ---------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// InOrderIssueStage implements an in-order execution pipeline.
///
//===----------------------------------------------------------------------===//
#include "llvm/MCA/Stages/InOrderIssueStage.h"
#include "llvm/MCA/HardwareUnits/LSUnit.h"
#include "llvm/MCA/HardwareUnits/RegisterFile.h"
#include "llvm/MCA/HardwareUnits/RetireControlUnit.h"
#include "llvm/MCA/Instruction.h"

#define DEBUG_TYPE "llvm-mca"

namespace llvm {
namespace mca {

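// StallInfo tracks the instruction that is currently blocking the in-order
// pipeline, the reason for the stall, and the number of cycles left before
// the pipeline can make progress again.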
void StallInfo::clear() {
  IR.invalidate();
  CyclesLeft = 0;
  Kind = StallKind::DEFAULT;
}

void StallInfo::update(const InstRef &Inst, unsigned Cycles, StallKind SK) {
  IR = Inst;
  CyclesLeft = Cycles;
  Kind = SK;
}

void StallInfo::cycleEnd() {
  if (!isValid())
    return;

  if (!CyclesLeft)
    return;

  --CyclesLeft;
}

InOrderIssueStage::InOrderIssueStage(const MCSubtargetInfo &STI,
                                     RegisterFile &PRF, CustomBehaviour &CB,
                                     LSUnit &LSU)
    : STI(STI), PRF(PRF), RM(STI.getSchedModel()), CB(CB), LSU(LSU),
      NumIssued(), SI(), CarryOver(), Bandwidth(), LastWriteBackCycle() {}

unsigned InOrderIssueStage::getIssueWidth() const {
  return STI.getSchedModel().IssueWidth;
}

bool InOrderIssueStage::hasWorkToComplete() const {
  return !IssuedInst.empty() || SI.isValid() || CarriedOver;
}

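// Returns true if the next instruction can be dispatched in this cycle:
// there is no pending stall or carried-over instruction, enough issue
// bandwidth is left for its micro-ops, and dispatch-group constraints
// (BeginGroup) are satisfied.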
bool InOrderIssueStage::isAvailable(const InstRef &IR) const {
  if (SI.isValid() || CarriedOver)
    return false;

  const Instruction &Inst = *IR.getInstruction();
  unsigned NumMicroOps = Inst.getNumMicroOps();
  const InstrDesc &Desc = Inst.getDesc();

  bool ShouldCarryOver = NumMicroOps > getIssueWidth();
  if (Bandwidth < NumMicroOps && !ShouldCarryOver)
    return false;

  // An instruction with BeginGroup must be the first instruction to be issued
  // in a cycle.
  if (Desc.BeginGroup && NumIssued != 0)
    return false;

  return true;
}

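// Returns true if issuing IR would cause a structural hazard, i.e. the
// processor resources it needs are not all available in this cycle.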
static bool hasResourceHazard(const ResourceManager &RM, const InstRef &IR) {
  if (RM.checkAvailability(IR.getInstruction()->getDesc())) {
    LLVM_DEBUG(dbgs() << "[E] Stall #" << IR << '\n');
    return true;
  }

  return false;
}

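// Returns the number of cycles until the earliest write-back among the
// definitions of IR; the result never exceeds the instruction latency.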
static unsigned findFirstWriteBackCycle(const InstRef &IR) {
  unsigned FirstWBCycle = IR.getInstruction()->getLatency();
  for (const WriteState &WS : IR.getInstruction()->getDefs()) {
    int CyclesLeft = WS.getCyclesLeft();
    if (CyclesLeft == UNKNOWN_CYCLES)
      CyclesLeft = WS.getLatency();
    if (CyclesLeft < 0)
      CyclesLeft = 0;
    FirstWBCycle = std::min(FirstWBCycle, (unsigned)CyclesLeft);
  }
  return FirstWBCycle;
}

/// Return the number of cycles left until the register requirements of the
/// instruction are met.
static unsigned checkRegisterHazard(const RegisterFile &PRF,
                                    const MCSubtargetInfo &STI,
                                    const InstRef &IR) {
  for (const ReadState &RS : IR.getInstruction()->getUses()) {
    RegisterFile::RAWHazard Hazard = PRF.checkRAWHazards(STI, RS);
    if (Hazard.isValid())
      return Hazard.hasUnknownCycles() ? 1U : Hazard.CyclesLeft;
  }

  return 0;
}

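// Returns true if IR can be issued in this cycle. Otherwise, records the
// stall kind and its expected duration in SI and returns false.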
bool InOrderIssueStage::canExecute(const InstRef &IR) {
  assert(!SI.getCyclesLeft() && "Should not have reached this code!");
  assert(!SI.isValid() && "Should not have reached this code!");

  if (unsigned Cycles = checkRegisterHazard(PRF, STI, IR)) {
    SI.update(IR, Cycles, StallInfo::StallKind::REGISTER_DEPS);
    return false;
  }

  if (hasResourceHazard(RM, IR)) {
    SI.update(IR, /* delay */ 1, StallInfo::StallKind::DISPATCH);
    return false;
  }

  if (IR.getInstruction()->isMemOp() && !LSU.isReady(IR)) {
    // This load (store) aliases with a preceding store (load). Delay
    // it until the dependency is cleared.
    SI.update(IR, /* delay */ 1, StallInfo::StallKind::LOAD_STORE);
    return false;
  }

  if (unsigned CustomStallCycles = CB.checkCustomHazard(IssuedInst, IR)) {
    SI.update(IR, CustomStallCycles, StallInfo::StallKind::CUSTOM_STALL);
    return false;
  }

  if (LastWriteBackCycle) {
    if (!IR.getInstruction()->getDesc().RetireOOO) {
      unsigned NextWriteBackCycle = findFirstWriteBackCycle(IR);
      // Delay the instruction to ensure that writes happen in program order.
      if (NextWriteBackCycle < LastWriteBackCycle) {
        SI.update(IR, LastWriteBackCycle - NextWriteBackCycle,
                  StallInfo::StallKind::DELAY);
        return false;
      }
    }
  }

  return true;
}

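// Notifies the register file of every register read and write performed by
// the dispatched instruction, collecting register-file usage in UsedRegs.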
static void addRegisterReadWrite(RegisterFile &PRF, Instruction &IS,
                                 unsigned SourceIndex,
                                 const MCSubtargetInfo &STI,
                                 SmallVectorImpl<unsigned> &UsedRegs) {
  assert(!IS.isEliminated());

  for (ReadState &RS : IS.getUses())
    PRF.addRegisterRead(RS, STI);

  for (WriteState &WS : IS.getDefs())
    PRF.addRegisterWrite(WriteRef(SourceIndex, &WS), UsedRegs);
}

void InOrderIssueStage::notifyInstructionIssued(const InstRef &IR,
                                                ArrayRef<ResourceUse> UsedRes) {
  notifyEvent<HWInstructionEvent>(
      HWInstructionEvent(HWInstructionEvent::Ready, IR));
  notifyEvent<HWInstructionEvent>(HWInstructionIssuedEvent(IR, UsedRes));

  LLVM_DEBUG(dbgs() << "[E] Issued #" << IR << "\n");
}

void InOrderIssueStage::notifyInstructionDispatched(
    const InstRef &IR, unsigned Ops, ArrayRef<unsigned> UsedRegs) {
  notifyEvent<HWInstructionEvent>(
      HWInstructionDispatchedEvent(IR, UsedRegs, Ops));

  LLVM_DEBUG(dbgs() << "[E] Dispatched #" << IR << "\n");
}

void InOrderIssueStage::notifyInstructionExecuted(const InstRef &IR) {
  notifyEvent<HWInstructionEvent>(
      HWInstructionEvent(HWInstructionEvent::Executed, IR));
  LLVM_DEBUG(dbgs() << "[E] Instruction #" << IR << " is executed\n");
}

void InOrderIssueStage::notifyInstructionRetired(const InstRef &IR,
                                                 ArrayRef<unsigned> FreedRegs) {
  notifyEvent<HWInstructionEvent>(HWInstructionRetiredEvent(IR, FreedRegs));
  LLVM_DEBUG(dbgs() << "[E] Retired #" << IR << " \n");
}

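// Entry point for instructions coming from the previous pipeline stage:
// memory operations are first dispatched to the LSU, then the instruction is
// issued (or stalled) in the current cycle.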
llvm::Error InOrderIssueStage::execute(InstRef &IR) {
  Instruction &IS = *IR.getInstruction();
  if (IS.isMemOp())
    IS.setLSUTokenID(LSU.dispatch(IR));

  if (llvm::Error E = tryIssue(IR))
    return E;

  if (SI.isValid())
    notifyStallEvent();

  return llvm::ErrorSuccess();
}

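// Attempts to issue IR in the current cycle: the instruction is dispatched,
// its resources are allocated, and it either completes immediately (zero
// latency) or is added to the list of in-flight instructions. If the
// instruction cannot execute yet, the remaining issue bandwidth is consumed
// and the stall is recorded in SI.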
llvm::Error InOrderIssueStage::tryIssue(InstRef &IR) {
  Instruction &IS = *IR.getInstruction();
  unsigned SourceIndex = IR.getSourceIndex();
  const InstrDesc &Desc = IS.getDesc();

  if (!canExecute(IR)) {
    LLVM_DEBUG(dbgs() << "[N] Stalled #" << SI.getInstruction() << " for "
                      << SI.getCyclesLeft() << " cycles\n");
    Bandwidth = 0;
    return llvm::ErrorSuccess();
  }

  unsigned RCUTokenID = RetireControlUnit::UnhandledTokenID;
  IS.dispatch(RCUTokenID);

  SmallVector<unsigned, 4> UsedRegs(PRF.getNumRegisterFiles());
  addRegisterReadWrite(PRF, IS, SourceIndex, STI, UsedRegs);

  unsigned NumMicroOps = IS.getNumMicroOps();
  notifyInstructionDispatched(IR, NumMicroOps, UsedRegs);

  SmallVector<ResourceUse, 4> UsedResources;
  RM.issueInstruction(Desc, UsedResources);
  IS.execute(SourceIndex);

  if (IS.isMemOp())
    LSU.onInstructionIssued(IR);

  // Replace resource masks with valid resource processor IDs.
  for (ResourceUse &Use : UsedResources) {
    uint64_t Mask = Use.first.first;
    Use.first.first = RM.resolveResourceMask(Mask);
  }
  notifyInstructionIssued(IR, UsedResources);

  bool ShouldCarryOver = NumMicroOps > Bandwidth;
  if (ShouldCarryOver) {
    CarryOver = NumMicroOps - Bandwidth;
    CarriedOver = IR;
    Bandwidth = 0;
    NumIssued += Bandwidth;
    LLVM_DEBUG(dbgs() << "[N] Carry over #" << IR << " \n");
  } else {
    NumIssued += NumMicroOps;
    Bandwidth = Desc.EndGroup ? 0 : Bandwidth - NumMicroOps;
  }

  // If the instruction has a latency of 0, we need to handle the execution
  // and retirement now.
  if (IS.isExecuted()) {
    PRF.onInstructionExecuted(&IS);
    LSU.onInstructionExecuted(IR);
    notifyEvent<HWInstructionEvent>(
        HWInstructionEvent(HWInstructionEvent::Executed, IR));
    LLVM_DEBUG(dbgs() << "[E] Instruction #" << IR << " is executed\n");

    retireInstruction(IR);
    return llvm::ErrorSuccess();
  }

  IssuedInst.push_back(IR);

  if (!IR.getInstruction()->getDesc().RetireOOO)
    LastWriteBackCycle = IS.getCyclesLeft();

  return llvm::ErrorSuccess();
}

void InOrderIssueStage::updateIssuedInst() {
  // Update the in-flight instructions; instructions that have finished
  // executing are retired and removed from IssuedInst.
  unsigned NumExecuted = 0;
  for (auto I = IssuedInst.begin(), E = IssuedInst.end();
       I != (E - NumExecuted);) {
    InstRef &IR = *I;
    Instruction &IS = *IR.getInstruction();

    IS.cycleEvent();
    if (!IS.isExecuted()) {
      LLVM_DEBUG(dbgs() << "[N] Instruction #" << IR
                        << " is still executing\n");
      ++I;
      continue;
    }

    PRF.onInstructionExecuted(&IS);
    LSU.onInstructionExecuted(IR);
    notifyInstructionExecuted(IR);
    ++NumExecuted;

    retireInstruction(*I);

    std::iter_swap(I, E - NumExecuted);
  }

  if (NumExecuted)
    IssuedInst.resize(IssuedInst.size() - NumExecuted);
}

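// Continues issuing the micro-ops of an instruction that did not fit in the
// previous cycle's issue bandwidth; the carry-over state is cleared once all
// of its micro-ops have been issued.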
void InOrderIssueStage::updateCarriedOver() {
  if (!CarriedOver)
    return;

  assert(!SI.isValid() && "A stalled instruction cannot be carried over.");

  if (CarryOver > Bandwidth) {
    CarryOver -= Bandwidth;
    Bandwidth = 0;
    LLVM_DEBUG(dbgs() << "[N] Carry over (" << CarryOver << "uops left) #"
                      << CarriedOver << " \n");
    return;
  }

  LLVM_DEBUG(dbgs() << "[N] Carry over (complete) #" << CarriedOver << " \n");

  if (CarriedOver.getInstruction()->getDesc().EndGroup)
    Bandwidth = 0;
  else
    Bandwidth -= CarryOver;

  CarriedOver = InstRef();
  CarryOver = 0;
}

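// Retires IR: register writes are released, the LSU is informed for memory
// operations, and listeners are notified of the retirement.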
void InOrderIssueStage::retireInstruction(InstRef &IR) {
  Instruction &IS = *IR.getInstruction();
  IS.retire();

  llvm::SmallVector<unsigned, 4> FreedRegs(PRF.getNumRegisterFiles());
  for (const WriteState &WS : IS.getDefs())
    PRF.removeRegisterWrite(WS, FreedRegs);

  if (IS.isMemOp())
    LSU.onInstructionRetired(IR);

  notifyInstructionRetired(IR, FreedRegs);
}

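// Translates the stall recorded in SI into the corresponding HWStallEvent
// (and, where applicable, HWPressureEvent) notifications.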
void InOrderIssueStage::notifyStallEvent() {
  assert(SI.getCyclesLeft() && "A zero cycles stall?");
  assert(SI.isValid() && "Invalid stall information found!");

  const InstRef &IR = SI.getInstruction();

  switch (SI.getStallKind()) {
  default:
    break;
  case StallInfo::StallKind::REGISTER_DEPS: {
    notifyEvent<HWStallEvent>(
        HWStallEvent(HWStallEvent::RegisterFileStall, IR));
    notifyEvent<HWPressureEvent>(
        HWPressureEvent(HWPressureEvent::REGISTER_DEPS, IR));
    break;
  }
  case StallInfo::StallKind::DISPATCH: {
    notifyEvent<HWStallEvent>(
        HWStallEvent(HWStallEvent::DispatchGroupStall, IR));
    notifyEvent<HWPressureEvent>(
        HWPressureEvent(HWPressureEvent::RESOURCES, IR));
    break;
  }
  case StallInfo::StallKind::CUSTOM_STALL: {
    notifyEvent<HWStallEvent>(
        HWStallEvent(HWStallEvent::CustomBehaviourStall, IR));
    break;
  }
  }
}

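// Per-cycle bookkeeping: resets the issue bandwidth, advances the hardware
// units, updates in-flight and carried-over instructions, and retries a
// previously stalled instruction once its stall cycles have elapsed.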
llvm::Error InOrderIssueStage::cycleStart() {
  NumIssued = 0;
  Bandwidth = getIssueWidth();

  PRF.cycleStart();
  LSU.cycleEvent();

  // Release consumed resources.
  SmallVector<ResourceRef, 4> Freed;
  RM.cycleEvent(Freed);

  updateIssuedInst();

  // Continue to issue the instruction carried over from the previous cycle.
  updateCarriedOver();

  // Issue instructions scheduled for this cycle.
  if (SI.isValid()) {
    if (!SI.getCyclesLeft()) {
      // Make a copy of the reference, and try to issue it again.
      // Do not take the instruction reference because SI.clear() will
      // invalidate it.
      InstRef IR = SI.getInstruction();
      SI.clear();

      if (llvm::Error E = tryIssue(IR))
        return E;
    }

    if (SI.getCyclesLeft()) {
      // The instruction is still stalled, cannot issue any new instructions
      // in this cycle.
      notifyStallEvent();
      Bandwidth = 0;
      return llvm::ErrorSuccess();
    }
  }

  assert((NumIssued <= getIssueWidth()) && "Overflow.");
  return llvm::ErrorSuccess();
}

llvm::Error InOrderIssueStage::cycleEnd() {
  PRF.cycleEnd();
  SI.cycleEnd();

  if (LastWriteBackCycle > 0)
    --LastWriteBackCycle;

  return llvm::ErrorSuccess();
}

} // namespace mca
} // namespace llvm