// Source: llvm-project — llvm/lib/MCA/Stages/DispatchStage.cpp
//===--------------------- DispatchStage.cpp --------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file models the dispatch component of an instruction pipeline.
///
/// The DispatchStage is responsible for updating instruction dependencies
/// and communicating to the simulated instruction scheduler that an instruction
/// is ready to be scheduled for execution.
///
//===----------------------------------------------------------------------===//
#include "llvm/MCA/Stages/DispatchStage.h"
#include "llvm/MCA/HWEventListener.h"
#include "llvm/MCA/HardwareUnits/Scheduler.h"
#include "llvm/Support/Debug.h"

#define DEBUG_TYPE "llvm-mca"

namespace llvm {
namespace mca {
28 DispatchStage::DispatchStage(const MCSubtargetInfo &Subtarget,
29 const MCRegisterInfo &MRI,
30 unsigned MaxDispatchWidth, RetireControlUnit &R,
31 RegisterFile &F)
32 : DispatchWidth(MaxDispatchWidth), AvailableEntries(MaxDispatchWidth),
33 CarryOver(0U), CarriedOver(), STI(Subtarget), RCU(R), PRF(F) {
34 if (!DispatchWidth)
35 DispatchWidth = Subtarget.getSchedModel().IssueWidth;
38 void DispatchStage::notifyInstructionDispatched(const InstRef &IR,
39 ArrayRef<unsigned> UsedRegs,
40 unsigned UOps) const {
41 LLVM_DEBUG(dbgs() << "[E] Instruction Dispatched: #" << IR << '\n');
42 notifyEvent<HWInstructionEvent>(
43 HWInstructionDispatchedEvent(IR, UsedRegs, UOps));
46 bool DispatchStage::checkPRF(const InstRef &IR) const {
47 SmallVector<MCPhysReg, 4> RegDefs;
48 for (const WriteState &RegDef : IR.getInstruction()->getDefs())
49 RegDefs.emplace_back(RegDef.getRegisterID());
51 const unsigned RegisterMask = PRF.isAvailable(RegDefs);
52 // A mask with all zeroes means: register files are available.
53 if (RegisterMask) {
54 notifyEvent<HWStallEvent>(
55 HWStallEvent(HWStallEvent::RegisterFileStall, IR));
56 return false;
59 return true;
62 bool DispatchStage::checkRCU(const InstRef &IR) const {
63 const unsigned NumMicroOps = IR.getInstruction()->getNumMicroOps();
64 if (RCU.isAvailable(NumMicroOps))
65 return true;
66 notifyEvent<HWStallEvent>(
67 HWStallEvent(HWStallEvent::RetireControlUnitStall, IR));
68 return false;
71 bool DispatchStage::canDispatch(const InstRef &IR) const {
72 bool CanDispatch = checkRCU(IR);
73 CanDispatch &= checkPRF(IR);
74 CanDispatch &= checkNextStage(IR);
75 return CanDispatch;
78 Error DispatchStage::dispatch(InstRef IR) {
79 assert(!CarryOver && "Cannot dispatch another instruction!");
80 Instruction &IS = *IR.getInstruction();
81 const InstrDesc &Desc = IS.getDesc();
82 const unsigned NumMicroOps = IS.getNumMicroOps();
83 if (NumMicroOps > DispatchWidth) {
84 assert(AvailableEntries == DispatchWidth);
85 AvailableEntries = 0;
86 CarryOver = NumMicroOps - DispatchWidth;
87 CarriedOver = IR;
88 } else {
89 assert(AvailableEntries >= NumMicroOps);
90 AvailableEntries -= NumMicroOps;
93 // Check if this instructions ends the dispatch group.
94 if (Desc.EndGroup)
95 AvailableEntries = 0;
97 // Check if this is an optimizable reg-reg move or an XCHG-like instruction.
98 if (IS.isOptimizableMove())
99 if (PRF.tryEliminateMoveOrSwap(IS.getDefs(), IS.getUses()))
100 IS.setEliminated();
102 // A dependency-breaking instruction doesn't have to wait on the register
103 // input operands, and it is often optimized at register renaming stage.
104 // Update RAW dependencies if this instruction is not a dependency-breaking
105 // instruction. A dependency-breaking instruction is a zero-latency
106 // instruction that doesn't consume hardware resources.
107 // An example of dependency-breaking instruction on X86 is a zero-idiom XOR.
109 // We also don't update data dependencies for instructions that have been
110 // eliminated at register renaming stage.
111 if (!IS.isEliminated()) {
112 for (ReadState &RS : IS.getUses())
113 PRF.addRegisterRead(RS, STI);
116 // By default, a dependency-breaking zero-idiom is expected to be optimized
117 // at register renaming stage. That means, no physical register is allocated
118 // to the instruction.
119 SmallVector<unsigned, 4> RegisterFiles(PRF.getNumRegisterFiles());
120 for (WriteState &WS : IS.getDefs())
121 PRF.addRegisterWrite(WriteRef(IR.getSourceIndex(), &WS), RegisterFiles);
123 // Reserve entries in the reorder buffer.
124 unsigned RCUTokenID = RCU.dispatch(IR);
125 // Notify the instruction that it has been dispatched.
126 IS.dispatch(RCUTokenID);
128 // Notify listeners of the "instruction dispatched" event,
129 // and move IR to the next stage.
130 notifyInstructionDispatched(IR, RegisterFiles,
131 std::min(DispatchWidth, NumMicroOps));
132 return moveToTheNextStage(IR);
135 Error DispatchStage::cycleStart() {
136 // The retire stage is responsible for calling method `cycleStart`
137 // on the PRF.
138 if (!CarryOver) {
139 AvailableEntries = DispatchWidth;
140 return ErrorSuccess();
143 AvailableEntries = CarryOver >= DispatchWidth ? 0 : DispatchWidth - CarryOver;
144 unsigned DispatchedOpcodes = DispatchWidth - AvailableEntries;
145 CarryOver -= DispatchedOpcodes;
146 assert(CarriedOver && "Invalid dispatched instruction");
148 SmallVector<unsigned, 8> RegisterFiles(PRF.getNumRegisterFiles(), 0U);
149 notifyInstructionDispatched(CarriedOver, RegisterFiles, DispatchedOpcodes);
150 if (!CarryOver)
151 CarriedOver = InstRef();
152 return ErrorSuccess();
155 bool DispatchStage::isAvailable(const InstRef &IR) const {
156 // Conservatively bail out if there are no available dispatch entries.
157 if (!AvailableEntries)
158 return false;
160 const Instruction &Inst = *IR.getInstruction();
161 unsigned NumMicroOps = Inst.getNumMicroOps();
162 const InstrDesc &Desc = Inst.getDesc();
163 unsigned Required = std::min(NumMicroOps, DispatchWidth);
164 if (Required > AvailableEntries)
165 return false;
167 if (Desc.BeginGroup && AvailableEntries != DispatchWidth)
168 return false;
170 // The dispatch logic doesn't internally buffer instructions. It only accepts
171 // instructions that can be successfully moved to the next stage during this
172 // same cycle.
173 return canDispatch(IR);
176 Error DispatchStage::execute(InstRef &IR) {
177 assert(canDispatch(IR) && "Cannot dispatch another instruction!");
178 return dispatch(IR);
181 #ifndef NDEBUG
182 void DispatchStage::dump() const {
183 PRF.dump();
184 RCU.dump();
186 #endif
} // namespace mca
} // namespace llvm