//===-- SIRegisterInfo.h - SI Register Info Interface ----------*- C++ -*--===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Interface definition for SIRegisterInfo
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AMDGPU_SIREGISTERINFO_H
#define LLVM_LIB_TARGET_AMDGPU_SIREGISTERINFO_H

#include "AMDGPURegisterInfo.h"
#include "SIDefines.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

namespace llvm {

class GCNSubtarget;
class LiveIntervals;
class MachineRegisterInfo;
class SIMachineFunctionInfo;

class SIRegisterInfo final : public AMDGPURegisterInfo {
private:
  const GCNSubtarget &ST;
  unsigned SGPRSetID;
  unsigned VGPRSetID;
  unsigned AGPRSetID;
  BitVector SGPRPressureSets;
  BitVector VGPRPressureSets;
  BitVector AGPRPressureSets;
  bool SpillSGPRToVGPR;
  bool SpillSGPRToSMEM;
  bool isWave32;

  void classifyPressureSet(unsigned PSetID, unsigned Reg,
                           BitVector &PressureSets) const;

public:
  SIRegisterInfo(const GCNSubtarget &ST);

  bool spillSGPRToVGPR() const {
    return SpillSGPRToVGPR;
  }

  bool spillSGPRToSMEM() const {
    return SpillSGPRToSMEM;
  }

  /// Return the end register initially reserved for the scratch buffer in case
  /// spilling is needed.
  unsigned reservedPrivateSegmentBufferReg(const MachineFunction &MF) const;

  /// Return the end register initially reserved for the scratch wave offset in
  /// case spilling is needed.
  unsigned reservedPrivateSegmentWaveByteOffsetReg(
    const MachineFunction &MF) const;

  BitVector getReservedRegs(const MachineFunction &MF) const override;

  const MCPhysReg *getCalleeSavedRegs(const MachineFunction *MF) const override;
  const MCPhysReg *getCalleeSavedRegsViaCopy(const MachineFunction *MF) const;
  const uint32_t *getCallPreservedMask(const MachineFunction &MF,
                                       CallingConv::ID) const override;

  // Stack access is very expensive. CSRs are also the high registers, and we
  // want to minimize the number of used registers.
  unsigned getCSRFirstUseCost() const override {
    return 100;
  }
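
  // Roughly: the greedy register allocator charges this extra cost the first
  // time a callee-saved register is allocated in a function, so a large value
  // such as 100 biases allocation away from CSRs and keeps small functions
  // from paying the save/restore stack traffic described above.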

  Register getFrameRegister(const MachineFunction &MF) const override;

  bool canRealignStack(const MachineFunction &MF) const override;
  bool requiresRegisterScavenging(const MachineFunction &Fn) const override;

  bool requiresFrameIndexScavenging(const MachineFunction &MF) const override;
  bool requiresFrameIndexReplacementScavenging(
    const MachineFunction &MF) const override;
  bool requiresVirtualBaseRegisters(const MachineFunction &Fn) const override;
  bool trackLivenessAfterRegAlloc(const MachineFunction &MF) const override;

  int64_t getMUBUFInstrOffset(const MachineInstr *MI) const;

  int64_t getFrameIndexInstrOffset(const MachineInstr *MI,
                                   int Idx) const override;

  bool needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const override;

  void materializeFrameBaseRegister(MachineBasicBlock *MBB,
                                    unsigned BaseReg, int FrameIdx,
                                    int64_t Offset) const override;

  void resolveFrameIndex(MachineInstr &MI, unsigned BaseReg,
                         int64_t Offset) const override;

  bool isFrameOffsetLegal(const MachineInstr *MI, unsigned BaseReg,
                          int64_t Offset) const override;

  const TargetRegisterClass *getPointerRegClass(
    const MachineFunction &MF, unsigned Kind = 0) const override;

  /// If \p OnlyToVGPR is true, this will only succeed if the spill can be
  /// handled entirely by the SGPR-to-VGPR lane spilling mechanism (i.e.
  /// without touching scratch memory).
  bool spillSGPR(MachineBasicBlock::iterator MI,
                 int FI, RegScavenger *RS,
                 bool OnlyToVGPR = false) const;

  bool restoreSGPR(MachineBasicBlock::iterator MI,
                   int FI, RegScavenger *RS,
                   bool OnlyToVGPR = false) const;

  void eliminateFrameIndex(MachineBasicBlock::iterator MI, int SPAdj,
                           unsigned FIOperandNum,
                           RegScavenger *RS) const override;

  bool eliminateSGPRToVGPRSpillFrameIndex(MachineBasicBlock::iterator MI,
                                          int FI, RegScavenger *RS) const;

  StringRef getRegAsmName(unsigned Reg) const override;

  unsigned getHWRegIndex(unsigned Reg) const {
    return getEncodingValue(Reg) & 0xff;
  }

  /// Return the 'base' register class for this register,
  /// e.g. SGPR0 => SReg_32, VGPR0 => VGPR_32, SGPR0_SGPR1 => SReg_64, etc.
  const TargetRegisterClass *getPhysRegClass(unsigned Reg) const;

  /// \returns true if this class contains only SGPR registers
  bool isSGPRClass(const TargetRegisterClass *RC) const {
    return !hasVGPRs(RC) && !hasAGPRs(RC);
  }

  /// \returns true if this class ID contains only SGPR registers
  bool isSGPRClassID(unsigned RCID) const {
    return isSGPRClass(getRegClass(RCID));
  }

  bool isSGPRReg(const MachineRegisterInfo &MRI, unsigned Reg) const {
    const TargetRegisterClass *RC;
    if (Register::isVirtualRegister(Reg))
      RC = MRI.getRegClass(Reg);
    else
      RC = getPhysRegClass(Reg);
    return isSGPRClass(RC);
  }
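
  // Illustrative use (hypothetical call site, not from this file): a pass
  // holding the MachineRegisterInfo and this TRI can test whether an operand
  // carries a wave-uniform (scalar) value:
  //   if (MO.isReg() && TRI->isSGPRReg(MRI, MO.getReg()))
  //     ...; // value lives in a scalar register, uniform across the wave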

  /// \returns true if this class contains VGPR registers.
  bool hasVGPRs(const TargetRegisterClass *RC) const;

  /// \returns true if this class contains AGPR registers.
  bool hasAGPRs(const TargetRegisterClass *RC) const;

  /// \returns true if this class contains any vector registers.
  bool hasVectorRegisters(const TargetRegisterClass *RC) const {
    return hasVGPRs(RC) || hasAGPRs(RC);
  }

  /// \returns A VGPR reg class with the same width as \p SRC
  const TargetRegisterClass *
  getEquivalentVGPRClass(const TargetRegisterClass *SRC) const;

  /// \returns An AGPR reg class with the same width as \p SRC
  const TargetRegisterClass *
  getEquivalentAGPRClass(const TargetRegisterClass *SRC) const;

  /// \returns An SGPR reg class with the same width as \p VRC
  const TargetRegisterClass *
  getEquivalentSGPRClass(const TargetRegisterClass *VRC) const;

  /// \returns The register class that is used for a sub-register of \p RC for
  /// the given \p SubIdx. If \p SubIdx equals NoSubRegister, \p RC will
  /// be returned.
  const TargetRegisterClass *getSubRegClass(const TargetRegisterClass *RC,
                                            unsigned SubIdx) const;
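
  // For example (illustrative, using the common AMDGPU sub0 sub-register
  // index): getSubRegClass(&AMDGPU::SReg_64RegClass, AMDGPU::sub0) is
  // expected to yield a 32-bit scalar class, while passing NoSubRegister
  // returns SReg_64 itself.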

  bool shouldRewriteCopySrc(const TargetRegisterClass *DefRC,
                            unsigned DefSubReg,
                            const TargetRegisterClass *SrcRC,
                            unsigned SrcSubReg) const override;

  /// \returns True if operands defined with this operand type can accept
  /// a literal constant (i.e. any 32-bit immediate).
  bool opCanUseLiteralConstant(unsigned OpType) const {
    // TODO: 64-bit operands have extending behavior from 32-bit literal.
    return OpType >= AMDGPU::OPERAND_REG_IMM_FIRST &&
           OpType <= AMDGPU::OPERAND_REG_IMM_LAST;
  }
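
  // Illustrative use (hypothetical call site): the operand type is normally
  // taken from the instruction description, e.g.
  //   const MCOperandInfo &OpInfo = MI.getDesc().OpInfo[OpNo];
  //   if (TRI->opCanUseLiteralConstant(OpInfo.OperandType))
  //     ...; // any 32-bit immediate can be folded into this operand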

  /// \returns True if operands defined with this operand type can accept
  /// an inline constant, i.e. an integer value in the range [-16, 64] or
  /// -4.0f, -2.0f, -1.0f, -0.5f, 0.0f, 0.5f, 1.0f, 2.0f, 4.0f.
  bool opCanUseInlineConstant(unsigned OpType) const;
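
  // Background (roughly): inline constants are encoded directly in the
  // instruction's source-operand field and cost nothing, whereas a literal
  // constant takes an extra 32-bit dword in the instruction stream, so
  // operand folding prefers inline-constant-capable operands when possible.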

  unsigned findUnusedRegister(const MachineRegisterInfo &MRI,
                              const TargetRegisterClass *RC,
                              const MachineFunction &MF) const;

  unsigned getSGPRPressureSet() const { return SGPRSetID; }
  unsigned getVGPRPressureSet() const { return VGPRSetID; }
  unsigned getAGPRPressureSet() const { return AGPRSetID; }

  const TargetRegisterClass *getRegClassForReg(const MachineRegisterInfo &MRI,
                                               unsigned Reg) const;
  bool isVGPR(const MachineRegisterInfo &MRI, unsigned Reg) const;
  bool isAGPR(const MachineRegisterInfo &MRI, unsigned Reg) const;
  bool isVectorRegister(const MachineRegisterInfo &MRI, unsigned Reg) const {
    return isVGPR(MRI, Reg) || isAGPR(MRI, Reg);
  }

  bool isDivergentRegClass(const TargetRegisterClass *RC) const override {
    return !isSGPRClass(RC);
  }

  bool isSGPRPressureSet(unsigned SetID) const {
    return SGPRPressureSets.test(SetID) && !VGPRPressureSets.test(SetID) &&
           !AGPRPressureSets.test(SetID);
  }
  bool isVGPRPressureSet(unsigned SetID) const {
    return VGPRPressureSets.test(SetID) && !SGPRPressureSets.test(SetID) &&
           !AGPRPressureSets.test(SetID);
  }
  bool isAGPRPressureSet(unsigned SetID) const {
    return AGPRPressureSets.test(SetID) && !SGPRPressureSets.test(SetID) &&
           !VGPRPressureSets.test(SetID);
  }

  ArrayRef<int16_t> getRegSplitParts(const TargetRegisterClass *RC,
                                     unsigned EltSize) const;
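
  // getRegSplitParts is expected to return the sub-register indices that
  // split \p RC into consecutive pieces of \p EltSize bytes (e.g. a 128-bit
  // class with EltSize = 4 splits into four 32-bit parts); it is used when
  // wide spills and copies are expanded piece by piece.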

  bool shouldCoalesce(MachineInstr *MI,
                      const TargetRegisterClass *SrcRC,
                      unsigned SubReg,
                      const TargetRegisterClass *DstRC,
                      unsigned DstSubReg,
                      const TargetRegisterClass *NewRC,
                      LiveIntervals &LIS) const override;

  unsigned getRegPressureLimit(const TargetRegisterClass *RC,
                               MachineFunction &MF) const override;

  unsigned getRegPressureSetLimit(const MachineFunction &MF,
                                  unsigned Idx) const override;

  const int *getRegUnitPressureSets(unsigned RegUnit) const override;

  unsigned getReturnAddressReg(const MachineFunction &MF) const;

  const TargetRegisterClass *
  getRegClassForSizeOnBank(unsigned Size,
                           const RegisterBank &Bank,
                           const MachineRegisterInfo &MRI) const;

  const TargetRegisterClass *
  getRegClassForTypeOnBank(LLT Ty,
                           const RegisterBank &Bank,
                           const MachineRegisterInfo &MRI) const {
    return getRegClassForSizeOnBank(Ty.getSizeInBits(), Bank, MRI);
  }

  const TargetRegisterClass *
  getConstrainedRegClassForOperand(const MachineOperand &MO,
                                   const MachineRegisterInfo &MRI) const override;

  const TargetRegisterClass *getBoolRC() const {
    return isWave32 ? &AMDGPU::SReg_32_XM0RegClass
                    : &AMDGPU::SReg_64RegClass;
  }

  const TargetRegisterClass *getWaveMaskRegClass() const {
    return isWave32 ? &AMDGPU::SReg_32_XM0_XEXECRegClass
                    : &AMDGPU::SReg_64_XEXECRegClass;
  }
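
  // Both accessors depend on the wavefront size: in wave32 mode lane masks
  // (EXEC/VCC-style values) are 32 bits wide, while in wave64 mode they are
  // 64 bits wide, so the boolean and wave-mask register classes switch
  // accordingly.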

  unsigned getVCC() const;

  const TargetRegisterClass *getRegClass(unsigned RCID) const;

  // Find the reaching register definition.
  MachineInstr *findReachingDef(unsigned Reg, unsigned SubReg,
                                MachineInstr &Use,
                                MachineRegisterInfo &MRI,
                                LiveIntervals *LIS) const;

  const uint32_t *getAllVGPRRegMask() const;
  const uint32_t *getAllAllocatableSRegMask() const;

private:
  void buildSpillLoadStore(MachineBasicBlock::iterator MI,
                           unsigned LoadStoreOp,
                           int Index,
                           unsigned ValueReg,
                           bool ValueIsKill,
                           unsigned ScratchRsrcReg,
                           unsigned ScratchOffsetReg,
                           int64_t InstrOffset,
                           MachineMemOperand *MMO,
                           RegScavenger *RS) const;
};

} // End namespace llvm

#endif // LLVM_LIB_TARGET_AMDGPU_SIREGISTERINFO_H