//==- SIMachineFunctionInfo.h - SIMachineFunctionInfo interface --*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AMDGPU_SIMACHINEFUNCTIONINFO_H
#define LLVM_LIB_TARGET_AMDGPU_SIMACHINEFUNCTIONINFO_H

#include "AMDGPUArgumentUsageInfo.h"
#include "AMDGPUMachineFunction.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIInstrInfo.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/CodeGen/MIRYamlMapping.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Support/raw_ostream.h"

namespace llvm {

class MachineFrameInfo;
class MachineFunction;
class SIMachineFunctionInfo;
class SIRegisterInfo;
class TargetRegisterClass;

class AMDGPUPseudoSourceValue : public PseudoSourceValue {
public:
  enum AMDGPUPSVKind : unsigned {
    PSVBuffer = PseudoSourceValue::TargetCustom,
    PSVImage,
    GWSResource
  };

protected:
  AMDGPUPseudoSourceValue(unsigned Kind, const TargetInstrInfo &TII)
      : PseudoSourceValue(Kind, TII) {}

public:
  bool isConstant(const MachineFrameInfo *) const override {
    // This should probably be true for most images, but we will start by being
    // conservative.
    return false;
  }

  bool isAliased(const MachineFrameInfo *) const override { return true; }

  bool mayAlias(const MachineFrameInfo *) const override { return true; }
};

class AMDGPUBufferPseudoSourceValue final : public AMDGPUPseudoSourceValue {
public:
  explicit AMDGPUBufferPseudoSourceValue(const TargetInstrInfo &TII)
      : AMDGPUPseudoSourceValue(PSVBuffer, TII) {}

  static bool classof(const PseudoSourceValue *V) {
    return V->kind() == PSVBuffer;
  }

  void printCustom(raw_ostream &OS) const override { OS << "BufferResource"; }
};

class AMDGPUImagePseudoSourceValue final : public AMDGPUPseudoSourceValue {
public:
  // TODO: Is the img rsrc useful?
  explicit AMDGPUImagePseudoSourceValue(const TargetInstrInfo &TII)
      : AMDGPUPseudoSourceValue(PSVImage, TII) {}

  static bool classof(const PseudoSourceValue *V) {
    return V->kind() == PSVImage;
  }

  void printCustom(raw_ostream &OS) const override { OS << "ImageResource"; }
};

class AMDGPUGWSResourcePseudoSourceValue final
    : public AMDGPUPseudoSourceValue {
public:
  explicit AMDGPUGWSResourcePseudoSourceValue(const TargetInstrInfo &TII)
      : AMDGPUPseudoSourceValue(GWSResource, TII) {}

  static bool classof(const PseudoSourceValue *V) {
    return V->kind() == GWSResource;
  }

  // These are inaccessible memory from IR.
  bool isAliased(const MachineFrameInfo *) const override { return false; }

  // These are inaccessible memory from IR.
  bool mayAlias(const MachineFrameInfo *) const override { return false; }

  void printCustom(raw_ostream &OS) const override {
    OS << "GWSResource";
  }
};
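
// Illustrative usage (a sketch, not part of this header): code that builds
// memory operands for buffer/image/GWS instructions typically wraps one of
// these pseudo source values in a MachinePointerInfo so that alias analysis
// can separate such accesses from ordinary stack or global memory. Assuming
// an SIMachineFunctionInfo *MFI and a Size/Alignment chosen by the caller:
//
//   MachinePointerInfo PtrInfo(MFI->getBufferPSV(TII));
//   MachineMemOperand *MMO = MF.getMachineMemOperand(
//       PtrInfo, MachineMemOperand::MOLoad, Size, Alignment);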

namespace yaml {

struct SIArgument {
  bool IsRegister;
  union {
    StringValue RegisterName;
    unsigned StackOffset;
  };
  Optional<unsigned> Mask;

  // Default constructor, which creates a stack argument.
  SIArgument() : IsRegister(false), StackOffset(0) {}
  SIArgument(const SIArgument &Other) {
    IsRegister = Other.IsRegister;
    if (IsRegister) {
      ::new ((void *)std::addressof(RegisterName))
          StringValue(Other.RegisterName);
    } else
      StackOffset = Other.StackOffset;
    Mask = Other.Mask;
  }
  SIArgument &operator=(const SIArgument &Other) {
    IsRegister = Other.IsRegister;
    if (IsRegister) {
      ::new ((void *)std::addressof(RegisterName))
          StringValue(Other.RegisterName);
    } else
      StackOffset = Other.StackOffset;
    Mask = Other.Mask;
    return *this;
  }
  ~SIArgument() {
    if (IsRegister)
      RegisterName.~StringValue();
  }

  // Helper to create a register or stack argument.
  static inline SIArgument createArgument(bool IsReg) {
    if (IsReg)
      return SIArgument(IsReg);
    return SIArgument();
  }

private:
  // Construct a register argument.
  SIArgument(bool) : IsRegister(true), RegisterName() {}
};

template <> struct MappingTraits<SIArgument> {
  static void mapping(IO &YamlIO, SIArgument &A) {
    if (YamlIO.outputting()) {
      if (A.IsRegister)
        YamlIO.mapRequired("reg", A.RegisterName);
      else
        YamlIO.mapRequired("offset", A.StackOffset);
    } else {
      auto Keys = YamlIO.keys();
      if (is_contained(Keys, "reg")) {
        A = SIArgument::createArgument(true);
        YamlIO.mapRequired("reg", A.RegisterName);
      } else if (is_contained(Keys, "offset"))
        YamlIO.mapRequired("offset", A.StackOffset);
      else
        YamlIO.setError("missing required key 'reg' or 'offset'");
    }
    YamlIO.mapOptional("mask", A.Mask);
  }
  static const bool flow = true;
};
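
// Illustrative serialized forms of the flow mapping above (register names,
// offsets, and mask values are examples only):
//
//   workItemIDX: { reg: '$vgpr0' }
//   workItemIDY: { reg: '$vgpr31', mask: 1047552 }
//   privateSegmentSize: { offset: 16 }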

struct SIArgumentInfo {
  Optional<SIArgument> PrivateSegmentBuffer;
  Optional<SIArgument> DispatchPtr;
  Optional<SIArgument> QueuePtr;
  Optional<SIArgument> KernargSegmentPtr;
  Optional<SIArgument> DispatchID;
  Optional<SIArgument> FlatScratchInit;
  Optional<SIArgument> PrivateSegmentSize;

  Optional<SIArgument> WorkGroupIDX;
  Optional<SIArgument> WorkGroupIDY;
  Optional<SIArgument> WorkGroupIDZ;
  Optional<SIArgument> WorkGroupInfo;
  Optional<SIArgument> PrivateSegmentWaveByteOffset;

  Optional<SIArgument> ImplicitArgPtr;
  Optional<SIArgument> ImplicitBufferPtr;

  Optional<SIArgument> WorkItemIDX;
  Optional<SIArgument> WorkItemIDY;
  Optional<SIArgument> WorkItemIDZ;
};

template <> struct MappingTraits<SIArgumentInfo> {
  static void mapping(IO &YamlIO, SIArgumentInfo &AI) {
    YamlIO.mapOptional("privateSegmentBuffer", AI.PrivateSegmentBuffer);
    YamlIO.mapOptional("dispatchPtr", AI.DispatchPtr);
    YamlIO.mapOptional("queuePtr", AI.QueuePtr);
    YamlIO.mapOptional("kernargSegmentPtr", AI.KernargSegmentPtr);
    YamlIO.mapOptional("dispatchID", AI.DispatchID);
    YamlIO.mapOptional("flatScratchInit", AI.FlatScratchInit);
    YamlIO.mapOptional("privateSegmentSize", AI.PrivateSegmentSize);

    YamlIO.mapOptional("workGroupIDX", AI.WorkGroupIDX);
    YamlIO.mapOptional("workGroupIDY", AI.WorkGroupIDY);
    YamlIO.mapOptional("workGroupIDZ", AI.WorkGroupIDZ);
    YamlIO.mapOptional("workGroupInfo", AI.WorkGroupInfo);
    YamlIO.mapOptional("privateSegmentWaveByteOffset",
                       AI.PrivateSegmentWaveByteOffset);

    YamlIO.mapOptional("implicitArgPtr", AI.ImplicitArgPtr);
    YamlIO.mapOptional("implicitBufferPtr", AI.ImplicitBufferPtr);

    YamlIO.mapOptional("workItemIDX", AI.WorkItemIDX);
    YamlIO.mapOptional("workItemIDY", AI.WorkItemIDY);
    YamlIO.mapOptional("workItemIDZ", AI.WorkItemIDZ);
  }
};

// Default to default mode for default calling convention.
struct SIMode {
  bool IEEE = true;
  bool DX10Clamp = true;
  bool FP32InputDenormals = true;
  bool FP32OutputDenormals = true;
  bool FP64FP16InputDenormals = true;
  bool FP64FP16OutputDenormals = true;

  SIMode() = default;

  SIMode(const AMDGPU::SIModeRegisterDefaults &Mode) {
    IEEE = Mode.IEEE;
    DX10Clamp = Mode.DX10Clamp;
    FP32InputDenormals = Mode.FP32InputDenormals;
    FP32OutputDenormals = Mode.FP32OutputDenormals;
    FP64FP16InputDenormals = Mode.FP64FP16InputDenormals;
    FP64FP16OutputDenormals = Mode.FP64FP16OutputDenormals;
  }

  bool operator ==(const SIMode Other) const {
    return IEEE == Other.IEEE &&
           DX10Clamp == Other.DX10Clamp &&
           FP32InputDenormals == Other.FP32InputDenormals &&
           FP32OutputDenormals == Other.FP32OutputDenormals &&
           FP64FP16InputDenormals == Other.FP64FP16InputDenormals &&
           FP64FP16OutputDenormals == Other.FP64FP16OutputDenormals;
  }
};

template <> struct MappingTraits<SIMode> {
  static void mapping(IO &YamlIO, SIMode &Mode) {
    YamlIO.mapOptional("ieee", Mode.IEEE, true);
    YamlIO.mapOptional("dx10-clamp", Mode.DX10Clamp, true);
    YamlIO.mapOptional("fp32-input-denormals", Mode.FP32InputDenormals, true);
    YamlIO.mapOptional("fp32-output-denormals", Mode.FP32OutputDenormals, true);
    YamlIO.mapOptional("fp64-fp16-input-denormals", Mode.FP64FP16InputDenormals,
                       true);
    YamlIO.mapOptional("fp64-fp16-output-denormals",
                       Mode.FP64FP16OutputDenormals, true);
  }
};
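
// Illustrative serialized form (values are examples only):
//
//   mode:
//     ieee: false
//     dx10-clamp: true
//     fp32-input-denormals: false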

struct SIMachineFunctionInfo final : public yaml::MachineFunctionInfo {
  uint64_t ExplicitKernArgSize = 0;
  unsigned MaxKernArgAlign = 0;
  unsigned LDSSize = 0;
  Align DynLDSAlign;
  bool IsEntryFunction = false;
  bool NoSignedZerosFPMath = false;
  bool MemoryBound = false;
  bool WaveLimiter = false;
  bool HasSpilledSGPRs = false;
  bool HasSpilledVGPRs = false;
  uint32_t HighBitsOf32BitAddress = 0;

  // TODO: 10 may be a better default since it's the maximum.
  unsigned Occupancy = 0;

  StringValue ScratchRSrcReg = "$private_rsrc_reg";
  StringValue FrameOffsetReg = "$fp_reg";
  StringValue StackPtrOffsetReg = "$sp_reg";

  Optional<SIArgumentInfo> ArgInfo;
  SIMode Mode;
  Optional<FrameIndex> ScavengeFI;

  SIMachineFunctionInfo() = default;
  SIMachineFunctionInfo(const llvm::SIMachineFunctionInfo &,
                        const TargetRegisterInfo &TRI,
                        const llvm::MachineFunction &MF);

  void mappingImpl(yaml::IO &YamlIO) override;
  ~SIMachineFunctionInfo() = default;
};

template <> struct MappingTraits<SIMachineFunctionInfo> {
  static void mapping(IO &YamlIO, SIMachineFunctionInfo &MFI) {
    YamlIO.mapOptional("explicitKernArgSize", MFI.ExplicitKernArgSize,
                       UINT64_C(0));
    YamlIO.mapOptional("maxKernArgAlign", MFI.MaxKernArgAlign, 0u);
    YamlIO.mapOptional("ldsSize", MFI.LDSSize, 0u);
    YamlIO.mapOptional("dynLDSAlign", MFI.DynLDSAlign, Align());
    YamlIO.mapOptional("isEntryFunction", MFI.IsEntryFunction, false);
    YamlIO.mapOptional("noSignedZerosFPMath", MFI.NoSignedZerosFPMath, false);
    YamlIO.mapOptional("memoryBound", MFI.MemoryBound, false);
    YamlIO.mapOptional("waveLimiter", MFI.WaveLimiter, false);
    YamlIO.mapOptional("hasSpilledSGPRs", MFI.HasSpilledSGPRs, false);
    YamlIO.mapOptional("hasSpilledVGPRs", MFI.HasSpilledVGPRs, false);
    YamlIO.mapOptional("scratchRSrcReg", MFI.ScratchRSrcReg,
                       StringValue("$private_rsrc_reg"));
    YamlIO.mapOptional("frameOffsetReg", MFI.FrameOffsetReg,
                       StringValue("$fp_reg"));
    YamlIO.mapOptional("stackPtrOffsetReg", MFI.StackPtrOffsetReg,
                       StringValue("$sp_reg"));
    YamlIO.mapOptional("argumentInfo", MFI.ArgInfo);
    YamlIO.mapOptional("mode", MFI.Mode, SIMode());
    YamlIO.mapOptional("highBitsOf32BitAddress",
                       MFI.HighBitsOf32BitAddress, 0u);
    YamlIO.mapOptional("occupancy", MFI.Occupancy, 0);
    YamlIO.mapOptional("scavengeFI", MFI.ScavengeFI);
  }
};
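
// An illustrative machineFunctionInfo MIR block that this mapping reads and
// writes (all field values below are examples only):
//
//   machineFunctionInfo:
//     explicitKernArgSize: 8
//     maxKernArgAlign: 8
//     isEntryFunction: true
//     scratchRSrcReg: '$sgpr96_sgpr97_sgpr98_sgpr99'
//     stackPtrOffsetReg: '$sgpr32'
//     occupancy: 10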

} // end namespace yaml

/// This class keeps track of the SPI_SP_INPUT_ADDR config register, which
/// tells the hardware which interpolation parameters to load.
class SIMachineFunctionInfo final : public AMDGPUMachineFunction {
  friend class GCNTargetMachine;

  Register TIDReg = AMDGPU::NoRegister;

  // Registers that may be reserved for spilling purposes. These may be the same
  // as the input registers.
  Register ScratchRSrcReg = AMDGPU::PRIVATE_RSRC_REG;

  // This is the unswizzled offset from the current dispatch's scratch wave
  // base to the beginning of the current function's frame.
  Register FrameOffsetReg = AMDGPU::FP_REG;

  // This is an ABI register used in the non-entry calling convention to
  // communicate the unswizzled offset from the current dispatch's scratch wave
  // base to the beginning of the new function's frame.
  Register StackPtrOffsetReg = AMDGPU::SP_REG;

  AMDGPUFunctionArgInfo ArgInfo;

  // Graphics info.
  unsigned PSInputAddr = 0;
  unsigned PSInputEnable = 0;

  /// Number of bytes of arguments this function has on the stack. If the callee
  /// is expected to restore the argument stack this should be a multiple of 16,
  /// all usable during a tail call.
  ///
  /// The alternative would forbid tail call optimisation in some cases: if we
  /// want to transfer control from a function with 8-bytes of stack-argument
  /// space to a function with 16-bytes then misalignment of this value would
  /// make a stack adjustment necessary, which could not be undone by the
  /// callee.
  unsigned BytesInStackArgArea = 0;

  bool ReturnsVoid = true;

  // A pair of default/requested minimum/maximum flat work group sizes.
  // Minimum - first, maximum - second.
  std::pair<unsigned, unsigned> FlatWorkGroupSizes = {0, 0};

  // A pair of default/requested minimum/maximum number of waves per execution
  // unit. Minimum - first, maximum - second.
  std::pair<unsigned, unsigned> WavesPerEU = {0, 0};

  std::unique_ptr<const AMDGPUBufferPseudoSourceValue> BufferPSV;
  std::unique_ptr<const AMDGPUImagePseudoSourceValue> ImagePSV;
  std::unique_ptr<const AMDGPUGWSResourcePseudoSourceValue> GWSResourcePSV;

  unsigned LDSWaveSpillSize = 0;
  unsigned NumUserSGPRs = 0;
  unsigned NumSystemSGPRs = 0;

  bool HasSpilledSGPRs = false;
  bool HasSpilledVGPRs = false;
  bool HasNonSpillStackObjects = false;
  bool IsStackRealigned = false;

  unsigned NumSpilledSGPRs = 0;
  unsigned NumSpilledVGPRs = 0;

  // Feature bits required for inputs passed in user SGPRs.
  bool PrivateSegmentBuffer : 1;
  bool DispatchPtr : 1;
  bool QueuePtr : 1;
  bool KernargSegmentPtr : 1;
  bool DispatchID : 1;
  bool FlatScratchInit : 1;

  // Feature bits required for inputs passed in system SGPRs.
  bool WorkGroupIDX : 1; // Always initialized.
  bool WorkGroupIDY : 1;
  bool WorkGroupIDZ : 1;
  bool WorkGroupInfo : 1;
  bool PrivateSegmentWaveByteOffset : 1;

  bool WorkItemIDX : 1; // Always initialized.
  bool WorkItemIDY : 1;
  bool WorkItemIDZ : 1;

  // Private memory buffer
  // Compute directly in sgpr[0:1]
  // Other shaders indirect 64-bits at sgpr[0:1]
  bool ImplicitBufferPtr : 1;

  // Pointer to where the ABI inserts special kernel arguments separate from the
  // user arguments. This is an offset from the KernargSegmentPtr.
  bool ImplicitArgPtr : 1;

  bool MayNeedAGPRs : 1;

  // The hard-wired high half of the address of the global information table
  // for AMDPAL OS type. 0xffffffff represents no hard-wired high half, since
  // current hardware only allows a 16 bit value.
  unsigned GITPtrHigh;

  unsigned HighBitsOf32BitAddress;
  unsigned GDSSize;

  // Current recorded maximum possible occupancy.
  unsigned Occupancy;

  mutable Optional<bool> UsesAGPRs;

  MCPhysReg getNextUserSGPR() const;

  MCPhysReg getNextSystemSGPR() const;

public:
  struct SpilledReg {
    Register VGPR;
    int Lane = -1;

    SpilledReg() = default;
    SpilledReg(Register R, int L) : VGPR(R), Lane(L) {}

    bool hasLane() { return Lane != -1; }
    bool hasReg() { return VGPR != 0; }
  };
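
  // For example, a 64-bit SGPR pair spilled to a single frame index is
  // typically recorded as two SpilledReg entries: the same VGPR with lanes
  // N and N+1, one lane per 32-bit sub-register.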

  struct SGPRSpillVGPR {
    // VGPR used for SGPR spills
    Register VGPR;

    // If the VGPR is used for SGPR spills in a non-entrypoint function, the
    // stack slot used to save/restore it in the prolog/epilog.
    Optional<int> FI;

    SGPRSpillVGPR(Register V, Optional<int> F) : VGPR(V), FI(F) {}
  };

  struct VGPRSpillToAGPR {
    SmallVector<MCPhysReg, 32> Lanes;
    bool FullyAllocated = false;
    bool IsDead = false;
  };

  // Map WWM VGPR to a stack slot that is used to save/restore it in the
  // prolog/epilog.
  MapVector<Register, Optional<int>> WWMReservedRegs;

private:
  // Track VGPR + wave index for each subregister of the SGPR spilled to
  // frameindex key.
  DenseMap<int, std::vector<SpilledReg>> SGPRToVGPRSpills;
  unsigned NumVGPRSpillLanes = 0;
  SmallVector<SGPRSpillVGPR, 2> SpillVGPRs;

  DenseMap<int, VGPRSpillToAGPR> VGPRToAGPRSpills;

  // AGPRs used for VGPR spills.
  SmallVector<MCPhysReg, 32> SpillAGPR;

  // VGPRs used for AGPR spills.
  SmallVector<MCPhysReg, 32> SpillVGPR;

  // Emergency stack slot. Sometimes, we create this before finalizing the stack
  // frame, so save it here and add it to the RegScavenger later.
  Optional<int> ScavengeFI;

public:
  /// If this is set, an SGPR used for save/restore of the register used for the
  /// frame pointer.
  Register SGPRForFPSaveRestoreCopy;
  Optional<int> FramePointerSaveIndex;

  /// If this is set, an SGPR used for save/restore of the register used for the
  /// base pointer.
  Register SGPRForBPSaveRestoreCopy;
  Optional<int> BasePointerSaveIndex;

  bool isCalleeSavedReg(const MCPhysReg *CSRegs, MCPhysReg Reg);

  SIMachineFunctionInfo(const MachineFunction &MF);

  bool initializeBaseYamlFields(const yaml::SIMachineFunctionInfo &YamlMFI,
                                const MachineFunction &MF,
                                PerFunctionMIParsingState &PFS,
                                SMDiagnostic &Error, SMRange &SourceRange);

  void reserveWWMRegister(Register Reg, Optional<int> FI) {
    WWMReservedRegs.insert(std::make_pair(Reg, FI));
  }

  ArrayRef<SpilledReg> getSGPRToVGPRSpills(int FrameIndex) const {
    auto I = SGPRToVGPRSpills.find(FrameIndex);
    return (I == SGPRToVGPRSpills.end()) ?
      ArrayRef<SpilledReg>() : makeArrayRef(I->second);
  }
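
  // Illustrative use (a sketch): spill lowering iterates the lanes recorded
  // for a frame index and emits one lane write/read per 32-bit sub-register,
  // e.g.:
  //
  //   for (const SpilledReg &Spill : MFI->getSGPRToVGPRSpills(FI)) {
  //     // write <sgpr subreg> into lane Spill.Lane of Spill.VGPR
  //   }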

  ArrayRef<SGPRSpillVGPR> getSGPRSpillVGPRs() const { return SpillVGPRs; }

  void setSGPRSpillVGPRs(Register NewVGPR, Optional<int> newFI, int Index) {
    SpillVGPRs[Index].VGPR = NewVGPR;
    SpillVGPRs[Index].FI = newFI;
  }

  bool removeVGPRForSGPRSpill(Register ReservedVGPR, MachineFunction &MF);

  ArrayRef<MCPhysReg> getAGPRSpillVGPRs() const {
    return SpillAGPR;
  }

  ArrayRef<MCPhysReg> getVGPRSpillAGPRs() const {
    return SpillVGPR;
  }

  MCPhysReg getVGPRToAGPRSpill(int FrameIndex, unsigned Lane) const {
    auto I = VGPRToAGPRSpills.find(FrameIndex);
    return (I == VGPRToAGPRSpills.end()) ? (MCPhysReg)AMDGPU::NoRegister
                                         : I->second.Lanes[Lane];
  }

  void setVGPRToAGPRSpillDead(int FrameIndex) {
    auto I = VGPRToAGPRSpills.find(FrameIndex);
    if (I != VGPRToAGPRSpills.end())
      I->second.IsDead = true;
  }

  bool haveFreeLanesForSGPRSpill(const MachineFunction &MF,
                                 unsigned NumLane) const;
  bool allocateSGPRSpillToVGPR(MachineFunction &MF, int FI);
  bool allocateVGPRSpillToAGPR(MachineFunction &MF, int FI, bool isAGPRtoVGPR);

  /// If \p ResetSGPRSpillStackIDs is true, reset the stack ID from sgpr-spill
  /// to the default stack.
  bool removeDeadFrameIndices(MachineFrameInfo &MFI,
                              bool ResetSGPRSpillStackIDs);

  int getScavengeFI(MachineFrameInfo &MFI, const SIRegisterInfo &TRI);
  Optional<int> getOptionalScavengeFI() const { return ScavengeFI; }

  bool hasCalculatedTID() const { return TIDReg != 0; }
  Register getTIDReg() const { return TIDReg; }
  void setTIDReg(Register Reg) { TIDReg = Reg; }

  unsigned getBytesInStackArgArea() const {
    return BytesInStackArgArea;
  }

  void setBytesInStackArgArea(unsigned Bytes) {
    BytesInStackArgArea = Bytes;
  }

  // Add user SGPRs.
  Register addPrivateSegmentBuffer(const SIRegisterInfo &TRI);
  Register addDispatchPtr(const SIRegisterInfo &TRI);
  Register addQueuePtr(const SIRegisterInfo &TRI);
  Register addKernargSegmentPtr(const SIRegisterInfo &TRI);
  Register addDispatchID(const SIRegisterInfo &TRI);
  Register addFlatScratchInit(const SIRegisterInfo &TRI);
  Register addImplicitBufferPtr(const SIRegisterInfo &TRI);

  // Add system SGPRs.
  Register addWorkGroupIDX() {
    ArgInfo.WorkGroupIDX = ArgDescriptor::createRegister(getNextSystemSGPR());
    NumSystemSGPRs += 1;
    return ArgInfo.WorkGroupIDX.getRegister();
  }

  Register addWorkGroupIDY() {
    ArgInfo.WorkGroupIDY = ArgDescriptor::createRegister(getNextSystemSGPR());
    NumSystemSGPRs += 1;
    return ArgInfo.WorkGroupIDY.getRegister();
  }

  Register addWorkGroupIDZ() {
    ArgInfo.WorkGroupIDZ = ArgDescriptor::createRegister(getNextSystemSGPR());
    NumSystemSGPRs += 1;
    return ArgInfo.WorkGroupIDZ.getRegister();
  }

  Register addWorkGroupInfo() {
    ArgInfo.WorkGroupInfo = ArgDescriptor::createRegister(getNextSystemSGPR());
    NumSystemSGPRs += 1;
    return ArgInfo.WorkGroupInfo.getRegister();
  }
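
  // Illustrative call-site pattern (a sketch, mirroring how calling-convention
  // lowering allocates system SGPRs in order):
  //
  //   if (Info.hasWorkGroupIDX()) {
  //     Register Reg = Info.addWorkGroupIDX();
  //     MF.addLiveIn(Reg, &AMDGPU::SGPR_32RegClass);
  //     CCInfo.AllocateReg(Reg);
  //   }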

  // Add special VGPR inputs
  void setWorkItemIDX(ArgDescriptor Arg) {
    ArgInfo.WorkItemIDX = Arg;
  }

  void setWorkItemIDY(ArgDescriptor Arg) {
    ArgInfo.WorkItemIDY = Arg;
  }

  void setWorkItemIDZ(ArgDescriptor Arg) {
    ArgInfo.WorkItemIDZ = Arg;
  }

  Register addPrivateSegmentWaveByteOffset() {
    ArgInfo.PrivateSegmentWaveByteOffset
      = ArgDescriptor::createRegister(getNextSystemSGPR());
    NumSystemSGPRs += 1;
    return ArgInfo.PrivateSegmentWaveByteOffset.getRegister();
  }

  void setPrivateSegmentWaveByteOffset(Register Reg) {
    ArgInfo.PrivateSegmentWaveByteOffset = ArgDescriptor::createRegister(Reg);
  }

  bool hasPrivateSegmentBuffer() const {
    return PrivateSegmentBuffer;
  }

  bool hasDispatchPtr() const {
    return DispatchPtr;
  }

  bool hasQueuePtr() const {
    return QueuePtr;
  }

  bool hasKernargSegmentPtr() const {
    return KernargSegmentPtr;
  }

  bool hasDispatchID() const {
    return DispatchID;
  }

  bool hasFlatScratchInit() const {
    return FlatScratchInit;
  }

  bool hasWorkGroupIDX() const {
    return WorkGroupIDX;
  }

  bool hasWorkGroupIDY() const {
    return WorkGroupIDY;
  }

  bool hasWorkGroupIDZ() const {
    return WorkGroupIDZ;
  }

  bool hasWorkGroupInfo() const {
    return WorkGroupInfo;
  }

  bool hasPrivateSegmentWaveByteOffset() const {
    return PrivateSegmentWaveByteOffset;
  }

  bool hasWorkItemIDX() const {
    return WorkItemIDX;
  }

  bool hasWorkItemIDY() const {
    return WorkItemIDY;
  }

  bool hasWorkItemIDZ() const {
    return WorkItemIDZ;
  }

  bool hasImplicitArgPtr() const {
    return ImplicitArgPtr;
  }

  bool hasImplicitBufferPtr() const {
    return ImplicitBufferPtr;
  }

  AMDGPUFunctionArgInfo &getArgInfo() {
    return ArgInfo;
  }

  const AMDGPUFunctionArgInfo &getArgInfo() const {
    return ArgInfo;
  }

  std::tuple<const ArgDescriptor *, const TargetRegisterClass *, LLT>
  getPreloadedValue(AMDGPUFunctionArgInfo::PreloadedValue Value) const {
    return ArgInfo.getPreloadedValue(Value);
  }

  MCRegister getPreloadedReg(AMDGPUFunctionArgInfo::PreloadedValue Value) const {
    auto Arg = std::get<0>(ArgInfo.getPreloadedValue(Value));
    return Arg ? Arg->getRegister() : MCRegister();
  }
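
  // Illustrative query (a sketch): fetch the SGPR pair that preloads the
  // kernarg segment pointer, if any:
  //
  //   MCRegister KernArgReg =
  //       MFI->getPreloadedReg(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);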

  unsigned getGITPtrHigh() const {
    return GITPtrHigh;
  }

  Register getGITPtrLoReg(const MachineFunction &MF) const;

  uint32_t get32BitAddressHighBits() const {
    return HighBitsOf32BitAddress;
  }

  unsigned getGDSSize() const {
    return GDSSize;
  }

  unsigned getNumUserSGPRs() const {
    return NumUserSGPRs;
  }

  unsigned getNumPreloadedSGPRs() const {
    return NumUserSGPRs + NumSystemSGPRs;
  }

  Register getPrivateSegmentWaveByteOffsetSystemSGPR() const {
    return ArgInfo.PrivateSegmentWaveByteOffset.getRegister();
  }

  /// Returns the physical register reserved for use as the resource
  /// descriptor for scratch accesses.
  Register getScratchRSrcReg() const {
    return ScratchRSrcReg;
  }

  void setScratchRSrcReg(Register Reg) {
    assert(Reg != 0 && "Should never be unset");
    ScratchRSrcReg = Reg;
  }

  Register getFrameOffsetReg() const {
    return FrameOffsetReg;
  }

  void setFrameOffsetReg(Register Reg) {
    assert(Reg != 0 && "Should never be unset");
    FrameOffsetReg = Reg;
  }

  void setStackPtrOffsetReg(Register Reg) {
    assert(Reg != 0 && "Should never be unset");
    StackPtrOffsetReg = Reg;
  }

  // Note the unset value for this is AMDGPU::SP_REG rather than
  // NoRegister. This is mostly a workaround for MIR tests where state that
  // can't be directly computed from the function is not preserved in serialized
  // MIR.
  Register getStackPtrOffsetReg() const {
    return StackPtrOffsetReg;
  }

  Register getQueuePtrUserSGPR() const {
    return ArgInfo.QueuePtr.getRegister();
  }

  Register getImplicitBufferPtrUserSGPR() const {
    return ArgInfo.ImplicitBufferPtr.getRegister();
  }

  bool hasSpilledSGPRs() const {
    return HasSpilledSGPRs;
  }

  void setHasSpilledSGPRs(bool Spill = true) {
    HasSpilledSGPRs = Spill;
  }

  bool hasSpilledVGPRs() const {
    return HasSpilledVGPRs;
  }

  void setHasSpilledVGPRs(bool Spill = true) {
    HasSpilledVGPRs = Spill;
  }

  bool hasNonSpillStackObjects() const {
    return HasNonSpillStackObjects;
  }

  void setHasNonSpillStackObjects(bool StackObject = true) {
    HasNonSpillStackObjects = StackObject;
  }

  bool isStackRealigned() const {
    return IsStackRealigned;
  }

  void setIsStackRealigned(bool Realigned = true) {
    IsStackRealigned = Realigned;
  }

  unsigned getNumSpilledSGPRs() const {
    return NumSpilledSGPRs;
  }

  unsigned getNumSpilledVGPRs() const {
    return NumSpilledVGPRs;
  }

  void addToSpilledSGPRs(unsigned num) {
    NumSpilledSGPRs += num;
  }

  void addToSpilledVGPRs(unsigned num) {
    NumSpilledVGPRs += num;
  }

  unsigned getPSInputAddr() const {
    return PSInputAddr;
  }

  unsigned getPSInputEnable() const {
    return PSInputEnable;
  }

  bool isPSInputAllocated(unsigned Index) const {
    return PSInputAddr & (1 << Index);
  }

  void markPSInputAllocated(unsigned Index) {
    PSInputAddr |= 1 << Index;
  }

  void markPSInputEnabled(unsigned Index) {
    PSInputEnable |= 1 << Index;
  }
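
  // For example, marking PS inputs 0 and 3 as allocated leaves
  // PSInputAddr == 0b1001, and isPSInputAllocated(3) then tests bit 3.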

  bool returnsVoid() const {
    return ReturnsVoid;
  }

  void setIfReturnsVoid(bool Value) {
    ReturnsVoid = Value;
  }

  /// \returns A pair of default/requested minimum/maximum flat work group sizes
  /// for this function.
  std::pair<unsigned, unsigned> getFlatWorkGroupSizes() const {
    return FlatWorkGroupSizes;
  }

  /// \returns Default/requested minimum flat work group size for this function.
  unsigned getMinFlatWorkGroupSize() const {
    return FlatWorkGroupSizes.first;
  }

  /// \returns Default/requested maximum flat work group size for this function.
  unsigned getMaxFlatWorkGroupSize() const {
    return FlatWorkGroupSizes.second;
  }

  /// \returns A pair of default/requested minimum/maximum number of waves per
  /// execution unit.
  std::pair<unsigned, unsigned> getWavesPerEU() const {
    return WavesPerEU;
  }

  /// \returns Default/requested minimum number of waves per execution unit.
  unsigned getMinWavesPerEU() const {
    return WavesPerEU.first;
  }

  /// \returns Default/requested maximum number of waves per execution unit.
  unsigned getMaxWavesPerEU() const {
    return WavesPerEU.second;
  }

  /// \returns SGPR used for \p Dim's work group ID.
  Register getWorkGroupIDSGPR(unsigned Dim) const {
    switch (Dim) {
    case 0:
      assert(hasWorkGroupIDX());
      return ArgInfo.WorkGroupIDX.getRegister();
    case 1:
      assert(hasWorkGroupIDY());
      return ArgInfo.WorkGroupIDY.getRegister();
    case 2:
      assert(hasWorkGroupIDZ());
      return ArgInfo.WorkGroupIDZ.getRegister();
    }
    llvm_unreachable("unexpected dimension");
  }

  unsigned getLDSWaveSpillSize() const {
    return LDSWaveSpillSize;
  }

  const AMDGPUBufferPseudoSourceValue *getBufferPSV(const SIInstrInfo &TII) {
    if (!BufferPSV)
      BufferPSV = std::make_unique<AMDGPUBufferPseudoSourceValue>(TII);

    return BufferPSV.get();
  }

  const AMDGPUImagePseudoSourceValue *getImagePSV(const SIInstrInfo &TII) {
    if (!ImagePSV)
      ImagePSV = std::make_unique<AMDGPUImagePseudoSourceValue>(TII);

    return ImagePSV.get();
  }

  const AMDGPUGWSResourcePseudoSourceValue *getGWSPSV(const SIInstrInfo &TII) {
    if (!GWSResourcePSV) {
      GWSResourcePSV =
          std::make_unique<AMDGPUGWSResourcePseudoSourceValue>(TII);
    }

    return GWSResourcePSV.get();
  }

  unsigned getOccupancy() const {
    return Occupancy;
  }

  unsigned getMinAllowedOccupancy() const {
    if (!isMemoryBound() && !needsWaveLimiter())
      return Occupancy;
    return (Occupancy < 4) ? Occupancy : 4;
  }
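
  // For example, a memory-bound function recorded at Occupancy == 7 is
  // clamped to a minimum allowed occupancy of 4 by the logic above.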

  void limitOccupancy(const MachineFunction &MF);

  void limitOccupancy(unsigned Limit) {
    if (Occupancy > Limit)
      Occupancy = Limit;
  }

  void increaseOccupancy(const MachineFunction &MF, unsigned Limit) {
    if (Occupancy < Limit)
      Occupancy = Limit;
    limitOccupancy(MF);
  }

  bool mayNeedAGPRs() const {
    return MayNeedAGPRs;
  }

  // \returns true if a function has a use of AGPRs via inline asm or
  // has a call which may use it.
  bool mayUseAGPRs(const MachineFunction &MF) const;

  // \returns true if a function needs or may need AGPRs.
  bool usesAGPRs(const MachineFunction &MF) const;
};

} // end namespace llvm

#endif // LLVM_LIB_TARGET_AMDGPU_SIMACHINEFUNCTIONINFO_H