//==- SIMachineFunctionInfo.h - SIMachineFunctionInfo interface --*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AMDGPU_SIMACHINEFUNCTIONINFO_H
#define LLVM_LIB_TARGET_AMDGPU_SIMACHINEFUNCTIONINFO_H
#include "AMDGPUArgumentUsageInfo.h"
#include "AMDGPUMachineFunction.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIInstrInfo.h"
#include "SIRegisterInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SparseBitVector.h"
#include "llvm/CodeGen/MIRYamlMapping.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/ErrorHandling.h"
#include <cassert>
#include <utility>
#include <vector>

namespace llvm {
class MachineFrameInfo;
class MachineFunction;
class TargetRegisterClass;
class AMDGPUPseudoSourceValue : public PseudoSourceValue {
public:
  enum AMDGPUPSVKind : unsigned {
    PSVBuffer = PseudoSourceValue::TargetCustom,
    PSVImage,
    GWSResource
  };

protected:
  AMDGPUPseudoSourceValue(unsigned Kind, const TargetInstrInfo &TII)
      : PseudoSourceValue(Kind, TII) {}

public:
  bool isConstant(const MachineFrameInfo *) const override {
    // This should probably be true for most images, but we will start by being
    // conservative.
    return false;
  }

  bool isAliased(const MachineFrameInfo *) const override {
    return true;
  }

  bool mayAlias(const MachineFrameInfo *) const override {
    return true;
  }
};
class AMDGPUBufferPseudoSourceValue final : public AMDGPUPseudoSourceValue {
public:
  explicit AMDGPUBufferPseudoSourceValue(const TargetInstrInfo &TII)
      : AMDGPUPseudoSourceValue(PSVBuffer, TII) {}

  static bool classof(const PseudoSourceValue *V) {
    return V->kind() == PSVBuffer;
  }
};
class AMDGPUImagePseudoSourceValue final : public AMDGPUPseudoSourceValue {
public:
  // TODO: Is the img rsrc useful?
  explicit AMDGPUImagePseudoSourceValue(const TargetInstrInfo &TII)
      : AMDGPUPseudoSourceValue(PSVImage, TII) {}

  static bool classof(const PseudoSourceValue *V) {
    return V->kind() == PSVImage;
  }
};
class AMDGPUGWSResourcePseudoSourceValue final
    : public AMDGPUPseudoSourceValue {
public:
  explicit AMDGPUGWSResourcePseudoSourceValue(const TargetInstrInfo &TII)
      : AMDGPUPseudoSourceValue(GWSResource, TII) {}

  static bool classof(const PseudoSourceValue *V) {
    return V->kind() == GWSResource;
  }

  // These are inaccessible memory from IR.
  bool isAliased(const MachineFrameInfo *) const override {
    return false;
  }

  // These are inaccessible memory from IR.
  bool mayAlias(const MachineFrameInfo *) const override {
    return false;
  }

  void printCustom(raw_ostream &OS) const override {
    OS << "GWSResource";
  }
};
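// Example (illustrative): each subclass provides the LLVM RTTI hook
// (classof), so given a MachineMemOperand *MMO, a pass can recognize these
// target-custom pseudo source values with the usual casting utilities:
//
//   if (const PseudoSourceValue *PSV = MMO->getPseudoValue())
//     if (isa<AMDGPUGWSResourcePseudoSourceValue>(PSV))
//       ; // This operand accesses the GWS resource.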
namespace yaml {

struct SIArgument {
  bool IsRegister;
  union {
    StringValue RegisterName;
    unsigned StackOffset;
  };
  Optional<unsigned> Mask;

  // Default constructor, which creates a stack argument.
  SIArgument() : IsRegister(false), StackOffset(0) {}
  SIArgument(const SIArgument &Other) {
    IsRegister = Other.IsRegister;
    if (IsRegister) {
      ::new ((void *)std::addressof(RegisterName))
          StringValue(Other.RegisterName);
    } else
      StackOffset = Other.StackOffset;
    Mask = Other.Mask;
  }
  SIArgument &operator=(const SIArgument &Other) {
    IsRegister = Other.IsRegister;
    if (IsRegister) {
      ::new ((void *)std::addressof(RegisterName))
          StringValue(Other.RegisterName);
    } else
      StackOffset = Other.StackOffset;
    Mask = Other.Mask;
    return *this;
  }
  ~SIArgument() {
    if (IsRegister)
      RegisterName.~StringValue();
  }

  // Helper to create a register or stack argument.
  static inline SIArgument createArgument(bool IsReg) {
    if (IsReg)
      return SIArgument(IsReg);
    return SIArgument();
  }

private:
  // Construct a register argument.
  SIArgument(bool) : IsRegister(true), RegisterName() {}
};
template <> struct MappingTraits<SIArgument> {
  static void mapping(IO &YamlIO, SIArgument &A) {
    if (YamlIO.outputting()) {
      if (A.IsRegister)
        YamlIO.mapRequired("reg", A.RegisterName);
      else
        YamlIO.mapRequired("offset", A.StackOffset);
    } else {
      auto Keys = YamlIO.keys();
      if (is_contained(Keys, "reg")) {
        A = SIArgument::createArgument(true);
        YamlIO.mapRequired("reg", A.RegisterName);
      } else if (is_contained(Keys, "offset"))
        YamlIO.mapRequired("offset", A.StackOffset);
      else
        YamlIO.setError("missing required key 'reg' or 'offset'");
    }
    YamlIO.mapOptional("mask", A.Mask);
  }
  static const bool flow = true;
};
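// Example (illustrative): with the flow mapping above, an SIArgument is
// written either as a register or as a stack offset, plus an optional mask:
//
//   { reg: '$sgpr12' }
//   { reg: '$vgpr31', mask: 1023 }
//   { offset: 16 }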
struct SIArgumentInfo {
  Optional<SIArgument> PrivateSegmentBuffer;
  Optional<SIArgument> DispatchPtr;
  Optional<SIArgument> QueuePtr;
  Optional<SIArgument> KernargSegmentPtr;
  Optional<SIArgument> DispatchID;
  Optional<SIArgument> FlatScratchInit;
  Optional<SIArgument> PrivateSegmentSize;

  Optional<SIArgument> WorkGroupIDX;
  Optional<SIArgument> WorkGroupIDY;
  Optional<SIArgument> WorkGroupIDZ;
  Optional<SIArgument> WorkGroupInfo;
  Optional<SIArgument> PrivateSegmentWaveByteOffset;

  Optional<SIArgument> ImplicitArgPtr;
  Optional<SIArgument> ImplicitBufferPtr;

  Optional<SIArgument> WorkItemIDX;
  Optional<SIArgument> WorkItemIDY;
  Optional<SIArgument> WorkItemIDZ;
};
template <> struct MappingTraits<SIArgumentInfo> {
  static void mapping(IO &YamlIO, SIArgumentInfo &AI) {
    YamlIO.mapOptional("privateSegmentBuffer", AI.PrivateSegmentBuffer);
    YamlIO.mapOptional("dispatchPtr", AI.DispatchPtr);
    YamlIO.mapOptional("queuePtr", AI.QueuePtr);
    YamlIO.mapOptional("kernargSegmentPtr", AI.KernargSegmentPtr);
    YamlIO.mapOptional("dispatchID", AI.DispatchID);
    YamlIO.mapOptional("flatScratchInit", AI.FlatScratchInit);
    YamlIO.mapOptional("privateSegmentSize", AI.PrivateSegmentSize);

    YamlIO.mapOptional("workGroupIDX", AI.WorkGroupIDX);
    YamlIO.mapOptional("workGroupIDY", AI.WorkGroupIDY);
    YamlIO.mapOptional("workGroupIDZ", AI.WorkGroupIDZ);
    YamlIO.mapOptional("workGroupInfo", AI.WorkGroupInfo);
    YamlIO.mapOptional("privateSegmentWaveByteOffset",
                       AI.PrivateSegmentWaveByteOffset);

    YamlIO.mapOptional("implicitArgPtr", AI.ImplicitArgPtr);
    YamlIO.mapOptional("implicitBufferPtr", AI.ImplicitBufferPtr);

    YamlIO.mapOptional("workItemIDX", AI.WorkItemIDX);
    YamlIO.mapOptional("workItemIDY", AI.WorkItemIDY);
    YamlIO.mapOptional("workItemIDZ", AI.WorkItemIDZ);
  }
};
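// Example (illustrative): under the "argumentInfo" key (see the
// machineFunctionInfo mapping below), these fields serialize as:
//
//   argumentInfo:
//     privateSegmentBuffer: { reg: '$sgpr0_sgpr1_sgpr2_sgpr3' }
//     kernargSegmentPtr:    { reg: '$sgpr4_sgpr5' }
//     workGroupIDX:         { reg: '$sgpr6' }
//     workItemIDX:          { reg: '$vgpr0' }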
// Default to default mode for default calling convention.
struct SIMode {
  bool IEEE = true;
  bool DX10Clamp = true;

  SIMode() = default;

  SIMode(const AMDGPU::SIModeRegisterDefaults &Mode) {
    IEEE = Mode.IEEE;
    DX10Clamp = Mode.DX10Clamp;
  }

  bool operator==(const SIMode Other) const {
    return IEEE == Other.IEEE && DX10Clamp == Other.DX10Clamp;
  }
};
template <> struct MappingTraits<SIMode> {
  static void mapping(IO &YamlIO, SIMode &Mode) {
    YamlIO.mapOptional("ieee", Mode.IEEE, true);
    YamlIO.mapOptional("dx10-clamp", Mode.DX10Clamp, true);
  }
};
struct SIMachineFunctionInfo final : public yaml::MachineFunctionInfo {
  uint64_t ExplicitKernArgSize = 0;
  unsigned MaxKernArgAlign = 0;
  unsigned LDSSize = 0;
  bool IsEntryFunction = false;
  bool NoSignedZerosFPMath = false;
  bool MemoryBound = false;
  bool WaveLimiter = false;

  StringValue ScratchRSrcReg = "$private_rsrc_reg";
  StringValue ScratchWaveOffsetReg = "$scratch_wave_offset_reg";
  StringValue FrameOffsetReg = "$fp_reg";
  StringValue StackPtrOffsetReg = "$sp_reg";

  Optional<SIArgumentInfo> ArgInfo;
  SIMode Mode;

  SIMachineFunctionInfo() = default;
  SIMachineFunctionInfo(const llvm::SIMachineFunctionInfo &,
                        const TargetRegisterInfo &TRI);

  void mappingImpl(yaml::IO &YamlIO) override;
  ~SIMachineFunctionInfo() = default;
};
template <> struct MappingTraits<SIMachineFunctionInfo> {
  static void mapping(IO &YamlIO, SIMachineFunctionInfo &MFI) {
    YamlIO.mapOptional("explicitKernArgSize", MFI.ExplicitKernArgSize,
                       UINT64_C(0));
    YamlIO.mapOptional("maxKernArgAlign", MFI.MaxKernArgAlign, 0u);
    YamlIO.mapOptional("ldsSize", MFI.LDSSize, 0u);
    YamlIO.mapOptional("isEntryFunction", MFI.IsEntryFunction, false);
    YamlIO.mapOptional("noSignedZerosFPMath", MFI.NoSignedZerosFPMath, false);
    YamlIO.mapOptional("memoryBound", MFI.MemoryBound, false);
    YamlIO.mapOptional("waveLimiter", MFI.WaveLimiter, false);
    YamlIO.mapOptional("scratchRSrcReg", MFI.ScratchRSrcReg,
                       StringValue("$private_rsrc_reg"));
    YamlIO.mapOptional("scratchWaveOffsetReg", MFI.ScratchWaveOffsetReg,
                       StringValue("$scratch_wave_offset_reg"));
    YamlIO.mapOptional("frameOffsetReg", MFI.FrameOffsetReg,
                       StringValue("$fp_reg"));
    YamlIO.mapOptional("stackPtrOffsetReg", MFI.StackPtrOffsetReg,
                       StringValue("$sp_reg"));
    YamlIO.mapOptional("argumentInfo", MFI.ArgInfo);
    YamlIO.mapOptional("mode", MFI.Mode, SIMode());
  }
};
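// Example (illustrative): in serialized MIR this appears as a
// machineFunctionInfo block; keys left out keep the defaults above:
//
//   machineFunctionInfo:
//     explicitKernArgSize: 16
//     maxKernArgAlign:     8
//     isEntryFunction:     true
//     scratchRSrcReg:      '$sgpr96_sgpr97_sgpr98_sgpr99'
//     stackPtrOffsetReg:   '$sgpr32'
//     mode:
//       ieee:       true
//       dx10-clamp: true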
} // end namespace yaml
/// This class keeps track of the SPI_SP_INPUT_ADDR config register, which
/// tells the hardware which interpolation parameters to load.
class SIMachineFunctionInfo final : public AMDGPUMachineFunction {
  friend class GCNTargetMachine;

  unsigned TIDReg = AMDGPU::NoRegister;

  // Registers that may be reserved for spilling purposes. These may be the
  // same as the input registers.
  unsigned ScratchRSrcReg = AMDGPU::PRIVATE_RSRC_REG;
  unsigned ScratchWaveOffsetReg = AMDGPU::SCRATCH_WAVE_OFFSET_REG;

  // This is the current function's incremented size from the kernel's scratch
  // wave offset register. For an entry function, this is exactly the same as
  // the ScratchWaveOffsetReg.
  unsigned FrameOffsetReg = AMDGPU::FP_REG;

  // Top of the stack SGPR offset derived from the ScratchWaveOffsetReg.
  unsigned StackPtrOffsetReg = AMDGPU::SP_REG;

  AMDGPUFunctionArgInfo ArgInfo;

  // State of MODE register, assumed FP mode.
  AMDGPU::SIModeRegisterDefaults Mode;
  // Graphics info.
  unsigned PSInputAddr = 0;
  unsigned PSInputEnable = 0;

  /// Number of bytes of arguments this function has on the stack. If the
  /// callee is expected to restore the argument stack this should be a
  /// multiple of 16, all usable during a tail call.
  ///
  /// The alternative would forbid tail call optimisation in some cases: if we
  /// want to transfer control from a function with 8-bytes of stack-argument
  /// space to a function with 16-bytes then misalignment of this value would
  /// make a stack adjustment necessary, which could not be undone by the
  /// callee.
  unsigned BytesInStackArgArea = 0;

  bool ReturnsVoid = true;
  // A pair of default/requested minimum/maximum flat work group sizes.
  // Minimum - first, maximum - second.
  std::pair<unsigned, unsigned> FlatWorkGroupSizes = {0, 0};

  // A pair of default/requested minimum/maximum number of waves per execution
  // unit. Minimum - first, maximum - second.
  std::pair<unsigned, unsigned> WavesPerEU = {0, 0};

  DenseMap<const Value *,
           std::unique_ptr<const AMDGPUBufferPseudoSourceValue>> BufferPSVs;
  DenseMap<const Value *,
           std::unique_ptr<const AMDGPUImagePseudoSourceValue>> ImagePSVs;
  std::unique_ptr<const AMDGPUGWSResourcePseudoSourceValue> GWSResourcePSV;
private:
  unsigned LDSWaveSpillSize = 0;
  unsigned NumUserSGPRs = 0;
  unsigned NumSystemSGPRs = 0;

  bool HasSpilledSGPRs = false;
  bool HasSpilledVGPRs = false;
  bool HasNonSpillStackObjects = false;
  bool IsStackRealigned = false;

  unsigned NumSpilledSGPRs = 0;
  unsigned NumSpilledVGPRs = 0;

  // Feature bits required for inputs passed in user SGPRs.
  bool PrivateSegmentBuffer : 1;
  bool DispatchPtr : 1;
  bool QueuePtr : 1;
  bool KernargSegmentPtr : 1;
  bool DispatchID : 1;
  bool FlatScratchInit : 1;

  // Feature bits required for inputs passed in system SGPRs.
  bool WorkGroupIDX : 1; // Always initialized.
  bool WorkGroupIDY : 1;
  bool WorkGroupIDZ : 1;
  bool WorkGroupInfo : 1;
  bool PrivateSegmentWaveByteOffset : 1;

  bool WorkItemIDX : 1; // Always initialized.
  bool WorkItemIDY : 1;
  bool WorkItemIDZ : 1;

  // Private memory buffer:
  // Compute directly in sgpr[0:1].
  // Other shaders indirect 64-bits at sgpr[0:1].
  bool ImplicitBufferPtr : 1;

  // Pointer to where the ABI inserts special kernel arguments separate from
  // the user arguments. This is an offset from the KernargSegmentPtr.
  bool ImplicitArgPtr : 1;

  // The hard-wired high half of the address of the global information table
  // for AMDPAL OS type. 0xffffffff represents no hard-wired high half, since
  // current hardware only allows a 16-bit value.
  unsigned GITPtrHigh;

  unsigned HighBitsOf32BitAddress;
  unsigned GDSSize;

  // Current recorded maximum possible occupancy.
  unsigned Occupancy;

  MCPhysReg getNextUserSGPR() const;

  MCPhysReg getNextSystemSGPR() const;
public:
  struct SpilledReg {
    unsigned VGPR = 0;
    int Lane = -1;

    SpilledReg() = default;
    SpilledReg(unsigned R, int L) : VGPR(R), Lane(L) {}

    bool hasLane() { return Lane != -1; }
    bool hasReg() { return VGPR != 0; }
  };
  struct SGPRSpillVGPRCSR {
    // VGPR used for SGPR spills.
    unsigned VGPR;

    // If the VGPR is a CSR, the stack slot used to save/restore it in the
    // prolog/epilog.
    Optional<int> FI;

    SGPRSpillVGPRCSR(unsigned V, Optional<int> F) : VGPR(V), FI(F) {}
  };
  struct VGPRSpillToAGPR {
    SmallVector<MCPhysReg, 32> Lanes;
    bool FullyAllocated = false;
  };
  SparseBitVector<> WWMReservedRegs;

  void ReserveWWMRegister(unsigned reg) { WWMReservedRegs.set(reg); }
private:
  // SGPR->VGPR spilling support.
  using SpillRegMask = std::pair<unsigned, unsigned>;

  // Track VGPR + wave index for each subregister of the SGPR spilled to
  // frameindex key.
  DenseMap<int, std::vector<SpilledReg>> SGPRToVGPRSpills;
  unsigned NumVGPRSpillLanes = 0;
  SmallVector<SGPRSpillVGPRCSR, 2> SpillVGPRs;

  DenseMap<int, VGPRSpillToAGPR> VGPRToAGPRSpills;

  // AGPRs used for VGPR spills.
  SmallVector<MCPhysReg, 32> SpillAGPR;

  // VGPRs used for AGPR spills.
  SmallVector<MCPhysReg, 32> SpillVGPR;

public: // FIXME
  /// If set, the SGPR used to save and restore the register that serves as
  /// the frame pointer.
  unsigned SGPRForFPSaveRestoreCopy = 0;
  Optional<int> FramePointerSaveIndex;
public:
  SIMachineFunctionInfo(const MachineFunction &MF);

  bool initializeBaseYamlFields(const yaml::SIMachineFunctionInfo &YamlMFI);

  ArrayRef<SpilledReg> getSGPRToVGPRSpills(int FrameIndex) const {
    auto I = SGPRToVGPRSpills.find(FrameIndex);
    return (I == SGPRToVGPRSpills.end()) ? ArrayRef<SpilledReg>()
                                         : makeArrayRef(I->second);
  }
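  // Example (illustrative): assuming MFI points at this function's
  // SIMachineFunctionInfo and allocateSGPRSpillToVGPR(MF, FI) succeeded for
  // frame index FI, the SGPR spill can be lowered to V_WRITELANE/V_READLANE
  // operations, one lane per spilled subregister:
  //
  //   for (const SpilledReg &Spill : MFI->getSGPRToVGPRSpills(FI))
  //     ; // Spill.VGPR holds the spilled value in lane Spill.Lane.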
  ArrayRef<SGPRSpillVGPRCSR> getSGPRSpillVGPRs() const {
    return SpillVGPRs;
  }

  ArrayRef<MCPhysReg> getAGPRSpillVGPRs() const {
    return SpillAGPR;
  }

  ArrayRef<MCPhysReg> getVGPRSpillAGPRs() const {
    return SpillVGPR;
  }
  MCPhysReg getVGPRToAGPRSpill(int FrameIndex, unsigned Lane) const {
    auto I = VGPRToAGPRSpills.find(FrameIndex);
    return (I == VGPRToAGPRSpills.end()) ? (MCPhysReg)AMDGPU::NoRegister
                                         : I->second.Lanes[Lane];
  }

  AMDGPU::SIModeRegisterDefaults getMode() const {
    return Mode;
  }
  bool haveFreeLanesForSGPRSpill(const MachineFunction &MF,
                                 unsigned NumLane) const;
  bool allocateSGPRSpillToVGPR(MachineFunction &MF, int FI);
  bool allocateVGPRSpillToAGPR(MachineFunction &MF, int FI, bool isAGPRtoVGPR);
  void removeDeadFrameIndices(MachineFrameInfo &MFI);

  bool hasCalculatedTID() const { return TIDReg != 0; }
  unsigned getTIDReg() const { return TIDReg; }
  void setTIDReg(unsigned Reg) { TIDReg = Reg; }
  unsigned getBytesInStackArgArea() const {
    return BytesInStackArgArea;
  }

  void setBytesInStackArgArea(unsigned Bytes) {
    BytesInStackArgArea = Bytes;
  }
  // Add user SGPRs.
  unsigned addPrivateSegmentBuffer(const SIRegisterInfo &TRI);
  unsigned addDispatchPtr(const SIRegisterInfo &TRI);
  unsigned addQueuePtr(const SIRegisterInfo &TRI);
  unsigned addKernargSegmentPtr(const SIRegisterInfo &TRI);
  unsigned addDispatchID(const SIRegisterInfo &TRI);
  unsigned addFlatScratchInit(const SIRegisterInfo &TRI);
  unsigned addImplicitBufferPtr(const SIRegisterInfo &TRI);
  // Add system SGPRs.
  unsigned addWorkGroupIDX() {
    ArgInfo.WorkGroupIDX = ArgDescriptor::createRegister(getNextSystemSGPR());
    NumSystemSGPRs += 1;
    return ArgInfo.WorkGroupIDX.getRegister();
  }

  unsigned addWorkGroupIDY() {
    ArgInfo.WorkGroupIDY = ArgDescriptor::createRegister(getNextSystemSGPR());
    NumSystemSGPRs += 1;
    return ArgInfo.WorkGroupIDY.getRegister();
  }

  unsigned addWorkGroupIDZ() {
    ArgInfo.WorkGroupIDZ = ArgDescriptor::createRegister(getNextSystemSGPR());
    NumSystemSGPRs += 1;
    return ArgInfo.WorkGroupIDZ.getRegister();
  }

  unsigned addWorkGroupInfo() {
    ArgInfo.WorkGroupInfo = ArgDescriptor::createRegister(getNextSystemSGPR());
    NumSystemSGPRs += 1;
    return ArgInfo.WorkGroupInfo.getRegister();
  }
  // Add special VGPR inputs.
  void setWorkItemIDX(ArgDescriptor Arg) {
    ArgInfo.WorkItemIDX = Arg;
  }

  void setWorkItemIDY(ArgDescriptor Arg) {
    ArgInfo.WorkItemIDY = Arg;
  }

  void setWorkItemIDZ(ArgDescriptor Arg) {
    ArgInfo.WorkItemIDZ = Arg;
  }

  unsigned addPrivateSegmentWaveByteOffset() {
    ArgInfo.PrivateSegmentWaveByteOffset =
        ArgDescriptor::createRegister(getNextSystemSGPR());
    NumSystemSGPRs += 1;
    return ArgInfo.PrivateSegmentWaveByteOffset.getRegister();
  }

  void setPrivateSegmentWaveByteOffset(unsigned Reg) {
    ArgInfo.PrivateSegmentWaveByteOffset = ArgDescriptor::createRegister(Reg);
  }
  bool hasPrivateSegmentBuffer() const {
    return PrivateSegmentBuffer;
  }

  bool hasDispatchPtr() const {
    return DispatchPtr;
  }

  bool hasQueuePtr() const {
    return QueuePtr;
  }

  bool hasKernargSegmentPtr() const {
    return KernargSegmentPtr;
  }

  bool hasDispatchID() const {
    return DispatchID;
  }

  bool hasFlatScratchInit() const {
    return FlatScratchInit;
  }

  bool hasWorkGroupIDX() const {
    return WorkGroupIDX;
  }

  bool hasWorkGroupIDY() const {
    return WorkGroupIDY;
  }

  bool hasWorkGroupIDZ() const {
    return WorkGroupIDZ;
  }

  bool hasWorkGroupInfo() const {
    return WorkGroupInfo;
  }

  bool hasPrivateSegmentWaveByteOffset() const {
    return PrivateSegmentWaveByteOffset;
  }

  bool hasWorkItemIDX() const {
    return WorkItemIDX;
  }

  bool hasWorkItemIDY() const {
    return WorkItemIDY;
  }

  bool hasWorkItemIDZ() const {
    return WorkItemIDZ;
  }

  bool hasImplicitArgPtr() const {
    return ImplicitArgPtr;
  }

  bool hasImplicitBufferPtr() const {
    return ImplicitBufferPtr;
  }
  AMDGPUFunctionArgInfo &getArgInfo() {
    return ArgInfo;
  }

  const AMDGPUFunctionArgInfo &getArgInfo() const {
    return ArgInfo;
  }

  std::pair<const ArgDescriptor *, const TargetRegisterClass *>
  getPreloadedValue(AMDGPUFunctionArgInfo::PreloadedValue Value) const {
    return ArgInfo.getPreloadedValue(Value);
  }

  Register getPreloadedReg(AMDGPUFunctionArgInfo::PreloadedValue Value) const {
    auto Arg = ArgInfo.getPreloadedValue(Value).first;
    return Arg ? Arg->getRegister() : Register();
  }
  unsigned getGITPtrHigh() const {
    return GITPtrHigh;
  }

  unsigned get32BitAddressHighBits() const {
    return HighBitsOf32BitAddress;
  }

  unsigned getGDSSize() const {
    return GDSSize;
  }

  unsigned getNumUserSGPRs() const {
    return NumUserSGPRs;
  }

  unsigned getNumPreloadedSGPRs() const {
    return NumUserSGPRs + NumSystemSGPRs;
  }

  unsigned getPrivateSegmentWaveByteOffsetSystemSGPR() const {
    return ArgInfo.PrivateSegmentWaveByteOffset.getRegister();
  }
  /// Returns the physical register reserved for use as the resource
  /// descriptor for scratch accesses.
  unsigned getScratchRSrcReg() const {
    return ScratchRSrcReg;
  }

  void setScratchRSrcReg(unsigned Reg) {
    assert(Reg != 0 && "Should never be unset");
    ScratchRSrcReg = Reg;
  }

  unsigned getScratchWaveOffsetReg() const {
    return ScratchWaveOffsetReg;
  }

  unsigned getFrameOffsetReg() const {
    return FrameOffsetReg;
  }

  void setFrameOffsetReg(unsigned Reg) {
    assert(Reg != 0 && "Should never be unset");
    FrameOffsetReg = Reg;
  }

  void setStackPtrOffsetReg(unsigned Reg) {
    assert(Reg != 0 && "Should never be unset");
    StackPtrOffsetReg = Reg;
  }

  // Note the unset value for this is AMDGPU::SP_REG rather than
  // NoRegister. This is mostly a workaround for MIR tests where state that
  // can't be directly computed from the function is not preserved in
  // serialized MIR.
  unsigned getStackPtrOffsetReg() const {
    return StackPtrOffsetReg;
  }

  void setScratchWaveOffsetReg(unsigned Reg) {
    assert(Reg != 0 && "Should never be unset");
    ScratchWaveOffsetReg = Reg;
  }
  unsigned getQueuePtrUserSGPR() const {
    return ArgInfo.QueuePtr.getRegister();
  }

  unsigned getImplicitBufferPtrUserSGPR() const {
    return ArgInfo.ImplicitBufferPtr.getRegister();
  }
  bool hasSpilledSGPRs() const {
    return HasSpilledSGPRs;
  }

  void setHasSpilledSGPRs(bool Spill = true) {
    HasSpilledSGPRs = Spill;
  }

  bool hasSpilledVGPRs() const {
    return HasSpilledVGPRs;
  }

  void setHasSpilledVGPRs(bool Spill = true) {
    HasSpilledVGPRs = Spill;
  }

  bool hasNonSpillStackObjects() const {
    return HasNonSpillStackObjects;
  }

  void setHasNonSpillStackObjects(bool StackObject = true) {
    HasNonSpillStackObjects = StackObject;
  }

  bool isStackRealigned() const {
    return IsStackRealigned;
  }

  void setIsStackRealigned(bool Realigned = true) {
    IsStackRealigned = Realigned;
  }

  unsigned getNumSpilledSGPRs() const {
    return NumSpilledSGPRs;
  }

  unsigned getNumSpilledVGPRs() const {
    return NumSpilledVGPRs;
  }

  void addToSpilledSGPRs(unsigned num) {
    NumSpilledSGPRs += num;
  }

  void addToSpilledVGPRs(unsigned num) {
    NumSpilledVGPRs += num;
  }
  unsigned getPSInputAddr() const {
    return PSInputAddr;
  }

  unsigned getPSInputEnable() const {
    return PSInputEnable;
  }

  bool isPSInputAllocated(unsigned Index) const {
    return PSInputAddr & (1 << Index);
  }

  void markPSInputAllocated(unsigned Index) {
    PSInputAddr |= 1 << Index;
  }

  void markPSInputEnabled(unsigned Index) {
    PSInputEnable |= 1 << Index;
  }
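  // Example (illustrative): bit N of PSInputAddr/PSInputEnable corresponds
  // to pixel shader input parameter N:
  //
  //   MFI->markPSInputAllocated(0);            // PSInputAddr   |= 0x1
  //   MFI->markPSInputEnabled(0);              // PSInputEnable |= 0x1
  //   bool Alloc = MFI->isPSInputAllocated(0); // now true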
  bool returnsVoid() const {
    return ReturnsVoid;
  }

  void setIfReturnsVoid(bool Value) {
    ReturnsVoid = Value;
  }
  /// \returns A pair of default/requested minimum/maximum flat work group
  /// sizes for this function.
  std::pair<unsigned, unsigned> getFlatWorkGroupSizes() const {
    return FlatWorkGroupSizes;
  }

  /// \returns Default/requested minimum flat work group size for this
  /// function.
  unsigned getMinFlatWorkGroupSize() const {
    return FlatWorkGroupSizes.first;
  }

  /// \returns Default/requested maximum flat work group size for this
  /// function.
  unsigned getMaxFlatWorkGroupSize() const {
    return FlatWorkGroupSizes.second;
  }

  /// \returns A pair of default/requested minimum/maximum number of waves per
  /// execution unit.
  std::pair<unsigned, unsigned> getWavesPerEU() const {
    return WavesPerEU;
  }

  /// \returns Default/requested minimum number of waves per execution unit.
  unsigned getMinWavesPerEU() const {
    return WavesPerEU.first;
  }

  /// \returns Default/requested maximum number of waves per execution unit.
  unsigned getMaxWavesPerEU() const {
    return WavesPerEU.second;
  }
  /// \returns SGPR used for \p Dim's work group ID.
  unsigned getWorkGroupIDSGPR(unsigned Dim) const {
    switch (Dim) {
    case 0:
      assert(hasWorkGroupIDX());
      return ArgInfo.WorkGroupIDX.getRegister();
    case 1:
      assert(hasWorkGroupIDY());
      return ArgInfo.WorkGroupIDY.getRegister();
    case 2:
      assert(hasWorkGroupIDZ());
      return ArgInfo.WorkGroupIDZ.getRegister();
    }
    llvm_unreachable("unexpected dimension");
  }
  unsigned getLDSWaveSpillSize() const {
    return LDSWaveSpillSize;
  }
  const AMDGPUBufferPseudoSourceValue *getBufferPSV(const SIInstrInfo &TII,
                                                    const Value *BufferRsrc) {
    assert(BufferRsrc);
    auto PSV = BufferPSVs.try_emplace(
        BufferRsrc,
        llvm::make_unique<AMDGPUBufferPseudoSourceValue>(TII));
    return PSV.first->second.get();
  }

  const AMDGPUImagePseudoSourceValue *getImagePSV(const SIInstrInfo &TII,
                                                  const Value *ImgRsrc) {
    assert(ImgRsrc);
    auto PSV = ImagePSVs.try_emplace(
        ImgRsrc,
        llvm::make_unique<AMDGPUImagePseudoSourceValue>(TII));
    return PSV.first->second.get();
  }

  const AMDGPUGWSResourcePseudoSourceValue *getGWSPSV(const SIInstrInfo &TII) {
    if (!GWSResourcePSV) {
      GWSResourcePSV =
          llvm::make_unique<AMDGPUGWSResourcePseudoSourceValue>(TII);
    }

    return GWSResourcePSV.get();
  }
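  // Example (illustrative): the getters above memoize one pseudo source
  // value per resource, so repeated lookups return a stable pointer that can
  // back a MachinePointerInfo when building memory operands (Rsrc, Size, and
  // Align are placeholders):
  //
  //   const AMDGPUBufferPseudoSourceValue *PSV = MFI->getBufferPSV(TII, Rsrc);
  //   MachineMemOperand *MMO = MF.getMachineMemOperand(
  //       MachinePointerInfo(PSV), MachineMemOperand::MOLoad, Size, Align);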
  unsigned getOccupancy() const {
    return Occupancy;
  }

  unsigned getMinAllowedOccupancy() const {
    if (!isMemoryBound() && !needsWaveLimiter())
      return Occupancy;
    return (Occupancy < 4) ? Occupancy : 4;
  }

  void limitOccupancy(const MachineFunction &MF);

  void limitOccupancy(unsigned Limit) {
    if (Occupancy > Limit)
      Occupancy = Limit;
  }
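  // Example (illustrative): limitOccupancy only ever lowers the recorded
  // value, so passes can clamp it as register or LDS pressure grows:
  //
  //   MFI->limitOccupancy(8);  // Occupancy = min(Occupancy, 8)
  //   unsigned MinOcc = MFI->getMinAllowedOccupancy(); // capped at 4 when
  //                                                    // memory bound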
  void increaseOccupancy(const MachineFunction &MF, unsigned Limit) {
    if (Occupancy < Limit)
      Occupancy = Limit;
    limitOccupancy(MF);
  }
};
} // end namespace llvm

#endif // LLVM_LIB_TARGET_AMDGPU_SIMACHINEFUNCTIONINFO_H