//===-- SIRegisterInfo.cpp - SI Register Information ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// SI implementation of the TargetRegisterInfo class.
//
//===----------------------------------------------------------------------===//

#include "SIRegisterInfo.h"
#include "AMDGPURegisterBankInfo.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "MCTargetDesc/AMDGPUInstPrinter.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"

using namespace llvm;
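
// The helpers below classify register-pressure sets: a pressure set is treated
// as an SGPR/VGPR/AGPR set if a representative register (SGPR0, VGPR0, AGPR0)
// contributes one of its register units to that set.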
static bool hasPressureSet(const int *PSets, unsigned PSetID) {
  for (unsigned i = 0; PSets[i] != -1; ++i) {
    if (PSets[i] == (int)PSetID)
      return true;
  }
  return false;
}

void SIRegisterInfo::classifyPressureSet(unsigned PSetID, unsigned Reg,
                                         BitVector &PressureSets) const {
  for (MCRegUnitIterator U(Reg, this); U.isValid(); ++U) {
    const int *PSets = getRegUnitPressureSets(*U);
    if (hasPressureSet(PSets, PSetID)) {
      PressureSets.set(PSetID);
      break;
    }
  }
}
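
// Command-line overrides for the SGPR spill strategy: SGPRs may be spilled via
// scalar (SMEM) stores when the subtarget supports them, or lane-by-lane into
// VGPRs; otherwise they fall back to ordinary scratch memory.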
static cl::opt<bool> EnableSpillSGPRToSMEM(
  "amdgpu-spill-sgpr-to-smem",
  cl::desc("Use scalar stores to spill SGPRs if supported by subtarget"),
  cl::init(false));

static cl::opt<bool> EnableSpillSGPRToVGPR(
  "amdgpu-spill-sgpr-to-vgpr",
  cl::desc("Enable spilling VGPRs to SGPRs"),
  cl::ReallyHidden,
  cl::init(true));
SIRegisterInfo::SIRegisterInfo(const GCNSubtarget &ST) :
  AMDGPURegisterInfo(),
  ST(ST),
  SGPRPressureSets(getNumRegPressureSets()),
  VGPRPressureSets(getNumRegPressureSets()),
  AGPRPressureSets(getNumRegPressureSets()),
  SpillSGPRToVGPR(false),
  SpillSGPRToSMEM(false),
  isWave32(ST.isWave32()) {
  if (EnableSpillSGPRToSMEM && ST.hasScalarStores())
    SpillSGPRToSMEM = true;
  else if (EnableSpillSGPRToVGPR)
    SpillSGPRToVGPR = true;

  unsigned NumRegPressureSets = getNumRegPressureSets();

  SGPRSetID = NumRegPressureSets;
  VGPRSetID = NumRegPressureSets;
  AGPRSetID = NumRegPressureSets;

  for (unsigned i = 0; i < NumRegPressureSets; ++i) {
    classifyPressureSet(i, AMDGPU::SGPR0, SGPRPressureSets);
    classifyPressureSet(i, AMDGPU::VGPR0, VGPRPressureSets);
    classifyPressureSet(i, AMDGPU::AGPR0, AGPRPressureSets);
  }

  // Determine the number of reg units for each pressure set.
  std::vector<unsigned> PressureSetRegUnits(NumRegPressureSets, 0);
  for (unsigned i = 0, e = getNumRegUnits(); i != e; ++i) {
    const int *PSets = getRegUnitPressureSets(i);
    for (unsigned j = 0; PSets[j] != -1; ++j) {
      ++PressureSetRegUnits[PSets[j]];
    }
  }

  unsigned VGPRMax = 0, SGPRMax = 0, AGPRMax = 0;
  for (unsigned i = 0; i < NumRegPressureSets; ++i) {
    if (isVGPRPressureSet(i) && PressureSetRegUnits[i] > VGPRMax) {
      VGPRSetID = i;
      VGPRMax = PressureSetRegUnits[i];
      continue;
    }
    if (isSGPRPressureSet(i) && PressureSetRegUnits[i] > SGPRMax) {
      SGPRSetID = i;
      SGPRMax = PressureSetRegUnits[i];
    }
    if (isAGPRPressureSet(i) && PressureSetRegUnits[i] > AGPRMax) {
      AGPRSetID = i;
      AGPRMax = PressureSetRegUnits[i];
    }
  }

  assert(SGPRSetID < NumRegPressureSets &&
         VGPRSetID < NumRegPressureSets &&
         AGPRSetID < NumRegPressureSets);
}
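
// The scratch resource descriptor occupies a 4-SGPR tuple; reserve it from the
// highest 4-aligned SGPRs available to the function so it stays out of the
// normally allocatable range.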
unsigned SIRegisterInfo::reservedPrivateSegmentBufferReg(
  const MachineFunction &MF) const {
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  unsigned BaseIdx = alignDown(ST.getMaxNumSGPRs(MF), 4) - 4;
  unsigned BaseReg(AMDGPU::SGPR_32RegClass.getRegister(BaseIdx));
  return getMatchingSuperReg(BaseReg, AMDGPU::sub0, &AMDGPU::SGPR_128RegClass);
}
static unsigned findPrivateSegmentWaveByteOffsetRegIndex(unsigned RegCount) {
  unsigned Reg;

  // Try to place it in a hole after PrivateSegmentBufferReg.
  if (RegCount & 3) {
    // We cannot put the segment buffer in (Idx - 4) ... (Idx - 1) due to
    // alignment constraints, so we have a hole where we can put the wave
    // offset.
    Reg = RegCount - 1;
  } else {
    // We can put the segment buffer in (Idx - 4) ... (Idx - 1) and put the
    // wave offset before it.
    Reg = RegCount - 5;
  }

  return Reg;
}
unsigned SIRegisterInfo::reservedPrivateSegmentWaveByteOffsetReg(
  const MachineFunction &MF) const {
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  unsigned Reg = findPrivateSegmentWaveByteOffsetRegIndex(ST.getMaxNumSGPRs(MF));
  return AMDGPU::SGPR_32RegClass.getRegister(Reg);
}
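
// getReservedRegs builds the set of registers the allocator must never touch:
// special hardware registers (EXEC, FLAT_SCR, M0, trap-handler and aperture
// registers), SGPRs/VGPRs/AGPRs beyond the per-function limits, and registers
// already claimed for the scratch descriptor, stack/frame pointers, WWM, and
// spill bookkeeping.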
BitVector SIRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());

  // EXEC_LO and EXEC_HI could be allocated and used as regular registers, but
  // this seems likely to result in bugs, so I'm marking them as reserved.
  reserveRegisterTuples(Reserved, AMDGPU::EXEC);
  reserveRegisterTuples(Reserved, AMDGPU::FLAT_SCR);

  // M0 has to be reserved so that llvm accepts it as a live-in into a block.
  reserveRegisterTuples(Reserved, AMDGPU::M0);

  // Reserve src_vccz, src_execz, src_scc.
  reserveRegisterTuples(Reserved, AMDGPU::SRC_VCCZ);
  reserveRegisterTuples(Reserved, AMDGPU::SRC_EXECZ);
  reserveRegisterTuples(Reserved, AMDGPU::SRC_SCC);

  // Reserve the memory aperture registers.
  reserveRegisterTuples(Reserved, AMDGPU::SRC_SHARED_BASE);
  reserveRegisterTuples(Reserved, AMDGPU::SRC_SHARED_LIMIT);
  reserveRegisterTuples(Reserved, AMDGPU::SRC_PRIVATE_BASE);
  reserveRegisterTuples(Reserved, AMDGPU::SRC_PRIVATE_LIMIT);

  // Reserve src_pops_exiting_wave_id - support is not implemented in Codegen.
  reserveRegisterTuples(Reserved, AMDGPU::SRC_POPS_EXITING_WAVE_ID);

  // Reserve xnack_mask registers - support is not implemented in Codegen.
  reserveRegisterTuples(Reserved, AMDGPU::XNACK_MASK);

  // Reserve lds_direct register - support is not implemented in Codegen.
  reserveRegisterTuples(Reserved, AMDGPU::LDS_DIRECT);

  // Reserve Trap Handler registers - support is not implemented in Codegen.
  reserveRegisterTuples(Reserved, AMDGPU::TBA);
  reserveRegisterTuples(Reserved, AMDGPU::TMA);
  reserveRegisterTuples(Reserved, AMDGPU::TTMP0_TTMP1);
  reserveRegisterTuples(Reserved, AMDGPU::TTMP2_TTMP3);
  reserveRegisterTuples(Reserved, AMDGPU::TTMP4_TTMP5);
  reserveRegisterTuples(Reserved, AMDGPU::TTMP6_TTMP7);
  reserveRegisterTuples(Reserved, AMDGPU::TTMP8_TTMP9);
  reserveRegisterTuples(Reserved, AMDGPU::TTMP10_TTMP11);
  reserveRegisterTuples(Reserved, AMDGPU::TTMP12_TTMP13);
  reserveRegisterTuples(Reserved, AMDGPU::TTMP14_TTMP15);

  // Reserve null register - it shall never be allocated.
  reserveRegisterTuples(Reserved, AMDGPU::SGPR_NULL);

  // Disallow vcc_hi allocation in wave32. It may be allocated but most likely
  // will result in bugs.
  if (isWave32) {
    Reserved.set(AMDGPU::VCC);
    Reserved.set(AMDGPU::VCC_HI);
  }

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();

  unsigned MaxNumSGPRs = ST.getMaxNumSGPRs(MF);
  unsigned TotalNumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
  for (unsigned i = MaxNumSGPRs; i < TotalNumSGPRs; ++i) {
    unsigned Reg = AMDGPU::SGPR_32RegClass.getRegister(i);
    reserveRegisterTuples(Reserved, Reg);
  }

  unsigned MaxNumVGPRs = ST.getMaxNumVGPRs(MF);
  unsigned TotalNumVGPRs = AMDGPU::VGPR_32RegClass.getNumRegs();
  for (unsigned i = MaxNumVGPRs; i < TotalNumVGPRs; ++i) {
    unsigned Reg = AMDGPU::VGPR_32RegClass.getRegister(i);
    reserveRegisterTuples(Reserved, Reg);
    Reg = AMDGPU::AGPR_32RegClass.getRegister(i);
    reserveRegisterTuples(Reserved, Reg);
  }

  // Reserve the remaining AGPRs if there are no instructions to use them.
  if (!ST.hasMAIInsts()) {
    for (unsigned i = 0; i < MaxNumVGPRs; ++i) {
      unsigned Reg = AMDGPU::AGPR_32RegClass.getRegister(i);
      reserveRegisterTuples(Reserved, Reg);
    }
  }

  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  unsigned ScratchWaveOffsetReg = MFI->getScratchWaveOffsetReg();
  if (ScratchWaveOffsetReg != AMDGPU::NoRegister) {
    // Reserve 1 SGPR for scratch wave offset in case we need to spill.
    reserveRegisterTuples(Reserved, ScratchWaveOffsetReg);
  }

  unsigned ScratchRSrcReg = MFI->getScratchRSrcReg();
  if (ScratchRSrcReg != AMDGPU::NoRegister) {
    // Reserve 4 SGPRs for the scratch buffer resource descriptor in case we
    // need to spill.
    // TODO: May need to reserve a VGPR if doing LDS spilling.
    reserveRegisterTuples(Reserved, ScratchRSrcReg);
    assert(!isSubRegister(ScratchRSrcReg, ScratchWaveOffsetReg));
  }

  // We have to assume the SP is needed in case there are calls in the function,
  // which is detected after the function is lowered. If we aren't really going
  // to need SP, don't bother reserving it.
  unsigned StackPtrReg = MFI->getStackPtrOffsetReg();

  if (StackPtrReg != AMDGPU::NoRegister) {
    reserveRegisterTuples(Reserved, StackPtrReg);
    assert(!isSubRegister(ScratchRSrcReg, StackPtrReg));
  }

  unsigned FrameReg = MFI->getFrameOffsetReg();
  if (FrameReg != AMDGPU::NoRegister) {
    reserveRegisterTuples(Reserved, FrameReg);
    assert(!isSubRegister(ScratchRSrcReg, FrameReg));
  }

  for (unsigned Reg : MFI->WWMReservedRegs) {
    reserveRegisterTuples(Reserved, Reg);
  }

  // FIXME: Stop using reserved registers for this.
  for (MCPhysReg Reg : MFI->getAGPRSpillVGPRs())
    reserveRegisterTuples(Reserved, Reg);

  for (MCPhysReg Reg : MFI->getVGPRSpillAGPRs())
    reserveRegisterTuples(Reserved, Reg);

  return Reserved;
}
bool SIRegisterInfo::canRealignStack(const MachineFunction &MF) const {
  const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  // On entry, the base address is 0, so it can't possibly need any more
  // alignment.

  // FIXME: Should be able to specify the entry frame alignment per calling
  // convention instead.
  if (Info->isEntryFunction())
    return false;

  return TargetRegisterInfo::canRealignStack(MF);
}
bool SIRegisterInfo::requiresRegisterScavenging(const MachineFunction &Fn) const {
  const SIMachineFunctionInfo *Info = Fn.getInfo<SIMachineFunctionInfo>();
  if (Info->isEntryFunction()) {
    const MachineFrameInfo &MFI = Fn.getFrameInfo();
    return MFI.hasStackObjects() || MFI.hasCalls();
  }

  // May need scavenger for dealing with callee saved registers.
  return true;
}
bool SIRegisterInfo::requiresFrameIndexScavenging(
  const MachineFunction &MF) const {
  // Do not use frame virtual registers. They used to be used for SGPRs, but
  // once we reach PrologEpilogInserter, we can no longer spill SGPRs. If the
  // scavenger fails, we can increment/decrement the necessary SGPRs to avoid a
  // dead frame index.
  return false;
}
bool SIRegisterInfo::requiresFrameIndexReplacementScavenging(
  const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  return MFI.hasStackObjects();
}
bool SIRegisterInfo::requiresVirtualBaseRegisters(
  const MachineFunction &) const {
  // There are no special dedicated stack or frame pointers.
  return true;
}
bool SIRegisterInfo::trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
  // This helps catch bugs as verifier errors.
  return true;
}
int64_t SIRegisterInfo::getMUBUFInstrOffset(const MachineInstr *MI) const {
  assert(SIInstrInfo::isMUBUF(*MI));

  int OffIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                          AMDGPU::OpName::offset);
  return MI->getOperand(OffIdx).getImm();
}
int64_t SIRegisterInfo::getFrameIndexInstrOffset(const MachineInstr *MI,
                                                 int Idx) const {
  if (!SIInstrInfo::isMUBUF(*MI))
    return 0;

  assert(Idx == AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                           AMDGPU::OpName::vaddr) &&
         "Should never see frame index on non-address operand");

  return getMUBUFInstrOffset(MI);
}
bool SIRegisterInfo::needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
  if (!MI->mayLoadOrStore())
    return false;

  int64_t FullOffset = Offset + getMUBUFInstrOffset(MI);

  return !isUInt<12>(FullOffset);
}
void SIRegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
                                                  unsigned BaseReg,
                                                  int FrameIdx,
                                                  int64_t Offset) const {
  MachineBasicBlock::iterator Ins = MBB->begin();
  DebugLoc DL; // Defaults to "unknown"

  if (Ins != MBB->end())
    DL = Ins->getDebugLoc();

  MachineFunction *MF = MBB->getParent();
  const GCNSubtarget &Subtarget = MF->getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = Subtarget.getInstrInfo();

  if (Offset == 0) {
    BuildMI(*MBB, Ins, DL, TII->get(AMDGPU::V_MOV_B32_e32), BaseReg)
      .addFrameIndex(FrameIdx);
    return;
  }

  MachineRegisterInfo &MRI = MF->getRegInfo();
  Register OffsetReg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);

  Register FIReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

  BuildMI(*MBB, Ins, DL, TII->get(AMDGPU::S_MOV_B32), OffsetReg)
    .addImm(Offset);
  BuildMI(*MBB, Ins, DL, TII->get(AMDGPU::V_MOV_B32_e32), FIReg)
    .addFrameIndex(FrameIdx);

  TII->getAddNoCarry(*MBB, Ins, DL, BaseReg)
    .addReg(OffsetReg, RegState::Kill)
    .addReg(FIReg)
    .addImm(0); // clamp bit
}
void SIRegisterInfo::resolveFrameIndex(MachineInstr &MI, unsigned BaseReg,
                                       int64_t Offset) const {
  MachineBasicBlock *MBB = MI.getParent();
  MachineFunction *MF = MBB->getParent();
  const GCNSubtarget &Subtarget = MF->getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = Subtarget.getInstrInfo();

#ifndef NDEBUG
  // FIXME: Is it possible to be storing a frame index to itself?
  bool SeenFI = false;
  for (const MachineOperand &MO : MI.operands()) {
    if (MO.isFI()) {
      if (SeenFI)
        llvm_unreachable("should not see multiple frame indices");

      SeenFI = true;
    }
  }
#endif

  MachineOperand *FIOp = TII->getNamedOperand(MI, AMDGPU::OpName::vaddr);
  assert(FIOp && FIOp->isFI() && "frame index must be address operand");
  assert(TII->isMUBUF(MI));
  assert(TII->getNamedOperand(MI, AMDGPU::OpName::soffset)->getReg() ==
         MF->getInfo<SIMachineFunctionInfo>()->getStackPtrOffsetReg() &&
         "should only be seeing stack pointer offset relative FrameIndex");

  MachineOperand *OffsetOp = TII->getNamedOperand(MI, AMDGPU::OpName::offset);
  int64_t NewOffset = OffsetOp->getImm() + Offset;
  assert(isUInt<12>(NewOffset) && "offset should be legal");

  FIOp->ChangeToRegister(BaseReg, false);
  OffsetOp->setImm(NewOffset);
}
bool SIRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
                                        unsigned BaseReg,
                                        int64_t Offset) const {
  if (!SIInstrInfo::isMUBUF(*MI))
    return false;

  int64_t NewOffset = Offset + getMUBUFInstrOffset(MI);

  return isUInt<12>(NewOffset);
}
const TargetRegisterClass *SIRegisterInfo::getPointerRegClass(
  const MachineFunction &MF, unsigned Kind) const {
  // This is inaccurate. It depends on the instruction and address space. The
  // only place where we should hit this is for dealing with frame indexes /
  // private accesses, so this is correct in that case.
  return &AMDGPU::VGPR_32RegClass;
}
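
// Spill pseudos operate on whole register tuples; the helper below returns how
// many 32-bit subregisters a given spill opcode covers (e.g. the S512 pseudos
// cover 16 dwords), which is how spill statistics are accounted.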
static unsigned getNumSubRegsForSpillOp(unsigned Op) {
  switch (Op) {
  case AMDGPU::SI_SPILL_S1024_SAVE:
  case AMDGPU::SI_SPILL_S1024_RESTORE:
  case AMDGPU::SI_SPILL_V1024_SAVE:
  case AMDGPU::SI_SPILL_V1024_RESTORE:
  case AMDGPU::SI_SPILL_A1024_SAVE:
  case AMDGPU::SI_SPILL_A1024_RESTORE:
    return 32;
  case AMDGPU::SI_SPILL_S512_SAVE:
  case AMDGPU::SI_SPILL_S512_RESTORE:
  case AMDGPU::SI_SPILL_V512_SAVE:
  case AMDGPU::SI_SPILL_V512_RESTORE:
  case AMDGPU::SI_SPILL_A512_SAVE:
  case AMDGPU::SI_SPILL_A512_RESTORE:
    return 16;
  case AMDGPU::SI_SPILL_S256_SAVE:
  case AMDGPU::SI_SPILL_S256_RESTORE:
  case AMDGPU::SI_SPILL_V256_SAVE:
  case AMDGPU::SI_SPILL_V256_RESTORE:
    return 8;
  case AMDGPU::SI_SPILL_S160_SAVE:
  case AMDGPU::SI_SPILL_S160_RESTORE:
  case AMDGPU::SI_SPILL_V160_SAVE:
  case AMDGPU::SI_SPILL_V160_RESTORE:
    return 5;
  case AMDGPU::SI_SPILL_S128_SAVE:
  case AMDGPU::SI_SPILL_S128_RESTORE:
  case AMDGPU::SI_SPILL_V128_SAVE:
  case AMDGPU::SI_SPILL_V128_RESTORE:
  case AMDGPU::SI_SPILL_A128_SAVE:
  case AMDGPU::SI_SPILL_A128_RESTORE:
    return 4;
  case AMDGPU::SI_SPILL_S96_SAVE:
  case AMDGPU::SI_SPILL_S96_RESTORE:
  case AMDGPU::SI_SPILL_V96_SAVE:
  case AMDGPU::SI_SPILL_V96_RESTORE:
    return 3;
  case AMDGPU::SI_SPILL_S64_SAVE:
  case AMDGPU::SI_SPILL_S64_RESTORE:
  case AMDGPU::SI_SPILL_V64_SAVE:
  case AMDGPU::SI_SPILL_V64_RESTORE:
  case AMDGPU::SI_SPILL_A64_SAVE:
  case AMDGPU::SI_SPILL_A64_RESTORE:
    return 2;
  case AMDGPU::SI_SPILL_S32_SAVE:
  case AMDGPU::SI_SPILL_S32_RESTORE:
  case AMDGPU::SI_SPILL_V32_SAVE:
  case AMDGPU::SI_SPILL_V32_RESTORE:
  case AMDGPU::SI_SPILL_A32_SAVE:
  case AMDGPU::SI_SPILL_A32_RESTORE:
    return 1;
  default: llvm_unreachable("Invalid spill opcode");
  }
}
static int getOffsetMUBUFStore(unsigned Opc) {
  switch (Opc) {
  case AMDGPU::BUFFER_STORE_DWORD_OFFEN:
    return AMDGPU::BUFFER_STORE_DWORD_OFFSET;
  case AMDGPU::BUFFER_STORE_BYTE_OFFEN:
    return AMDGPU::BUFFER_STORE_BYTE_OFFSET;
  case AMDGPU::BUFFER_STORE_SHORT_OFFEN:
    return AMDGPU::BUFFER_STORE_SHORT_OFFSET;
  case AMDGPU::BUFFER_STORE_DWORDX2_OFFEN:
    return AMDGPU::BUFFER_STORE_DWORDX2_OFFSET;
  case AMDGPU::BUFFER_STORE_DWORDX4_OFFEN:
    return AMDGPU::BUFFER_STORE_DWORDX4_OFFSET;
  case AMDGPU::BUFFER_STORE_SHORT_D16_HI_OFFEN:
    return AMDGPU::BUFFER_STORE_SHORT_D16_HI_OFFSET;
  case AMDGPU::BUFFER_STORE_BYTE_D16_HI_OFFEN:
    return AMDGPU::BUFFER_STORE_BYTE_D16_HI_OFFSET;
  default:
    return -1;
  }
}
static int getOffsetMUBUFLoad(unsigned Opc) {
  switch (Opc) {
  case AMDGPU::BUFFER_LOAD_DWORD_OFFEN:
    return AMDGPU::BUFFER_LOAD_DWORD_OFFSET;
  case AMDGPU::BUFFER_LOAD_UBYTE_OFFEN:
    return AMDGPU::BUFFER_LOAD_UBYTE_OFFSET;
  case AMDGPU::BUFFER_LOAD_SBYTE_OFFEN:
    return AMDGPU::BUFFER_LOAD_SBYTE_OFFSET;
  case AMDGPU::BUFFER_LOAD_USHORT_OFFEN:
    return AMDGPU::BUFFER_LOAD_USHORT_OFFSET;
  case AMDGPU::BUFFER_LOAD_SSHORT_OFFEN:
    return AMDGPU::BUFFER_LOAD_SSHORT_OFFSET;
  case AMDGPU::BUFFER_LOAD_DWORDX2_OFFEN:
    return AMDGPU::BUFFER_LOAD_DWORDX2_OFFSET;
  case AMDGPU::BUFFER_LOAD_DWORDX4_OFFEN:
    return AMDGPU::BUFFER_LOAD_DWORDX4_OFFSET;
  case AMDGPU::BUFFER_LOAD_UBYTE_D16_OFFEN:
    return AMDGPU::BUFFER_LOAD_UBYTE_D16_OFFSET;
  case AMDGPU::BUFFER_LOAD_UBYTE_D16_HI_OFFEN:
    return AMDGPU::BUFFER_LOAD_UBYTE_D16_HI_OFFSET;
  case AMDGPU::BUFFER_LOAD_SBYTE_D16_OFFEN:
    return AMDGPU::BUFFER_LOAD_SBYTE_D16_OFFSET;
  case AMDGPU::BUFFER_LOAD_SBYTE_D16_HI_OFFEN:
    return AMDGPU::BUFFER_LOAD_SBYTE_D16_HI_OFFSET;
  case AMDGPU::BUFFER_LOAD_SHORT_D16_OFFEN:
    return AMDGPU::BUFFER_LOAD_SHORT_D16_OFFSET;
  case AMDGPU::BUFFER_LOAD_SHORT_D16_HI_OFFEN:
    return AMDGPU::BUFFER_LOAD_SHORT_D16_HI_OFFSET;
  default:
    return -1;
  }
}
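
// When a matching AGPR (or VGPR) lane has been reserved for this frame index,
// a VGPR spill or reload can be turned into a single V_ACCVGPR_WRITE/READ copy
// instead of a scratch memory access.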
static MachineInstrBuilder spillVGPRtoAGPR(MachineBasicBlock::iterator MI,
                                           int Index,
                                           unsigned Lane,
                                           unsigned ValueReg,
                                           bool IsKill) {
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction *MF = MI->getParent()->getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();

  MCPhysReg Reg = MFI->getVGPRToAGPRSpill(Index, Lane);

  if (Reg == AMDGPU::NoRegister)
    return MachineInstrBuilder();

  bool IsStore = MI->mayStore();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  auto *TRI = static_cast<const SIRegisterInfo *>(MRI.getTargetRegisterInfo());

  unsigned Dst = IsStore ? Reg : ValueReg;
  unsigned Src = IsStore ? ValueReg : Reg;
  unsigned Opc = (IsStore ^ TRI->isVGPR(MRI, Reg)) ? AMDGPU::V_ACCVGPR_WRITE_B32
                                                   : AMDGPU::V_ACCVGPR_READ_B32;

  return BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(Opc), Dst)
           .addReg(Src, getKillRegState(IsKill));
}
// This differs from buildSpillLoadStore by only scavenging a VGPR. It does not
// need to handle the case where an SGPR may need to be spilled while spilling.
static bool buildMUBUFOffsetLoadStore(const SIInstrInfo *TII,
                                      MachineFrameInfo &MFI,
                                      MachineBasicBlock::iterator MI,
                                      int Index,
                                      int64_t Offset) {
  MachineBasicBlock *MBB = MI->getParent();
  const DebugLoc &DL = MI->getDebugLoc();
  bool IsStore = MI->mayStore();

  unsigned Opc = MI->getOpcode();
  int LoadStoreOp = IsStore ?
    getOffsetMUBUFStore(Opc) : getOffsetMUBUFLoad(Opc);
  if (LoadStoreOp == -1)
    return false;

  const MachineOperand *Reg = TII->getNamedOperand(*MI, AMDGPU::OpName::vdata);
  if (spillVGPRtoAGPR(MI, Index, 0, Reg->getReg(), false).getInstr())
    return true;

  MachineInstrBuilder NewMI =
      BuildMI(*MBB, MI, DL, TII->get(LoadStoreOp))
          .add(*Reg)
          .add(*TII->getNamedOperand(*MI, AMDGPU::OpName::srsrc))
          .add(*TII->getNamedOperand(*MI, AMDGPU::OpName::soffset))
          .addImm(Offset)
          .cloneMemRefs(*MI);

  const MachineOperand *VDataIn = TII->getNamedOperand(*MI,
                                                       AMDGPU::OpName::vdata_in);
  if (VDataIn)
    NewMI.add(*VDataIn);
  return true;
}
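
// buildSpillLoadStore expands a VGPR/AGPR spill or reload pseudo into one MUBUF
// access per 32-bit subregister. If the final immediate offset does not fit in
// the 12-bit offset field, the scratch offset register is temporarily adjusted
// (or a scavenged SGPR is used) and restored afterwards.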
void SIRegisterInfo::buildSpillLoadStore(MachineBasicBlock::iterator MI,
                                         unsigned LoadStoreOp,
                                         int Index,
                                         unsigned ValueReg,
                                         bool IsKill,
                                         unsigned ScratchRsrcReg,
                                         unsigned ScratchOffsetReg,
                                         int64_t InstOffset,
                                         MachineMemOperand *MMO,
                                         RegScavenger *RS) const {
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction *MF = MI->getParent()->getParent();
  const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const MachineFrameInfo &MFI = MF->getFrameInfo();

  const MCInstrDesc &Desc = TII->get(LoadStoreOp);
  const DebugLoc &DL = MI->getDebugLoc();
  bool IsStore = Desc.mayStore();

  bool Scavenged = false;
  unsigned SOffset = ScratchOffsetReg;

  const unsigned EltSize = 4;
  const TargetRegisterClass *RC = getRegClassForReg(MF->getRegInfo(), ValueReg);
  unsigned NumSubRegs = AMDGPU::getRegBitWidth(RC->getID()) / (EltSize * CHAR_BIT);
  unsigned Size = NumSubRegs * EltSize;
  int64_t Offset = InstOffset + MFI.getObjectOffset(Index);
  int64_t ScratchOffsetRegDelta = 0;

  unsigned Align = MFI.getObjectAlignment(Index);
  const MachinePointerInfo &BasePtrInfo = MMO->getPointerInfo();

  Register TmpReg =
    hasAGPRs(RC) ? TII->getNamedOperand(*MI, AMDGPU::OpName::tmp)->getReg()
                 : Register();

  assert((Offset % EltSize) == 0 && "unexpected VGPR spill offset");

  if (!isUInt<12>(Offset + Size - EltSize)) {
    SOffset = AMDGPU::NoRegister;

    // We currently only support spilling VGPRs to EltSize boundaries, meaning
    // we can simplify the adjustment of Offset here to just scale with
    // the wave size.
    Offset *= ST.getWavefrontSize();

    // We don't have access to the register scavenger if this function is called
    // during PEI::scavengeFrameVirtualRegs().
    if (RS)
      SOffset = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, MI, 0, false);

    if (SOffset == AMDGPU::NoRegister) {
      // There are no free SGPRs, and we are in the process of spilling VGPRs
      // too. Since we need a VGPR in order to spill SGPRs (this is true
      // on SI/CI and on VI it is true until we implement spilling using scalar
      // stores), we have no way to free up an SGPR. Our solution here is to
      // add the offset directly to the ScratchOffset register, and then
      // subtract the offset after the spill to return ScratchOffset to its
      // original value.
      SOffset = ScratchOffsetReg;
      ScratchOffsetRegDelta = Offset;
    } else {
      Scavenged = true;
    }

    BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), SOffset)
      .addReg(ScratchOffsetReg)
      .addImm(Offset);

    Offset = 0;
  }

  for (unsigned i = 0, e = NumSubRegs; i != e; ++i, Offset += EltSize) {
    Register SubReg = NumSubRegs == 1
                          ? Register(ValueReg)
                          : getSubReg(ValueReg, getSubRegFromChannel(i));

    unsigned SOffsetRegState = 0;
    unsigned SrcDstRegState = getDefRegState(!IsStore);
    if (i + 1 == e) {
      SOffsetRegState |= getKillRegState(Scavenged);
      // The last implicit use carries the "Kill" flag.
      SrcDstRegState |= getKillRegState(IsKill);
    }

    auto MIB = spillVGPRtoAGPR(MI, Index, i, SubReg, IsKill);

    if (!MIB.getInstr()) {
      unsigned FinalReg = SubReg;
      if (TmpReg != AMDGPU::NoRegister) {
        if (IsStore)
          BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_ACCVGPR_READ_B32), TmpReg)
            .addReg(SubReg, getKillRegState(IsKill));
        SubReg = TmpReg;
      }

      MachinePointerInfo PInfo = BasePtrInfo.getWithOffset(EltSize * i);
      MachineMemOperand *NewMMO
        = MF->getMachineMemOperand(PInfo, MMO->getFlags(),
                                   EltSize, MinAlign(Align, EltSize * i));

      MIB = BuildMI(*MBB, MI, DL, Desc)
        .addReg(SubReg, getDefRegState(!IsStore) | getKillRegState(IsKill))
        .addReg(ScratchRsrcReg)
        .addReg(SOffset, SOffsetRegState)
        .addImm(Offset)
        .addMemOperand(NewMMO);

      if (!IsStore && TmpReg != AMDGPU::NoRegister)
        MIB = BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_ACCVGPR_WRITE_B32),
                      FinalReg)
          .addReg(TmpReg, RegState::Kill);
    }

    if (NumSubRegs > 1)
      MIB.addReg(ValueReg, RegState::Implicit | SrcDstRegState);
  }

  if (ScratchOffsetRegDelta != 0) {
    // Subtract the offset we added to the ScratchOffset register.
    BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_SUB_U32), ScratchOffsetReg)
      .addReg(ScratchOffsetReg)
      .addImm(ScratchOffsetRegDelta);
  }
}
static std::pair<unsigned, unsigned> getSpillEltSize(unsigned SuperRegSize,
                                                     bool Store) {
  if (SuperRegSize % 16 == 0) {
    return { 16, Store ? AMDGPU::S_BUFFER_STORE_DWORDX4_SGPR :
                         AMDGPU::S_BUFFER_LOAD_DWORDX4_SGPR };
  }

  if (SuperRegSize % 8 == 0) {
    return { 8, Store ? AMDGPU::S_BUFFER_STORE_DWORDX2_SGPR :
                        AMDGPU::S_BUFFER_LOAD_DWORDX2_SGPR };
  }

  return { 4, Store ? AMDGPU::S_BUFFER_STORE_DWORD_SGPR :
                      AMDGPU::S_BUFFER_LOAD_DWORD_SGPR };
}
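
// SGPR spills take one of three paths: scalar buffer stores (SMEM) when
// enabled and supported, V_WRITELANE into a reserved VGPR lane, or a bounce
// through a scavenged VGPR that is then stored to scratch memory.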
bool SIRegisterInfo::spillSGPR(MachineBasicBlock::iterator MI,
                               int Index,
                               RegScavenger *RS,
                               bool OnlyToVGPR) const {
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction *MF = MBB->getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  DenseSet<unsigned> SGPRSpillVGPRDefinedSet;

  ArrayRef<SIMachineFunctionInfo::SpilledReg> VGPRSpills
    = MFI->getSGPRToVGPRSpills(Index);
  bool SpillToVGPR = !VGPRSpills.empty();
  if (OnlyToVGPR && !SpillToVGPR)
    return false;

  const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();

  Register SuperReg = MI->getOperand(0).getReg();
  bool IsKill = MI->getOperand(0).isKill();
  const DebugLoc &DL = MI->getDebugLoc();

  MachineFrameInfo &FrameInfo = MF->getFrameInfo();

  bool SpillToSMEM = spillSGPRToSMEM();
  if (SpillToSMEM && OnlyToVGPR)
    return false;

  Register FrameReg = getFrameRegister(*MF);

  assert(SpillToVGPR || (SuperReg != MFI->getStackPtrOffsetReg() &&
                         SuperReg != MFI->getFrameOffsetReg() &&
                         SuperReg != MFI->getScratchWaveOffsetReg()));

  assert(SuperReg != AMDGPU::M0 && "m0 should never spill");

  unsigned OffsetReg = AMDGPU::M0;
  unsigned M0CopyReg = AMDGPU::NoRegister;

  if (SpillToSMEM) {
    if (RS->isRegUsed(AMDGPU::M0)) {
      M0CopyReg = RS->scavengeRegister(&AMDGPU::SReg_32_XM0RegClass, MI, 0, false);
      BuildMI(*MBB, MI, DL, TII->get(AMDGPU::COPY), M0CopyReg)
        .addReg(AMDGPU::M0);
    }
  }

  unsigned ScalarStoreOp;
  unsigned EltSize = 4;
  const TargetRegisterClass *RC = getPhysRegClass(SuperReg);
  if (SpillToSMEM && isSGPRClass(RC)) {
    // XXX - if private_element_size is larger than 4 it might be useful to be
    // able to spill wider vmem spills.
    std::tie(EltSize, ScalarStoreOp) =
          getSpillEltSize(getRegSizeInBits(*RC) / 8, true);
  }

  ArrayRef<int16_t> SplitParts = getRegSplitParts(RC, EltSize);
  unsigned NumSubRegs = SplitParts.empty() ? 1 : SplitParts.size();

  // Scavenged temporary VGPR to use. It must be scavenged once for any number
  // of spilled subregs.
  Register TmpVGPR;

  // SubReg carries the "Kill" flag when SubReg == SuperReg.
  unsigned SubKillState = getKillRegState((NumSubRegs == 1) && IsKill);
  for (unsigned i = 0, e = NumSubRegs; i < e; ++i) {
    Register SubReg =
        NumSubRegs == 1 ? SuperReg : getSubReg(SuperReg, SplitParts[i]);

    if (SpillToSMEM) {
      int64_t FrOffset = FrameInfo.getObjectOffset(Index);

      // The allocated memory size is really the wavefront size * the frame
      // index size. The widest register class is 64 bytes, so a 4-byte scratch
      // allocation is enough to spill this in a single stack object.
      //
      // FIXME: Frame size/offsets are computed earlier than this, so the extra
      // space is still unnecessarily allocated.

      unsigned Align = FrameInfo.getObjectAlignment(Index);
      MachinePointerInfo PtrInfo
        = MachinePointerInfo::getFixedStack(*MF, Index, EltSize * i);
      MachineMemOperand *MMO
        = MF->getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore,
                                   EltSize, MinAlign(Align, EltSize * i));

      // SMEM instructions only support a single offset, so increment the wave
      // offset.

      int64_t Offset = (ST.getWavefrontSize() * FrOffset) + (EltSize * i);
      if (Offset != 0) {
        BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), OffsetReg)
          .addReg(FrameReg)
          .addImm(Offset);
      } else {
        BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_MOV_B32), OffsetReg)
          .addReg(FrameReg);
      }

      BuildMI(*MBB, MI, DL, TII->get(ScalarStoreOp))
        .addReg(SubReg, getKillRegState(IsKill)) // sdata
        .addReg(MFI->getScratchRSrcReg())        // sbase
        .addReg(OffsetReg, RegState::Kill)       // soff
        .addMemOperand(MMO);

      continue;
    }

    if (SpillToVGPR) {
      SIMachineFunctionInfo::SpilledReg Spill = VGPRSpills[i];

      // During SGPR spilling to VGPR, determine if the VGPR is defined. The
      // only circumstance in which we say it is undefined is when it is the
      // first spill to this VGPR in the first basic block.
      bool VGPRDefined = true;
      if (MBB == &MF->front())
        VGPRDefined = !SGPRSpillVGPRDefinedSet.insert(Spill.VGPR).second;

      // Mark the "old value of vgpr" input undef only if this is the first sgpr
      // spill to this specific vgpr in the first basic block.
      BuildMI(*MBB, MI, DL,
              TII->getMCOpcodeFromPseudo(AMDGPU::V_WRITELANE_B32),
              Spill.VGPR)
        .addReg(SubReg, getKillRegState(IsKill))
        .addImm(Spill.Lane)
        .addReg(Spill.VGPR, VGPRDefined ? 0 : RegState::Undef);

      // FIXME: Since this spills to another register instead of an actual
      // frame index, we should delete the frame index when all references to
      // it are fixed.
    } else {
      // XXX - Can to VGPR spill fail for some subregisters but not others?
      if (OnlyToVGPR)
        return false;

      // Spill SGPR to a frame index.
      // TODO: Should VI try to spill to VGPR and then spill to SMEM?
      if (!TmpVGPR.isValid())
        TmpVGPR = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0);

      MachineInstrBuilder Mov
        = BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), TmpVGPR)
        .addReg(SubReg, SubKillState);

      // There could be undef components of a spilled super register.
      // TODO: Can we detect this and skip the spill?
      if (NumSubRegs > 1) {
        // The last implicit use of the SuperReg carries the "Kill" flag.
        unsigned SuperKillState = 0;
        if (i + 1 == e)
          SuperKillState |= getKillRegState(IsKill);
        Mov.addReg(SuperReg, RegState::Implicit | SuperKillState);
      }

      unsigned Align = FrameInfo.getObjectAlignment(Index);
      MachinePointerInfo PtrInfo
        = MachinePointerInfo::getFixedStack(*MF, Index, EltSize * i);
      MachineMemOperand *MMO
        = MF->getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore,
                                   EltSize, MinAlign(Align, EltSize * i));
      BuildMI(*MBB, MI, DL, TII->get(AMDGPU::SI_SPILL_V32_SAVE))
        .addReg(TmpVGPR, RegState::Kill)     // src
        .addFrameIndex(Index)                // vaddr
        .addReg(MFI->getScratchRSrcReg())    // srsrc
        .addReg(MFI->getStackPtrOffsetReg()) // soffset
        .addImm(i * 4)                       // offset
        .addMemOperand(MMO);
    }
  }

  if (M0CopyReg != AMDGPU::NoRegister) {
    BuildMI(*MBB, MI, DL, TII->get(AMDGPU::COPY), AMDGPU::M0)
      .addReg(M0CopyReg, RegState::Kill);
  }

  MI->eraseFromParent();
  MFI->addToSpilledSGPRs(NumSubRegs);
  return true;
}
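
// restoreSGPR mirrors spillSGPR: scalar buffer loads when SMEM spilling is in
// use, V_READLANE from the reserved VGPR lane, or a scratch reload followed by
// V_READFIRSTLANE back into the SGPR.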
bool SIRegisterInfo::restoreSGPR(MachineBasicBlock::iterator MI,
                                 int Index,
                                 RegScavenger *RS,
                                 bool OnlyToVGPR) const {
  MachineFunction *MF = MI->getParent()->getParent();
  MachineBasicBlock *MBB = MI->getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();

  ArrayRef<SIMachineFunctionInfo::SpilledReg> VGPRSpills
    = MFI->getSGPRToVGPRSpills(Index);
  bool SpillToVGPR = !VGPRSpills.empty();
  if (OnlyToVGPR && !SpillToVGPR)
    return false;

  MachineFrameInfo &FrameInfo = MF->getFrameInfo();
  const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const DebugLoc &DL = MI->getDebugLoc();

  Register SuperReg = MI->getOperand(0).getReg();
  bool SpillToSMEM = spillSGPRToSMEM();
  if (SpillToSMEM && OnlyToVGPR)
    return false;

  assert(SuperReg != AMDGPU::M0 && "m0 should never spill");

  unsigned OffsetReg = AMDGPU::M0;
  unsigned M0CopyReg = AMDGPU::NoRegister;

  if (SpillToSMEM) {
    if (RS->isRegUsed(AMDGPU::M0)) {
      M0CopyReg = RS->scavengeRegister(&AMDGPU::SReg_32_XM0RegClass, MI, 0, false);
      BuildMI(*MBB, MI, DL, TII->get(AMDGPU::COPY), M0CopyReg)
        .addReg(AMDGPU::M0);
    }
  }

  unsigned EltSize = 4;
  unsigned ScalarLoadOp;

  Register FrameReg = getFrameRegister(*MF);

  const TargetRegisterClass *RC = getPhysRegClass(SuperReg);
  if (SpillToSMEM && isSGPRClass(RC)) {
    // XXX - if private_element_size is larger than 4 it might be useful to be
    // able to spill wider vmem spills.
    std::tie(EltSize, ScalarLoadOp) =
          getSpillEltSize(getRegSizeInBits(*RC) / 8, false);
  }

  ArrayRef<int16_t> SplitParts = getRegSplitParts(RC, EltSize);
  unsigned NumSubRegs = SplitParts.empty() ? 1 : SplitParts.size();

  // SubReg carries the "Kill" flag when SubReg == SuperReg.
  int64_t FrOffset = FrameInfo.getObjectOffset(Index);

  Register TmpVGPR;

  for (unsigned i = 0, e = NumSubRegs; i < e; ++i) {
    Register SubReg =
        NumSubRegs == 1 ? SuperReg : getSubReg(SuperReg, SplitParts[i]);

    if (SpillToSMEM) {
      // FIXME: Size may be > 4 but extra bytes wasted.
      unsigned Align = FrameInfo.getObjectAlignment(Index);
      MachinePointerInfo PtrInfo
        = MachinePointerInfo::getFixedStack(*MF, Index, EltSize * i);
      MachineMemOperand *MMO
        = MF->getMachineMemOperand(PtrInfo, MachineMemOperand::MOLoad,
                                   EltSize, MinAlign(Align, EltSize * i));

      int64_t Offset = (ST.getWavefrontSize() * FrOffset) + (EltSize * i);
      if (Offset != 0) {
        BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), OffsetReg)
          .addReg(FrameReg)
          .addImm(Offset);
      } else {
        BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_MOV_B32), OffsetReg)
          .addReg(FrameReg);
      }

      auto MIB =
        BuildMI(*MBB, MI, DL, TII->get(ScalarLoadOp), SubReg)
        .addReg(MFI->getScratchRSrcReg())  // sbase
        .addReg(OffsetReg, RegState::Kill) // soff
        .addMemOperand(MMO);

      if (NumSubRegs > 1 && i == 0)
        MIB.addReg(SuperReg, RegState::ImplicitDefine);

      continue;
    }

    if (SpillToVGPR) {
      SIMachineFunctionInfo::SpilledReg Spill = VGPRSpills[i];
      auto MIB =
        BuildMI(*MBB, MI, DL, TII->getMCOpcodeFromPseudo(AMDGPU::V_READLANE_B32),
                SubReg)
        .addReg(Spill.VGPR)
        .addImm(Spill.Lane);

      if (NumSubRegs > 1 && i == 0)
        MIB.addReg(SuperReg, RegState::ImplicitDefine);
    } else {
      if (OnlyToVGPR)
        return false;

      // Restore SGPR from a stack slot.
      // FIXME: We should use S_LOAD_DWORD here for VI.
      if (!TmpVGPR.isValid())
        TmpVGPR = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0);
      unsigned Align = FrameInfo.getObjectAlignment(Index);

      MachinePointerInfo PtrInfo
        = MachinePointerInfo::getFixedStack(*MF, Index, EltSize * i);

      MachineMemOperand *MMO = MF->getMachineMemOperand(PtrInfo,
        MachineMemOperand::MOLoad, EltSize,
        MinAlign(Align, EltSize * i));

      BuildMI(*MBB, MI, DL, TII->get(AMDGPU::SI_SPILL_V32_RESTORE), TmpVGPR)
        .addFrameIndex(Index)                // vaddr
        .addReg(MFI->getScratchRSrcReg())    // srsrc
        .addReg(MFI->getStackPtrOffsetReg()) // soffset
        .addImm(i * 4)                       // offset
        .addMemOperand(MMO);

      auto MIB =
        BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), SubReg)
        .addReg(TmpVGPR, RegState::Kill);

      if (NumSubRegs > 1)
        MIB.addReg(MI->getOperand(0).getReg(), RegState::ImplicitDefine);
    }
  }

  if (M0CopyReg != AMDGPU::NoRegister) {
    BuildMI(*MBB, MI, DL, TII->get(AMDGPU::COPY), AMDGPU::M0)
      .addReg(M0CopyReg, RegState::Kill);
  }

  MI->eraseFromParent();
  return true;
}
/// Special case of eliminateFrameIndex. Returns true if the SGPR was spilled to
/// a VGPR and the stack slot can be safely eliminated when all other users are
/// handled.
bool SIRegisterInfo::eliminateSGPRToVGPRSpillFrameIndex(
  MachineBasicBlock::iterator MI,
  int FI,
  RegScavenger *RS) const {
  switch (MI->getOpcode()) {
  case AMDGPU::SI_SPILL_S1024_SAVE:
  case AMDGPU::SI_SPILL_S512_SAVE:
  case AMDGPU::SI_SPILL_S256_SAVE:
  case AMDGPU::SI_SPILL_S160_SAVE:
  case AMDGPU::SI_SPILL_S128_SAVE:
  case AMDGPU::SI_SPILL_S96_SAVE:
  case AMDGPU::SI_SPILL_S64_SAVE:
  case AMDGPU::SI_SPILL_S32_SAVE:
    return spillSGPR(MI, FI, RS, true);
  case AMDGPU::SI_SPILL_S1024_RESTORE:
  case AMDGPU::SI_SPILL_S512_RESTORE:
  case AMDGPU::SI_SPILL_S256_RESTORE:
  case AMDGPU::SI_SPILL_S160_RESTORE:
  case AMDGPU::SI_SPILL_S128_RESTORE:
  case AMDGPU::SI_SPILL_S96_RESTORE:
  case AMDGPU::SI_SPILL_S64_RESTORE:
  case AMDGPU::SI_SPILL_S32_RESTORE:
    return restoreSGPR(MI, FI, RS, true);
  default:
    llvm_unreachable("not an SGPR spill instruction");
  }
}
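
// eliminateFrameIndex rewrites each remaining frame-index operand after frame
// layout: SGPR/VGPR spill pseudos are expanded, MUBUF offsets are folded when
// they fit in 12 bits, and other uses are converted into an absolute address
// materialized in a register.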
void SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
                                         int SPAdj, unsigned FIOperandNum,
                                         RegScavenger *RS) const {
  MachineFunction *MF = MI->getParent()->getParent();
  MachineBasicBlock *MBB = MI->getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo &FrameInfo = MF->getFrameInfo();
  const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  DebugLoc DL = MI->getDebugLoc();

  assert(SPAdj == 0 && "unhandled SP adjustment in call sequence?");

  MachineOperand &FIOp = MI->getOperand(FIOperandNum);
  int Index = MI->getOperand(FIOperandNum).getIndex();

  Register FrameReg = getFrameRegister(*MF);

  switch (MI->getOpcode()) {
  // SGPR register spill
  case AMDGPU::SI_SPILL_S1024_SAVE:
  case AMDGPU::SI_SPILL_S512_SAVE:
  case AMDGPU::SI_SPILL_S256_SAVE:
  case AMDGPU::SI_SPILL_S160_SAVE:
  case AMDGPU::SI_SPILL_S128_SAVE:
  case AMDGPU::SI_SPILL_S96_SAVE:
  case AMDGPU::SI_SPILL_S64_SAVE:
  case AMDGPU::SI_SPILL_S32_SAVE: {
    spillSGPR(MI, Index, RS);
    break;
  }

  // SGPR register restore
  case AMDGPU::SI_SPILL_S1024_RESTORE:
  case AMDGPU::SI_SPILL_S512_RESTORE:
  case AMDGPU::SI_SPILL_S256_RESTORE:
  case AMDGPU::SI_SPILL_S160_RESTORE:
  case AMDGPU::SI_SPILL_S128_RESTORE:
  case AMDGPU::SI_SPILL_S96_RESTORE:
  case AMDGPU::SI_SPILL_S64_RESTORE:
  case AMDGPU::SI_SPILL_S32_RESTORE: {
    restoreSGPR(MI, Index, RS);
    break;
  }

  // VGPR register spill
  case AMDGPU::SI_SPILL_V1024_SAVE:
  case AMDGPU::SI_SPILL_V512_SAVE:
  case AMDGPU::SI_SPILL_V256_SAVE:
  case AMDGPU::SI_SPILL_V160_SAVE:
  case AMDGPU::SI_SPILL_V128_SAVE:
  case AMDGPU::SI_SPILL_V96_SAVE:
  case AMDGPU::SI_SPILL_V64_SAVE:
  case AMDGPU::SI_SPILL_V32_SAVE:
  case AMDGPU::SI_SPILL_A1024_SAVE:
  case AMDGPU::SI_SPILL_A512_SAVE:
  case AMDGPU::SI_SPILL_A128_SAVE:
  case AMDGPU::SI_SPILL_A64_SAVE:
  case AMDGPU::SI_SPILL_A32_SAVE: {
    const MachineOperand *VData = TII->getNamedOperand(*MI,
                                                       AMDGPU::OpName::vdata);
    assert(TII->getNamedOperand(*MI, AMDGPU::OpName::soffset)->getReg() ==
           MFI->getStackPtrOffsetReg());

    buildSpillLoadStore(MI, AMDGPU::BUFFER_STORE_DWORD_OFFSET,
          Index,
          VData->getReg(), VData->isKill(),
          TII->getNamedOperand(*MI, AMDGPU::OpName::srsrc)->getReg(),
          FrameReg,
          TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm(),
          *MI->memoperands_begin(),
          RS);
    MFI->addToSpilledVGPRs(getNumSubRegsForSpillOp(MI->getOpcode()));
    MI->eraseFromParent();
    break;
  }
  case AMDGPU::SI_SPILL_V32_RESTORE:
  case AMDGPU::SI_SPILL_V64_RESTORE:
  case AMDGPU::SI_SPILL_V96_RESTORE:
  case AMDGPU::SI_SPILL_V128_RESTORE:
  case AMDGPU::SI_SPILL_V160_RESTORE:
  case AMDGPU::SI_SPILL_V256_RESTORE:
  case AMDGPU::SI_SPILL_V512_RESTORE:
  case AMDGPU::SI_SPILL_V1024_RESTORE:
  case AMDGPU::SI_SPILL_A32_RESTORE:
  case AMDGPU::SI_SPILL_A64_RESTORE:
  case AMDGPU::SI_SPILL_A128_RESTORE:
  case AMDGPU::SI_SPILL_A512_RESTORE:
  case AMDGPU::SI_SPILL_A1024_RESTORE: {
    const MachineOperand *VData = TII->getNamedOperand(*MI,
                                                       AMDGPU::OpName::vdata);
    assert(TII->getNamedOperand(*MI, AMDGPU::OpName::soffset)->getReg() ==
           MFI->getStackPtrOffsetReg());

    buildSpillLoadStore(MI, AMDGPU::BUFFER_LOAD_DWORD_OFFSET,
          Index,
          VData->getReg(), VData->isKill(),
          TII->getNamedOperand(*MI, AMDGPU::OpName::srsrc)->getReg(),
          FrameReg,
          TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm(),
          *MI->memoperands_begin(),
          RS);
    MI->eraseFromParent();
    break;
  }

  default: {
    const DebugLoc &DL = MI->getDebugLoc();
    bool IsMUBUF = TII->isMUBUF(*MI);

    if (!IsMUBUF && !MFI->isEntryFunction()) {
      // Convert to an absolute stack address by finding the offset from the
      // scratch wave base and scaling by the wave size.
      //
      // In an entry function/kernel the offset is already the absolute
      // address relative to the frame register.

      Register TmpDiffReg =
        RS->scavengeRegister(&AMDGPU::SReg_32_XM0RegClass, MI, 0, false);

      // If there's no free SGPR, in-place modify the FP.
      Register DiffReg = TmpDiffReg.isValid() ? TmpDiffReg : FrameReg;

      bool IsCopy = MI->getOpcode() == AMDGPU::V_MOV_B32_e32;
      Register ResultReg = IsCopy ?
        MI->getOperand(0).getReg() :
        RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0);

      BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_SUB_U32), DiffReg)
        .addReg(FrameReg)
        .addReg(MFI->getScratchWaveOffsetReg());

      int64_t Offset = FrameInfo.getObjectOffset(Index);
      if (Offset == 0) {
        // XXX - This never happens because of emergency scavenging slot at 0?
        BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_LSHRREV_B32_e64), ResultReg)
          .addImm(ST.getWavefrontSizeLog2())
          .addReg(DiffReg);
      } else {
        if (auto MIB = TII->getAddNoCarry(*MBB, MI, DL, ResultReg, *RS)) {
          Register ScaledReg =
            RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MIB, 0);

          BuildMI(*MBB, *MIB, DL, TII->get(AMDGPU::V_LSHRREV_B32_e64),
                  ScaledReg)
            .addImm(ST.getWavefrontSizeLog2())
            .addReg(DiffReg, RegState::Kill);

          const bool IsVOP2 = MIB->getOpcode() == AMDGPU::V_ADD_U32_e32;

          // TODO: Fold if use instruction is another add of a constant.
          if (IsVOP2 ||
              AMDGPU::isInlinableLiteral32(Offset, ST.hasInv2PiInlineImm())) {
            // FIXME: This can fail
            MIB.addImm(Offset);
            MIB.addReg(ScaledReg, RegState::Kill);
            if (!IsVOP2)
              MIB.addImm(0); // clamp bit
          } else {
            Register ConstOffsetReg =
              RS->scavengeRegister(&AMDGPU::SReg_32_XM0RegClass, MIB, 0, false);

            // This should always be able to use the unused carry out.
            assert(ConstOffsetReg && "this scavenge should not be able to fail");

            BuildMI(*MBB, *MIB, DL, TII->get(AMDGPU::S_MOV_B32), ConstOffsetReg)
              .addImm(Offset);
            MIB.addReg(ConstOffsetReg, RegState::Kill);
            MIB.addReg(ScaledReg, RegState::Kill);
            MIB.addImm(0); // clamp bit
          }
        } else {
          // We have to produce a carry out, and there isn't a free SGPR pair
          // for it. We can keep the whole computation on the SALU to avoid
          // clobbering an additional register at the cost of an extra mov.

          // We may have 1 free scratch SGPR even though a carry out is
          // unavailable. Only one additional mov is needed.
          Register TmpScaledReg =
            RS->scavengeRegister(&AMDGPU::SReg_32_XM0RegClass, MI, 0, false);
          Register ScaledReg = TmpScaledReg.isValid() ? TmpScaledReg : DiffReg;

          BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_LSHR_B32), ScaledReg)
            .addReg(DiffReg, RegState::Kill)
            .addImm(ST.getWavefrontSizeLog2());
          BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), ScaledReg)
            .addReg(ScaledReg, RegState::Kill)
            .addImm(Offset);
          BuildMI(*MBB, MI, DL, TII->get(AMDGPU::COPY), ResultReg)
            .addReg(ScaledReg, RegState::Kill);

          // If there were truly no free SGPRs, we need to undo everything.
          if (!TmpScaledReg.isValid()) {
            BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_SUB_U32), ScaledReg)
              .addReg(ScaledReg, RegState::Kill)
              .addImm(Offset);
            BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_LSHL_B32), ScaledReg)
              .addReg(DiffReg, RegState::Kill)
              .addImm(ST.getWavefrontSizeLog2());
          }
        }
      }

      if (!TmpDiffReg.isValid()) {
        // Restore the in-place modified FP.
        BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), FrameReg)
          .addReg(FrameReg)
          .addReg(MFI->getScratchWaveOffsetReg());
      }

      // Don't introduce an extra copy if we're just materializing in a mov.
      if (IsCopy)
        MI->eraseFromParent();
      else
        FIOp.ChangeToRegister(ResultReg, false, false, true);
      return;
    }

    if (IsMUBUF) {
      // Disable offen so we don't need a 0 vgpr base.
      assert(static_cast<int>(FIOperandNum) ==
             AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                        AMDGPU::OpName::vaddr));

      assert(TII->getNamedOperand(*MI, AMDGPU::OpName::soffset)->getReg() ==
             MFI->getStackPtrOffsetReg());

      TII->getNamedOperand(*MI, AMDGPU::OpName::soffset)->setReg(FrameReg);

      int64_t Offset = FrameInfo.getObjectOffset(Index);
      int64_t OldImm
        = TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm();
      int64_t NewOffset = OldImm + Offset;

      if (isUInt<12>(NewOffset) &&
          buildMUBUFOffsetLoadStore(TII, FrameInfo, MI, Index, NewOffset)) {
        MI->eraseFromParent();
        return;
      }
    }

    // If the offset is simply too big, don't convert to a scratch wave offset
    // relative index.

    int64_t Offset = FrameInfo.getObjectOffset(Index);
    FIOp.ChangeToImmediate(Offset);
    if (!TII->isImmOperandLegal(*MI, FIOperandNum, FIOp)) {
      Register TmpReg = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0);
      BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), TmpReg)
        .addImm(Offset);
      FIOp.ChangeToRegister(TmpReg, false, false, true);
    }
  }
  }
}
StringRef SIRegisterInfo::getRegAsmName(unsigned Reg) const {
  return AMDGPUInstPrinter::getRegisterName(Reg);
}
// FIXME: This is very slow. It might be worth creating a map from physreg to
// register class.
const TargetRegisterClass *SIRegisterInfo::getPhysRegClass(unsigned Reg) const {
  assert(!Register::isVirtualRegister(Reg));

  static const TargetRegisterClass *const BaseClasses[] = {
    &AMDGPU::VGPR_32RegClass,
    &AMDGPU::SReg_32RegClass,
    &AMDGPU::AGPR_32RegClass,
    &AMDGPU::VReg_64RegClass,
    &AMDGPU::SReg_64RegClass,
    &AMDGPU::AReg_64RegClass,
    &AMDGPU::VReg_96RegClass,
    &AMDGPU::SReg_96RegClass,
    &AMDGPU::VReg_128RegClass,
    &AMDGPU::SReg_128RegClass,
    &AMDGPU::AReg_128RegClass,
    &AMDGPU::VReg_160RegClass,
    &AMDGPU::SReg_160RegClass,
    &AMDGPU::VReg_256RegClass,
    &AMDGPU::SReg_256RegClass,
    &AMDGPU::VReg_512RegClass,
    &AMDGPU::SReg_512RegClass,
    &AMDGPU::AReg_512RegClass,
    &AMDGPU::SReg_1024RegClass,
    &AMDGPU::VReg_1024RegClass,
    &AMDGPU::AReg_1024RegClass,
    &AMDGPU::SCC_CLASSRegClass,
    &AMDGPU::Pseudo_SReg_32RegClass,
    &AMDGPU::Pseudo_SReg_128RegClass,
  };

  for (const TargetRegisterClass *BaseClass : BaseClasses) {
    if (BaseClass->contains(Reg)) {
      return BaseClass;
    }
  }
  return nullptr;
}
// TODO: It might be helpful to have some target specific flags in
// TargetRegisterClass to mark which classes are VGPRs to make this trivial.
bool SIRegisterInfo::hasVGPRs(const TargetRegisterClass *RC) const {
  unsigned Size = getRegSizeInBits(*RC);
  switch (Size) {
  case 32:
    return getCommonSubClass(&AMDGPU::VGPR_32RegClass, RC) != nullptr;
  case 64:
    return getCommonSubClass(&AMDGPU::VReg_64RegClass, RC) != nullptr;
  case 96:
    return getCommonSubClass(&AMDGPU::VReg_96RegClass, RC) != nullptr;
  case 128:
    return getCommonSubClass(&AMDGPU::VReg_128RegClass, RC) != nullptr;
  case 160:
    return getCommonSubClass(&AMDGPU::VReg_160RegClass, RC) != nullptr;
  case 256:
    return getCommonSubClass(&AMDGPU::VReg_256RegClass, RC) != nullptr;
  case 512:
    return getCommonSubClass(&AMDGPU::VReg_512RegClass, RC) != nullptr;
  case 1024:
    return getCommonSubClass(&AMDGPU::VReg_1024RegClass, RC) != nullptr;
  case 1:
    return getCommonSubClass(&AMDGPU::VReg_1RegClass, RC) != nullptr;
  default:
    assert(Size < 32 && "Invalid register class size");
    return false;
  }
}
bool SIRegisterInfo::hasAGPRs(const TargetRegisterClass *RC) const {
  unsigned Size = getRegSizeInBits(*RC);
  if (Size < 32)
    return false;
  switch (Size) {
  case 32:
    return getCommonSubClass(&AMDGPU::AGPR_32RegClass, RC) != nullptr;
  case 64:
    return getCommonSubClass(&AMDGPU::AReg_64RegClass, RC) != nullptr;
  case 96:
    return false;
  case 128:
    return getCommonSubClass(&AMDGPU::AReg_128RegClass, RC) != nullptr;
  case 160:
  case 256:
    return false;
  case 512:
    return getCommonSubClass(&AMDGPU::AReg_512RegClass, RC) != nullptr;
  case 1024:
    return getCommonSubClass(&AMDGPU::AReg_1024RegClass, RC) != nullptr;
  default:
    llvm_unreachable("Invalid register class size");
  }
}
const TargetRegisterClass *SIRegisterInfo::getEquivalentVGPRClass(
                                         const TargetRegisterClass *SRC) const {
  switch (getRegSizeInBits(*SRC)) {
  case 32:
    return &AMDGPU::VGPR_32RegClass;
  case 64:
    return &AMDGPU::VReg_64RegClass;
  case 96:
    return &AMDGPU::VReg_96RegClass;
  case 128:
    return &AMDGPU::VReg_128RegClass;
  case 160:
    return &AMDGPU::VReg_160RegClass;
  case 256:
    return &AMDGPU::VReg_256RegClass;
  case 512:
    return &AMDGPU::VReg_512RegClass;
  case 1024:
    return &AMDGPU::VReg_1024RegClass;
  case 1:
    return &AMDGPU::VReg_1RegClass;
  default:
    llvm_unreachable("Invalid register class size");
  }
}
const TargetRegisterClass *SIRegisterInfo::getEquivalentAGPRClass(
                                         const TargetRegisterClass *SRC) const {
  switch (getRegSizeInBits(*SRC)) {
  case 32:
    return &AMDGPU::AGPR_32RegClass;
  case 64:
    return &AMDGPU::AReg_64RegClass;
  case 128:
    return &AMDGPU::AReg_128RegClass;
  case 512:
    return &AMDGPU::AReg_512RegClass;
  case 1024:
    return &AMDGPU::AReg_1024RegClass;
  default:
    llvm_unreachable("Invalid register class size");
  }
}
const TargetRegisterClass *SIRegisterInfo::getEquivalentSGPRClass(
                                         const TargetRegisterClass *VRC) const {
  switch (getRegSizeInBits(*VRC)) {
  case 32:
    return &AMDGPU::SGPR_32RegClass;
  case 64:
    return &AMDGPU::SReg_64RegClass;
  case 96:
    return &AMDGPU::SReg_96RegClass;
  case 128:
    return &AMDGPU::SGPR_128RegClass;
  case 160:
    return &AMDGPU::SReg_160RegClass;
  case 256:
    return &AMDGPU::SReg_256RegClass;
  case 512:
    return &AMDGPU::SReg_512RegClass;
  case 1024:
    return &AMDGPU::SReg_1024RegClass;
  default:
    llvm_unreachable("Invalid register class size");
  }
}
const TargetRegisterClass *SIRegisterInfo::getSubRegClass(
                         const TargetRegisterClass *RC, unsigned SubIdx) const {
  if (SubIdx == AMDGPU::NoSubRegister)
    return RC;

  // We can assume that each lane corresponds to one 32-bit register.
  unsigned Count = getSubRegIndexLaneMask(SubIdx).getNumLanes();
  if (isSGPRClass(RC)) {
    switch (Count) {
    case 1:
      return &AMDGPU::SGPR_32RegClass;
    case 2:
      return &AMDGPU::SReg_64RegClass;
    case 3:
      return &AMDGPU::SReg_96RegClass;
    case 4:
      return &AMDGPU::SGPR_128RegClass;
    case 5:
      return &AMDGPU::SReg_160RegClass;
    case 8:
      return &AMDGPU::SReg_256RegClass;
    case 16:
      return &AMDGPU::SReg_512RegClass;
    case 32: /* fall-through */
    default:
      llvm_unreachable("Invalid sub-register class size");
    }
  } else if (hasAGPRs(RC)) {
    switch (Count) {
    case 1:
      return &AMDGPU::AGPR_32RegClass;
    case 2:
      return &AMDGPU::AReg_64RegClass;
    case 4:
      return &AMDGPU::AReg_128RegClass;
    case 16:
      return &AMDGPU::AReg_512RegClass;
    case 32: /* fall-through */
    default:
      llvm_unreachable("Invalid sub-register class size");
    }
  } else {
    switch (Count) {
    case 1:
      return &AMDGPU::VGPR_32RegClass;
    case 2:
      return &AMDGPU::VReg_64RegClass;
    case 3:
      return &AMDGPU::VReg_96RegClass;
    case 4:
      return &AMDGPU::VReg_128RegClass;
    case 5:
      return &AMDGPU::VReg_160RegClass;
    case 8:
      return &AMDGPU::VReg_256RegClass;
    case 16:
      return &AMDGPU::VReg_512RegClass;
    case 32: /* fall-through */
    default:
      llvm_unreachable("Invalid sub-register class size");
    }
  }
}
bool SIRegisterInfo::opCanUseInlineConstant(unsigned OpType) const {
  if (OpType >= AMDGPU::OPERAND_REG_INLINE_AC_FIRST &&
      OpType <= AMDGPU::OPERAND_REG_INLINE_AC_LAST)
    return !ST.hasMFMAInlineLiteralBug();

  return OpType >= AMDGPU::OPERAND_SRC_FIRST &&
         OpType <= AMDGPU::OPERAND_SRC_LAST;
}
bool SIRegisterInfo::shouldRewriteCopySrc(
  const TargetRegisterClass *DefRC,
  unsigned DefSubReg,
  const TargetRegisterClass *SrcRC,
  unsigned SrcSubReg) const {
  // We want to prefer the smallest register class possible, so we don't want to
  // stop and rewrite on anything that looks like a subregister
  // extract. Operations mostly don't care about the super register class, so we
  // only want to stop on the most basic of copies between the same register
  // class.
  //
  // e.g. if we have something like
  // %0 = ...
  // %1 = ...
  // %2 = REG_SEQUENCE %0, sub0, %1, sub1, %2, sub2
  // %3 = COPY %2, sub0
  //
  // We want to look through the COPY to find:
  //  %3 = COPY %0

  // Plain copy.
  return getCommonSubClass(DefRC, SrcRC) != nullptr;
}
/// Returns a register that is not used at any point in the function.
/// If all registers are used, then this function will return
/// AMDGPU::NoRegister.
unsigned
SIRegisterInfo::findUnusedRegister(const MachineRegisterInfo &MRI,
                                   const TargetRegisterClass *RC,
                                   const MachineFunction &MF) const {
  for (unsigned Reg : *RC)
    if (MRI.isAllocatable(Reg) && !MRI.isPhysRegUsed(Reg))
      return Reg;
  return AMDGPU::NoRegister;
}
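
// getRegSplitParts returns the subregister indices used to walk a register
// class in EltSize-byte chunks (1, 2, 4, or 8 dwords at a time) when spilling
// or splitting a tuple into smaller operations.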
1674 ArrayRef
<int16_t> SIRegisterInfo::getRegSplitParts(const TargetRegisterClass
*RC
,
1675 unsigned EltSize
) const {
1677 static const int16_t Sub0_31
[] = {
1678 AMDGPU::sub0
, AMDGPU::sub1
, AMDGPU::sub2
, AMDGPU::sub3
,
1679 AMDGPU::sub4
, AMDGPU::sub5
, AMDGPU::sub6
, AMDGPU::sub7
,
1680 AMDGPU::sub8
, AMDGPU::sub9
, AMDGPU::sub10
, AMDGPU::sub11
,
1681 AMDGPU::sub12
, AMDGPU::sub13
, AMDGPU::sub14
, AMDGPU::sub15
,
1682 AMDGPU::sub16
, AMDGPU::sub17
, AMDGPU::sub18
, AMDGPU::sub19
,
1683 AMDGPU::sub20
, AMDGPU::sub21
, AMDGPU::sub22
, AMDGPU::sub23
,
1684 AMDGPU::sub24
, AMDGPU::sub25
, AMDGPU::sub26
, AMDGPU::sub27
,
1685 AMDGPU::sub28
, AMDGPU::sub29
, AMDGPU::sub30
, AMDGPU::sub31
,
1688 static const int16_t Sub0_15
[] = {
1689 AMDGPU::sub0
, AMDGPU::sub1
, AMDGPU::sub2
, AMDGPU::sub3
,
1690 AMDGPU::sub4
, AMDGPU::sub5
, AMDGPU::sub6
, AMDGPU::sub7
,
1691 AMDGPU::sub8
, AMDGPU::sub9
, AMDGPU::sub10
, AMDGPU::sub11
,
1692 AMDGPU::sub12
, AMDGPU::sub13
, AMDGPU::sub14
, AMDGPU::sub15
,
1695 static const int16_t Sub0_7
[] = {
1696 AMDGPU::sub0
, AMDGPU::sub1
, AMDGPU::sub2
, AMDGPU::sub3
,
1697 AMDGPU::sub4
, AMDGPU::sub5
, AMDGPU::sub6
, AMDGPU::sub7
,
1700 static const int16_t Sub0_4
[] = {
1701 AMDGPU::sub0
, AMDGPU::sub1
, AMDGPU::sub2
, AMDGPU::sub3
, AMDGPU::sub4
,
1704 static const int16_t Sub0_3
[] = {
1705 AMDGPU::sub0
, AMDGPU::sub1
, AMDGPU::sub2
, AMDGPU::sub3
,
1708 static const int16_t Sub0_2
[] = {
1709 AMDGPU::sub0
, AMDGPU::sub1
, AMDGPU::sub2
,
1712 static const int16_t Sub0_1
[] = {
1713 AMDGPU::sub0
, AMDGPU::sub1
,
1716 switch (AMDGPU::getRegBitWidth(*RC
->MC
)) {
1720 return makeArrayRef(Sub0_1
);
1722 return makeArrayRef(Sub0_2
);
1724 return makeArrayRef(Sub0_3
);
1726 return makeArrayRef(Sub0_4
);
1728 return makeArrayRef(Sub0_7
);
1730 return makeArrayRef(Sub0_15
);
1732 return makeArrayRef(Sub0_31
);
1734 llvm_unreachable("unhandled register size");
1739 static const int16_t Sub0_31_64
[] = {
1740 AMDGPU::sub0_sub1
, AMDGPU::sub2_sub3
,
1741 AMDGPU::sub4_sub5
, AMDGPU::sub6_sub7
,
1742 AMDGPU::sub8_sub9
, AMDGPU::sub10_sub11
,
1743 AMDGPU::sub12_sub13
, AMDGPU::sub14_sub15
,
1744 AMDGPU::sub16_sub17
, AMDGPU::sub18_sub19
,
1745 AMDGPU::sub20_sub21
, AMDGPU::sub22_sub23
,
1746 AMDGPU::sub24_sub25
, AMDGPU::sub26_sub27
,
1747 AMDGPU::sub28_sub29
, AMDGPU::sub30_sub31
1750 static const int16_t Sub0_15_64
[] = {
1751 AMDGPU::sub0_sub1
, AMDGPU::sub2_sub3
,
1752 AMDGPU::sub4_sub5
, AMDGPU::sub6_sub7
,
1753 AMDGPU::sub8_sub9
, AMDGPU::sub10_sub11
,
1754 AMDGPU::sub12_sub13
, AMDGPU::sub14_sub15
1757 static const int16_t Sub0_7_64
[] = {
1758 AMDGPU::sub0_sub1
, AMDGPU::sub2_sub3
,
1759 AMDGPU::sub4_sub5
, AMDGPU::sub6_sub7
1763 static const int16_t Sub0_3_64
[] = {
1764 AMDGPU::sub0_sub1
, AMDGPU::sub2_sub3
1767 switch (AMDGPU::getRegBitWidth(*RC
->MC
)) {
1771 return makeArrayRef(Sub0_3_64
);
1773 return makeArrayRef(Sub0_7_64
);
1775 return makeArrayRef(Sub0_15_64
);
1777 return makeArrayRef(Sub0_31_64
);
1779 llvm_unreachable("unhandled register size");
1783 if (EltSize
== 16) {
1785 static const int16_t Sub0_31_128
[] = {
1786 AMDGPU::sub0_sub1_sub2_sub3
,
1787 AMDGPU::sub4_sub5_sub6_sub7
,
1788 AMDGPU::sub8_sub9_sub10_sub11
,
1789 AMDGPU::sub12_sub13_sub14_sub15
,
1790 AMDGPU::sub16_sub17_sub18_sub19
,
1791 AMDGPU::sub20_sub21_sub22_sub23
,
1792 AMDGPU::sub24_sub25_sub26_sub27
,
1793 AMDGPU::sub28_sub29_sub30_sub31
1796 static const int16_t Sub0_15_128
[] = {
1797 AMDGPU::sub0_sub1_sub2_sub3
,
1798 AMDGPU::sub4_sub5_sub6_sub7
,
1799 AMDGPU::sub8_sub9_sub10_sub11
,
1800 AMDGPU::sub12_sub13_sub14_sub15
1803 static const int16_t Sub0_7_128
[] = {
1804 AMDGPU::sub0_sub1_sub2_sub3
,
1805 AMDGPU::sub4_sub5_sub6_sub7
1808 switch (AMDGPU::getRegBitWidth(*RC
->MC
)) {
1812 return makeArrayRef(Sub0_7_128
);
1814 return makeArrayRef(Sub0_15_128
);
1816 return makeArrayRef(Sub0_31_128
);
1818 llvm_unreachable("unhandled register size");

  assert(EltSize == 32 && "unhandled elt size");

  static const int16_t Sub0_31_256[] = {
    AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7,
    AMDGPU::sub8_sub9_sub10_sub11_sub12_sub13_sub14_sub15,
    AMDGPU::sub16_sub17_sub18_sub19_sub20_sub21_sub22_sub23,
    AMDGPU::sub24_sub25_sub26_sub27_sub28_sub29_sub30_sub31
  };

  static const int16_t Sub0_15_256[] = {
    AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7,
    AMDGPU::sub8_sub9_sub10_sub11_sub12_sub13_sub14_sub15
  };

  switch (AMDGPU::getRegBitWidth(*RC->MC)) {
  case 256:
    return {};
  case 512:
    return makeArrayRef(Sub0_15_256);
  case 1024:
    return makeArrayRef(Sub0_31_256);
  default:
    llvm_unreachable("unhandled register size");
  }
}

const TargetRegisterClass *
SIRegisterInfo::getRegClassForReg(const MachineRegisterInfo &MRI,
                                  unsigned Reg) const {
  if (Register::isVirtualRegister(Reg))
    return MRI.getRegClass(Reg);

  return getPhysRegClass(Reg);
}

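// Returns true if Reg is currently assigned to a register class that
// contains VGPRs.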
bool SIRegisterInfo::isVGPR(const MachineRegisterInfo &MRI,
                            unsigned Reg) const {
  const TargetRegisterClass *RC = getRegClassForReg(MRI, Reg);
  assert(RC && "Register class for the reg not found");
  return hasVGPRs(RC);
}

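// Same query as isVGPR, but for register classes containing AGPRs.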
bool SIRegisterInfo::isAGPR(const MachineRegisterInfo &MRI,
                            unsigned Reg) const {
  const TargetRegisterClass *RC = getRegClassForReg(MRI, Reg);
  assert(RC && "Register class for the reg not found");
  return hasAGPRs(RC);
}

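// Coalescing policy: widening a register beyond a dword forces the allocator
// to find adjacent registers, so only allow merges whose result is no larger
// than one of the inputs (dword-sized inputs are always fine).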
bool SIRegisterInfo::shouldCoalesce(MachineInstr *MI,
                                    const TargetRegisterClass *SrcRC,
                                    unsigned SubReg,
                                    const TargetRegisterClass *DstRC,
                                    unsigned DstSubReg,
                                    const TargetRegisterClass *NewRC,
                                    LiveIntervals &LIS) const {
  unsigned SrcSize = getRegSizeInBits(*SrcRC);
  unsigned DstSize = getRegSizeInBits(*DstRC);
  unsigned NewSize = getRegSizeInBits(*NewRC);

  // Do not increase size of registers beyond dword, we would need to allocate
  // adjacent registers and constraint regalloc more than needed.

  // Always allow dword coalescing.
  if (SrcSize <= 32 || DstSize <= 32)
    return true;

  return NewSize <= DstSize || NewSize <= SrcSize;
}

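// Register pressure limit for a class, accounting for the occupancy implied
// by the function's LDS usage when the class is VGPR_32 or SGPR_32.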
unsigned SIRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                             MachineFunction &MF) const {
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  unsigned Occupancy = ST.getOccupancyWithLocalMemSize(MFI->getLDSSize(),
                                                       MF.getFunction());

  switch (RC->getID()) {
  default:
    return AMDGPURegisterInfo::getRegPressureLimit(RC, MF);
  case AMDGPU::VGPR_32RegClassID:
    return std::min(ST.getMaxNumVGPRs(Occupancy), ST.getMaxNumVGPRs(MF));
  case AMDGPU::SGPR_32RegClassID:
    return std::min(ST.getMaxNumSGPRs(Occupancy, true), ST.getMaxNumSGPRs(MF));
  }
}

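// Map the VGPR/AGPR and SGPR pressure sets onto the corresponding 32-bit
// register class limits; all other pressure sets use the generic limit.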
unsigned SIRegisterInfo::getRegPressureSetLimit(const MachineFunction &MF,
                                                unsigned Idx) const {
  if (Idx == getVGPRPressureSet() || Idx == getAGPRPressureSet())
    return getRegPressureLimit(&AMDGPU::VGPR_32RegClass,
                               const_cast<MachineFunction &>(MF));

  if (Idx == getSGPRPressureSet())
    return getRegPressureLimit(&AMDGPU::SGPR_32RegClass,
                               const_cast<MachineFunction &>(MF));

  return AMDGPURegisterInfo::getRegPressureSetLimit(MF, Idx);
}

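// The register unit for M0 reports no pressure sets.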
const int *SIRegisterInfo::getRegUnitPressureSets(unsigned RegUnit) const {
  static const int Empty[] = { -1 };

  if (hasRegUnit(AMDGPU::M0, RegUnit))
    return Empty;
  return AMDGPURegisterInfo::getRegUnitPressureSets(RegUnit);
}

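// Register pair holding the return address.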
unsigned SIRegisterInfo::getReturnAddressReg(const MachineFunction &MF) const {
  // Not a callee saved register.
  return AMDGPU::SGPR30_SGPR31;
}

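// Choose a register class for a value of the given bit width assigned to a
// register bank during instruction selection.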
const TargetRegisterClass *
SIRegisterInfo::getRegClassForSizeOnBank(unsigned Size,
                                         const RegisterBank &RB,
                                         const MachineRegisterInfo &MRI) const {
  switch (Size) {
  case 1: {
    switch (RB.getID()) {
    case AMDGPU::VGPRRegBankID:
      return &AMDGPU::VGPR_32RegClass;
    case AMDGPU::VCCRegBankID:
      return isWave32 ?
        &AMDGPU::SReg_32_XM0_XEXECRegClass : &AMDGPU::SReg_64_XEXECRegClass;
    case AMDGPU::SGPRRegBankID:
      return &AMDGPU::SReg_32_XM0RegClass;
    case AMDGPU::SCCRegBankID:
      // This needs to return an allocatable class, so don't bother returning
      // the dummy SCC class.
      return &AMDGPU::SReg_32_XM0RegClass;
    default:
      llvm_unreachable("unknown register bank");
    }
  }
  case 32:
    return RB.getID() == AMDGPU::VGPRRegBankID ? &AMDGPU::VGPR_32RegClass :
                                                 &AMDGPU::SReg_32_XM0RegClass;
  case 64:
    return RB.getID() == AMDGPU::VGPRRegBankID ? &AMDGPU::VReg_64RegClass :
                                                 &AMDGPU::SReg_64_XEXECRegClass;
  case 96:
    return RB.getID() == AMDGPU::VGPRRegBankID ? &AMDGPU::VReg_96RegClass :
                                                 &AMDGPU::SReg_96RegClass;
  case 128:
    return RB.getID() == AMDGPU::VGPRRegBankID ? &AMDGPU::VReg_128RegClass :
                                                 &AMDGPU::SGPR_128RegClass;
  case 160:
    return RB.getID() == AMDGPU::VGPRRegBankID ? &AMDGPU::VReg_160RegClass :
                                                 &AMDGPU::SReg_160RegClass;
  case 256:
    return RB.getID() == AMDGPU::VGPRRegBankID ? &AMDGPU::VReg_256RegClass :
                                                 &AMDGPU::SReg_256RegClass;
  case 512:
    return RB.getID() == AMDGPU::VGPRRegBankID ? &AMDGPU::VReg_512RegClass :
                                                 &AMDGPU::SReg_512RegClass;
  case 1024:
    return RB.getID() == AMDGPU::VGPRRegBankID ? &AMDGPU::VReg_1024RegClass :
                                                 &AMDGPU::SReg_1024RegClass;
  default:
    if (Size < 32)
      return RB.getID() == AMDGPU::VGPRRegBankID ? &AMDGPU::VGPR_32RegClass :
                                                   &AMDGPU::SReg_32_XM0RegClass;
    return nullptr;
  }
}

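// Resolve an operand that may still be assigned a register bank (rather than
// a register class) to a concrete allocatable class.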
const TargetRegisterClass *
SIRegisterInfo::getConstrainedRegClassForOperand(const MachineOperand &MO,
                                         const MachineRegisterInfo &MRI) const {
  const RegClassOrRegBank &RCOrRB = MRI.getRegClassOrRegBank(MO.getReg());
  if (const RegisterBank *RB = RCOrRB.dyn_cast<const RegisterBank *>())
    return getRegClassForTypeOnBank(MRI.getType(MO.getReg()), *RB, MRI);

  const TargetRegisterClass *RC = RCOrRB.get<const TargetRegisterClass *>();
  return getAllocatableClass(RC);
}

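// VCC is addressed as VCC_LO in wave32 mode.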
unsigned SIRegisterInfo::getVCC() const {
  return isWave32 ? AMDGPU::VCC_LO : AMDGPU::VCC;
}

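// Map pseudo register class IDs that depend on the wavefront size (such as
// SReg_1) to real register classes.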
const TargetRegisterClass *
SIRegisterInfo::getRegClass(unsigned RCID) const {
  switch ((int)RCID) {
  case AMDGPU::SReg_1RegClassID:
    return getBoolRC();
  case AMDGPU::SReg_1_XEXECRegClassID:
    return isWave32 ? &AMDGPU::SReg_32_XM0_XEXECRegClass
                    : &AMDGPU::SReg_64_XEXECRegClass;
  case -1:
    return nullptr;
  default:
    return AMDGPURegisterInfo::getRegClass(RCID);
  }
}

// Find reaching register definition
MachineInstr *SIRegisterInfo::findReachingDef(unsigned Reg, unsigned SubReg,
                                              MachineInstr &Use,
                                              MachineRegisterInfo &MRI,
                                              LiveIntervals *LIS) const {
  auto &MDT = LIS->getAnalysis<MachineDominatorTree>();
  SlotIndex UseIdx = LIS->getInstructionIndex(Use);
  SlotIndex DefIdx;

  if (Register::isVirtualRegister(Reg)) {
    if (!LIS->hasInterval(Reg))
      return nullptr;
    LiveInterval &LI = LIS->getInterval(Reg);
    LaneBitmask SubLanes = SubReg ? getSubRegIndexLaneMask(SubReg)
                                  : MRI.getMaxLaneMaskForVReg(Reg);
    VNInfo *V = nullptr;
    if (LI.hasSubRanges()) {
      for (auto &S : LI.subranges()) {
        if ((S.LaneMask & SubLanes) == SubLanes) {
          V = S.getVNInfoAt(UseIdx);
          break;
        }
      }
    } else {
      V = LI.getVNInfoAt(UseIdx);
    }
    if (!V)
      return nullptr;
    DefIdx = V->def;
  } else {
    // Find the last def of the register units, keeping the one dominated by
    // all the others.
    for (MCRegUnitIterator Units(Reg, this); Units.isValid(); ++Units) {
      LiveRange &LR = LIS->getRegUnit(*Units);
      if (VNInfo *V = LR.getVNInfoAt(UseIdx)) {
        if (!DefIdx.isValid() ||
            MDT.dominates(LIS->getInstructionFromIndex(DefIdx),
                          LIS->getInstructionFromIndex(V->def)))
          DefIdx = V->def;
      } else {
        return nullptr;
      }
    }
  }

  MachineInstr *Def = LIS->getInstructionFromIndex(DefIdx);

  if (!Def || !MDT.dominates(Def, &Use))
    return nullptr;

  assert(Def->modifiesRegister(Reg, this));

  return Def;
}