//===-- SIRegisterInfo.cpp - SI Register Information ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// SI implementation of the TargetRegisterInfo class.
//
//===----------------------------------------------------------------------===//

#include "SIRegisterInfo.h"
#include "AMDGPURegisterBankInfo.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "MCTargetDesc/AMDGPUInstPrinter.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"

using namespace llvm;
static bool hasPressureSet(const int *PSets, unsigned PSetID) {
  for (unsigned i = 0; PSets[i] != -1; ++i) {
    if (PSets[i] == (int)PSetID)
      return true;
  }
  return false;
}
void SIRegisterInfo::classifyPressureSet(unsigned PSetID, unsigned Reg,
                                         BitVector &PressureSets) const {
  for (MCRegUnitIterator U(Reg, this); U.isValid(); ++U) {
    const int *PSets = getRegUnitPressureSets(*U);
    if (hasPressureSet(PSets, PSetID)) {
      PressureSets.set(PSetID);
      break;
    }
  }
}
static cl::opt<bool> EnableSpillSGPRToSMEM(
  "amdgpu-spill-sgpr-to-smem",
  cl::desc("Use scalar stores to spill SGPRs if supported by subtarget"),
  cl::init(false));

static cl::opt<bool> EnableSpillSGPRToVGPR(
  "amdgpu-spill-sgpr-to-vgpr",
  cl::desc("Enable spilling SGPRs to VGPRs"),
  cl::ReallyHidden,
  cl::init(true));
SIRegisterInfo::SIRegisterInfo(const GCNSubtarget &ST) :
  AMDGPURegisterInfo(),
  ST(ST),
  SGPRPressureSets(getNumRegPressureSets()),
  VGPRPressureSets(getNumRegPressureSets()),
  AGPRPressureSets(getNumRegPressureSets()),
  SpillSGPRToVGPR(false),
  SpillSGPRToSMEM(false),
  isWave32(ST.isWave32()) {
  if (EnableSpillSGPRToSMEM && ST.hasScalarStores())
    SpillSGPRToSMEM = true;
  else if (EnableSpillSGPRToVGPR)
    SpillSGPRToVGPR = true;

  unsigned NumRegPressureSets = getNumRegPressureSets();

  SGPRSetID = NumRegPressureSets;
  VGPRSetID = NumRegPressureSets;
  AGPRSetID = NumRegPressureSets;

  for (unsigned i = 0; i < NumRegPressureSets; ++i) {
    classifyPressureSet(i, AMDGPU::SGPR0, SGPRPressureSets);
    classifyPressureSet(i, AMDGPU::VGPR0, VGPRPressureSets);
    classifyPressureSet(i, AMDGPU::AGPR0, AGPRPressureSets);
  }

  // Determine the number of reg units for each pressure set.
  std::vector<unsigned> PressureSetRegUnits(NumRegPressureSets, 0);
  for (unsigned i = 0, e = getNumRegUnits(); i != e; ++i) {
    const int *PSets = getRegUnitPressureSets(i);
    for (unsigned j = 0; PSets[j] != -1; ++j) {
      ++PressureSetRegUnits[PSets[j]];
    }
  }

  unsigned VGPRMax = 0, SGPRMax = 0, AGPRMax = 0;
  for (unsigned i = 0; i < NumRegPressureSets; ++i) {
    if (isVGPRPressureSet(i) && PressureSetRegUnits[i] > VGPRMax) {
      VGPRSetID = i;
      VGPRMax = PressureSetRegUnits[i];
      continue;
    }
    if (isSGPRPressureSet(i) && PressureSetRegUnits[i] > SGPRMax) {
      SGPRSetID = i;
      SGPRMax = PressureSetRegUnits[i];
    }
    if (isAGPRPressureSet(i) && PressureSetRegUnits[i] > AGPRMax) {
      AGPRSetID = i;
      AGPRMax = PressureSetRegUnits[i];
    }
  }

  assert(SGPRSetID < NumRegPressureSets &&
         VGPRSetID < NumRegPressureSets &&
         AGPRSetID < NumRegPressureSets);
}
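
// Note on the loop above: after construction, SGPRSetID / VGPRSetID /
// AGPRSetID each name the pressure set with the most register units for
// that register file, so the rest of this file can treat them as the
// canonical per-file pressure sets (e.g. in getRegPressureSetLimit).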
unsigned SIRegisterInfo::reservedPrivateSegmentBufferReg(
  const MachineFunction &MF) const {
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  unsigned BaseIdx = alignDown(ST.getMaxNumSGPRs(MF), 4) - 4;
  unsigned BaseReg(AMDGPU::SGPR_32RegClass.getRegister(BaseIdx));
  return getMatchingSuperReg(BaseReg, AMDGPU::sub0, &AMDGPU::SReg_128RegClass);
}
static unsigned findPrivateSegmentWaveByteOffsetRegIndex(unsigned RegCount) {
  unsigned Reg;

  // Try to place it in a hole after PrivateSegmentBufferReg.
  if (RegCount & 3) {
    // We cannot put the segment buffer in (Idx - 4) ... (Idx - 1) due to
    // alignment constraints, so we have a hole where we can put the wave
    // offset.
    Reg = RegCount - 1;
  } else {
    // We can put the segment buffer in (Idx - 4) ... (Idx - 1) and put the
    // wave offset before it.
    Reg = RegCount - 5;
  }

  return Reg;
}
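
// Worked example (illustrative): with ST.getMaxNumSGPRs(MF) == 102, the
// buffer resource lives in s[96:99] (alignDown(102, 4) - 4 == 96). Since
// 102 & 3 != 0 there is a hole above the buffer, so the wave offset goes in
// s101 (RegCount - 1). With RegCount == 100 the buffer would sit flush in
// s[96:99], so the offset instead goes below it in s95 (RegCount - 5).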
unsigned SIRegisterInfo::reservedPrivateSegmentWaveByteOffsetReg(
  const MachineFunction &MF) const {
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  unsigned Reg = findPrivateSegmentWaveByteOffsetRegIndex(ST.getMaxNumSGPRs(MF));
  return AMDGPU::SGPR_32RegClass.getRegister(Reg);
}
BitVector SIRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());

  // EXEC_LO and EXEC_HI could be allocated and used as regular registers, but
  // this seems likely to result in bugs, so I'm marking them as reserved.
  reserveRegisterTuples(Reserved, AMDGPU::EXEC);
  reserveRegisterTuples(Reserved, AMDGPU::FLAT_SCR);

  // M0 has to be reserved so that llvm accepts it as a live-in into a block.
  reserveRegisterTuples(Reserved, AMDGPU::M0);

  // Reserve src_vccz, src_execz, src_scc.
  reserveRegisterTuples(Reserved, AMDGPU::SRC_VCCZ);
  reserveRegisterTuples(Reserved, AMDGPU::SRC_EXECZ);
  reserveRegisterTuples(Reserved, AMDGPU::SRC_SCC);

  // Reserve the memory aperture registers.
  reserveRegisterTuples(Reserved, AMDGPU::SRC_SHARED_BASE);
  reserveRegisterTuples(Reserved, AMDGPU::SRC_SHARED_LIMIT);
  reserveRegisterTuples(Reserved, AMDGPU::SRC_PRIVATE_BASE);
  reserveRegisterTuples(Reserved, AMDGPU::SRC_PRIVATE_LIMIT);

  // Reserve src_pops_exiting_wave_id - support is not implemented in Codegen.
  reserveRegisterTuples(Reserved, AMDGPU::SRC_POPS_EXITING_WAVE_ID);

  // Reserve xnack_mask registers - support is not implemented in Codegen.
  reserveRegisterTuples(Reserved, AMDGPU::XNACK_MASK);

  // Reserve lds_direct register - support is not implemented in Codegen.
  reserveRegisterTuples(Reserved, AMDGPU::LDS_DIRECT);

  // Reserve Trap Handler registers - support is not implemented in Codegen.
  reserveRegisterTuples(Reserved, AMDGPU::TBA);
  reserveRegisterTuples(Reserved, AMDGPU::TMA);
  reserveRegisterTuples(Reserved, AMDGPU::TTMP0_TTMP1);
  reserveRegisterTuples(Reserved, AMDGPU::TTMP2_TTMP3);
  reserveRegisterTuples(Reserved, AMDGPU::TTMP4_TTMP5);
  reserveRegisterTuples(Reserved, AMDGPU::TTMP6_TTMP7);
  reserveRegisterTuples(Reserved, AMDGPU::TTMP8_TTMP9);
  reserveRegisterTuples(Reserved, AMDGPU::TTMP10_TTMP11);
  reserveRegisterTuples(Reserved, AMDGPU::TTMP12_TTMP13);
  reserveRegisterTuples(Reserved, AMDGPU::TTMP14_TTMP15);

  // Reserve null register - it shall never be allocated.
  reserveRegisterTuples(Reserved, AMDGPU::SGPR_NULL);

  // Disallow vcc_hi allocation in wave32. It may be allocated but most likely
  // will result in bugs.
  if (isWave32) {
    Reserved.set(AMDGPU::VCC);
    Reserved.set(AMDGPU::VCC_HI);
  }

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();

  unsigned MaxNumSGPRs = ST.getMaxNumSGPRs(MF);
  unsigned TotalNumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
  for (unsigned i = MaxNumSGPRs; i < TotalNumSGPRs; ++i) {
    unsigned Reg = AMDGPU::SGPR_32RegClass.getRegister(i);
    reserveRegisterTuples(Reserved, Reg);
  }

  unsigned MaxNumVGPRs = ST.getMaxNumVGPRs(MF);
  unsigned TotalNumVGPRs = AMDGPU::VGPR_32RegClass.getNumRegs();
  for (unsigned i = MaxNumVGPRs; i < TotalNumVGPRs; ++i) {
    unsigned Reg = AMDGPU::VGPR_32RegClass.getRegister(i);
    reserveRegisterTuples(Reserved, Reg);
    Reg = AMDGPU::AGPR_32RegClass.getRegister(i);
    reserveRegisterTuples(Reserved, Reg);
  }

  // Reserve all the remaining AGPRs if there are no instructions to use them.
  if (!ST.hasMAIInsts()) {
    for (unsigned i = 0; i < MaxNumVGPRs; ++i) {
      unsigned Reg = AMDGPU::AGPR_32RegClass.getRegister(i);
      reserveRegisterTuples(Reserved, Reg);
    }
  }

  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  unsigned ScratchWaveOffsetReg = MFI->getScratchWaveOffsetReg();
  if (ScratchWaveOffsetReg != AMDGPU::NoRegister) {
    // Reserve 1 SGPR for scratch wave offset in case we need to spill.
    reserveRegisterTuples(Reserved, ScratchWaveOffsetReg);
  }

  unsigned ScratchRSrcReg = MFI->getScratchRSrcReg();
  if (ScratchRSrcReg != AMDGPU::NoRegister) {
    // Reserve 4 SGPRs for the scratch buffer resource descriptor in case we
    // need to spill.
    // TODO: May need to reserve a VGPR if doing LDS spilling.
    reserveRegisterTuples(Reserved, ScratchRSrcReg);
    assert(!isSubRegister(ScratchRSrcReg, ScratchWaveOffsetReg));
  }

  // We have to assume the SP is needed in case there are calls in the function,
  // which is detected after the function is lowered. If we aren't really going
  // to need SP, don't bother reserving it.
  unsigned StackPtrReg = MFI->getStackPtrOffsetReg();

  if (StackPtrReg != AMDGPU::NoRegister) {
    reserveRegisterTuples(Reserved, StackPtrReg);
    assert(!isSubRegister(ScratchRSrcReg, StackPtrReg));
  }

  unsigned FrameReg = MFI->getFrameOffsetReg();
  if (FrameReg != AMDGPU::NoRegister) {
    reserveRegisterTuples(Reserved, FrameReg);
    assert(!isSubRegister(ScratchRSrcReg, FrameReg));
  }

  for (unsigned Reg : MFI->WWMReservedRegs) {
    reserveRegisterTuples(Reserved, Reg);
  }

  // FIXME: Stop using reserved registers for this.
  for (MCPhysReg Reg : MFI->getAGPRSpillVGPRs())
    reserveRegisterTuples(Reserved, Reg);

  for (MCPhysReg Reg : MFI->getVGPRSpillAGPRs())
    reserveRegisterTuples(Reserved, Reg);

  return Reserved;
}
bool SIRegisterInfo::canRealignStack(const MachineFunction &MF) const {
  const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  // On entry, the base address is 0, so it can't possibly need any more
  // alignment.

  // FIXME: Should be able to specify the entry frame alignment per calling
  // convention instead.
  if (Info->isEntryFunction())
    return false;

  return TargetRegisterInfo::canRealignStack(MF);
}
bool SIRegisterInfo::requiresRegisterScavenging(const MachineFunction &Fn) const {
  const SIMachineFunctionInfo *Info = Fn.getInfo<SIMachineFunctionInfo>();
  if (Info->isEntryFunction()) {
    const MachineFrameInfo &MFI = Fn.getFrameInfo();
    return MFI.hasStackObjects() || MFI.hasCalls();
  }

  // May need scavenger for dealing with callee saved registers.
  return true;
}
bool SIRegisterInfo::requiresFrameIndexScavenging(
  const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  if (MFI.hasStackObjects())
    return true;

  // May need to deal with callee saved registers.
  const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  return !Info->isEntryFunction();
}
bool SIRegisterInfo::requiresFrameIndexReplacementScavenging(
  const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  if (!MFI.hasStackObjects())
    return false;

  // The scavenger is used for large frames which may require finding a free
  // register for large offsets.
  if (!isUInt<12>(MFI.getStackSize()))
    return true;

  // If using scalar stores, for spills, m0 is needed for the scalar store
  // offset (pre-GFX9). m0 is unallocatable, so we can't create a virtual
  // register for it during frame index elimination, so the scavenger is
  // directly needed.
  return MF.getSubtarget<GCNSubtarget>().hasScalarStores() &&
         MF.getInfo<SIMachineFunctionInfo>()->hasSpilledSGPRs();
}
bool SIRegisterInfo::requiresVirtualBaseRegisters(
  const MachineFunction &) const {
  // There are no special dedicated stack or frame pointers.
  return true;
}
bool SIRegisterInfo::trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
  // This helps catch bugs as verifier errors.
  return true;
}
int64_t SIRegisterInfo::getMUBUFInstrOffset(const MachineInstr *MI) const {
  assert(SIInstrInfo::isMUBUF(*MI));

  int OffIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                          AMDGPU::OpName::offset);
  return MI->getOperand(OffIdx).getImm();
}
int64_t SIRegisterInfo::getFrameIndexInstrOffset(const MachineInstr *MI,
                                                 int Idx) const {
  if (!SIInstrInfo::isMUBUF(*MI))
    return 0;

  assert(Idx == AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                           AMDGPU::OpName::vaddr) &&
         "Should never see frame index on non-address operand");

  return getMUBUFInstrOffset(MI);
}
bool SIRegisterInfo::needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
  if (!MI->mayLoadOrStore())
    return false;

  int64_t FullOffset = Offset + getMUBUFInstrOffset(MI);

  return !isUInt<12>(FullOffset);
}
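
// The 12-bit checks in this file come from the MUBUF encoding: the immediate
// "offset" field is a 12-bit unsigned value (0..4095 bytes), so any frame
// offset that cannot be proven to fit needs a base register instead.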
void SIRegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
                                                  unsigned BaseReg,
                                                  int FrameIdx,
                                                  int64_t Offset) const {
  MachineBasicBlock::iterator Ins = MBB->begin();
  DebugLoc DL; // Defaults to "unknown"

  if (Ins != MBB->end())
    DL = Ins->getDebugLoc();

  MachineFunction *MF = MBB->getParent();
  const GCNSubtarget &Subtarget = MF->getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = Subtarget.getInstrInfo();

  if (Offset == 0) {
    BuildMI(*MBB, Ins, DL, TII->get(AMDGPU::V_MOV_B32_e32), BaseReg)
      .addFrameIndex(FrameIdx);
    return;
  }

  MachineRegisterInfo &MRI = MF->getRegInfo();
  Register OffsetReg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);

  Register FIReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

  BuildMI(*MBB, Ins, DL, TII->get(AMDGPU::S_MOV_B32), OffsetReg)
    .addImm(Offset);
  BuildMI(*MBB, Ins, DL, TII->get(AMDGPU::V_MOV_B32_e32), FIReg)
    .addFrameIndex(FrameIdx);

  TII->getAddNoCarry(*MBB, Ins, DL, BaseReg)
    .addReg(OffsetReg, RegState::Kill)
    .addReg(FIReg)
    .addImm(0); // clamp bit
}
void SIRegisterInfo::resolveFrameIndex(MachineInstr &MI, unsigned BaseReg,
                                       int64_t Offset) const {
  MachineBasicBlock *MBB = MI.getParent();
  MachineFunction *MF = MBB->getParent();
  const GCNSubtarget &Subtarget = MF->getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = Subtarget.getInstrInfo();

#ifndef NDEBUG
  // FIXME: Is it possible to be storing a frame index to itself?
  bool SeenFI = false;
  for (const MachineOperand &MO : MI.operands()) {
    if (MO.isFI()) {
      if (SeenFI)
        llvm_unreachable("should not see multiple frame indices");
      SeenFI = true;
    }
  }
#endif

  MachineOperand *FIOp = TII->getNamedOperand(MI, AMDGPU::OpName::vaddr);
  assert(FIOp && FIOp->isFI() && "frame index must be address operand");
  assert(TII->isMUBUF(MI));
  assert(TII->getNamedOperand(MI, AMDGPU::OpName::soffset)->getReg() ==
         MF->getInfo<SIMachineFunctionInfo>()->getFrameOffsetReg() &&
         "should only be seeing frame offset relative FrameIndex");

  MachineOperand *OffsetOp = TII->getNamedOperand(MI, AMDGPU::OpName::offset);
  int64_t NewOffset = OffsetOp->getImm() + Offset;
  assert(isUInt<12>(NewOffset) && "offset should be legal");

  FIOp->ChangeToRegister(BaseReg, false);
  OffsetOp->setImm(NewOffset);
}
bool SIRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
                                        unsigned BaseReg,
                                        int64_t Offset) const {
  if (!SIInstrInfo::isMUBUF(*MI))
    return false;

  int64_t NewOffset = Offset + getMUBUFInstrOffset(MI);

  return isUInt<12>(NewOffset);
}
const TargetRegisterClass *SIRegisterInfo::getPointerRegClass(
  const MachineFunction &MF, unsigned Kind) const {
  // This is inaccurate. It depends on the instruction and address space. The
  // only place where we should hit this is for dealing with frame indexes /
  // private accesses, so this is correct in that case.
  return &AMDGPU::VGPR_32RegClass;
}
static unsigned getNumSubRegsForSpillOp(unsigned Op) {
  switch (Op) {
  case AMDGPU::SI_SPILL_S1024_SAVE:
  case AMDGPU::SI_SPILL_S1024_RESTORE:
  case AMDGPU::SI_SPILL_V1024_SAVE:
  case AMDGPU::SI_SPILL_V1024_RESTORE:
  case AMDGPU::SI_SPILL_A1024_SAVE:
  case AMDGPU::SI_SPILL_A1024_RESTORE:
    return 32;
  case AMDGPU::SI_SPILL_S512_SAVE:
  case AMDGPU::SI_SPILL_S512_RESTORE:
  case AMDGPU::SI_SPILL_V512_SAVE:
  case AMDGPU::SI_SPILL_V512_RESTORE:
  case AMDGPU::SI_SPILL_A512_SAVE:
  case AMDGPU::SI_SPILL_A512_RESTORE:
    return 16;
  case AMDGPU::SI_SPILL_S256_SAVE:
  case AMDGPU::SI_SPILL_S256_RESTORE:
  case AMDGPU::SI_SPILL_V256_SAVE:
  case AMDGPU::SI_SPILL_V256_RESTORE:
    return 8;
  case AMDGPU::SI_SPILL_S160_SAVE:
  case AMDGPU::SI_SPILL_S160_RESTORE:
  case AMDGPU::SI_SPILL_V160_SAVE:
  case AMDGPU::SI_SPILL_V160_RESTORE:
    return 5;
  case AMDGPU::SI_SPILL_S128_SAVE:
  case AMDGPU::SI_SPILL_S128_RESTORE:
  case AMDGPU::SI_SPILL_V128_SAVE:
  case AMDGPU::SI_SPILL_V128_RESTORE:
  case AMDGPU::SI_SPILL_A128_SAVE:
  case AMDGPU::SI_SPILL_A128_RESTORE:
    return 4;
  case AMDGPU::SI_SPILL_S96_SAVE:
  case AMDGPU::SI_SPILL_S96_RESTORE:
  case AMDGPU::SI_SPILL_V96_SAVE:
  case AMDGPU::SI_SPILL_V96_RESTORE:
    return 3;
  case AMDGPU::SI_SPILL_S64_SAVE:
  case AMDGPU::SI_SPILL_S64_RESTORE:
  case AMDGPU::SI_SPILL_V64_SAVE:
  case AMDGPU::SI_SPILL_V64_RESTORE:
  case AMDGPU::SI_SPILL_A64_SAVE:
  case AMDGPU::SI_SPILL_A64_RESTORE:
    return 2;
  case AMDGPU::SI_SPILL_S32_SAVE:
  case AMDGPU::SI_SPILL_S32_RESTORE:
  case AMDGPU::SI_SPILL_V32_SAVE:
  case AMDGPU::SI_SPILL_V32_RESTORE:
  case AMDGPU::SI_SPILL_A32_SAVE:
  case AMDGPU::SI_SPILL_A32_RESTORE:
    return 1;
  default: llvm_unreachable("Invalid spill opcode");
  }
}
static int getOffsetMUBUFStore(unsigned Opc) {
  switch (Opc) {
  case AMDGPU::BUFFER_STORE_DWORD_OFFEN:
    return AMDGPU::BUFFER_STORE_DWORD_OFFSET;
  case AMDGPU::BUFFER_STORE_BYTE_OFFEN:
    return AMDGPU::BUFFER_STORE_BYTE_OFFSET;
  case AMDGPU::BUFFER_STORE_SHORT_OFFEN:
    return AMDGPU::BUFFER_STORE_SHORT_OFFSET;
  case AMDGPU::BUFFER_STORE_DWORDX2_OFFEN:
    return AMDGPU::BUFFER_STORE_DWORDX2_OFFSET;
  case AMDGPU::BUFFER_STORE_DWORDX4_OFFEN:
    return AMDGPU::BUFFER_STORE_DWORDX4_OFFSET;
  case AMDGPU::BUFFER_STORE_SHORT_D16_HI_OFFEN:
    return AMDGPU::BUFFER_STORE_SHORT_D16_HI_OFFSET;
  case AMDGPU::BUFFER_STORE_BYTE_D16_HI_OFFEN:
    return AMDGPU::BUFFER_STORE_BYTE_D16_HI_OFFSET;
  default:
    return -1;
  }
}
static int getOffsetMUBUFLoad(unsigned Opc) {
  switch (Opc) {
  case AMDGPU::BUFFER_LOAD_DWORD_OFFEN:
    return AMDGPU::BUFFER_LOAD_DWORD_OFFSET;
  case AMDGPU::BUFFER_LOAD_UBYTE_OFFEN:
    return AMDGPU::BUFFER_LOAD_UBYTE_OFFSET;
  case AMDGPU::BUFFER_LOAD_SBYTE_OFFEN:
    return AMDGPU::BUFFER_LOAD_SBYTE_OFFSET;
  case AMDGPU::BUFFER_LOAD_USHORT_OFFEN:
    return AMDGPU::BUFFER_LOAD_USHORT_OFFSET;
  case AMDGPU::BUFFER_LOAD_SSHORT_OFFEN:
    return AMDGPU::BUFFER_LOAD_SSHORT_OFFSET;
  case AMDGPU::BUFFER_LOAD_DWORDX2_OFFEN:
    return AMDGPU::BUFFER_LOAD_DWORDX2_OFFSET;
  case AMDGPU::BUFFER_LOAD_DWORDX4_OFFEN:
    return AMDGPU::BUFFER_LOAD_DWORDX4_OFFSET;
  case AMDGPU::BUFFER_LOAD_UBYTE_D16_OFFEN:
    return AMDGPU::BUFFER_LOAD_UBYTE_D16_OFFSET;
  case AMDGPU::BUFFER_LOAD_UBYTE_D16_HI_OFFEN:
    return AMDGPU::BUFFER_LOAD_UBYTE_D16_HI_OFFSET;
  case AMDGPU::BUFFER_LOAD_SBYTE_D16_OFFEN:
    return AMDGPU::BUFFER_LOAD_SBYTE_D16_OFFSET;
  case AMDGPU::BUFFER_LOAD_SBYTE_D16_HI_OFFEN:
    return AMDGPU::BUFFER_LOAD_SBYTE_D16_HI_OFFSET;
  case AMDGPU::BUFFER_LOAD_SHORT_D16_OFFEN:
    return AMDGPU::BUFFER_LOAD_SHORT_D16_OFFSET;
  case AMDGPU::BUFFER_LOAD_SHORT_D16_HI_OFFEN:
    return AMDGPU::BUFFER_LOAD_SHORT_D16_HI_OFFSET;
  default:
    return -1;
  }
}
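
// These OFFEN -> OFFSET rewrites let frame index elimination fold a frame
// index that resolved to a small constant directly into the MUBUF immediate,
// dropping the VGPR address operand entirely. E.g. a BUFFER_LOAD_DWORD_OFFEN
// of a stack object at byte offset 16 can become BUFFER_LOAD_DWORD_OFFSET
// with offset:16 and no vaddr.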
static MachineInstrBuilder spillVGPRtoAGPR(MachineBasicBlock::iterator MI,
                                           int Index,
                                           unsigned Lane,
                                           unsigned ValueReg,
                                           bool IsKill) {
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction *MF = MI->getParent()->getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();

  MCPhysReg Reg = MFI->getVGPRToAGPRSpill(Index, Lane);

  if (Reg == AMDGPU::NoRegister)
    return MachineInstrBuilder();

  bool IsStore = MI->mayStore();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  auto *TRI = static_cast<const SIRegisterInfo*>(MRI.getTargetRegisterInfo());

  unsigned Dst = IsStore ? Reg : ValueReg;
  unsigned Src = IsStore ? ValueReg : Reg;
  unsigned Opc = (IsStore ^ TRI->isVGPR(MRI, Reg)) ? AMDGPU::V_ACCVGPR_WRITE_B32
                                                   : AMDGPU::V_ACCVGPR_READ_B32;

  return BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(Opc), Dst)
           .addReg(Src, getKillRegState(IsKill));
}
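
// The XOR above selects the copy direction; spelled out (a sketch of the four
// cases, derived from the Dst/Src operands chosen just before it):
//   store + Reg is AGPR -> V_ACCVGPR_WRITE_B32 (VGPR value -> AGPR slot)
//   store + Reg is VGPR -> V_ACCVGPR_READ_B32  (AGPR value -> VGPR slot)
//   load  + Reg is AGPR -> V_ACCVGPR_READ_B32  (AGPR slot  -> VGPR value)
//   load  + Reg is VGPR -> V_ACCVGPR_WRITE_B32 (VGPR slot  -> AGPR value)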
// This differs from buildSpillLoadStore by only scavenging a VGPR. It does not
// need to handle the case where an SGPR may need to be spilled while spilling.
static bool buildMUBUFOffsetLoadStore(const SIInstrInfo *TII,
                                      MachineFrameInfo &MFI,
                                      MachineBasicBlock::iterator MI,
                                      int Index,
                                      int64_t Offset) {
  MachineBasicBlock *MBB = MI->getParent();
  const DebugLoc &DL = MI->getDebugLoc();
  bool IsStore = MI->mayStore();

  unsigned Opc = MI->getOpcode();
  int LoadStoreOp = IsStore ?
    getOffsetMUBUFStore(Opc) : getOffsetMUBUFLoad(Opc);
  if (LoadStoreOp == -1)
    return false;

  const MachineOperand *Reg = TII->getNamedOperand(*MI, AMDGPU::OpName::vdata);
  if (spillVGPRtoAGPR(MI, Index, 0, Reg->getReg(), false).getInstr())
    return true;

  MachineInstrBuilder NewMI =
      BuildMI(*MBB, MI, DL, TII->get(LoadStoreOp))
          .add(*Reg)
          .add(*TII->getNamedOperand(*MI, AMDGPU::OpName::srsrc))
          .add(*TII->getNamedOperand(*MI, AMDGPU::OpName::soffset))
          .addImm(Offset)
          .addImm(0) // glc
          .addImm(0) // slc
          .addImm(0) // tfe
          .addImm(0) // dlc
          .cloneMemRefs(*MI);

  const MachineOperand *VDataIn = TII->getNamedOperand(*MI,
                                                       AMDGPU::OpName::vdata_in);
  if (VDataIn)
    NewMI.add(*VDataIn);
  return true;
}
void SIRegisterInfo::buildSpillLoadStore(MachineBasicBlock::iterator MI,
                                         unsigned LoadStoreOp,
                                         int Index,
                                         unsigned ValueReg,
                                         bool IsKill,
                                         unsigned ScratchRsrcReg,
                                         unsigned ScratchOffsetReg,
                                         int64_t InstOffset,
                                         MachineMemOperand *MMO,
                                         RegScavenger *RS) const {
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction *MF = MI->getParent()->getParent();
  const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const MachineFrameInfo &MFI = MF->getFrameInfo();

  const MCInstrDesc &Desc = TII->get(LoadStoreOp);
  const DebugLoc &DL = MI->getDebugLoc();
  bool IsStore = Desc.mayStore();

  bool Scavenged = false;
  unsigned SOffset = ScratchOffsetReg;

  const unsigned EltSize = 4;
  const TargetRegisterClass *RC = getRegClassForReg(MF->getRegInfo(), ValueReg);
  unsigned NumSubRegs = AMDGPU::getRegBitWidth(RC->getID()) / (EltSize * CHAR_BIT);
  unsigned Size = NumSubRegs * EltSize;
  int64_t Offset = InstOffset + MFI.getObjectOffset(Index);
  int64_t ScratchOffsetRegDelta = 0;

  unsigned Align = MFI.getObjectAlignment(Index);
  const MachinePointerInfo &BasePtrInfo = MMO->getPointerInfo();

  Register TmpReg =
    hasAGPRs(RC) ? TII->getNamedOperand(*MI, AMDGPU::OpName::tmp)->getReg()
                 : Register();

  assert((Offset % EltSize) == 0 && "unexpected VGPR spill offset");

  if (!isUInt<12>(Offset + Size - EltSize)) {
    SOffset = AMDGPU::NoRegister;

    // We currently only support spilling VGPRs to EltSize boundaries, meaning
    // we can simplify the adjustment of Offset here to just scale with
    // the wave size.
    Offset *= ST.getWavefrontSize();

    // We don't have access to the register scavenger if this function is
    // called during PEI::scavengeFrameVirtualRegs().
    if (RS)
      SOffset = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, MI, 0, false);

    if (SOffset == AMDGPU::NoRegister) {
      // There are no free SGPRs, and we are in the process of spilling VGPRs
      // too. Since we need a VGPR in order to spill SGPRs (this is true
      // on SI/CI and on VI it is true until we implement spilling using scalar
      // stores), we have no way to free up an SGPR. Our solution here is to
      // add the offset directly to the ScratchOffset register, and then
      // subtract the offset after the spill to return ScratchOffset to its
      // original value.
      SOffset = ScratchOffsetReg;
      ScratchOffsetRegDelta = Offset;
    } else {
      Scavenged = true;
    }

    BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), SOffset)
      .addReg(ScratchOffsetReg)
      .addImm(Offset);

    Offset = 0;
  }

  for (unsigned i = 0, e = NumSubRegs; i != e; ++i, Offset += EltSize) {
    Register SubReg = NumSubRegs == 1
                          ? Register(ValueReg)
                          : getSubReg(ValueReg, getSubRegFromChannel(i));

    unsigned SOffsetRegState = 0;
    unsigned SrcDstRegState = getDefRegState(!IsStore);
    if (i + 1 == e) {
      SOffsetRegState |= getKillRegState(Scavenged);
      // The last implicit use carries the "Kill" flag.
      SrcDstRegState |= getKillRegState(IsKill);
    }

    auto MIB = spillVGPRtoAGPR(MI, Index, i, SubReg, IsKill);

    if (!MIB.getInstr()) {
      unsigned FinalReg = SubReg;
      if (TmpReg != AMDGPU::NoRegister) {
        if (IsStore)
          BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_ACCVGPR_READ_B32), TmpReg)
            .addReg(SubReg, getKillRegState(IsKill));
        SubReg = TmpReg;
      }

      MachinePointerInfo PInfo = BasePtrInfo.getWithOffset(EltSize * i);
      MachineMemOperand *NewMMO
        = MF->getMachineMemOperand(PInfo, MMO->getFlags(),
                                   EltSize, MinAlign(Align, EltSize * i));

      MIB = BuildMI(*MBB, MI, DL, Desc)
        .addReg(SubReg, getDefRegState(!IsStore) | getKillRegState(IsKill))
        .addReg(ScratchRsrcReg)
        .addReg(SOffset, SOffsetRegState)
        .addImm(Offset)
        .addImm(0) // glc
        .addImm(0) // slc
        .addImm(0) // tfe
        .addImm(0) // dlc
        .addMemOperand(NewMMO);

      if (!IsStore && TmpReg != AMDGPU::NoRegister)
        MIB = BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_ACCVGPR_WRITE_B32),
                      FinalReg)
          .addReg(TmpReg, RegState::Kill);
    }

    if (NumSubRegs > 1)
      MIB.addReg(ValueReg, RegState::Implicit | SrcDstRegState);
  }

  if (ScratchOffsetRegDelta != 0) {
    // Subtract the offset we added to the ScratchOffset register.
    BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_SUB_U32), ScratchOffsetReg)
      .addReg(ScratchOffsetReg)
      .addImm(ScratchOffsetRegDelta);
  }
}
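
// A sketch of the offset handling above: scratch is swizzled per lane, so
// once an offset overflows the 12-bit MUBUF immediate it is folded into the
// SGPR soffset, which is in per-wave bytes. E.g. on a wave64 target, frame
// byte offset 4096 becomes soffset += 4096 * 64 with the immediate reset to
// 0, then stepping by EltSize (4) per 32-bit subregister.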
static std::pair<unsigned, unsigned> getSpillEltSize(unsigned SuperRegSize,
                                                     bool Store) {
  if (SuperRegSize % 16 == 0) {
    return { 16, Store ? AMDGPU::S_BUFFER_STORE_DWORDX4_SGPR :
                         AMDGPU::S_BUFFER_LOAD_DWORDX4_SGPR };
  }

  if (SuperRegSize % 8 == 0) {
    return { 8, Store ? AMDGPU::S_BUFFER_STORE_DWORDX2_SGPR :
                        AMDGPU::S_BUFFER_LOAD_DWORDX2_SGPR };
  }

  return { 4, Store ? AMDGPU::S_BUFFER_STORE_DWORD_SGPR :
                      AMDGPU::S_BUFFER_LOAD_DWORD_SGPR };
}
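
// Example: a 64-byte (512-bit) SGPR tuple is divisible by 16, so it spills
// as four S_BUFFER_STORE_DWORDX4_SGPR transactions of 16 bytes each; a
// 12-byte (96-bit) tuple is divisible by neither 16 nor 8, so it falls
// through to three 4-byte DWORD operations.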
bool SIRegisterInfo::spillSGPR(MachineBasicBlock::iterator MI,
                               int Index,
                               RegScavenger *RS,
                               bool OnlyToVGPR) const {
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction *MF = MBB->getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  DenseSet<unsigned> SGPRSpillVGPRDefinedSet;

  ArrayRef<SIMachineFunctionInfo::SpilledReg> VGPRSpills
    = MFI->getSGPRToVGPRSpills(Index);
  bool SpillToVGPR = !VGPRSpills.empty();
  if (OnlyToVGPR && !SpillToVGPR)
    return false;

  MachineRegisterInfo &MRI = MF->getRegInfo();
  const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();

  Register SuperReg = MI->getOperand(0).getReg();
  bool IsKill = MI->getOperand(0).isKill();
  const DebugLoc &DL = MI->getDebugLoc();

  MachineFrameInfo &FrameInfo = MF->getFrameInfo();

  bool SpillToSMEM = spillSGPRToSMEM();
  if (SpillToSMEM && OnlyToVGPR)
    return false;

  Register FrameReg = getFrameRegister(*MF);

  assert(SpillToVGPR || (SuperReg != MFI->getStackPtrOffsetReg() &&
                         SuperReg != MFI->getFrameOffsetReg() &&
                         SuperReg != MFI->getScratchWaveOffsetReg()));

  assert(SuperReg != AMDGPU::M0 && "m0 should never spill");

  unsigned OffsetReg = AMDGPU::M0;
  unsigned M0CopyReg = AMDGPU::NoRegister;

  if (SpillToSMEM) {
    if (RS->isRegUsed(AMDGPU::M0)) {
      M0CopyReg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
      BuildMI(*MBB, MI, DL, TII->get(AMDGPU::COPY), M0CopyReg)
        .addReg(AMDGPU::M0);
    }
  }

  unsigned ScalarStoreOp;
  unsigned EltSize = 4;
  const TargetRegisterClass *RC = getPhysRegClass(SuperReg);
  if (SpillToSMEM && isSGPRClass(RC)) {
    // XXX - if private_element_size is larger than 4 it might be useful to be
    // able to spill wider vmem spills.
    std::tie(EltSize, ScalarStoreOp) =
          getSpillEltSize(getRegSizeInBits(*RC) / 8, true);
  }

  ArrayRef<int16_t> SplitParts = getRegSplitParts(RC, EltSize);
  unsigned NumSubRegs = SplitParts.empty() ? 1 : SplitParts.size();

  // SubReg carries the "Kill" flag when SubReg == SuperReg.
  unsigned SubKillState = getKillRegState((NumSubRegs == 1) && IsKill);
  for (unsigned i = 0, e = NumSubRegs; i < e; ++i) {
    Register SubReg =
        NumSubRegs == 1 ? SuperReg : getSubReg(SuperReg, SplitParts[i]);

    if (SpillToSMEM) {
      int64_t FrOffset = FrameInfo.getObjectOffset(Index);

      // The allocated memory size is really the wavefront size * the frame
      // index size. The widest register class is 64 bytes, so a 4-byte scratch
      // allocation is enough to spill this in a single stack object.
      //
      // FIXME: Frame size/offsets are computed earlier than this, so the extra
      // space is still unnecessarily allocated.

      unsigned Align = FrameInfo.getObjectAlignment(Index);
      MachinePointerInfo PtrInfo
        = MachinePointerInfo::getFixedStack(*MF, Index, EltSize * i);
      MachineMemOperand *MMO
        = MF->getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore,
                                   EltSize, MinAlign(Align, EltSize * i));

      // SMEM instructions only support a single offset, so increment the wave
      // offset.

      int64_t Offset = (ST.getWavefrontSize() * FrOffset) + (EltSize * i);
      if (Offset != 0) {
        BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), OffsetReg)
          .addReg(FrameReg)
          .addImm(Offset);
      } else {
        BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_MOV_B32), OffsetReg)
          .addReg(FrameReg);
      }

      BuildMI(*MBB, MI, DL, TII->get(ScalarStoreOp))
        .addReg(SubReg, getKillRegState(IsKill)) // sdata
        .addReg(MFI->getScratchRSrcReg())        // sbase
        .addReg(OffsetReg, RegState::Kill)       // soff
        .addImm(0)                               // glc
        .addImm(0)                               // dlc
        .addMemOperand(MMO);

      continue;
    }

    if (SpillToVGPR) {
      SIMachineFunctionInfo::SpilledReg Spill = VGPRSpills[i];

      // During SGPR spilling to VGPR, determine if the VGPR is defined. The
      // only circumstance in which we say it is undefined is when it is the
      // first spill to this VGPR in the first basic block.
      bool VGPRDefined = true;
      if (MBB == &MF->front())
        VGPRDefined = !SGPRSpillVGPRDefinedSet.insert(Spill.VGPR).second;

      // Mark the "old value of vgpr" input undef only if this is the first sgpr
      // spill to this specific vgpr in the first basic block.
      BuildMI(*MBB, MI, DL,
              TII->getMCOpcodeFromPseudo(AMDGPU::V_WRITELANE_B32),
              Spill.VGPR)
        .addReg(SubReg, getKillRegState(IsKill))
        .addImm(Spill.Lane)
        .addReg(Spill.VGPR, VGPRDefined ? 0 : RegState::Undef);

      // FIXME: Since this spills to another register instead of an actual
      // frame index, we should delete the frame index when all references to
      // it are fixed.

      // XXX - Can the spill to VGPR fail for some subregisters but not others?
    } else {
      // Spill SGPR to a frame index.
      // TODO: Should VI try to spill to VGPR and then spill to SMEM?
      Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

      MachineInstrBuilder Mov
        = BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), TmpReg)
        .addReg(SubReg, SubKillState);

      // There could be undef components of a spilled super register.
      // TODO: Can we detect this and skip the spill?
      if (NumSubRegs > 1) {
        // The last implicit use of the SuperReg carries the "Kill" flag.
        unsigned SuperKillState = 0;
        if (i + 1 == e)
          SuperKillState |= getKillRegState(IsKill);
        Mov.addReg(SuperReg, RegState::Implicit | SuperKillState);
      }

      unsigned Align = FrameInfo.getObjectAlignment(Index);
      MachinePointerInfo PtrInfo
        = MachinePointerInfo::getFixedStack(*MF, Index, EltSize * i);
      MachineMemOperand *MMO
        = MF->getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore,
                                   EltSize, MinAlign(Align, EltSize * i));
      BuildMI(*MBB, MI, DL, TII->get(AMDGPU::SI_SPILL_V32_SAVE))
        .addReg(TmpReg, RegState::Kill)      // src
        .addFrameIndex(Index)                // vaddr
        .addReg(MFI->getScratchRSrcReg())    // srsrc
        .addReg(MFI->getStackPtrOffsetReg()) // soffset
        .addImm(i * 4)                       // offset
        .addMemOperand(MMO);
    }
  }

  if (M0CopyReg != AMDGPU::NoRegister) {
    BuildMI(*MBB, MI, DL, TII->get(AMDGPU::COPY), AMDGPU::M0)
      .addReg(M0CopyReg, RegState::Kill);
  }

  MI->eraseFromParent();
  MFI->addToSpilledSGPRs(NumSubRegs);
  return true;
}
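
// Illustrative shape of the SpillToVGPR path: spilling s[4:7] to a spill
// VGPR assigned lanes 0..3 (the lane numbers here are an assumption for the
// example; the real ones come from Spill.Lane) becomes roughly
//   v_writelane_b32 vN, s4, 0
//   v_writelane_b32 vN, s5, 1
//   v_writelane_b32 vN, s6, 2
//   v_writelane_b32 vN, s7, 3
// i.e. one lane of the VGPR per 32-bit subregister, with no memory traffic.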
bool SIRegisterInfo::restoreSGPR(MachineBasicBlock::iterator MI,
                                 int Index,
                                 RegScavenger *RS,
                                 bool OnlyToVGPR) const {
  MachineFunction *MF = MI->getParent()->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  MachineBasicBlock *MBB = MI->getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();

  ArrayRef<SIMachineFunctionInfo::SpilledReg> VGPRSpills
    = MFI->getSGPRToVGPRSpills(Index);
  bool SpillToVGPR = !VGPRSpills.empty();
  if (OnlyToVGPR && !SpillToVGPR)
    return false;

  MachineFrameInfo &FrameInfo = MF->getFrameInfo();
  const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const DebugLoc &DL = MI->getDebugLoc();

  Register SuperReg = MI->getOperand(0).getReg();
  bool SpillToSMEM = spillSGPRToSMEM();
  if (SpillToSMEM && OnlyToVGPR)
    return false;

  assert(SuperReg != AMDGPU::M0 && "m0 should never spill");

  unsigned OffsetReg = AMDGPU::M0;
  unsigned M0CopyReg = AMDGPU::NoRegister;

  if (SpillToSMEM) {
    if (RS->isRegUsed(AMDGPU::M0)) {
      M0CopyReg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
      BuildMI(*MBB, MI, DL, TII->get(AMDGPU::COPY), M0CopyReg)
        .addReg(AMDGPU::M0);
    }
  }

  unsigned EltSize = 4;
  unsigned ScalarLoadOp;

  Register FrameReg = getFrameRegister(*MF);

  const TargetRegisterClass *RC = getPhysRegClass(SuperReg);
  if (SpillToSMEM && isSGPRClass(RC)) {
    // XXX - if private_element_size is larger than 4 it might be useful to be
    // able to spill wider vmem spills.
    std::tie(EltSize, ScalarLoadOp) =
          getSpillEltSize(getRegSizeInBits(*RC) / 8, false);
  }

  ArrayRef<int16_t> SplitParts = getRegSplitParts(RC, EltSize);
  unsigned NumSubRegs = SplitParts.empty() ? 1 : SplitParts.size();

  // SubReg carries the "Kill" flag when SubReg == SuperReg.
  int64_t FrOffset = FrameInfo.getObjectOffset(Index);

  for (unsigned i = 0, e = NumSubRegs; i < e; ++i) {
    Register SubReg =
        NumSubRegs == 1 ? SuperReg : getSubReg(SuperReg, SplitParts[i]);

    if (SpillToSMEM) {
      // FIXME: Size may be > 4 but extra bytes wasted.
      unsigned Align = FrameInfo.getObjectAlignment(Index);
      MachinePointerInfo PtrInfo
        = MachinePointerInfo::getFixedStack(*MF, Index, EltSize * i);
      MachineMemOperand *MMO
        = MF->getMachineMemOperand(PtrInfo, MachineMemOperand::MOLoad,
                                   EltSize, MinAlign(Align, EltSize * i));

      int64_t Offset = (ST.getWavefrontSize() * FrOffset) + (EltSize * i);
      if (Offset != 0) {
        BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), OffsetReg)
          .addReg(FrameReg)
          .addImm(Offset);
      } else {
        BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_MOV_B32), OffsetReg)
          .addReg(FrameReg);
      }

      auto MIB =
        BuildMI(*MBB, MI, DL, TII->get(ScalarLoadOp), SubReg)
        .addReg(MFI->getScratchRSrcReg())  // sbase
        .addReg(OffsetReg, RegState::Kill) // soff
        .addImm(0)                         // glc
        .addImm(0)                         // dlc
        .addMemOperand(MMO);

      if (NumSubRegs > 1 && i == 0)
        MIB.addReg(SuperReg, RegState::ImplicitDefine);

      continue;
    }

    if (SpillToVGPR) {
      SIMachineFunctionInfo::SpilledReg Spill = VGPRSpills[i];
      auto MIB =
        BuildMI(*MBB, MI, DL, TII->getMCOpcodeFromPseudo(AMDGPU::V_READLANE_B32),
                SubReg)
        .addReg(Spill.VGPR)
        .addImm(Spill.Lane);

      if (NumSubRegs > 1 && i == 0)
        MIB.addReg(SuperReg, RegState::ImplicitDefine);
    } else {
      // Restore SGPR from a stack slot.
      // FIXME: We should use S_LOAD_DWORD here for VI.
      Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
      unsigned Align = FrameInfo.getObjectAlignment(Index);

      MachinePointerInfo PtrInfo
        = MachinePointerInfo::getFixedStack(*MF, Index, EltSize * i);

      MachineMemOperand *MMO = MF->getMachineMemOperand(PtrInfo,
        MachineMemOperand::MOLoad, EltSize,
        MinAlign(Align, EltSize * i));

      BuildMI(*MBB, MI, DL, TII->get(AMDGPU::SI_SPILL_V32_RESTORE), TmpReg)
        .addFrameIndex(Index)                // vaddr
        .addReg(MFI->getScratchRSrcReg())    // srsrc
        .addReg(MFI->getStackPtrOffsetReg()) // soffset
        .addImm(i * 4)                       // offset
        .addMemOperand(MMO);

      auto MIB =
        BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), SubReg)
        .addReg(TmpReg, RegState::Kill);

      if (NumSubRegs > 1)
        MIB.addReg(MI->getOperand(0).getReg(), RegState::ImplicitDefine);
    }
  }

  if (M0CopyReg != AMDGPU::NoRegister) {
    BuildMI(*MBB, MI, DL, TII->get(AMDGPU::COPY), AMDGPU::M0)
      .addReg(M0CopyReg, RegState::Kill);
  }

  MI->eraseFromParent();
  return true;
}
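
// The restore mirrors the spill: v_readlane_b32 pulls each 32-bit piece back
// out of the spill VGPR's lane (SpillToVGPR path), while the stack-slot path
// reloads through a temporary VGPR and broadcasts it back to the SGPR with
// v_readfirstlane_b32.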
/// Special case of eliminateFrameIndex. Returns true if the SGPR was spilled to
/// a VGPR and the stack slot can be safely eliminated when all other users are
/// handled.
bool SIRegisterInfo::eliminateSGPRToVGPRSpillFrameIndex(
  MachineBasicBlock::iterator MI,
  int FI,
  RegScavenger *RS) const {
  switch (MI->getOpcode()) {
  case AMDGPU::SI_SPILL_S1024_SAVE:
  case AMDGPU::SI_SPILL_S512_SAVE:
  case AMDGPU::SI_SPILL_S256_SAVE:
  case AMDGPU::SI_SPILL_S160_SAVE:
  case AMDGPU::SI_SPILL_S128_SAVE:
  case AMDGPU::SI_SPILL_S96_SAVE:
  case AMDGPU::SI_SPILL_S64_SAVE:
  case AMDGPU::SI_SPILL_S32_SAVE:
    return spillSGPR(MI, FI, RS, true);
  case AMDGPU::SI_SPILL_S1024_RESTORE:
  case AMDGPU::SI_SPILL_S512_RESTORE:
  case AMDGPU::SI_SPILL_S256_RESTORE:
  case AMDGPU::SI_SPILL_S160_RESTORE:
  case AMDGPU::SI_SPILL_S128_RESTORE:
  case AMDGPU::SI_SPILL_S96_RESTORE:
  case AMDGPU::SI_SPILL_S64_RESTORE:
  case AMDGPU::SI_SPILL_S32_RESTORE:
    return restoreSGPR(MI, FI, RS, true);
  default:
    llvm_unreachable("not an SGPR spill instruction");
  }
}
void SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
                                         int SPAdj, unsigned FIOperandNum,
                                         RegScavenger *RS) const {
  MachineFunction *MF = MI->getParent()->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  MachineBasicBlock *MBB = MI->getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo &FrameInfo = MF->getFrameInfo();
  const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
  const SIInstrInfo *TII = ST.getInstrInfo();
  DebugLoc DL = MI->getDebugLoc();

  assert(SPAdj == 0 && "unhandled SP adjustment in call sequence?");

  MachineOperand &FIOp = MI->getOperand(FIOperandNum);
  int Index = MI->getOperand(FIOperandNum).getIndex();

  Register FrameReg = getFrameRegister(*MF);

  switch (MI->getOpcode()) {
  // SGPR register spill
  case AMDGPU::SI_SPILL_S1024_SAVE:
  case AMDGPU::SI_SPILL_S512_SAVE:
  case AMDGPU::SI_SPILL_S256_SAVE:
  case AMDGPU::SI_SPILL_S160_SAVE:
  case AMDGPU::SI_SPILL_S128_SAVE:
  case AMDGPU::SI_SPILL_S96_SAVE:
  case AMDGPU::SI_SPILL_S64_SAVE:
  case AMDGPU::SI_SPILL_S32_SAVE: {
    spillSGPR(MI, Index, RS);
    break;
  }

  // SGPR register restore
  case AMDGPU::SI_SPILL_S1024_RESTORE:
  case AMDGPU::SI_SPILL_S512_RESTORE:
  case AMDGPU::SI_SPILL_S256_RESTORE:
  case AMDGPU::SI_SPILL_S160_RESTORE:
  case AMDGPU::SI_SPILL_S128_RESTORE:
  case AMDGPU::SI_SPILL_S96_RESTORE:
  case AMDGPU::SI_SPILL_S64_RESTORE:
  case AMDGPU::SI_SPILL_S32_RESTORE: {
    restoreSGPR(MI, Index, RS);
    break;
  }

  // VGPR register spill
  case AMDGPU::SI_SPILL_V1024_SAVE:
  case AMDGPU::SI_SPILL_V512_SAVE:
  case AMDGPU::SI_SPILL_V256_SAVE:
  case AMDGPU::SI_SPILL_V160_SAVE:
  case AMDGPU::SI_SPILL_V128_SAVE:
  case AMDGPU::SI_SPILL_V96_SAVE:
  case AMDGPU::SI_SPILL_V64_SAVE:
  case AMDGPU::SI_SPILL_V32_SAVE:
  case AMDGPU::SI_SPILL_A1024_SAVE:
  case AMDGPU::SI_SPILL_A512_SAVE:
  case AMDGPU::SI_SPILL_A128_SAVE:
  case AMDGPU::SI_SPILL_A64_SAVE:
  case AMDGPU::SI_SPILL_A32_SAVE: {
    const MachineOperand *VData = TII->getNamedOperand(*MI,
                                                       AMDGPU::OpName::vdata);
    assert(TII->getNamedOperand(*MI, AMDGPU::OpName::soffset)->getReg() ==
           MFI->getStackPtrOffsetReg());

    buildSpillLoadStore(MI, AMDGPU::BUFFER_STORE_DWORD_OFFSET,
                        Index,
                        VData->getReg(), VData->isKill(),
                        TII->getNamedOperand(*MI, AMDGPU::OpName::srsrc)->getReg(),
                        FrameReg,
                        TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm(),
                        *MI->memoperands_begin(),
                        RS);
    MFI->addToSpilledVGPRs(getNumSubRegsForSpillOp(MI->getOpcode()));
    MI->eraseFromParent();
    break;
  }
  case AMDGPU::SI_SPILL_V32_RESTORE:
  case AMDGPU::SI_SPILL_V64_RESTORE:
  case AMDGPU::SI_SPILL_V96_RESTORE:
  case AMDGPU::SI_SPILL_V128_RESTORE:
  case AMDGPU::SI_SPILL_V160_RESTORE:
  case AMDGPU::SI_SPILL_V256_RESTORE:
  case AMDGPU::SI_SPILL_V512_RESTORE:
  case AMDGPU::SI_SPILL_V1024_RESTORE:
  case AMDGPU::SI_SPILL_A32_RESTORE:
  case AMDGPU::SI_SPILL_A64_RESTORE:
  case AMDGPU::SI_SPILL_A128_RESTORE:
  case AMDGPU::SI_SPILL_A512_RESTORE:
  case AMDGPU::SI_SPILL_A1024_RESTORE: {
    const MachineOperand *VData = TII->getNamedOperand(*MI,
                                                       AMDGPU::OpName::vdata);
    assert(TII->getNamedOperand(*MI, AMDGPU::OpName::soffset)->getReg() ==
           MFI->getStackPtrOffsetReg());

    buildSpillLoadStore(MI, AMDGPU::BUFFER_LOAD_DWORD_OFFSET,
                        Index,
                        VData->getReg(), VData->isKill(),
                        TII->getNamedOperand(*MI, AMDGPU::OpName::srsrc)->getReg(),
                        FrameReg,
                        TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm(),
                        *MI->memoperands_begin(),
                        RS);
    MI->eraseFromParent();
    break;
  }

  default: {
    const DebugLoc &DL = MI->getDebugLoc();
    bool IsMUBUF = TII->isMUBUF(*MI);

    if (!IsMUBUF && !MFI->isEntryFunction()) {
      // Convert to an absolute stack address by finding the offset from the
      // scratch wave base and scaling by the wave size.
      //
      // In an entry function/kernel the offset is already the absolute
      // address relative to the frame register.

      Register DiffReg =
        MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);

      bool IsCopy = MI->getOpcode() == AMDGPU::V_MOV_B32_e32;
      Register ResultReg = IsCopy ?
        MI->getOperand(0).getReg() :
        MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

      BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_SUB_U32), DiffReg)
        .addReg(FrameReg)
        .addReg(MFI->getScratchWaveOffsetReg());

      int64_t Offset = FrameInfo.getObjectOffset(Index);
      if (Offset == 0) {
        // XXX - This never happens because of emergency scavenging slot at 0?
        BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_LSHRREV_B32_e64), ResultReg)
          .addImm(Log2_32(ST.getWavefrontSize()))
          .addReg(DiffReg);
      } else {
        Register ScaledReg =
          MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

        BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_LSHRREV_B32_e64), ScaledReg)
          .addImm(Log2_32(ST.getWavefrontSize()))
          .addReg(DiffReg, RegState::Kill);

        // TODO: Fold if use instruction is another add of a constant.
        if (AMDGPU::isInlinableLiteral32(Offset, ST.hasInv2PiInlineImm())) {
          TII->getAddNoCarry(*MBB, MI, DL, ResultReg)
            .addImm(Offset)
            .addReg(ScaledReg, RegState::Kill)
            .addImm(0); // clamp bit
        } else {
          Register ConstOffsetReg =
            MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);

          BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_MOV_B32), ConstOffsetReg)
            .addImm(Offset);
          TII->getAddNoCarry(*MBB, MI, DL, ResultReg)
            .addReg(ConstOffsetReg, RegState::Kill)
            .addReg(ScaledReg, RegState::Kill)
            .addImm(0); // clamp bit
        }
      }

      // Don't introduce an extra copy if we're just materializing in a mov.
      if (IsCopy)
        MI->eraseFromParent();
      else
        FIOp.ChangeToRegister(ResultReg, false, false, true);
      return;
    }

    if (IsMUBUF) {
      // Disable offen so we don't need a 0 vgpr base.
      assert(static_cast<int>(FIOperandNum) ==
             AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                        AMDGPU::OpName::vaddr));

      assert(TII->getNamedOperand(*MI, AMDGPU::OpName::soffset)->getReg() ==
             MFI->getStackPtrOffsetReg());

      TII->getNamedOperand(*MI, AMDGPU::OpName::soffset)->setReg(FrameReg);

      int64_t Offset = FrameInfo.getObjectOffset(Index);
      int64_t OldImm
        = TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm();
      int64_t NewOffset = OldImm + Offset;

      if (isUInt<12>(NewOffset) &&
          buildMUBUFOffsetLoadStore(TII, FrameInfo, MI, Index, NewOffset)) {
        MI->eraseFromParent();
        return;
      }
    }

    // If the offset is simply too big, don't convert to a scratch wave offset
    // relative index.

    int64_t Offset = FrameInfo.getObjectOffset(Index);
    FIOp.ChangeToImmediate(Offset);
    if (!TII->isImmOperandLegal(*MI, FIOperandNum, FIOp)) {
      Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
      BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), TmpReg)
        .addImm(Offset);
      FIOp.ChangeToRegister(TmpReg, false, false, true);
    }
  }
  }
}
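
// A sketch of the non-MUBUF conversion above, in pseudo-assembly (register
// names illustrative):
//   s_sub_u32     s_diff, FrameReg, ScratchWaveOffsetReg ; per-wave byte base
//   v_lshrrev_b32 v_res, log2(wavesize), s_diff          ; bytes -> per-lane
//   v_add_u32     v_res, ObjectOffset, v_res             ; add the FI offset
// which yields the absolute, lane-relative stack address for the object.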
StringRef SIRegisterInfo::getRegAsmName(unsigned Reg) const {
  return AMDGPUInstPrinter::getRegisterName(Reg);
}
// FIXME: This is very slow. It might be worth creating a map from physreg to
// register class.
const TargetRegisterClass *SIRegisterInfo::getPhysRegClass(unsigned Reg) const {
  assert(!Register::isVirtualRegister(Reg));

  static const TargetRegisterClass *const BaseClasses[] = {
    &AMDGPU::VGPR_32RegClass,
    &AMDGPU::SReg_32RegClass,
    &AMDGPU::AGPR_32RegClass,
    &AMDGPU::VReg_64RegClass,
    &AMDGPU::SReg_64RegClass,
    &AMDGPU::AReg_64RegClass,
    &AMDGPU::VReg_96RegClass,
    &AMDGPU::SReg_96RegClass,
    &AMDGPU::VReg_128RegClass,
    &AMDGPU::SReg_128RegClass,
    &AMDGPU::AReg_128RegClass,
    &AMDGPU::VReg_160RegClass,
    &AMDGPU::SReg_160RegClass,
    &AMDGPU::VReg_256RegClass,
    &AMDGPU::SReg_256RegClass,
    &AMDGPU::VReg_512RegClass,
    &AMDGPU::SReg_512RegClass,
    &AMDGPU::AReg_512RegClass,
    &AMDGPU::SReg_1024RegClass,
    &AMDGPU::VReg_1024RegClass,
    &AMDGPU::AReg_1024RegClass,
    &AMDGPU::SCC_CLASSRegClass,
    &AMDGPU::Pseudo_SReg_32RegClass,
    &AMDGPU::Pseudo_SReg_128RegClass,
  };

  for (const TargetRegisterClass *BaseClass : BaseClasses) {
    if (BaseClass->contains(Reg)) {
      return BaseClass;
    }
  }
  return nullptr;
}
// TODO: It might be helpful to have some target specific flags in
// TargetRegisterClass to mark which classes are VGPRs to make this trivial.
bool SIRegisterInfo::hasVGPRs(const TargetRegisterClass *RC) const {
  unsigned Size = getRegSizeInBits(*RC);
  if (Size < 32)
    return false;
  switch (Size) {
  case 32:
    return getCommonSubClass(&AMDGPU::VGPR_32RegClass, RC) != nullptr;
  case 64:
    return getCommonSubClass(&AMDGPU::VReg_64RegClass, RC) != nullptr;
  case 96:
    return getCommonSubClass(&AMDGPU::VReg_96RegClass, RC) != nullptr;
  case 128:
    return getCommonSubClass(&AMDGPU::VReg_128RegClass, RC) != nullptr;
  case 160:
    return getCommonSubClass(&AMDGPU::VReg_160RegClass, RC) != nullptr;
  case 256:
    return getCommonSubClass(&AMDGPU::VReg_256RegClass, RC) != nullptr;
  case 512:
    return getCommonSubClass(&AMDGPU::VReg_512RegClass, RC) != nullptr;
  case 1024:
    return getCommonSubClass(&AMDGPU::VReg_1024RegClass, RC) != nullptr;
  default:
    llvm_unreachable("Invalid register class size");
  }
}
bool SIRegisterInfo::hasAGPRs(const TargetRegisterClass *RC) const {
  unsigned Size = getRegSizeInBits(*RC);
  if (Size < 32)
    return false;
  switch (Size) {
  case 32:
    return getCommonSubClass(&AMDGPU::AGPR_32RegClass, RC) != nullptr;
  case 64:
    return getCommonSubClass(&AMDGPU::AReg_64RegClass, RC) != nullptr;
  case 96:
    return false;
  case 128:
    return getCommonSubClass(&AMDGPU::AReg_128RegClass, RC) != nullptr;
  case 160:
  case 256:
    return false;
  case 512:
    return getCommonSubClass(&AMDGPU::AReg_512RegClass, RC) != nullptr;
  case 1024:
    return getCommonSubClass(&AMDGPU::AReg_1024RegClass, RC) != nullptr;
  default:
    llvm_unreachable("Invalid register class size");
  }
}
const TargetRegisterClass *SIRegisterInfo::getEquivalentVGPRClass(
                                         const TargetRegisterClass *SRC) const {
  switch (getRegSizeInBits(*SRC)) {
  case 32:
    return &AMDGPU::VGPR_32RegClass;
  case 64:
    return &AMDGPU::VReg_64RegClass;
  case 96:
    return &AMDGPU::VReg_96RegClass;
  case 128:
    return &AMDGPU::VReg_128RegClass;
  case 160:
    return &AMDGPU::VReg_160RegClass;
  case 256:
    return &AMDGPU::VReg_256RegClass;
  case 512:
    return &AMDGPU::VReg_512RegClass;
  case 1024:
    return &AMDGPU::VReg_1024RegClass;
  default:
    llvm_unreachable("Invalid register class size");
  }
}
const TargetRegisterClass *SIRegisterInfo::getEquivalentAGPRClass(
                                         const TargetRegisterClass *SRC) const {
  switch (getRegSizeInBits(*SRC)) {
  case 32:
    return &AMDGPU::AGPR_32RegClass;
  case 64:
    return &AMDGPU::AReg_64RegClass;
  case 128:
    return &AMDGPU::AReg_128RegClass;
  case 512:
    return &AMDGPU::AReg_512RegClass;
  case 1024:
    return &AMDGPU::AReg_1024RegClass;
  default:
    llvm_unreachable("Invalid register class size");
  }
}
const TargetRegisterClass *SIRegisterInfo::getEquivalentSGPRClass(
                                         const TargetRegisterClass *VRC) const {
  switch (getRegSizeInBits(*VRC)) {
  case 32:
    return &AMDGPU::SGPR_32RegClass;
  case 64:
    return &AMDGPU::SReg_64RegClass;
  case 96:
    return &AMDGPU::SReg_96RegClass;
  case 128:
    return &AMDGPU::SReg_128RegClass;
  case 160:
    return &AMDGPU::SReg_160RegClass;
  case 256:
    return &AMDGPU::SReg_256RegClass;
  case 512:
    return &AMDGPU::SReg_512RegClass;
  case 1024:
    return &AMDGPU::SReg_1024RegClass;
  default:
    llvm_unreachable("Invalid register class size");
  }
}
const TargetRegisterClass *SIRegisterInfo::getSubRegClass(
                         const TargetRegisterClass *RC, unsigned SubIdx) const {
  if (SubIdx == AMDGPU::NoSubRegister)
    return RC;

  // We can assume that each lane corresponds to one 32-bit register.
  unsigned Count = getSubRegIndexLaneMask(SubIdx).getNumLanes();
  if (isSGPRClass(RC)) {
    switch (Count) {
    case 1:
      return &AMDGPU::SGPR_32RegClass;
    case 2:
      return &AMDGPU::SReg_64RegClass;
    case 3:
      return &AMDGPU::SReg_96RegClass;
    case 4:
      return &AMDGPU::SReg_128RegClass;
    case 5:
      return &AMDGPU::SReg_160RegClass;
    case 8:
      return &AMDGPU::SReg_256RegClass;
    case 16:
      return &AMDGPU::SReg_512RegClass;
    case 32: /* fall-through */
    default:
      llvm_unreachable("Invalid sub-register class size");
    }
  } else if (hasAGPRs(RC)) {
    switch (Count) {
    case 1:
      return &AMDGPU::AGPR_32RegClass;
    case 2:
      return &AMDGPU::AReg_64RegClass;
    case 4:
      return &AMDGPU::AReg_128RegClass;
    case 16:
      return &AMDGPU::AReg_512RegClass;
    case 32: /* fall-through */
    default:
      llvm_unreachable("Invalid sub-register class size");
    }
  } else {
    switch (Count) {
    case 1:
      return &AMDGPU::VGPR_32RegClass;
    case 2:
      return &AMDGPU::VReg_64RegClass;
    case 3:
      return &AMDGPU::VReg_96RegClass;
    case 4:
      return &AMDGPU::VReg_128RegClass;
    case 5:
      return &AMDGPU::VReg_160RegClass;
    case 8:
      return &AMDGPU::VReg_256RegClass;
    case 16:
      return &AMDGPU::VReg_512RegClass;
    case 32: /* fall-through */
    default:
      llvm_unreachable("Invalid sub-register class size");
    }
  }
}
bool SIRegisterInfo::opCanUseInlineConstant(unsigned OpType) const {
  if (OpType >= AMDGPU::OPERAND_REG_INLINE_AC_FIRST &&
      OpType <= AMDGPU::OPERAND_REG_INLINE_AC_LAST)
    return !ST.hasMFMAInlineLiteralBug();

  return OpType >= AMDGPU::OPERAND_SRC_FIRST &&
         OpType <= AMDGPU::OPERAND_SRC_LAST;
}
bool SIRegisterInfo::shouldRewriteCopySrc(
  const TargetRegisterClass *DefRC,
  unsigned DefSubReg,
  const TargetRegisterClass *SrcRC,
  unsigned SrcSubReg) const {
  // We want to prefer the smallest register class possible, so we don't want
  // to stop and rewrite on anything that looks like a subregister
  // extract. Operations mostly don't care about the super register class, so
  // we only want to stop on the most basic of copies between the same register
  // class.
  //
  // e.g. if we have something like
  // %0 = ...
  // %1 = ...
  // %2 = REG_SEQUENCE %0, sub0, %1, sub1, %2, sub2
  // %3 = COPY %2, sub0
  //
  // We want to look through the COPY to find:
  // %3 = COPY %0

  // Plain copy.
  return getCommonSubClass(DefRC, SrcRC) != nullptr;
}
/// Returns a register that is not used at any point in the function.
/// If all registers are used, then this function will return
/// AMDGPU::NoRegister.
unsigned
SIRegisterInfo::findUnusedRegister(const MachineRegisterInfo &MRI,
                                   const TargetRegisterClass *RC,
                                   const MachineFunction &MF) const {
  for (unsigned Reg : *RC)
    if (MRI.isAllocatable(Reg) && !MRI.isPhysRegUsed(Reg))
      return Reg;
  return AMDGPU::NoRegister;
}
ArrayRef<int16_t> SIRegisterInfo::getRegSplitParts(const TargetRegisterClass *RC,
                                                   unsigned EltSize) const {
  if (EltSize == 4) {
    static const int16_t Sub0_31[] = {
      AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
      AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7,
      AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11,
      AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15,
      AMDGPU::sub16, AMDGPU::sub17, AMDGPU::sub18, AMDGPU::sub19,
      AMDGPU::sub20, AMDGPU::sub21, AMDGPU::sub22, AMDGPU::sub23,
      AMDGPU::sub24, AMDGPU::sub25, AMDGPU::sub26, AMDGPU::sub27,
      AMDGPU::sub28, AMDGPU::sub29, AMDGPU::sub30, AMDGPU::sub31,
    };

    static const int16_t Sub0_15[] = {
      AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
      AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7,
      AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11,
      AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15,
    };

    static const int16_t Sub0_7[] = {
      AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
      AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7,
    };

    static const int16_t Sub0_4[] = {
      AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3, AMDGPU::sub4,
    };

    static const int16_t Sub0_3[] = {
      AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
    };

    static const int16_t Sub0_2[] = {
      AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2,
    };

    static const int16_t Sub0_1[] = {
      AMDGPU::sub0, AMDGPU::sub1,
    };

    switch (AMDGPU::getRegBitWidth(*RC->MC)) {
    case 32:
      return {};
    case 64:
      return makeArrayRef(Sub0_1);
    case 96:
      return makeArrayRef(Sub0_2);
    case 128:
      return makeArrayRef(Sub0_3);
    case 160:
      return makeArrayRef(Sub0_4);
    case 256:
      return makeArrayRef(Sub0_7);
    case 512:
      return makeArrayRef(Sub0_15);
    case 1024:
      return makeArrayRef(Sub0_31);
    default:
      llvm_unreachable("unhandled register size");
    }
  }

  if (EltSize == 8) {
    static const int16_t Sub0_31_64[] = {
      AMDGPU::sub0_sub1, AMDGPU::sub2_sub3,
      AMDGPU::sub4_sub5, AMDGPU::sub6_sub7,
      AMDGPU::sub8_sub9, AMDGPU::sub10_sub11,
      AMDGPU::sub12_sub13, AMDGPU::sub14_sub15,
      AMDGPU::sub16_sub17, AMDGPU::sub18_sub19,
      AMDGPU::sub20_sub21, AMDGPU::sub22_sub23,
      AMDGPU::sub24_sub25, AMDGPU::sub26_sub27,
      AMDGPU::sub28_sub29, AMDGPU::sub30_sub31
    };

    static const int16_t Sub0_15_64[] = {
      AMDGPU::sub0_sub1, AMDGPU::sub2_sub3,
      AMDGPU::sub4_sub5, AMDGPU::sub6_sub7,
      AMDGPU::sub8_sub9, AMDGPU::sub10_sub11,
      AMDGPU::sub12_sub13, AMDGPU::sub14_sub15
    };

    static const int16_t Sub0_7_64[] = {
      AMDGPU::sub0_sub1, AMDGPU::sub2_sub3,
      AMDGPU::sub4_sub5, AMDGPU::sub6_sub7
    };

    static const int16_t Sub0_3_64[] = {
      AMDGPU::sub0_sub1, AMDGPU::sub2_sub3
    };

    switch (AMDGPU::getRegBitWidth(*RC->MC)) {
    case 64:
      return {};
    case 128:
      return makeArrayRef(Sub0_3_64);
    case 256:
      return makeArrayRef(Sub0_7_64);
    case 512:
      return makeArrayRef(Sub0_15_64);
    case 1024:
      return makeArrayRef(Sub0_31_64);
    default:
      llvm_unreachable("unhandled register size");
    }
  }

  if (EltSize == 16) {
    static const int16_t Sub0_31_128[] = {
      AMDGPU::sub0_sub1_sub2_sub3,
      AMDGPU::sub4_sub5_sub6_sub7,
      AMDGPU::sub8_sub9_sub10_sub11,
      AMDGPU::sub12_sub13_sub14_sub15,
      AMDGPU::sub16_sub17_sub18_sub19,
      AMDGPU::sub20_sub21_sub22_sub23,
      AMDGPU::sub24_sub25_sub26_sub27,
      AMDGPU::sub28_sub29_sub30_sub31
    };

    static const int16_t Sub0_15_128[] = {
      AMDGPU::sub0_sub1_sub2_sub3,
      AMDGPU::sub4_sub5_sub6_sub7,
      AMDGPU::sub8_sub9_sub10_sub11,
      AMDGPU::sub12_sub13_sub14_sub15
    };

    static const int16_t Sub0_7_128[] = {
      AMDGPU::sub0_sub1_sub2_sub3,
      AMDGPU::sub4_sub5_sub6_sub7
    };

    switch (AMDGPU::getRegBitWidth(*RC->MC)) {
    case 128:
      return {};
    case 256:
      return makeArrayRef(Sub0_7_128);
    case 512:
      return makeArrayRef(Sub0_15_128);
    case 1024:
      return makeArrayRef(Sub0_31_128);
    default:
      llvm_unreachable("unhandled register size");
    }
  }

  assert(EltSize == 32 && "unhandled elt size");

  static const int16_t Sub0_31_256[] = {
    AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7,
    AMDGPU::sub8_sub9_sub10_sub11_sub12_sub13_sub14_sub15,
    AMDGPU::sub16_sub17_sub18_sub19_sub20_sub21_sub22_sub23,
    AMDGPU::sub24_sub25_sub26_sub27_sub28_sub29_sub30_sub31
  };

  static const int16_t Sub0_15_256[] = {
    AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7,
    AMDGPU::sub8_sub9_sub10_sub11_sub12_sub13_sub14_sub15
  };

  switch (AMDGPU::getRegBitWidth(*RC->MC)) {
  case 256:
    return {};
  case 512:
    return makeArrayRef(Sub0_15_256);
  case 1024:
    return makeArrayRef(Sub0_31_256);
  default:
    llvm_unreachable("unhandled register size");
  }
}
*
1808 SIRegisterInfo::getRegClassForReg(const MachineRegisterInfo
&MRI
,
1809 unsigned Reg
) const {
1810 if (Register::isVirtualRegister(Reg
))
1811 return MRI
.getRegClass(Reg
);
1813 return getPhysRegClass(Reg
);
bool SIRegisterInfo::isVGPR(const MachineRegisterInfo &MRI,
                            unsigned Reg) const {
  const TargetRegisterClass *RC = getRegClassForReg(MRI, Reg);
  assert(RC && "Register class for the reg not found");
  return hasVGPRs(RC);
}

bool SIRegisterInfo::isAGPR(const MachineRegisterInfo &MRI,
                            unsigned Reg) const {
  const TargetRegisterClass *RC = getRegClassForReg(MRI, Reg);
  assert(RC && "Register class for the reg not found");
  return hasAGPRs(RC);
}
bool SIRegisterInfo::shouldCoalesce(MachineInstr *MI,
                                    const TargetRegisterClass *SrcRC,
                                    unsigned SubReg,
                                    const TargetRegisterClass *DstRC,
                                    unsigned DstSubReg,
                                    const TargetRegisterClass *NewRC,
                                    LiveIntervals &LIS) const {
  unsigned SrcSize = getRegSizeInBits(*SrcRC);
  unsigned DstSize = getRegSizeInBits(*DstRC);
  unsigned NewSize = getRegSizeInBits(*NewRC);

  // Do not increase size of registers beyond dword, we would need to allocate
  // adjacent registers and constrain regalloc more than needed.

  // Always allow dword coalescing.
  if (SrcSize <= 32 || DstSize <= 32)
    return true;

  return NewSize <= DstSize || NewSize <= SrcSize;
}
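
// Example: coalescing a COPY between two 32-bit registers is always allowed,
// but merging a 64-bit and a 96-bit live range is only allowed when the
// resulting class does not grow past the larger of the two (NewSize <= 96
// in that case).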
unsigned SIRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                             MachineFunction &MF) const {
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  unsigned Occupancy = ST.getOccupancyWithLocalMemSize(MFI->getLDSSize(),
                                                       MF.getFunction());
  switch (RC->getID()) {
  default:
    return AMDGPURegisterInfo::getRegPressureLimit(RC, MF);
  case AMDGPU::VGPR_32RegClassID:
    return std::min(ST.getMaxNumVGPRs(Occupancy), ST.getMaxNumVGPRs(MF));
  case AMDGPU::SGPR_32RegClassID:
    return std::min(ST.getMaxNumSGPRs(Occupancy, true), ST.getMaxNumSGPRs(MF));
  }
}
unsigned SIRegisterInfo::getRegPressureSetLimit(const MachineFunction &MF,
                                                unsigned Idx) const {
  if (Idx == getVGPRPressureSet() || Idx == getAGPRPressureSet())
    return getRegPressureLimit(&AMDGPU::VGPR_32RegClass,
                               const_cast<MachineFunction &>(MF));

  if (Idx == getSGPRPressureSet())
    return getRegPressureLimit(&AMDGPU::SGPR_32RegClass,
                               const_cast<MachineFunction &>(MF));

  return AMDGPURegisterInfo::getRegPressureSetLimit(MF, Idx);
}
const int *SIRegisterInfo::getRegUnitPressureSets(unsigned RegUnit) const {
  static const int Empty[] = { -1 };

  if (hasRegUnit(AMDGPU::M0, RegUnit))
    return Empty;
  return AMDGPURegisterInfo::getRegUnitPressureSets(RegUnit);
}
unsigned SIRegisterInfo::getReturnAddressReg(const MachineFunction &MF) const {
  // Not a callee saved register.
  return AMDGPU::SGPR30_SGPR31;
}
const TargetRegisterClass *
SIRegisterInfo::getRegClassForSizeOnBank(unsigned Size,
                                         const RegisterBank &RB,
                                         const MachineRegisterInfo &MRI) const {
  switch (Size) {
  case 1: {
    switch (RB.getID()) {
    case AMDGPU::VGPRRegBankID:
      return &AMDGPU::VGPR_32RegClass;
    case AMDGPU::VCCRegBankID:
      return isWave32 ?
        &AMDGPU::SReg_32_XM0_XEXECRegClass : &AMDGPU::SReg_64_XEXECRegClass;
    case AMDGPU::SGPRRegBankID:
      return &AMDGPU::SReg_32_XM0RegClass;
    case AMDGPU::SCCRegBankID:
      // This needs to return an allocatable class, so don't bother returning
      // the dummy SCC class.
      return &AMDGPU::SReg_32_XM0RegClass;
    default:
      llvm_unreachable("unknown register bank");
    }
  }
  case 32:
    return RB.getID() == AMDGPU::VGPRRegBankID ? &AMDGPU::VGPR_32RegClass :
                                                 &AMDGPU::SReg_32_XM0RegClass;
  case 64:
    return RB.getID() == AMDGPU::VGPRRegBankID ? &AMDGPU::VReg_64RegClass :
                                                 &AMDGPU::SReg_64_XEXECRegClass;
  case 96:
    return RB.getID() == AMDGPU::VGPRRegBankID ? &AMDGPU::VReg_96RegClass :
                                                 &AMDGPU::SReg_96RegClass;
  case 128:
    return RB.getID() == AMDGPU::VGPRRegBankID ? &AMDGPU::VReg_128RegClass :
                                                 &AMDGPU::SReg_128RegClass;
  case 160:
    return RB.getID() == AMDGPU::VGPRRegBankID ? &AMDGPU::VReg_160RegClass :
                                                 &AMDGPU::SReg_160RegClass;
  case 256:
    return RB.getID() == AMDGPU::VGPRRegBankID ? &AMDGPU::VReg_256RegClass :
                                                 &AMDGPU::SReg_256RegClass;
  case 512:
    return RB.getID() == AMDGPU::VGPRRegBankID ? &AMDGPU::VReg_512RegClass :
                                                 &AMDGPU::SReg_512RegClass;
  default:
    if (Size < 32)
      return RB.getID() == AMDGPU::VGPRRegBankID ? &AMDGPU::VGPR_32RegClass :
                                                   &AMDGPU::SReg_32_XM0RegClass;
    return nullptr;
  }
}
const TargetRegisterClass *
SIRegisterInfo::getConstrainedRegClassForOperand(const MachineOperand &MO,
                                         const MachineRegisterInfo &MRI) const {
  if (const RegisterBank *RB = MRI.getRegBankOrNull(MO.getReg()))
    return getRegClassForTypeOnBank(MRI.getType(MO.getReg()), *RB, MRI);
  return nullptr;
}
unsigned SIRegisterInfo::getVCC() const {
  return isWave32 ? AMDGPU::VCC_LO : AMDGPU::VCC;
}
const TargetRegisterClass *
SIRegisterInfo::getRegClass(unsigned RCID) const {
  switch ((int)RCID) {
  case AMDGPU::SReg_1RegClassID:
    return getBoolRC();
  case AMDGPU::SReg_1_XEXECRegClassID:
    return isWave32 ? &AMDGPU::SReg_32_XM0_XEXECRegClass
                    : &AMDGPU::SReg_64_XEXECRegClass;
  case -1:
    return nullptr;
  default:
    return AMDGPURegisterInfo::getRegClass(RCID);
  }
}
// Find reaching register definition.
MachineInstr *SIRegisterInfo::findReachingDef(unsigned Reg, unsigned SubReg,
                                              MachineInstr &Use,
                                              MachineRegisterInfo &MRI,
                                              LiveIntervals *LIS) const {
  auto &MDT = LIS->getAnalysis<MachineDominatorTree>();
  SlotIndex UseIdx = LIS->getInstructionIndex(Use);
  SlotIndex DefIdx;

  if (Register::isVirtualRegister(Reg)) {
    if (!LIS->hasInterval(Reg))
      return nullptr;
    LiveInterval &LI = LIS->getInterval(Reg);
    LaneBitmask SubLanes = SubReg ? getSubRegIndexLaneMask(SubReg)
                                  : MRI.getMaxLaneMaskForVReg(Reg);
    VNInfo *V = nullptr;
    if (LI.hasSubRanges()) {
      for (auto &S : LI.subranges()) {
        if ((S.LaneMask & SubLanes) == SubLanes) {
          V = S.getVNInfoAt(UseIdx);
          break;
        }
      }
    } else {
      V = LI.getVNInfoAt(UseIdx);
    }
    if (!V)
      return nullptr;
    DefIdx = V->def;
  } else {
    // Find last def.
    for (MCRegUnitIterator Units(Reg, this); Units.isValid(); ++Units) {
      LiveRange &LR = LIS->getRegUnit(*Units);
      if (VNInfo *V = LR.getVNInfoAt(UseIdx)) {
        if (!DefIdx.isValid() ||
            MDT.dominates(LIS->getInstructionFromIndex(DefIdx),
                          LIS->getInstructionFromIndex(V->def)))
          DefIdx = V->def;
      } else {
        return nullptr;
      }
    }
  }

  MachineInstr *Def = LIS->getInstructionFromIndex(DefIdx);

  if (!Def || !MDT.dominates(Def, &Use))
    return nullptr;

  assert(Def->modifiesRegister(Reg, this));

  return Def;
}