//===-- SIRegisterInfo.cpp - SI Register Information ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// SI implementation of the TargetRegisterInfo class.
//
//===----------------------------------------------------------------------===//

#include "SIRegisterInfo.h"
#include "AMDGPURegisterBankInfo.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "MCTargetDesc/AMDGPUInstPrinter.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"

using namespace llvm;

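// Returns true if the -1-terminated pressure set list \p PSets contains
// \p PSetID.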
static bool hasPressureSet(const int *PSets, unsigned PSetID) {
  for (unsigned i = 0; PSets[i] != -1; ++i) {
    if (PSets[i] == (int)PSetID)
      return true;
  }
  return false;
}

void SIRegisterInfo::classifyPressureSet(unsigned PSetID, unsigned Reg,
                                         BitVector &PressureSets) const {
  for (MCRegUnitIterator U(Reg, this); U.isValid(); ++U) {
    const int *PSets = getRegUnitPressureSets(*U);
    if (hasPressureSet(PSets, PSetID)) {
      PressureSets.set(PSetID);
      break;
    }
  }
}

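// Command-line override for SGPR-to-VGPR spilling; initializes the
// SpillSGPRToVGPR field in the constructor below.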
static cl::opt<bool> EnableSpillSGPRToVGPR(
  "amdgpu-spill-sgpr-to-vgpr",
  cl::desc("Enable spilling SGPRs to VGPRs"),
  cl::ReallyHidden,
  cl::init(true));

SIRegisterInfo::SIRegisterInfo(const GCNSubtarget &ST) :
  AMDGPURegisterInfo(),
  ST(ST),
  SGPRPressureSets(getNumRegPressureSets()),
  VGPRPressureSets(getNumRegPressureSets()),
  AGPRPressureSets(getNumRegPressureSets()),
  SpillSGPRToVGPR(EnableSpillSGPRToVGPR),
  isWave32(ST.isWave32()) {
  unsigned NumRegPressureSets = getNumRegPressureSets();

  SGPRSetID = NumRegPressureSets;
  VGPRSetID = NumRegPressureSets;
  AGPRSetID = NumRegPressureSets;

  for (unsigned i = 0; i < NumRegPressureSets; ++i) {
    classifyPressureSet(i, AMDGPU::SGPR0, SGPRPressureSets);
    classifyPressureSet(i, AMDGPU::VGPR0, VGPRPressureSets);
    classifyPressureSet(i, AMDGPU::AGPR0, AGPRPressureSets);
  }

  // Determine the number of reg units for each pressure set.
  std::vector<unsigned> PressureSetRegUnits(NumRegPressureSets, 0);
  for (unsigned i = 0, e = getNumRegUnits(); i != e; ++i) {
    const int *PSets = getRegUnitPressureSets(i);
    for (unsigned j = 0; PSets[j] != -1; ++j) {
      ++PressureSetRegUnits[PSets[j]];
    }
  }

  unsigned VGPRMax = 0, SGPRMax = 0, AGPRMax = 0;
  for (unsigned i = 0; i < NumRegPressureSets; ++i) {
    if (isVGPRPressureSet(i) && PressureSetRegUnits[i] > VGPRMax) {
      VGPRSetID = i;
      VGPRMax = PressureSetRegUnits[i];
      continue;
    }
    if (isSGPRPressureSet(i) && PressureSetRegUnits[i] > SGPRMax) {
      SGPRSetID = i;
      SGPRMax = PressureSetRegUnits[i];
    }
    if (isAGPRPressureSet(i) && PressureSetRegUnits[i] > AGPRMax) {
      AGPRSetID = i;
      AGPRMax = PressureSetRegUnits[i];
    }
  }

  assert(SGPRSetID < NumRegPressureSets &&
         VGPRSetID < NumRegPressureSets &&
         AGPRSetID < NumRegPressureSets);
}

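// The private segment (scratch) buffer descriptor occupies the highest
// 4-aligned group of four SGPRs available to the function.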
unsigned SIRegisterInfo::reservedPrivateSegmentBufferReg(
  const MachineFunction &MF) const {
  unsigned BaseIdx = alignDown(ST.getMaxNumSGPRs(MF), 4) - 4;
  unsigned BaseReg(AMDGPU::SGPR_32RegClass.getRegister(BaseIdx));
  return getMatchingSuperReg(BaseReg, AMDGPU::sub0, &AMDGPU::SGPR_128RegClass);
}

static unsigned findPrivateSegmentWaveByteOffsetRegIndex(unsigned RegCount) {
  unsigned Reg;

  // Try to place it in a hole after PrivateSegmentBufferReg.
  if (RegCount & 3) {
    // We cannot put the segment buffer in (Idx - 4) ... (Idx - 1) due to
    // alignment constraints, so we have a hole where we can put the wave
    // offset.
    Reg = RegCount - 1;
  } else {
    // We can put the segment buffer in (Idx - 4) ... (Idx - 1) and put the
    // wave offset before it.
    Reg = RegCount - 5;
  }

  return Reg;
}

unsigned SIRegisterInfo::reservedPrivateSegmentWaveByteOffsetReg(
  const MachineFunction &MF) const {
  unsigned Reg = findPrivateSegmentWaveByteOffsetRegIndex(ST.getMaxNumSGPRs(MF));
  return AMDGPU::SGPR_32RegClass.getRegister(Reg);
}

BitVector SIRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());

  // EXEC_LO and EXEC_HI could be allocated and used as regular registers, but
  // this seems likely to result in bugs, so I'm marking them as reserved.
  reserveRegisterTuples(Reserved, AMDGPU::EXEC);
  reserveRegisterTuples(Reserved, AMDGPU::FLAT_SCR);

  // M0 has to be reserved so that llvm accepts it as a live-in into a block.
  reserveRegisterTuples(Reserved, AMDGPU::M0);

  // Reserve src_vccz, src_execz, src_scc.
  reserveRegisterTuples(Reserved, AMDGPU::SRC_VCCZ);
  reserveRegisterTuples(Reserved, AMDGPU::SRC_EXECZ);
  reserveRegisterTuples(Reserved, AMDGPU::SRC_SCC);

  // Reserve the memory aperture registers.
  reserveRegisterTuples(Reserved, AMDGPU::SRC_SHARED_BASE);
  reserveRegisterTuples(Reserved, AMDGPU::SRC_SHARED_LIMIT);
  reserveRegisterTuples(Reserved, AMDGPU::SRC_PRIVATE_BASE);
  reserveRegisterTuples(Reserved, AMDGPU::SRC_PRIVATE_LIMIT);

  // Reserve src_pops_exiting_wave_id - support is not implemented in Codegen.
  reserveRegisterTuples(Reserved, AMDGPU::SRC_POPS_EXITING_WAVE_ID);

  // Reserve xnack_mask registers - support is not implemented in Codegen.
  reserveRegisterTuples(Reserved, AMDGPU::XNACK_MASK);

  // Reserve lds_direct register - support is not implemented in Codegen.
  reserveRegisterTuples(Reserved, AMDGPU::LDS_DIRECT);

  // Reserve Trap Handler registers - support is not implemented in Codegen.
  reserveRegisterTuples(Reserved, AMDGPU::TBA);
  reserveRegisterTuples(Reserved, AMDGPU::TMA);
  reserveRegisterTuples(Reserved, AMDGPU::TTMP0_TTMP1);
  reserveRegisterTuples(Reserved, AMDGPU::TTMP2_TTMP3);
  reserveRegisterTuples(Reserved, AMDGPU::TTMP4_TTMP5);
  reserveRegisterTuples(Reserved, AMDGPU::TTMP6_TTMP7);
  reserveRegisterTuples(Reserved, AMDGPU::TTMP8_TTMP9);
  reserveRegisterTuples(Reserved, AMDGPU::TTMP10_TTMP11);
  reserveRegisterTuples(Reserved, AMDGPU::TTMP12_TTMP13);
  reserveRegisterTuples(Reserved, AMDGPU::TTMP14_TTMP15);

  // Reserve null register - it shall never be allocated.
  reserveRegisterTuples(Reserved, AMDGPU::SGPR_NULL);

  // Disallow vcc_hi allocation in wave32. It may be allocated but most likely
  // will result in bugs.
  if (isWave32) {
    Reserved.set(AMDGPU::VCC);
    Reserved.set(AMDGPU::VCC_HI);
  }

  unsigned MaxNumSGPRs = ST.getMaxNumSGPRs(MF);
  unsigned TotalNumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
  for (unsigned i = MaxNumSGPRs; i < TotalNumSGPRs; ++i) {
    unsigned Reg = AMDGPU::SGPR_32RegClass.getRegister(i);
    reserveRegisterTuples(Reserved, Reg);
  }

  unsigned MaxNumVGPRs = ST.getMaxNumVGPRs(MF);
  unsigned TotalNumVGPRs = AMDGPU::VGPR_32RegClass.getNumRegs();
  for (unsigned i = MaxNumVGPRs; i < TotalNumVGPRs; ++i) {
    unsigned Reg = AMDGPU::VGPR_32RegClass.getRegister(i);
    reserveRegisterTuples(Reserved, Reg);
    Reg = AMDGPU::AGPR_32RegClass.getRegister(i);
    reserveRegisterTuples(Reserved, Reg);
  }

  // Reserve all remaining AGPRs if there are no instructions that use them.
  if (!ST.hasMAIInsts()) {
    for (unsigned i = 0; i < MaxNumVGPRs; ++i) {
      unsigned Reg = AMDGPU::AGPR_32RegClass.getRegister(i);
      reserveRegisterTuples(Reserved, Reg);
    }
  }

  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  unsigned ScratchWaveOffsetReg = MFI->getScratchWaveOffsetReg();
  if (ScratchWaveOffsetReg != AMDGPU::NoRegister) {
    // Reserve 1 SGPR for scratch wave offset in case we need to spill.
    reserveRegisterTuples(Reserved, ScratchWaveOffsetReg);
  }

  unsigned ScratchRSrcReg = MFI->getScratchRSrcReg();
  if (ScratchRSrcReg != AMDGPU::NoRegister) {
    // Reserve 4 SGPRs for the scratch buffer resource descriptor in case we
    // need to spill.
    // TODO: May need to reserve a VGPR if doing LDS spilling.
    reserveRegisterTuples(Reserved, ScratchRSrcReg);
    assert(!isSubRegister(ScratchRSrcReg, ScratchWaveOffsetReg));
  }

  // We have to assume the SP is needed in case there are calls in the function,
  // which is detected after the function is lowered. If we aren't really going
  // to need SP, don't bother reserving it.
  unsigned StackPtrReg = MFI->getStackPtrOffsetReg();

  if (StackPtrReg != AMDGPU::NoRegister) {
    reserveRegisterTuples(Reserved, StackPtrReg);
    assert(!isSubRegister(ScratchRSrcReg, StackPtrReg));
  }

  unsigned FrameReg = MFI->getFrameOffsetReg();
  if (FrameReg != AMDGPU::NoRegister) {
    reserveRegisterTuples(Reserved, FrameReg);
    assert(!isSubRegister(ScratchRSrcReg, FrameReg));
  }

  for (unsigned Reg : MFI->WWMReservedRegs) {
    reserveRegisterTuples(Reserved, Reg);
  }

  // FIXME: Stop using reserved registers for this.
  for (MCPhysReg Reg : MFI->getAGPRSpillVGPRs())
    reserveRegisterTuples(Reserved, Reg);

  for (MCPhysReg Reg : MFI->getVGPRSpillAGPRs())
    reserveRegisterTuples(Reserved, Reg);

  return Reserved;
}

bool SIRegisterInfo::canRealignStack(const MachineFunction &MF) const {
  const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  // On entry, the base address is 0, so it can't possibly need any more
  // alignment.
  //
  // FIXME: Should be able to specify the entry frame alignment per calling
  // convention instead.
  if (Info->isEntryFunction())
    return false;

  return TargetRegisterInfo::canRealignStack(MF);
}

bool SIRegisterInfo::requiresRegisterScavenging(const MachineFunction &Fn) const {
  const SIMachineFunctionInfo *Info = Fn.getInfo<SIMachineFunctionInfo>();
  if (Info->isEntryFunction()) {
    const MachineFrameInfo &MFI = Fn.getFrameInfo();
    return MFI.hasStackObjects() || MFI.hasCalls();
  }

  // May need scavenger for dealing with callee saved registers.
  return true;
}

bool SIRegisterInfo::requiresFrameIndexScavenging(
  const MachineFunction &MF) const {
  // Do not use frame virtual registers. They used to be used for SGPRs, but
  // once we reach PrologEpilogInserter, we can no longer spill SGPRs. If the
  // scavenger fails, we can increment/decrement the necessary SGPRs to avoid a
  // spill.
  return false;
}

bool SIRegisterInfo::requiresFrameIndexReplacementScavenging(
  const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  return MFI.hasStackObjects();
}

bool SIRegisterInfo::requiresVirtualBaseRegisters(
  const MachineFunction &) const {
  // There are no special dedicated stack or frame pointers.
  return true;
}

bool SIRegisterInfo::trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
  // This helps catch bugs as verifier errors.
  return true;
}

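// Returns the immediate byte offset encoded on a MUBUF scratch access.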
int64_t SIRegisterInfo::getMUBUFInstrOffset(const MachineInstr *MI) const {
  assert(SIInstrInfo::isMUBUF(*MI));

  int OffIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                          AMDGPU::OpName::offset);
  return MI->getOperand(OffIdx).getImm();
}

int64_t SIRegisterInfo::getFrameIndexInstrOffset(const MachineInstr *MI,
                                                 int Idx) const {
  if (!SIInstrInfo::isMUBUF(*MI))
    return 0;

  assert(Idx == AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                           AMDGPU::OpName::vaddr) &&
         "Should never see frame index on non-address operand");

  return getMUBUFInstrOffset(MI);
}

bool SIRegisterInfo::needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
  if (!MI->mayLoadOrStore())
    return false;

  int64_t FullOffset = Offset + getMUBUFInstrOffset(MI);

  return !isUInt<12>(FullOffset);
}

void SIRegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
                                                  unsigned BaseReg,
                                                  int FrameIdx,
                                                  int64_t Offset) const {
  MachineBasicBlock::iterator Ins = MBB->begin();
  DebugLoc DL; // Defaults to "unknown"

  if (Ins != MBB->end())
    DL = Ins->getDebugLoc();

  MachineFunction *MF = MBB->getParent();
  const SIInstrInfo *TII = ST.getInstrInfo();

  if (Offset == 0) {
    BuildMI(*MBB, Ins, DL, TII->get(AMDGPU::V_MOV_B32_e32), BaseReg)
      .addFrameIndex(FrameIdx);
    return;
  }

  MachineRegisterInfo &MRI = MF->getRegInfo();
  Register OffsetReg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);

  Register FIReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

  BuildMI(*MBB, Ins, DL, TII->get(AMDGPU::S_MOV_B32), OffsetReg)
    .addImm(Offset);
  BuildMI(*MBB, Ins, DL, TII->get(AMDGPU::V_MOV_B32_e32), FIReg)
    .addFrameIndex(FrameIdx);

  TII->getAddNoCarry(*MBB, Ins, DL, BaseReg)
    .addReg(OffsetReg, RegState::Kill)
    .addReg(FIReg)
    .addImm(0); // clamp bit
}

void SIRegisterInfo::resolveFrameIndex(MachineInstr &MI, unsigned BaseReg,
                                       int64_t Offset) const {
  const SIInstrInfo *TII = ST.getInstrInfo();

#ifndef NDEBUG
  // FIXME: Is it possible to be storing a frame index to itself?
  bool SeenFI = false;
  for (const MachineOperand &MO : MI.operands()) {
    if (MO.isFI()) {
      if (SeenFI)
        llvm_unreachable("should not see multiple frame indices");

      SeenFI = true;
    }
  }
#endif

  MachineOperand *FIOp = TII->getNamedOperand(MI, AMDGPU::OpName::vaddr);

  MachineBasicBlock *MBB = MI.getParent();
  MachineFunction *MF = MBB->getParent();

  assert(FIOp && FIOp->isFI() && "frame index must be address operand");
  assert(TII->isMUBUF(MI));
  assert(TII->getNamedOperand(MI, AMDGPU::OpName::soffset)->getReg() ==
         MF->getInfo<SIMachineFunctionInfo>()->getStackPtrOffsetReg() &&
         "should only be seeing stack pointer offset relative FrameIndex");

  MachineOperand *OffsetOp = TII->getNamedOperand(MI, AMDGPU::OpName::offset);
  int64_t NewOffset = OffsetOp->getImm() + Offset;
  assert(isUInt<12>(NewOffset) && "offset should be legal");

  FIOp->ChangeToRegister(BaseReg, false);
  OffsetOp->setImm(NewOffset);
}

bool SIRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
                                        unsigned BaseReg,
                                        int64_t Offset) const {
  if (!SIInstrInfo::isMUBUF(*MI))
    return false;

  int64_t NewOffset = Offset + getMUBUFInstrOffset(MI);

  return isUInt<12>(NewOffset);
}

const TargetRegisterClass *SIRegisterInfo::getPointerRegClass(
  const MachineFunction &MF, unsigned Kind) const {
  // This is inaccurate. It depends on the instruction and address space. The
  // only place where we should hit this is for dealing with frame indexes /
  // private accesses, so this is correct in that case.
  return &AMDGPU::VGPR_32RegClass;
}

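// Number of 32-bit subregisters covered by a spill pseudo, derived from the
// register width encoded in the opcode name.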
static unsigned getNumSubRegsForSpillOp(unsigned Op) {
  switch (Op) {
  case AMDGPU::SI_SPILL_S1024_SAVE:
  case AMDGPU::SI_SPILL_S1024_RESTORE:
  case AMDGPU::SI_SPILL_V1024_SAVE:
  case AMDGPU::SI_SPILL_V1024_RESTORE:
  case AMDGPU::SI_SPILL_A1024_SAVE:
  case AMDGPU::SI_SPILL_A1024_RESTORE:
    return 32;
  case AMDGPU::SI_SPILL_S512_SAVE:
  case AMDGPU::SI_SPILL_S512_RESTORE:
  case AMDGPU::SI_SPILL_V512_SAVE:
  case AMDGPU::SI_SPILL_V512_RESTORE:
  case AMDGPU::SI_SPILL_A512_SAVE:
  case AMDGPU::SI_SPILL_A512_RESTORE:
    return 16;
  case AMDGPU::SI_SPILL_S256_SAVE:
  case AMDGPU::SI_SPILL_S256_RESTORE:
  case AMDGPU::SI_SPILL_V256_SAVE:
  case AMDGPU::SI_SPILL_V256_RESTORE:
    return 8;
  case AMDGPU::SI_SPILL_S160_SAVE:
  case AMDGPU::SI_SPILL_S160_RESTORE:
  case AMDGPU::SI_SPILL_V160_SAVE:
  case AMDGPU::SI_SPILL_V160_RESTORE:
    return 5;
  case AMDGPU::SI_SPILL_S128_SAVE:
  case AMDGPU::SI_SPILL_S128_RESTORE:
  case AMDGPU::SI_SPILL_V128_SAVE:
  case AMDGPU::SI_SPILL_V128_RESTORE:
  case AMDGPU::SI_SPILL_A128_SAVE:
  case AMDGPU::SI_SPILL_A128_RESTORE:
    return 4;
  case AMDGPU::SI_SPILL_S96_SAVE:
  case AMDGPU::SI_SPILL_S96_RESTORE:
  case AMDGPU::SI_SPILL_V96_SAVE:
  case AMDGPU::SI_SPILL_V96_RESTORE:
    return 3;
  case AMDGPU::SI_SPILL_S64_SAVE:
  case AMDGPU::SI_SPILL_S64_RESTORE:
  case AMDGPU::SI_SPILL_V64_SAVE:
  case AMDGPU::SI_SPILL_V64_RESTORE:
  case AMDGPU::SI_SPILL_A64_SAVE:
  case AMDGPU::SI_SPILL_A64_RESTORE:
    return 2;
  case AMDGPU::SI_SPILL_S32_SAVE:
  case AMDGPU::SI_SPILL_S32_RESTORE:
  case AMDGPU::SI_SPILL_V32_SAVE:
  case AMDGPU::SI_SPILL_V32_RESTORE:
  case AMDGPU::SI_SPILL_A32_SAVE:
  case AMDGPU::SI_SPILL_A32_RESTORE:
    return 1;
  default: llvm_unreachable("Invalid spill opcode");
  }
}

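// Map an OFFEN buffer store opcode to the corresponding OFFSET form, or -1 if
// there is none.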
static int getOffsetMUBUFStore(unsigned Opc) {
  switch (Opc) {
  case AMDGPU::BUFFER_STORE_DWORD_OFFEN:
    return AMDGPU::BUFFER_STORE_DWORD_OFFSET;
  case AMDGPU::BUFFER_STORE_BYTE_OFFEN:
    return AMDGPU::BUFFER_STORE_BYTE_OFFSET;
  case AMDGPU::BUFFER_STORE_SHORT_OFFEN:
    return AMDGPU::BUFFER_STORE_SHORT_OFFSET;
  case AMDGPU::BUFFER_STORE_DWORDX2_OFFEN:
    return AMDGPU::BUFFER_STORE_DWORDX2_OFFSET;
  case AMDGPU::BUFFER_STORE_DWORDX4_OFFEN:
    return AMDGPU::BUFFER_STORE_DWORDX4_OFFSET;
  case AMDGPU::BUFFER_STORE_SHORT_D16_HI_OFFEN:
    return AMDGPU::BUFFER_STORE_SHORT_D16_HI_OFFSET;
  case AMDGPU::BUFFER_STORE_BYTE_D16_HI_OFFEN:
    return AMDGPU::BUFFER_STORE_BYTE_D16_HI_OFFSET;
  default:
    return -1;
  }
}

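// Map an OFFEN buffer load opcode to the corresponding OFFSET form, or -1 if
// there is none.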
static int getOffsetMUBUFLoad(unsigned Opc) {
  switch (Opc) {
  case AMDGPU::BUFFER_LOAD_DWORD_OFFEN:
    return AMDGPU::BUFFER_LOAD_DWORD_OFFSET;
  case AMDGPU::BUFFER_LOAD_UBYTE_OFFEN:
    return AMDGPU::BUFFER_LOAD_UBYTE_OFFSET;
  case AMDGPU::BUFFER_LOAD_SBYTE_OFFEN:
    return AMDGPU::BUFFER_LOAD_SBYTE_OFFSET;
  case AMDGPU::BUFFER_LOAD_USHORT_OFFEN:
    return AMDGPU::BUFFER_LOAD_USHORT_OFFSET;
  case AMDGPU::BUFFER_LOAD_SSHORT_OFFEN:
    return AMDGPU::BUFFER_LOAD_SSHORT_OFFSET;
  case AMDGPU::BUFFER_LOAD_DWORDX2_OFFEN:
    return AMDGPU::BUFFER_LOAD_DWORDX2_OFFSET;
  case AMDGPU::BUFFER_LOAD_DWORDX4_OFFEN:
    return AMDGPU::BUFFER_LOAD_DWORDX4_OFFSET;
  case AMDGPU::BUFFER_LOAD_UBYTE_D16_OFFEN:
    return AMDGPU::BUFFER_LOAD_UBYTE_D16_OFFSET;
  case AMDGPU::BUFFER_LOAD_UBYTE_D16_HI_OFFEN:
    return AMDGPU::BUFFER_LOAD_UBYTE_D16_HI_OFFSET;
  case AMDGPU::BUFFER_LOAD_SBYTE_D16_OFFEN:
    return AMDGPU::BUFFER_LOAD_SBYTE_D16_OFFSET;
  case AMDGPU::BUFFER_LOAD_SBYTE_D16_HI_OFFEN:
    return AMDGPU::BUFFER_LOAD_SBYTE_D16_HI_OFFSET;
  case AMDGPU::BUFFER_LOAD_SHORT_D16_OFFEN:
    return AMDGPU::BUFFER_LOAD_SHORT_D16_OFFSET;
  case AMDGPU::BUFFER_LOAD_SHORT_D16_HI_OFFEN:
    return AMDGPU::BUFFER_LOAD_SHORT_D16_HI_OFFSET;
  default:
    return -1;
  }
}

static MachineInstrBuilder spillVGPRtoAGPR(const GCNSubtarget &ST,
                                           MachineBasicBlock::iterator MI,
                                           int Index,
                                           unsigned Lane,
                                           unsigned ValueReg,
                                           bool IsKill) {
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction *MF = MI->getParent()->getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  const SIInstrInfo *TII = ST.getInstrInfo();

  MCPhysReg Reg = MFI->getVGPRToAGPRSpill(Index, Lane);

  if (Reg == AMDGPU::NoRegister)
    return MachineInstrBuilder();

  bool IsStore = MI->mayStore();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  auto *TRI = static_cast<const SIRegisterInfo *>(MRI.getTargetRegisterInfo());

  unsigned Dst = IsStore ? Reg : ValueReg;
  unsigned Src = IsStore ? ValueReg : Reg;
  unsigned Opc = (IsStore ^ TRI->isVGPR(MRI, Reg)) ? AMDGPU::V_ACCVGPR_WRITE_B32
                                                   : AMDGPU::V_ACCVGPR_READ_B32;

  return BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(Opc), Dst)
           .addReg(Src, getKillRegState(IsKill));
}

// This differs from buildSpillLoadStore by only scavenging a VGPR. It does not
// need to handle the case where an SGPR may need to be spilled while spilling.
static bool buildMUBUFOffsetLoadStore(const GCNSubtarget &ST,
                                      MachineFrameInfo &MFI,
                                      MachineBasicBlock::iterator MI,
                                      int Index,
                                      int64_t Offset) {
  const SIInstrInfo *TII = ST.getInstrInfo();
  MachineBasicBlock *MBB = MI->getParent();
  const DebugLoc &DL = MI->getDebugLoc();
  bool IsStore = MI->mayStore();

  unsigned Opc = MI->getOpcode();
  int LoadStoreOp = IsStore ?
    getOffsetMUBUFStore(Opc) : getOffsetMUBUFLoad(Opc);
  if (LoadStoreOp == -1)
    return false;

  const MachineOperand *Reg = TII->getNamedOperand(*MI, AMDGPU::OpName::vdata);
  if (spillVGPRtoAGPR(ST, MI, Index, 0, Reg->getReg(), false).getInstr())
    return true;

  MachineInstrBuilder NewMI =
      BuildMI(*MBB, MI, DL, TII->get(LoadStoreOp))
          .add(*Reg)
          .add(*TII->getNamedOperand(*MI, AMDGPU::OpName::srsrc))
          .add(*TII->getNamedOperand(*MI, AMDGPU::OpName::soffset))
          .addImm(Offset)
          .addImm(0) // glc
          .addImm(0) // slc
          .addImm(0) // tfe
          .addImm(0) // dlc
          .addImm(0) // swz
          .cloneMemRefs(*MI);

  const MachineOperand *VDataIn = TII->getNamedOperand(*MI,
                                                       AMDGPU::OpName::vdata_in);
  if (VDataIn)
    NewMI.add(*VDataIn);
  return true;
}

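// Expand a VGPR spill/reload pseudo into a sequence of 4-byte scratch buffer
// accesses, one per 32-bit subregister, preferring a VGPR-to-AGPR copy for
// any lane that has an AGPR spill slot available.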
void SIRegisterInfo::buildSpillLoadStore(MachineBasicBlock::iterator MI,
                                         unsigned LoadStoreOp,
                                         int Index,
                                         unsigned ValueReg,
                                         bool IsKill,
                                         unsigned ScratchRsrcReg,
                                         unsigned ScratchOffsetReg,
                                         int64_t InstOffset,
                                         MachineMemOperand *MMO,
                                         RegScavenger *RS) const {
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction *MF = MI->getParent()->getParent();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const MachineFrameInfo &MFI = MF->getFrameInfo();

  const MCInstrDesc &Desc = TII->get(LoadStoreOp);
  const DebugLoc &DL = MI->getDebugLoc();
  bool IsStore = Desc.mayStore();

  bool Scavenged = false;
  unsigned SOffset = ScratchOffsetReg;

  const unsigned EltSize = 4;
  const TargetRegisterClass *RC = getRegClassForReg(MF->getRegInfo(), ValueReg);
  unsigned NumSubRegs = AMDGPU::getRegBitWidth(RC->getID()) / (EltSize * CHAR_BIT);
  unsigned Size = NumSubRegs * EltSize;
  int64_t Offset = InstOffset + MFI.getObjectOffset(Index);
  int64_t ScratchOffsetRegDelta = 0;

  unsigned Align = MFI.getObjectAlignment(Index);
  const MachinePointerInfo &BasePtrInfo = MMO->getPointerInfo();

  Register TmpReg =
    hasAGPRs(RC) ? TII->getNamedOperand(*MI, AMDGPU::OpName::tmp)->getReg()
                 : Register();

  assert((Offset % EltSize) == 0 && "unexpected VGPR spill offset");

  if (!isUInt<12>(Offset + Size - EltSize)) {
    SOffset = AMDGPU::NoRegister;

    // We currently only support spilling VGPRs to EltSize boundaries, meaning
    // we can simplify the adjustment of Offset here to just scale with
    // the wave size.
    Offset *= ST.getWavefrontSize();

    // We don't have access to the register scavenger if this function is called
    // during PEI::scavengeFrameVirtualRegs().
    if (RS)
      SOffset = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, MI, 0, false);

    if (SOffset == AMDGPU::NoRegister) {
      // There are no free SGPRs, and we are in the process of spilling VGPRs
      // too. Since we need a VGPR in order to spill SGPRs (this is true on
      // SI/CI and on VI it is true until we implement spilling using scalar
      // stores), we have no way to free up an SGPR. Our solution here is to
      // add the offset directly to the ScratchOffset register, and then
      // subtract the offset after the spill to return ScratchOffset to its
      // original value.
      SOffset = ScratchOffsetReg;
      ScratchOffsetRegDelta = Offset;
    } else {
      Scavenged = true;
    }

    BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), SOffset)
      .addReg(ScratchOffsetReg)
      .addImm(Offset);

    Offset = 0;
  }

  for (unsigned i = 0, e = NumSubRegs; i != e; ++i, Offset += EltSize) {
    Register SubReg = NumSubRegs == 1
                          ? Register(ValueReg)
                          : getSubReg(ValueReg, getSubRegFromChannel(i));

    unsigned SOffsetRegState = 0;
    unsigned SrcDstRegState = getDefRegState(!IsStore);
    if (i + 1 == e) {
      SOffsetRegState |= getKillRegState(Scavenged);
      // The last implicit use carries the "Kill" flag.
      SrcDstRegState |= getKillRegState(IsKill);
    }

    auto MIB = spillVGPRtoAGPR(ST, MI, Index, i, SubReg, IsKill);

    if (!MIB.getInstr()) {
      unsigned FinalReg = SubReg;
      if (TmpReg != AMDGPU::NoRegister) {
        if (IsStore)
          BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_ACCVGPR_READ_B32), TmpReg)
            .addReg(SubReg, getKillRegState(IsKill));
        SubReg = TmpReg;
      }

      MachinePointerInfo PInfo = BasePtrInfo.getWithOffset(EltSize * i);
      MachineMemOperand *NewMMO
        = MF->getMachineMemOperand(PInfo, MMO->getFlags(),
                                   EltSize, MinAlign(Align, EltSize * i));

      MIB = BuildMI(*MBB, MI, DL, Desc)
        .addReg(SubReg, getDefRegState(!IsStore) | getKillRegState(IsKill))
        .addReg(ScratchRsrcReg)
        .addReg(SOffset, SOffsetRegState)
        .addImm(Offset)
        .addImm(0) // glc
        .addImm(0) // slc
        .addImm(0) // tfe
        .addImm(0) // dlc
        .addImm(0) // swz
        .addMemOperand(NewMMO);

      if (!IsStore && TmpReg != AMDGPU::NoRegister)
        MIB = BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_ACCVGPR_WRITE_B32),
                      FinalReg)
          .addReg(TmpReg, RegState::Kill);
    }

    if (NumSubRegs > 1)
      MIB.addReg(ValueReg, RegState::Implicit | SrcDstRegState);
  }

  if (ScratchOffsetRegDelta != 0) {
    // Subtract the offset we added to the ScratchOffset register.
    BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_SUB_U32), ScratchOffsetReg)
      .addReg(ScratchOffsetReg)
      .addImm(ScratchOffsetRegDelta);
  }
}

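// Spill an SGPR (or SGPR tuple) to lanes of a VGPR via V_WRITELANE when a
// VGPR spill slot was assigned, and otherwise through a temporary VGPR to a
// real stack slot.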
bool SIRegisterInfo::spillSGPR(MachineBasicBlock::iterator MI,
                               int Index,
                               RegScavenger *RS,
                               bool OnlyToVGPR) const {
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction *MF = MBB->getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  DenseSet<unsigned> SGPRSpillVGPRDefinedSet;

  ArrayRef<SIMachineFunctionInfo::SpilledReg> VGPRSpills
    = MFI->getSGPRToVGPRSpills(Index);
  bool SpillToVGPR = !VGPRSpills.empty();
  if (OnlyToVGPR && !SpillToVGPR)
    return false;

  const SIInstrInfo *TII = ST.getInstrInfo();

  Register SuperReg = MI->getOperand(0).getReg();
  bool IsKill = MI->getOperand(0).isKill();
  const DebugLoc &DL = MI->getDebugLoc();

  MachineFrameInfo &FrameInfo = MF->getFrameInfo();

  assert(SpillToVGPR || (SuperReg != MFI->getStackPtrOffsetReg() &&
                         SuperReg != MFI->getFrameOffsetReg() &&
                         SuperReg != MFI->getScratchWaveOffsetReg()));

  assert(SuperReg != AMDGPU::M0 && "m0 should never spill");

  unsigned M0CopyReg = AMDGPU::NoRegister;

  unsigned EltSize = 4;
  const TargetRegisterClass *RC = getPhysRegClass(SuperReg);

  ArrayRef<int16_t> SplitParts = getRegSplitParts(RC, EltSize);
  unsigned NumSubRegs = SplitParts.empty() ? 1 : SplitParts.size();

  // Scavenged temporary VGPR to use. It must be scavenged once for any number
  // of spilled subregs.
  Register TmpVGPR;

  // SubReg carries the "Kill" flag when SubReg == SuperReg.
  unsigned SubKillState = getKillRegState((NumSubRegs == 1) && IsKill);
  for (unsigned i = 0, e = NumSubRegs; i < e; ++i) {
    Register SubReg =
        NumSubRegs == 1 ? SuperReg : getSubReg(SuperReg, SplitParts[i]);

    if (SpillToVGPR) {
      SIMachineFunctionInfo::SpilledReg Spill = VGPRSpills[i];

      // During SGPR spilling to VGPR, determine if the VGPR is defined. The
      // only circumstance in which we say it is undefined is when it is the
      // first spill to this VGPR in the first basic block.
      bool VGPRDefined = true;
      if (MBB == &MF->front())
        VGPRDefined = !SGPRSpillVGPRDefinedSet.insert(Spill.VGPR).second;

      // Mark the "old value of vgpr" input undef only if this is the first sgpr
      // spill to this specific vgpr in the first basic block.
      BuildMI(*MBB, MI, DL,
              TII->getMCOpcodeFromPseudo(AMDGPU::V_WRITELANE_B32),
              Spill.VGPR)
        .addReg(SubReg, getKillRegState(IsKill))
        .addImm(Spill.Lane)
        .addReg(Spill.VGPR, VGPRDefined ? 0 : RegState::Undef);

      // FIXME: Since this spills to another register instead of an actual
      // frame index, we should delete the frame index when all references to
      // it are fixed.
    } else {
      // XXX - Can the spill to VGPR fail for some subregisters but not others?
      if (OnlyToVGPR)
        return false;

      // Spill SGPR to a frame index.
      if (!TmpVGPR.isValid())
        TmpVGPR = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0);

      MachineInstrBuilder Mov
        = BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), TmpVGPR)
        .addReg(SubReg, SubKillState);

      // There could be undef components of a spilled super register.
      // TODO: Can we detect this and skip the spill?
      if (NumSubRegs > 1) {
        // The last implicit use of the SuperReg carries the "Kill" flag.
        unsigned SuperKillState = 0;
        if (i + 1 == e)
          SuperKillState |= getKillRegState(IsKill);
        Mov.addReg(SuperReg, RegState::Implicit | SuperKillState);
      }

      unsigned Align = FrameInfo.getObjectAlignment(Index);
      MachinePointerInfo PtrInfo
        = MachinePointerInfo::getFixedStack(*MF, Index, EltSize * i);
      MachineMemOperand *MMO
        = MF->getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore,
                                   EltSize, MinAlign(Align, EltSize * i));
      BuildMI(*MBB, MI, DL, TII->get(AMDGPU::SI_SPILL_V32_SAVE))
        .addReg(TmpVGPR, RegState::Kill)      // src
        .addFrameIndex(Index)                 // vaddr
        .addReg(MFI->getScratchRSrcReg())     // srsrc
        .addReg(MFI->getStackPtrOffsetReg())  // soffset
        .addImm(i * 4)                        // offset
        .addMemOperand(MMO);
    }
  }

  if (M0CopyReg != AMDGPU::NoRegister) {
    BuildMI(*MBB, MI, DL, TII->get(AMDGPU::COPY), AMDGPU::M0)
      .addReg(M0CopyReg, RegState::Kill);
  }

  MI->eraseFromParent();
  MFI->addToSpilledSGPRs(NumSubRegs);
  return true;
}

bool SIRegisterInfo::restoreSGPR(MachineBasicBlock::iterator MI,
                                 int Index,
                                 RegScavenger *RS,
                                 bool OnlyToVGPR) const {
  MachineFunction *MF = MI->getParent()->getParent();
  MachineBasicBlock *MBB = MI->getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();

  ArrayRef<SIMachineFunctionInfo::SpilledReg> VGPRSpills
    = MFI->getSGPRToVGPRSpills(Index);
  bool SpillToVGPR = !VGPRSpills.empty();
  if (OnlyToVGPR && !SpillToVGPR)
    return false;

  MachineFrameInfo &FrameInfo = MF->getFrameInfo();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const DebugLoc &DL = MI->getDebugLoc();

  Register SuperReg = MI->getOperand(0).getReg();

  assert(SuperReg != AMDGPU::M0 && "m0 should never spill");

  unsigned M0CopyReg = AMDGPU::NoRegister;

  unsigned EltSize = 4;

  const TargetRegisterClass *RC = getPhysRegClass(SuperReg);

  ArrayRef<int16_t> SplitParts = getRegSplitParts(RC, EltSize);
  unsigned NumSubRegs = SplitParts.empty() ? 1 : SplitParts.size();

  Register TmpVGPR;

  for (unsigned i = 0, e = NumSubRegs; i < e; ++i) {
    Register SubReg =
        NumSubRegs == 1 ? SuperReg : getSubReg(SuperReg, SplitParts[i]);

    if (SpillToVGPR) {
      SIMachineFunctionInfo::SpilledReg Spill = VGPRSpills[i];
      auto MIB =
        BuildMI(*MBB, MI, DL, TII->getMCOpcodeFromPseudo(AMDGPU::V_READLANE_B32),
                SubReg)
        .addReg(Spill.VGPR)
        .addImm(Spill.Lane);

      if (NumSubRegs > 1 && i == 0)
        MIB.addReg(SuperReg, RegState::ImplicitDefine);
    } else {
      if (OnlyToVGPR)
        return false;

      // Restore SGPR from a stack slot.
      // FIXME: We should use S_LOAD_DWORD here for VI.
      if (!TmpVGPR.isValid())
        TmpVGPR = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0);
      unsigned Align = FrameInfo.getObjectAlignment(Index);

      MachinePointerInfo PtrInfo
        = MachinePointerInfo::getFixedStack(*MF, Index, EltSize * i);

      MachineMemOperand *MMO = MF->getMachineMemOperand(PtrInfo,
        MachineMemOperand::MOLoad, EltSize,
        MinAlign(Align, EltSize * i));

      BuildMI(*MBB, MI, DL, TII->get(AMDGPU::SI_SPILL_V32_RESTORE), TmpVGPR)
        .addFrameIndex(Index)                 // vaddr
        .addReg(MFI->getScratchRSrcReg())     // srsrc
        .addReg(MFI->getStackPtrOffsetReg())  // soffset
        .addImm(i * 4)                        // offset
        .addMemOperand(MMO);

      auto MIB =
        BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), SubReg)
        .addReg(TmpVGPR, RegState::Kill);

      if (NumSubRegs > 1)
        MIB.addReg(MI->getOperand(0).getReg(), RegState::ImplicitDefine);
    }
  }

  if (M0CopyReg != AMDGPU::NoRegister) {
    BuildMI(*MBB, MI, DL, TII->get(AMDGPU::COPY), AMDGPU::M0)
      .addReg(M0CopyReg, RegState::Kill);
  }

  MI->eraseFromParent();
  return true;
}

/// Special case of eliminateFrameIndex. Returns true if the SGPR was spilled to
/// a VGPR and the stack slot can be safely eliminated when all other users are
/// handled.
bool SIRegisterInfo::eliminateSGPRToVGPRSpillFrameIndex(
  MachineBasicBlock::iterator MI,
  int FI,
  RegScavenger *RS) const {
  switch (MI->getOpcode()) {
  case AMDGPU::SI_SPILL_S1024_SAVE:
  case AMDGPU::SI_SPILL_S512_SAVE:
  case AMDGPU::SI_SPILL_S256_SAVE:
  case AMDGPU::SI_SPILL_S160_SAVE:
  case AMDGPU::SI_SPILL_S128_SAVE:
  case AMDGPU::SI_SPILL_S96_SAVE:
  case AMDGPU::SI_SPILL_S64_SAVE:
  case AMDGPU::SI_SPILL_S32_SAVE:
    return spillSGPR(MI, FI, RS, true);
  case AMDGPU::SI_SPILL_S1024_RESTORE:
  case AMDGPU::SI_SPILL_S512_RESTORE:
  case AMDGPU::SI_SPILL_S256_RESTORE:
  case AMDGPU::SI_SPILL_S160_RESTORE:
  case AMDGPU::SI_SPILL_S128_RESTORE:
  case AMDGPU::SI_SPILL_S96_RESTORE:
  case AMDGPU::SI_SPILL_S64_RESTORE:
  case AMDGPU::SI_SPILL_S32_RESTORE:
    return restoreSGPR(MI, FI, RS, true);
  default:
    llvm_unreachable("not an SGPR spill instruction");
  }
}

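// Rewrite the frame index in MI into a register or immediate offset,
// expanding spill/restore pseudos along the way.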
void SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
                                        int SPAdj, unsigned FIOperandNum,
                                        RegScavenger *RS) const {
  MachineFunction *MF = MI->getParent()->getParent();
  MachineBasicBlock *MBB = MI->getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo &FrameInfo = MF->getFrameInfo();
  const SIInstrInfo *TII = ST.getInstrInfo();
  DebugLoc DL = MI->getDebugLoc();

  assert(SPAdj == 0 && "unhandled SP adjustment in call sequence?");

  MachineOperand &FIOp = MI->getOperand(FIOperandNum);
  int Index = MI->getOperand(FIOperandNum).getIndex();

  Register FrameReg = getFrameRegister(*MF);

  switch (MI->getOpcode()) {
    // SGPR register spill
    case AMDGPU::SI_SPILL_S1024_SAVE:
    case AMDGPU::SI_SPILL_S512_SAVE:
    case AMDGPU::SI_SPILL_S256_SAVE:
    case AMDGPU::SI_SPILL_S160_SAVE:
    case AMDGPU::SI_SPILL_S128_SAVE:
    case AMDGPU::SI_SPILL_S96_SAVE:
    case AMDGPU::SI_SPILL_S64_SAVE:
    case AMDGPU::SI_SPILL_S32_SAVE: {
      spillSGPR(MI, Index, RS);
      break;
    }

    // SGPR register restore
    case AMDGPU::SI_SPILL_S1024_RESTORE:
    case AMDGPU::SI_SPILL_S512_RESTORE:
    case AMDGPU::SI_SPILL_S256_RESTORE:
    case AMDGPU::SI_SPILL_S160_RESTORE:
    case AMDGPU::SI_SPILL_S128_RESTORE:
    case AMDGPU::SI_SPILL_S96_RESTORE:
    case AMDGPU::SI_SPILL_S64_RESTORE:
    case AMDGPU::SI_SPILL_S32_RESTORE: {
      restoreSGPR(MI, Index, RS);
      break;
    }

    // VGPR register spill
    case AMDGPU::SI_SPILL_V1024_SAVE:
    case AMDGPU::SI_SPILL_V512_SAVE:
    case AMDGPU::SI_SPILL_V256_SAVE:
    case AMDGPU::SI_SPILL_V160_SAVE:
    case AMDGPU::SI_SPILL_V128_SAVE:
    case AMDGPU::SI_SPILL_V96_SAVE:
    case AMDGPU::SI_SPILL_V64_SAVE:
    case AMDGPU::SI_SPILL_V32_SAVE:
    case AMDGPU::SI_SPILL_A1024_SAVE:
    case AMDGPU::SI_SPILL_A512_SAVE:
    case AMDGPU::SI_SPILL_A128_SAVE:
    case AMDGPU::SI_SPILL_A64_SAVE:
    case AMDGPU::SI_SPILL_A32_SAVE: {
      const MachineOperand *VData = TII->getNamedOperand(*MI,
                                                         AMDGPU::OpName::vdata);
      assert(TII->getNamedOperand(*MI, AMDGPU::OpName::soffset)->getReg() ==
             MFI->getStackPtrOffsetReg());

      buildSpillLoadStore(MI, AMDGPU::BUFFER_STORE_DWORD_OFFSET,
            Index,
            VData->getReg(), VData->isKill(),
            TII->getNamedOperand(*MI, AMDGPU::OpName::srsrc)->getReg(),
            FrameReg,
            TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm(),
            *MI->memoperands_begin(),
            RS);
      MFI->addToSpilledVGPRs(getNumSubRegsForSpillOp(MI->getOpcode()));
      MI->eraseFromParent();
      break;
    }
    case AMDGPU::SI_SPILL_V32_RESTORE:
    case AMDGPU::SI_SPILL_V64_RESTORE:
    case AMDGPU::SI_SPILL_V96_RESTORE:
    case AMDGPU::SI_SPILL_V128_RESTORE:
    case AMDGPU::SI_SPILL_V160_RESTORE:
    case AMDGPU::SI_SPILL_V256_RESTORE:
    case AMDGPU::SI_SPILL_V512_RESTORE:
    case AMDGPU::SI_SPILL_V1024_RESTORE:
    case AMDGPU::SI_SPILL_A32_RESTORE:
    case AMDGPU::SI_SPILL_A64_RESTORE:
    case AMDGPU::SI_SPILL_A128_RESTORE:
    case AMDGPU::SI_SPILL_A512_RESTORE:
    case AMDGPU::SI_SPILL_A1024_RESTORE: {
      const MachineOperand *VData = TII->getNamedOperand(*MI,
                                                         AMDGPU::OpName::vdata);
      assert(TII->getNamedOperand(*MI, AMDGPU::OpName::soffset)->getReg() ==
             MFI->getStackPtrOffsetReg());

      buildSpillLoadStore(MI, AMDGPU::BUFFER_LOAD_DWORD_OFFSET,
            Index,
            VData->getReg(), VData->isKill(),
            TII->getNamedOperand(*MI, AMDGPU::OpName::srsrc)->getReg(),
            FrameReg,
            TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm(),
            *MI->memoperands_begin(),
            RS);
      MI->eraseFromParent();
      break;
    }

    default: {
      const DebugLoc &DL = MI->getDebugLoc();
      bool IsMUBUF = TII->isMUBUF(*MI);

      if (!IsMUBUF && !MFI->isEntryFunction()) {
        // Convert to an absolute stack address by finding the offset from the
        // scratch wave base and scaling by the wave size.
        //
        // In an entry function/kernel the offset is already the absolute
        // address relative to the frame register.

        Register TmpDiffReg =
          RS->scavengeRegister(&AMDGPU::SReg_32_XM0RegClass, MI, 0, false);

        // If there's no free SGPR, in-place modify the FP
        Register DiffReg = TmpDiffReg.isValid() ? TmpDiffReg : FrameReg;

        bool IsCopy = MI->getOpcode() == AMDGPU::V_MOV_B32_e32;
        Register ResultReg = IsCopy ?
          MI->getOperand(0).getReg() :
          RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0);

        BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_SUB_U32), DiffReg)
          .addReg(FrameReg)
          .addReg(MFI->getScratchWaveOffsetReg());

        int64_t Offset = FrameInfo.getObjectOffset(Index);
        if (Offset == 0) {
          // XXX - This never happens because of emergency scavenging slot at 0?
          BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_LSHRREV_B32_e64), ResultReg)
            .addImm(ST.getWavefrontSizeLog2())
            .addReg(DiffReg);
        } else {
          if (auto MIB = TII->getAddNoCarry(*MBB, MI, DL, ResultReg, *RS)) {
            Register ScaledReg =
              RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MIB, 0);

            BuildMI(*MBB, *MIB, DL, TII->get(AMDGPU::V_LSHRREV_B32_e64),
                    ScaledReg)
              .addImm(ST.getWavefrontSizeLog2())
              .addReg(DiffReg, RegState::Kill);

            const bool IsVOP2 = MIB->getOpcode() == AMDGPU::V_ADD_U32_e32;

            // TODO: Fold if use instruction is another add of a constant.
            if (IsVOP2 ||
                AMDGPU::isInlinableLiteral32(Offset, ST.hasInv2PiInlineImm())) {
              // FIXME: This can fail
              MIB.addImm(Offset);
              MIB.addReg(ScaledReg, RegState::Kill);
              if (!IsVOP2)
                MIB.addImm(0); // clamp bit
            } else {
              Register ConstOffsetReg =
                RS->scavengeRegister(&AMDGPU::SReg_32_XM0RegClass, MIB, 0, false);

              // This should always be able to use the unused carry out.
              assert(ConstOffsetReg && "this scavenge should not be able to fail");

              BuildMI(*MBB, *MIB, DL, TII->get(AMDGPU::S_MOV_B32), ConstOffsetReg)
                .addImm(Offset);
              MIB.addReg(ConstOffsetReg, RegState::Kill);
              MIB.addReg(ScaledReg, RegState::Kill);
              MIB.addImm(0); // clamp bit
            }
          } else {
            // We have to produce a carry out, and there isn't a free SGPR pair
            // for it. We can keep the whole computation on the SALU to avoid
            // clobbering an additional register at the cost of an extra mov.

            // We may have 1 free scratch SGPR even though a carry out is
            // unavailable. Only one additional mov is needed.
            Register TmpScaledReg =
              RS->scavengeRegister(&AMDGPU::SReg_32_XM0RegClass, MI, 0, false);
            Register ScaledReg = TmpScaledReg.isValid() ? TmpScaledReg : DiffReg;

            BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_LSHR_B32), ScaledReg)
              .addReg(DiffReg, RegState::Kill)
              .addImm(ST.getWavefrontSizeLog2());
            BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), ScaledReg)
              .addReg(ScaledReg, RegState::Kill)
              .addImm(Offset);
            BuildMI(*MBB, MI, DL, TII->get(AMDGPU::COPY), ResultReg)
              .addReg(ScaledReg, RegState::Kill);

            // If there were truly no free SGPRs, we need to undo everything.
            if (!TmpScaledReg.isValid()) {
              BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_SUB_U32), ScaledReg)
                .addReg(ScaledReg, RegState::Kill)
                .addImm(Offset);
              BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_LSHL_B32), ScaledReg)
                .addReg(DiffReg, RegState::Kill)
                .addImm(ST.getWavefrontSizeLog2());
            }
          }
        }

        if (!TmpDiffReg.isValid()) {
          // Restore the FP.
          BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), FrameReg)
            .addReg(FrameReg)
            .addReg(MFI->getScratchWaveOffsetReg());
        }

        // Don't introduce an extra copy if we're just materializing in a mov.
        if (IsCopy)
          MI->eraseFromParent();
        else
          FIOp.ChangeToRegister(ResultReg, false, false, true);
        return;
      }

      if (IsMUBUF) {
        // Disable offen so we don't need a 0 vgpr base.
        assert(static_cast<int>(FIOperandNum) ==
               AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                          AMDGPU::OpName::vaddr));

        assert(TII->getNamedOperand(*MI, AMDGPU::OpName::soffset)->getReg() ==
               MFI->getStackPtrOffsetReg());

        TII->getNamedOperand(*MI, AMDGPU::OpName::soffset)->setReg(FrameReg);

        int64_t Offset = FrameInfo.getObjectOffset(Index);
        int64_t OldImm
          = TII->getNamedOperand(*MI, AMDGPU::OpName::offset)->getImm();
        int64_t NewOffset = OldImm + Offset;

        if (isUInt<12>(NewOffset) &&
            buildMUBUFOffsetLoadStore(ST, FrameInfo, MI, Index, NewOffset)) {
          MI->eraseFromParent();
          return;
        }
      }

      // If the offset is simply too big, don't convert to a scratch wave offset
      // relative index.

      int64_t Offset = FrameInfo.getObjectOffset(Index);
      FIOp.ChangeToImmediate(Offset);
      if (!TII->isImmOperandLegal(*MI, FIOperandNum, FIOp)) {
        Register TmpReg = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, 0);
        BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_MOV_B32_e32), TmpReg)
          .addImm(Offset);
        FIOp.ChangeToRegister(TmpReg, false, false, true);
      }
    }
  }
}

StringRef SIRegisterInfo::getRegAsmName(unsigned Reg) const {
  return AMDGPUInstPrinter::getRegisterName(Reg);
}

// FIXME: This is very slow. It might be worth creating a map from physreg to
// register class.
const TargetRegisterClass *SIRegisterInfo::getPhysRegClass(unsigned Reg) const {
  assert(!Register::isVirtualRegister(Reg));

  static const TargetRegisterClass *const BaseClasses[] = {
    &AMDGPU::VGPR_32RegClass,
    &AMDGPU::SReg_32RegClass,
    &AMDGPU::AGPR_32RegClass,
    &AMDGPU::VReg_64RegClass,
    &AMDGPU::SReg_64RegClass,
    &AMDGPU::AReg_64RegClass,
    &AMDGPU::VReg_96RegClass,
    &AMDGPU::SReg_96RegClass,
    &AMDGPU::VReg_128RegClass,
    &AMDGPU::SReg_128RegClass,
    &AMDGPU::AReg_128RegClass,
    &AMDGPU::VReg_160RegClass,
    &AMDGPU::SReg_160RegClass,
    &AMDGPU::VReg_256RegClass,
    &AMDGPU::SReg_256RegClass,
    &AMDGPU::VReg_512RegClass,
    &AMDGPU::SReg_512RegClass,
    &AMDGPU::AReg_512RegClass,
    &AMDGPU::SReg_1024RegClass,
    &AMDGPU::VReg_1024RegClass,
    &AMDGPU::AReg_1024RegClass,
    &AMDGPU::SCC_CLASSRegClass,
    &AMDGPU::Pseudo_SReg_32RegClass,
    &AMDGPU::Pseudo_SReg_128RegClass,
  };

  for (const TargetRegisterClass *BaseClass : BaseClasses) {
    if (BaseClass->contains(Reg)) {
      return BaseClass;
    }
  }
  return nullptr;
}

// TODO: It might be helpful to have some target specific flags in
// TargetRegisterClass to mark which classes are VGPRs to make this trivial.
bool SIRegisterInfo::hasVGPRs(const TargetRegisterClass *RC) const {
  unsigned Size = getRegSizeInBits(*RC);
  switch (Size) {
  case 32:
    return getCommonSubClass(&AMDGPU::VGPR_32RegClass, RC) != nullptr;
  case 64:
    return getCommonSubClass(&AMDGPU::VReg_64RegClass, RC) != nullptr;
  case 96:
    return getCommonSubClass(&AMDGPU::VReg_96RegClass, RC) != nullptr;
  case 128:
    return getCommonSubClass(&AMDGPU::VReg_128RegClass, RC) != nullptr;
  case 160:
    return getCommonSubClass(&AMDGPU::VReg_160RegClass, RC) != nullptr;
  case 256:
    return getCommonSubClass(&AMDGPU::VReg_256RegClass, RC) != nullptr;
  case 512:
    return getCommonSubClass(&AMDGPU::VReg_512RegClass, RC) != nullptr;
  case 1024:
    return getCommonSubClass(&AMDGPU::VReg_1024RegClass, RC) != nullptr;
  case 1:
    return getCommonSubClass(&AMDGPU::VReg_1RegClass, RC) != nullptr;
  default:
    assert(Size < 32 && "Invalid register class size");
    return false;
  }
}

bool SIRegisterInfo::hasAGPRs(const TargetRegisterClass *RC) const {
  unsigned Size = getRegSizeInBits(*RC);
  if (Size < 32)
    return false;
  switch (Size) {
  case 32:
    return getCommonSubClass(&AMDGPU::AGPR_32RegClass, RC) != nullptr;
  case 64:
    return getCommonSubClass(&AMDGPU::AReg_64RegClass, RC) != nullptr;
  case 96:
    return false;
  case 128:
    return getCommonSubClass(&AMDGPU::AReg_128RegClass, RC) != nullptr;
  case 160:
  case 256:
    return false;
  case 512:
    return getCommonSubClass(&AMDGPU::AReg_512RegClass, RC) != nullptr;
  case 1024:
    return getCommonSubClass(&AMDGPU::AReg_1024RegClass, RC) != nullptr;
  default:
    llvm_unreachable("Invalid register class size");
  }
}

const TargetRegisterClass *SIRegisterInfo::getEquivalentVGPRClass(
                                         const TargetRegisterClass *SRC) const {
  switch (getRegSizeInBits(*SRC)) {
  case 32:
    return &AMDGPU::VGPR_32RegClass;
  case 64:
    return &AMDGPU::VReg_64RegClass;
  case 96:
    return &AMDGPU::VReg_96RegClass;
  case 128:
    return &AMDGPU::VReg_128RegClass;
  case 160:
    return &AMDGPU::VReg_160RegClass;
  case 256:
    return &AMDGPU::VReg_256RegClass;
  case 512:
    return &AMDGPU::VReg_512RegClass;
  case 1024:
    return &AMDGPU::VReg_1024RegClass;
  case 1:
    return &AMDGPU::VReg_1RegClass;
  default:
    llvm_unreachable("Invalid register class size");
  }
}

const TargetRegisterClass *SIRegisterInfo::getEquivalentAGPRClass(
                                         const TargetRegisterClass *SRC) const {
  switch (getRegSizeInBits(*SRC)) {
  case 32:
    return &AMDGPU::AGPR_32RegClass;
  case 64:
    return &AMDGPU::AReg_64RegClass;
  case 128:
    return &AMDGPU::AReg_128RegClass;
  case 512:
    return &AMDGPU::AReg_512RegClass;
  case 1024:
    return &AMDGPU::AReg_1024RegClass;
  default:
    llvm_unreachable("Invalid register class size");
  }
}

const TargetRegisterClass *SIRegisterInfo::getEquivalentSGPRClass(
                                         const TargetRegisterClass *VRC) const {
  switch (getRegSizeInBits(*VRC)) {
  case 32:
    return &AMDGPU::SGPR_32RegClass;
  case 64:
    return &AMDGPU::SReg_64RegClass;
  case 96:
    return &AMDGPU::SReg_96RegClass;
  case 128:
    return &AMDGPU::SGPR_128RegClass;
  case 160:
    return &AMDGPU::SReg_160RegClass;
  case 256:
    return &AMDGPU::SReg_256RegClass;
  case 512:
    return &AMDGPU::SReg_512RegClass;
  case 1024:
    return &AMDGPU::SReg_1024RegClass;
  default:
    llvm_unreachable("Invalid register class size");
  }
}

const TargetRegisterClass *SIRegisterInfo::getSubRegClass(
                         const TargetRegisterClass *RC, unsigned SubIdx) const {
  if (SubIdx == AMDGPU::NoSubRegister)
    return RC;

  // We can assume that each lane corresponds to one 32-bit register.
  unsigned Count = getSubRegIndexLaneMask(SubIdx).getNumLanes();
  if (isSGPRClass(RC)) {
    switch (Count) {
    case 1:
      return &AMDGPU::SGPR_32RegClass;
    case 2:
      return &AMDGPU::SReg_64RegClass;
    case 3:
      return &AMDGPU::SReg_96RegClass;
    case 4:
      return &AMDGPU::SGPR_128RegClass;
    case 5:
      return &AMDGPU::SReg_160RegClass;
    case 8:
      return &AMDGPU::SReg_256RegClass;
    case 16:
      return &AMDGPU::SReg_512RegClass;
    case 32: /* fall-through */
    default:
      llvm_unreachable("Invalid sub-register class size");
    }
  } else if (hasAGPRs(RC)) {
    switch (Count) {
    case 1:
      return &AMDGPU::AGPR_32RegClass;
    case 2:
      return &AMDGPU::AReg_64RegClass;
    case 4:
      return &AMDGPU::AReg_128RegClass;
    case 16:
      return &AMDGPU::AReg_512RegClass;
    case 32: /* fall-through */
    default:
      llvm_unreachable("Invalid sub-register class size");
    }
  } else {
    switch (Count) {
    case 1:
      return &AMDGPU::VGPR_32RegClass;
    case 2:
      return &AMDGPU::VReg_64RegClass;
    case 3:
      return &AMDGPU::VReg_96RegClass;
    case 4:
      return &AMDGPU::VReg_128RegClass;
    case 5:
      return &AMDGPU::VReg_160RegClass;
    case 8:
      return &AMDGPU::VReg_256RegClass;
    case 16:
      return &AMDGPU::VReg_512RegClass;
    case 32: /* fall-through */
    default:
      llvm_unreachable("Invalid sub-register class size");
    }
  }
}

bool SIRegisterInfo::opCanUseInlineConstant(unsigned OpType) const {
  if (OpType >= AMDGPU::OPERAND_REG_INLINE_AC_FIRST &&
      OpType <= AMDGPU::OPERAND_REG_INLINE_AC_LAST)
    return !ST.hasMFMAInlineLiteralBug();

  return OpType >= AMDGPU::OPERAND_SRC_FIRST &&
         OpType <= AMDGPU::OPERAND_SRC_LAST;
}

bool SIRegisterInfo::shouldRewriteCopySrc(
  const TargetRegisterClass *DefRC,
  unsigned DefSubReg,
  const TargetRegisterClass *SrcRC,
  unsigned SrcSubReg) const {
  // We want to prefer the smallest register class possible, so we don't want to
  // stop and rewrite on anything that looks like a subregister
  // extract. Operations mostly don't care about the super register class, so we
  // only want to stop on the most basic of copies between the same register
  // class.
  //
  // e.g. if we have something like
  // %0 = ...
  // %1 = ...
  // %2 = REG_SEQUENCE %0, sub0, %1, sub1, %2, sub2
  // %3 = COPY %2, sub0
  //
  // We want to look through the COPY to find:
  //  => %3 = COPY %0

  // Plain copy.
  return getCommonSubClass(DefRC, SrcRC) != nullptr;
}

/// Returns a register that is not used at any point in the function.
/// If all registers are used, then this function will return
/// AMDGPU::NoRegister.
unsigned
SIRegisterInfo::findUnusedRegister(const MachineRegisterInfo &MRI,
                                   const TargetRegisterClass *RC,
                                   const MachineFunction &MF) const {
  for (unsigned Reg : *RC)
    if (MRI.isAllocatable(Reg) && !MRI.isPhysRegUsed(Reg))
      return Reg;
  return AMDGPU::NoRegister;
}

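// Returns the subregister indices that split a register of class \p RC into
// EltSize-byte pieces; empty if the register is a single element.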
ArrayRef<int16_t> SIRegisterInfo::getRegSplitParts(const TargetRegisterClass *RC,
                                                   unsigned EltSize) const {
  if (EltSize == 4) {
    static const int16_t Sub0_31[] = {
      AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
      AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7,
      AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11,
      AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15,
      AMDGPU::sub16, AMDGPU::sub17, AMDGPU::sub18, AMDGPU::sub19,
      AMDGPU::sub20, AMDGPU::sub21, AMDGPU::sub22, AMDGPU::sub23,
      AMDGPU::sub24, AMDGPU::sub25, AMDGPU::sub26, AMDGPU::sub27,
      AMDGPU::sub28, AMDGPU::sub29, AMDGPU::sub30, AMDGPU::sub31,
    };

    static const int16_t Sub0_15[] = {
      AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
      AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7,
      AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11,
      AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15,
    };

    static const int16_t Sub0_7[] = {
      AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
      AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7,
    };

    static const int16_t Sub0_4[] = {
      AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3, AMDGPU::sub4,
    };

    static const int16_t Sub0_3[] = {
      AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
    };

    static const int16_t Sub0_2[] = {
      AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2,
    };

    static const int16_t Sub0_1[] = {
      AMDGPU::sub0, AMDGPU::sub1,
    };

    switch (AMDGPU::getRegBitWidth(*RC->MC)) {
    case 32:
      return {};
    case 64:
      return makeArrayRef(Sub0_1);
    case 96:
      return makeArrayRef(Sub0_2);
    case 128:
      return makeArrayRef(Sub0_3);
    case 160:
      return makeArrayRef(Sub0_4);
    case 256:
      return makeArrayRef(Sub0_7);
    case 512:
      return makeArrayRef(Sub0_15);
    case 1024:
      return makeArrayRef(Sub0_31);
    default:
      llvm_unreachable("unhandled register size");
    }
  }

  if (EltSize == 8) {
    static const int16_t Sub0_31_64[] = {
      AMDGPU::sub0_sub1, AMDGPU::sub2_sub3,
      AMDGPU::sub4_sub5, AMDGPU::sub6_sub7,
      AMDGPU::sub8_sub9, AMDGPU::sub10_sub11,
      AMDGPU::sub12_sub13, AMDGPU::sub14_sub15,
      AMDGPU::sub16_sub17, AMDGPU::sub18_sub19,
      AMDGPU::sub20_sub21, AMDGPU::sub22_sub23,
      AMDGPU::sub24_sub25, AMDGPU::sub26_sub27,
      AMDGPU::sub28_sub29, AMDGPU::sub30_sub31
    };

    static const int16_t Sub0_15_64[] = {
      AMDGPU::sub0_sub1, AMDGPU::sub2_sub3,
      AMDGPU::sub4_sub5, AMDGPU::sub6_sub7,
      AMDGPU::sub8_sub9, AMDGPU::sub10_sub11,
      AMDGPU::sub12_sub13, AMDGPU::sub14_sub15
    };

    static const int16_t Sub0_7_64[] = {
      AMDGPU::sub0_sub1, AMDGPU::sub2_sub3,
      AMDGPU::sub4_sub5, AMDGPU::sub6_sub7
    };

    static const int16_t Sub0_3_64[] = {
      AMDGPU::sub0_sub1, AMDGPU::sub2_sub3
    };

    switch (AMDGPU::getRegBitWidth(*RC->MC)) {
    case 64:
      return {};
    case 128:
      return makeArrayRef(Sub0_3_64);
    case 256:
      return makeArrayRef(Sub0_7_64);
    case 512:
      return makeArrayRef(Sub0_15_64);
    case 1024:
      return makeArrayRef(Sub0_31_64);
    default:
      llvm_unreachable("unhandled register size");
    }
  }

  if (EltSize == 16) {
    static const int16_t Sub0_31_128[] = {
      AMDGPU::sub0_sub1_sub2_sub3,
      AMDGPU::sub4_sub5_sub6_sub7,
      AMDGPU::sub8_sub9_sub10_sub11,
      AMDGPU::sub12_sub13_sub14_sub15,
      AMDGPU::sub16_sub17_sub18_sub19,
      AMDGPU::sub20_sub21_sub22_sub23,
      AMDGPU::sub24_sub25_sub26_sub27,
      AMDGPU::sub28_sub29_sub30_sub31
    };

    static const int16_t Sub0_15_128[] = {
      AMDGPU::sub0_sub1_sub2_sub3,
      AMDGPU::sub4_sub5_sub6_sub7,
      AMDGPU::sub8_sub9_sub10_sub11,
      AMDGPU::sub12_sub13_sub14_sub15
    };

    static const int16_t Sub0_7_128[] = {
      AMDGPU::sub0_sub1_sub2_sub3,
      AMDGPU::sub4_sub5_sub6_sub7
    };

    switch (AMDGPU::getRegBitWidth(*RC->MC)) {
    case 128:
      return {};
    case 256:
      return makeArrayRef(Sub0_7_128);
    case 512:
      return makeArrayRef(Sub0_15_128);
    case 1024:
      return makeArrayRef(Sub0_31_128);
    default:
      llvm_unreachable("unhandled register size");
    }
  }

  assert(EltSize == 32 && "unhandled elt size");

  static const int16_t Sub0_31_256[] = {
    AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7,
    AMDGPU::sub8_sub9_sub10_sub11_sub12_sub13_sub14_sub15,
    AMDGPU::sub16_sub17_sub18_sub19_sub20_sub21_sub22_sub23,
    AMDGPU::sub24_sub25_sub26_sub27_sub28_sub29_sub30_sub31
  };

  static const int16_t Sub0_15_256[] = {
    AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7,
    AMDGPU::sub8_sub9_sub10_sub11_sub12_sub13_sub14_sub15
  };

  switch (AMDGPU::getRegBitWidth(*RC->MC)) {
  case 256:
    return {};
  case 512:
    return makeArrayRef(Sub0_15_256);
  case 1024:
    return makeArrayRef(Sub0_31_256);
  default:
    llvm_unreachable("unhandled register size");
  }
}

const TargetRegisterClass *
SIRegisterInfo::getRegClassForReg(const MachineRegisterInfo &MRI,
                                  unsigned Reg) const {
  if (Register::isVirtualRegister(Reg))
    return MRI.getRegClass(Reg);

  return getPhysRegClass(Reg);
}

bool SIRegisterInfo::isVGPR(const MachineRegisterInfo &MRI,
                            unsigned Reg) const {
  const TargetRegisterClass *RC = getRegClassForReg(MRI, Reg);
  assert(RC && "Register class for the reg not found");
  return hasVGPRs(RC);
}

bool SIRegisterInfo::isAGPR(const MachineRegisterInfo &MRI,
                            unsigned Reg) const {
  const TargetRegisterClass *RC = getRegClassForReg(MRI, Reg);
  assert(RC && "Register class for the reg not found");
  return hasAGPRs(RC);
}

bool SIRegisterInfo::shouldCoalesce(MachineInstr *MI,
                                    const TargetRegisterClass *SrcRC,
                                    unsigned SubReg,
                                    const TargetRegisterClass *DstRC,
                                    unsigned DstSubReg,
                                    const TargetRegisterClass *NewRC,
                                    LiveIntervals &LIS) const {
  unsigned SrcSize = getRegSizeInBits(*SrcRC);
  unsigned DstSize = getRegSizeInBits(*DstRC);
  unsigned NewSize = getRegSizeInBits(*NewRC);

  // Do not increase size of registers beyond dword, we would need to allocate
  // adjacent registers and constrain regalloc more than needed.

  // Always allow dword coalescing.
  if (SrcSize <= 32 || DstSize <= 32)
    return true;

  return NewSize <= DstSize || NewSize <= SrcSize;
}

unsigned SIRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                             MachineFunction &MF) const {
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  unsigned Occupancy = ST.getOccupancyWithLocalMemSize(MFI->getLDSSize(),
                                                       MF.getFunction());
  switch (RC->getID()) {
  default:
    return AMDGPURegisterInfo::getRegPressureLimit(RC, MF);
  case AMDGPU::VGPR_32RegClassID:
    return std::min(ST.getMaxNumVGPRs(Occupancy), ST.getMaxNumVGPRs(MF));
  case AMDGPU::SGPR_32RegClassID:
    return std::min(ST.getMaxNumSGPRs(Occupancy, true), ST.getMaxNumSGPRs(MF));
  }
}

unsigned SIRegisterInfo::getRegPressureSetLimit(const MachineFunction &MF,
                                                unsigned Idx) const {
  if (Idx == getVGPRPressureSet() || Idx == getAGPRPressureSet())
    return getRegPressureLimit(&AMDGPU::VGPR_32RegClass,
                               const_cast<MachineFunction &>(MF));

  if (Idx == getSGPRPressureSet())
    return getRegPressureLimit(&AMDGPU::SGPR_32RegClass,
                               const_cast<MachineFunction &>(MF));

  return AMDGPURegisterInfo::getRegPressureSetLimit(MF, Idx);
}

const int *SIRegisterInfo::getRegUnitPressureSets(unsigned RegUnit) const {
  static const int Empty[] = { -1 };

  if (hasRegUnit(AMDGPU::M0, RegUnit))
    return Empty;
  return AMDGPURegisterInfo::getRegUnitPressureSets(RegUnit);
}

unsigned SIRegisterInfo::getReturnAddressReg(const MachineFunction &MF) const {
  // Not a callee saved register.
  return AMDGPU::SGPR30_SGPR31;
}

const TargetRegisterClass *
SIRegisterInfo::getRegClassForSizeOnBank(unsigned Size,
                                         const RegisterBank &RB,
                                         const MachineRegisterInfo &MRI) const {
  switch (Size) {
  case 1: {
    switch (RB.getID()) {
    case AMDGPU::VGPRRegBankID:
      return &AMDGPU::VGPR_32RegClass;
    case AMDGPU::VCCRegBankID:
      return isWave32 ?
        &AMDGPU::SReg_32_XM0_XEXECRegClass : &AMDGPU::SReg_64_XEXECRegClass;
    case AMDGPU::SGPRRegBankID:
      return &AMDGPU::SReg_32RegClass;
    case AMDGPU::SCCRegBankID:
      // This needs to return an allocatable class, so don't bother returning
      // the dummy SCC class.
      //
      // FIXME: This is a grotesque hack. We use SGPR_32 as an indication this
      // was not a VCC bank value since we use the larger class SReg_32 for
      // other values. These should all use SReg_32.
      return &AMDGPU::SGPR_32RegClass;
    default:
      llvm_unreachable("unknown register bank");
    }
  }
  case 32:
    return RB.getID() == AMDGPU::VGPRRegBankID ? &AMDGPU::VGPR_32RegClass :
                                                 &AMDGPU::SReg_32RegClass;
  case 64:
    return RB.getID() == AMDGPU::VGPRRegBankID ? &AMDGPU::VReg_64RegClass :
                                                 &AMDGPU::SReg_64_XEXECRegClass;
  case 96:
    return RB.getID() == AMDGPU::VGPRRegBankID ? &AMDGPU::VReg_96RegClass :
                                                 &AMDGPU::SReg_96RegClass;
  case 128:
    return RB.getID() == AMDGPU::VGPRRegBankID ? &AMDGPU::VReg_128RegClass :
                                                 &AMDGPU::SGPR_128RegClass;
  case 160:
    return RB.getID() == AMDGPU::VGPRRegBankID ? &AMDGPU::VReg_160RegClass :
                                                 &AMDGPU::SReg_160RegClass;
  case 256:
    return RB.getID() == AMDGPU::VGPRRegBankID ? &AMDGPU::VReg_256RegClass :
                                                 &AMDGPU::SReg_256RegClass;
  case 512:
    return RB.getID() == AMDGPU::VGPRRegBankID ? &AMDGPU::VReg_512RegClass :
                                                 &AMDGPU::SReg_512RegClass;
  case 1024:
    return RB.getID() == AMDGPU::VGPRRegBankID ? &AMDGPU::VReg_1024RegClass :
                                                 &AMDGPU::SReg_1024RegClass;
  default:
    if (Size < 32)
      return RB.getID() == AMDGPU::VGPRRegBankID ? &AMDGPU::VGPR_32RegClass :
                                                   &AMDGPU::SReg_32RegClass;
    return nullptr;
  }
}

const TargetRegisterClass *
SIRegisterInfo::getConstrainedRegClassForOperand(const MachineOperand &MO,
                                         const MachineRegisterInfo &MRI) const {
  const RegClassOrRegBank &RCOrRB = MRI.getRegClassOrRegBank(MO.getReg());
  if (const RegisterBank *RB = RCOrRB.dyn_cast<const RegisterBank *>())
    return getRegClassForTypeOnBank(MRI.getType(MO.getReg()), *RB, MRI);

  const TargetRegisterClass *RC = RCOrRB.get<const TargetRegisterClass *>();
  return getAllocatableClass(RC);
}

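// VCC is addressed as the 32-bit VCC_LO in wave32 mode, and as the full
// 64-bit VCC register in wave64.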
unsigned SIRegisterInfo::getVCC() const {
  return isWave32 ? AMDGPU::VCC_LO : AMDGPU::VCC;
}

const TargetRegisterClass *
SIRegisterInfo::getRegClass(unsigned RCID) const {
  switch ((int)RCID) {
  case AMDGPU::SReg_1RegClassID:
    return getBoolRC();
  case AMDGPU::SReg_1_XEXECRegClassID:
    return isWave32 ? &AMDGPU::SReg_32_XM0_XEXECRegClass
      : &AMDGPU::SReg_64_XEXECRegClass;
  case -1:
    return nullptr;
  default:
    return AMDGPURegisterInfo::getRegClass(RCID);
  }
}

// Find reaching register definition.
MachineInstr *SIRegisterInfo::findReachingDef(unsigned Reg, unsigned SubReg,
                                              MachineInstr &Use,
                                              MachineRegisterInfo &MRI,
                                              LiveIntervals *LIS) const {
  auto &MDT = LIS->getAnalysis<MachineDominatorTree>();
  SlotIndex UseIdx = LIS->getInstructionIndex(Use);
  SlotIndex DefIdx;

  if (Register::isVirtualRegister(Reg)) {
    if (!LIS->hasInterval(Reg))
      return nullptr;
    LiveInterval &LI = LIS->getInterval(Reg);
    LaneBitmask SubLanes = SubReg ? getSubRegIndexLaneMask(SubReg)
                                  : MRI.getMaxLaneMaskForVReg(Reg);
    VNInfo *V = nullptr;
    if (LI.hasSubRanges()) {
      for (auto &S : LI.subranges()) {
        if ((S.LaneMask & SubLanes) == SubLanes) {
          V = S.getVNInfoAt(UseIdx);
          break;
        }
      }
    } else {
      V = LI.getVNInfoAt(UseIdx);
    }
    if (!V)
      return nullptr;
    DefIdx = V->def;
  } else {
    // Find last def.
    for (MCRegUnitIterator Units(Reg, this); Units.isValid(); ++Units) {
      LiveRange &LR = LIS->getRegUnit(*Units);
      if (VNInfo *V = LR.getVNInfoAt(UseIdx)) {
        if (!DefIdx.isValid() ||
            MDT.dominates(LIS->getInstructionFromIndex(DefIdx),
                          LIS->getInstructionFromIndex(V->def)))
          DefIdx = V->def;
      } else {
        return nullptr;
      }
    }
  }

  MachineInstr *Def = LIS->getInstructionFromIndex(DefIdx);

  if (!Def || !MDT.dominates(Def, &Use))
    return nullptr;

  assert(Def->modifiesRegister(Reg, this));

  return Def;
}