//===- AMDGPUInstructionSelector.cpp ----------------------------*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// This file implements the targeting of the InstructionSelector class for
/// AMDGPU.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "AMDGPUInstructionSelector.h"
#include "AMDGPUInstrInfo.h"
#include "AMDGPURegisterBankInfo.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "amdgpu-isel"

using namespace llvm;

#define GET_GLOBALISEL_IMPL
#include "AMDGPUGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

AMDGPUInstructionSelector::AMDGPUInstructionSelector(
    const SISubtarget &STI, const AMDGPURegisterBankInfo &RBI,
    const AMDGPUTargetMachine &TM)
    : InstructionSelector(), TII(*STI.getInstrInfo()),
      TRI(*STI.getRegisterInfo()), RBI(RBI), TM(TM),
      STI(STI),
      EnableLateStructurizeCFG(AMDGPUTargetMachine::EnableLateStructurizeCFG),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "AMDGPUGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "AMDGPUGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
      ,AMDGPUASI(STI.getAMDGPUAS())
{
}

const char *AMDGPUInstructionSelector::getName() { return DEBUG_TYPE; }

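// Lower a generic copy (also used for G_BITCAST) by rewriting it to
// TargetOpcode::COPY and constraining each virtual register operand to a
// compatible register class.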
bool AMDGPUInstructionSelector::selectCOPY(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  I.setDesc(TII.get(TargetOpcode::COPY));
  for (const MachineOperand &MO : I.operands()) {
    if (TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
      continue;

    const TargetRegisterClass *RC =
            TRI.getConstrainedRegClassForOperand(MO, MRI);
    if (!RC)
      continue;
    RBI.constrainGenericRegister(MO.getReg(), *RC, MRI);
  }
  return true;
}

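// Extract one 32-bit half (sub0 or sub1) of a 64-bit operand. Register
// operands are copied into a fresh SGPR_32 through the composed subregister
// index; immediate operands are split into their low or high 32 bits.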
MachineOperand
AMDGPUInstructionSelector::getSubOperand64(MachineOperand &MO,
                                           unsigned SubIdx) const {

  MachineInstr *MI = MO.getParent();
  MachineBasicBlock *BB = MO.getParent()->getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  unsigned DstReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);

  if (MO.isReg()) {
    unsigned ComposedSubIdx = TRI.composeSubRegIndices(MO.getSubReg(), SubIdx);
    unsigned Reg = MO.getReg();
    BuildMI(*BB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), DstReg)
            .addReg(Reg, 0, ComposedSubIdx);

    return MachineOperand::CreateReg(DstReg, MO.isDef(), MO.isImplicit(),
                                     MO.isKill(), MO.isDead(), MO.isUndef(),
                                     MO.isEarlyClobber(), 0, MO.isDebug(),
                                     MO.isInternalRead());
  }

  assert(MO.isImm());

  APInt Imm(64, MO.getImm());

  switch (SubIdx) {
  default:
    llvm_unreachable("do not know to split immediate with this sub index.");
  case AMDGPU::sub0:
    return MachineOperand::CreateImm(Imm.getLoBits(32).getSExtValue());
  case AMDGPU::sub1:
    return MachineOperand::CreateImm(Imm.getHiBits(32).getSExtValue());
  }
}

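// Select a 64-bit scalar G_ADD: split both sources into 32-bit halves, emit
// S_ADD_U32 / S_ADDC_U32 for the low and high parts, and rebuild the 64-bit
// result with a REG_SEQUENCE. Only 64-bit adds are handled here; other sizes
// are rejected.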
bool AMDGPUInstructionSelector::selectG_ADD(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  unsigned Size = RBI.getSizeInBits(I.getOperand(0).getReg(), MRI, TRI);
  unsigned DstLo = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
  unsigned DstHi = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);

  if (Size != 64)
    return false;

  DebugLoc DL = I.getDebugLoc();

  MachineOperand Lo1(getSubOperand64(I.getOperand(1), AMDGPU::sub0));
  MachineOperand Lo2(getSubOperand64(I.getOperand(2), AMDGPU::sub0));

  BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_U32), DstLo)
          .add(Lo1)
          .add(Lo2);

  MachineOperand Hi1(getSubOperand64(I.getOperand(1), AMDGPU::sub1));
  MachineOperand Hi2(getSubOperand64(I.getOperand(2), AMDGPU::sub1));

  BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADDC_U32), DstHi)
          .add(Hi1)
          .add(Hi2);

  BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), I.getOperand(0).getReg())
          .addReg(DstLo)
          .addImm(AMDGPU::sub0)
          .addReg(DstHi)
          .addImm(AMDGPU::sub1);

  for (MachineOperand &MO : I.explicit_operands()) {
    if (!MO.isReg() || TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
      continue;
    RBI.constrainGenericRegister(MO.getReg(), AMDGPU::SReg_64RegClass, MRI);
  }

  I.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectG_GEP(MachineInstr &I) const {
  return selectG_ADD(I);
}

bool AMDGPUInstructionSelector::selectG_IMPLICIT_DEF(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const MachineOperand &MO = I.getOperand(0);
  const TargetRegisterClass *RC =
      TRI.getConstrainedRegClassForOperand(MO, MRI);
  if (RC)
    RBI.constrainGenericRegister(MO.getReg(), *RC, MRI);
  I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
  return true;
}

bool AMDGPUInstructionSelector::selectG_INTRINSIC(MachineInstr &I,
                                          CodeGenCoverage &CoverageInfo) const {
  unsigned IntrinsicID = I.getOperand(1).getIntrinsicID();

  switch (IntrinsicID) {
  default:
    break;
  case Intrinsic::amdgcn_cvt_pkrtz:
    return selectImpl(I, CoverageInfo);
  }
  return false;
}

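// Select G_STORE as a FLAT store sized by the width of the stored register;
// picking a store based on the address space is still a FIXME below.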
bool AMDGPUInstructionSelector::selectG_STORE(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  DebugLoc DL = I.getDebugLoc();
  unsigned StoreSize = RBI.getSizeInBits(I.getOperand(0).getReg(), MRI, TRI);
  unsigned Opcode;

  // FIXME: Select store instruction based on address space
  switch (StoreSize) {
  default:
    return false;
  case 32:
    Opcode = AMDGPU::FLAT_STORE_DWORD;
    break;
  case 64:
    Opcode = AMDGPU::FLAT_STORE_DWORDX2;
    break;
  case 96:
    Opcode = AMDGPU::FLAT_STORE_DWORDX3;
    break;
  case 128:
    Opcode = AMDGPU::FLAT_STORE_DWORDX4;
    break;
  }

  MachineInstr *Flat = BuildMI(*BB, &I, DL, TII.get(Opcode))
          .add(I.getOperand(1))
          .add(I.getOperand(0))
          .addImm(0)  // offset
          .addImm(0)  // glc
          .addImm(0); // slc

  // Now that we selected an opcode, we need to constrain the register
  // operands to use appropriate classes.
  bool Ret = constrainSelectedInstRegOperands(*Flat, TII, TRI, RBI);

  I.eraseFromParent();
  return Ret;
}

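// Materialize G_CONSTANT / G_FCONSTANT. FP and ConstantInt operands are first
// turned into plain immediates. 32-bit values become a single S_MOV_B32 or
// V_MOV_B32_e32; 64-bit values are built from two 32-bit moves combined with
// a REG_SEQUENCE.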
bool AMDGPUInstructionSelector::selectG_CONSTANT(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  MachineOperand &ImmOp = I.getOperand(1);

  // The AMDGPU backend only supports Imm operands and not CImm or FPImm.
  if (ImmOp.isFPImm()) {
    const APInt &Imm = ImmOp.getFPImm()->getValueAPF().bitcastToAPInt();
    ImmOp.ChangeToImmediate(Imm.getZExtValue());
  } else if (ImmOp.isCImm()) {
    ImmOp.ChangeToImmediate(ImmOp.getCImm()->getZExtValue());
  }

  unsigned DstReg = I.getOperand(0).getReg();
  unsigned Size;
  bool IsSgpr;
  const RegisterBank *RB = MRI.getRegBankOrNull(I.getOperand(0).getReg());
  if (RB) {
    IsSgpr = RB->getID() == AMDGPU::SGPRRegBankID;
    Size = MRI.getType(DstReg).getSizeInBits();
  } else {
    const TargetRegisterClass *RC = TRI.getRegClassForReg(MRI, DstReg);
    IsSgpr = TRI.isSGPRClass(RC);
    Size = TRI.getRegSizeInBits(*RC);
  }

  if (Size != 32 && Size != 64)
    return false;

  unsigned Opcode = IsSgpr ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
  if (Size == 32) {
    I.setDesc(TII.get(Opcode));
    I.addImplicitDefUseOperands(*MF);
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  DebugLoc DL = I.getDebugLoc();
  const TargetRegisterClass *RC = IsSgpr ? &AMDGPU::SReg_32_XM0RegClass :
                                           &AMDGPU::VGPR_32RegClass;
  unsigned LoReg = MRI.createVirtualRegister(RC);
  unsigned HiReg = MRI.createVirtualRegister(RC);
  const APInt &Imm = APInt(Size, I.getOperand(1).getImm());

  BuildMI(*BB, &I, DL, TII.get(Opcode), LoReg)
          .addImm(Imm.trunc(32).getZExtValue());

  BuildMI(*BB, &I, DL, TII.get(Opcode), HiReg)
          .addImm(Imm.ashr(32).getZExtValue());

  const MachineInstr *RS =
      BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
              .addReg(LoReg)
              .addImm(AMDGPU::sub0)
              .addReg(HiReg)
              .addImm(AMDGPU::sub1);

  // We can't call constrainSelectedInstRegOperands here, because it doesn't
  // work for target independent opcodes
  I.eraseFromParent();
  const TargetRegisterClass *DstRC =
      TRI.getConstrainedRegClassForOperand(RS->getOperand(0), MRI);
  if (!DstRC)
    return true;
  return RBI.constrainGenericRegister(DstReg, *DstRC, MRI);
}

static bool isConstant(const MachineInstr &MI) {
  return MI.getOpcode() == TargetOpcode::G_CONSTANT;
}

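// Walk the chain of G_GEPs feeding a load's address operand and record, for
// each GEP, its constant offset plus which address pieces live in SGPRs and
// which in VGPRs. selectSMRD uses this to decide whether a scalar addressing
// mode applies.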
void AMDGPUInstructionSelector::getAddrModeInfo(const MachineInstr &Load,
    const MachineRegisterInfo &MRI, SmallVectorImpl<GEPInfo> &AddrInfo) const {

  const MachineInstr *PtrMI = MRI.getUniqueVRegDef(Load.getOperand(1).getReg());

  assert(PtrMI);

  if (PtrMI->getOpcode() != TargetOpcode::G_GEP)
    return;

  GEPInfo GEPInfo(*PtrMI);

  for (unsigned i = 1, e = 3; i < e; ++i) {
    const MachineOperand &GEPOp = PtrMI->getOperand(i);
    const MachineInstr *OpDef = MRI.getUniqueVRegDef(GEPOp.getReg());
    assert(OpDef);
    if (isConstant(*OpDef)) {
      // FIXME: Is it possible to have multiple Imm parts? Maybe if we
      // are lacking other optimizations.
      assert(GEPInfo.Imm == 0);
      GEPInfo.Imm = OpDef->getOperand(1).getCImm()->getSExtValue();
      continue;
    }

    const RegisterBank *OpBank = RBI.getRegBank(GEPOp.getReg(), MRI, TRI);
    if (OpBank->getID() == AMDGPU::SGPRRegBankID)
      GEPInfo.SgprParts.push_back(GEPOp.getReg());
    else
      GEPInfo.VgprParts.push_back(GEPOp.getReg());
  }

  AddrInfo.push_back(GEPInfo);
  getAddrModeInfo(*PtrMI, MRI, AddrInfo);
}

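// Conservatively decide whether a memory access has a uniform (scalar)
// address: kernel-argument, constant and global pointers are uniform, as are
// accesses annotated with !amdgpu.uniform metadata.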
static bool isInstrUniform(const MachineInstr &MI) {
  if (!MI.hasOneMemOperand())
    return false;

  const MachineMemOperand *MMO = *MI.memoperands_begin();
  const Value *Ptr = MMO->getValue();

  // UndefValue means this is a load of a kernel input. These are uniform.
  // Sometimes LDS instructions have constant pointers.
  // If Ptr is null, then that means this mem operand contains a
  // PseudoSourceValue like GOT.
  if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) ||
      isa<Constant>(Ptr) || isa<GlobalValue>(Ptr))
    return true;

  if (MMO->getAddrSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT)
    return true;

  const Instruction *I = dyn_cast<Instruction>(Ptr);
  return I && I->getMetadata("amdgpu.uniform");
}

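// Map a base 32-bit scalar load opcode (IMM, IMM_ci or SGPR-offset form) to
// the variant matching the requested load size in bits.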
static unsigned getSmrdOpcode(unsigned BaseOpcode, unsigned LoadSize) {

  if (LoadSize == 32)
    return BaseOpcode;

  switch (BaseOpcode) {
  case AMDGPU::S_LOAD_DWORD_IMM:
    switch (LoadSize) {
    case 64:
      return AMDGPU::S_LOAD_DWORDX2_IMM;
    case 128:
      return AMDGPU::S_LOAD_DWORDX4_IMM;
    case 256:
      return AMDGPU::S_LOAD_DWORDX8_IMM;
    case 512:
      return AMDGPU::S_LOAD_DWORDX16_IMM;
    }
    break;
  case AMDGPU::S_LOAD_DWORD_IMM_ci:
    switch (LoadSize) {
    case 64:
      return AMDGPU::S_LOAD_DWORDX2_IMM_ci;
    case 128:
      return AMDGPU::S_LOAD_DWORDX4_IMM_ci;
    case 256:
      return AMDGPU::S_LOAD_DWORDX8_IMM_ci;
    case 512:
      return AMDGPU::S_LOAD_DWORDX16_IMM_ci;
    }
    break;
  case AMDGPU::S_LOAD_DWORD_SGPR:
    switch (LoadSize) {
    case 64:
      return AMDGPU::S_LOAD_DWORDX2_SGPR;
    case 128:
      return AMDGPU::S_LOAD_DWORDX4_SGPR;
    case 256:
      return AMDGPU::S_LOAD_DWORDX8_SGPR;
    case 512:
      return AMDGPU::S_LOAD_DWORDX16_SGPR;
    }
    break;
  default:
    break;
  }
  llvm_unreachable("Invalid base smrd opcode or size");
}

bool AMDGPUInstructionSelector::hasVgprParts(ArrayRef<GEPInfo> AddrInfo) const {
  for (const GEPInfo &GEPInfo : AddrInfo) {
    if (!GEPInfo.VgprParts.empty())
      return true;
  }
  return false;
}

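// Try to select a uniform constant-address load as a scalar SMRD/SMEM load.
// The GEP offset is encoded as an immediate when it is legal for the
// subtarget, as a CI-style 32-bit immediate on SEA_ISLANDS, or moved into an
// SGPR and used with the SGPR-offset form; otherwise the load's original
// pointer register is used with a zero offset.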
bool AMDGPUInstructionSelector::selectSMRD(MachineInstr &I,
                                           ArrayRef<GEPInfo> AddrInfo) const {

  if (!I.hasOneMemOperand())
    return false;

  if ((*I.memoperands_begin())->getAddrSpace() != AMDGPUASI.CONSTANT_ADDRESS &&
      (*I.memoperands_begin())->getAddrSpace() != AMDGPUASI.CONSTANT_ADDRESS_32BIT)
    return false;

  if (!isInstrUniform(I))
    return false;

  if (hasVgprParts(AddrInfo))
    return false;

  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  const SISubtarget &Subtarget = MF->getSubtarget<SISubtarget>();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  unsigned DstReg = I.getOperand(0).getReg();
  const DebugLoc &DL = I.getDebugLoc();
  unsigned Opcode;
  unsigned LoadSize = RBI.getSizeInBits(DstReg, MRI, TRI);

  if (!AddrInfo.empty() && AddrInfo[0].SgprParts.size() == 1) {

    const GEPInfo &GEPInfo = AddrInfo[0];

    unsigned PtrReg = GEPInfo.SgprParts[0];
    int64_t EncodedImm = AMDGPU::getSMRDEncodedOffset(Subtarget, GEPInfo.Imm);
    if (AMDGPU::isLegalSMRDImmOffset(Subtarget, GEPInfo.Imm)) {
      Opcode = getSmrdOpcode(AMDGPU::S_LOAD_DWORD_IMM, LoadSize);

      MachineInstr *SMRD = BuildMI(*BB, &I, DL, TII.get(Opcode), DstReg)
                                   .addReg(PtrReg)
                                   .addImm(EncodedImm)
                                   .addImm(0); // glc
      return constrainSelectedInstRegOperands(*SMRD, TII, TRI, RBI);
    }

    if (Subtarget.getGeneration() == AMDGPUSubtarget::SEA_ISLANDS &&
        isUInt<32>(EncodedImm)) {
      Opcode = getSmrdOpcode(AMDGPU::S_LOAD_DWORD_IMM_ci, LoadSize);
      MachineInstr *SMRD = BuildMI(*BB, &I, DL, TII.get(Opcode), DstReg)
                                   .addReg(PtrReg)
                                   .addImm(EncodedImm)
                                   .addImm(0); // glc
      return constrainSelectedInstRegOperands(*SMRD, TII, TRI, RBI);
    }

    if (isUInt<32>(GEPInfo.Imm)) {
      Opcode = getSmrdOpcode(AMDGPU::S_LOAD_DWORD_SGPR, LoadSize);
      unsigned OffsetReg = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
      BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_MOV_B32), OffsetReg)
              .addImm(GEPInfo.Imm);

      MachineInstr *SMRD = BuildMI(*BB, &I, DL, TII.get(Opcode), DstReg)
                                   .addReg(PtrReg)
                                   .addReg(OffsetReg)
                                   .addImm(0); // glc
      return constrainSelectedInstRegOperands(*SMRD, TII, TRI, RBI);
    }
  }

  unsigned PtrReg = I.getOperand(1).getReg();
  Opcode = getSmrdOpcode(AMDGPU::S_LOAD_DWORD_IMM, LoadSize);
  MachineInstr *SMRD = BuildMI(*BB, &I, DL, TII.get(Opcode), DstReg)
                               .addReg(PtrReg)
                               .addImm(0)
                               .addImm(0); // glc
  return constrainSelectedInstRegOperands(*SMRD, TII, TRI, RBI);
}

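// Select G_LOAD: prefer a scalar SMRD load when the address is uniform,
// otherwise fall back to a FLAT load sized by the destination register.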
bool AMDGPUInstructionSelector::selectG_LOAD(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  DebugLoc DL = I.getDebugLoc();
  unsigned DstReg = I.getOperand(0).getReg();
  unsigned PtrReg = I.getOperand(1).getReg();
  unsigned LoadSize = RBI.getSizeInBits(DstReg, MRI, TRI);
  unsigned Opcode;

  SmallVector<GEPInfo, 4> AddrInfo;

  getAddrModeInfo(I, MRI, AddrInfo);

  if (selectSMRD(I, AddrInfo)) {
    I.eraseFromParent();
    return true;
  }

  switch (LoadSize) {
  default:
    llvm_unreachable("Load size not supported\n");
  case 32:
    Opcode = AMDGPU::FLAT_LOAD_DWORD;
    break;
  case 64:
    Opcode = AMDGPU::FLAT_LOAD_DWORDX2;
    break;
  }

  MachineInstr *Flat = BuildMI(*BB, &I, DL, TII.get(Opcode))
          .add(I.getOperand(0))
          .addReg(PtrReg)
          .addImm(0)  // offset
          .addImm(0)  // glc
          .addImm(0); // slc

  bool Ret = constrainSelectedInstRegOperands(*Flat, TII, TRI, RBI);
  I.eraseFromParent();
  return Ret;
}

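// Main entry point: dispatch each generic opcode to its hand-written selector
// and let the TableGen-imported patterns (selectImpl) handle the rest.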
bool AMDGPUInstructionSelector::select(MachineInstr &I,
                                       CodeGenCoverage &CoverageInfo) const {

  if (!isPreISelGenericOpcode(I.getOpcode())) {
    if (I.isCopy())
      return selectCOPY(I);
    return true;
  }

  switch (I.getOpcode()) {
  default:
    return selectImpl(I, CoverageInfo);
  case TargetOpcode::G_ADD:
    return selectG_ADD(I);
  case TargetOpcode::G_BITCAST:
    return selectCOPY(I);
  case TargetOpcode::G_CONSTANT:
  case TargetOpcode::G_FCONSTANT:
    return selectG_CONSTANT(I);
  case TargetOpcode::G_GEP:
    return selectG_GEP(I);
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectG_IMPLICIT_DEF(I);
  case TargetOpcode::G_INTRINSIC:
    return selectG_INTRINSIC(I, CoverageInfo);
  case TargetOpcode::G_LOAD:
    return selectG_LOAD(I);
  case TargetOpcode::G_STORE:
    return selectG_STORE(I);
  }
  return false;
}

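// The selectV* helpers below return ComplexRendererFns: small lists of
// lambdas that render the operand (and any default source-modifier, clamp or
// omod immediates) onto the instruction being built by the imported patterns.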
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVCSRC(MachineOperand &Root) const {
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
  }};
}

///
/// This will select either an SGPR or VGPR operand and will save us from
/// having to write an extra tablegen pattern.
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVSRC0(MachineOperand &Root) const {
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3Mods0(MachineOperand &Root) const {
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // src0_mods
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // omod
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3OMods(MachineOperand &Root) const {
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // omod
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3Mods(MachineOperand &Root) const {
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // src_mods
  }};
}