//===- X86RegisterBankInfo.cpp -----------------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the RegisterBankInfo class for X86.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "X86RegisterBankInfo.h"
#include "X86InstrInfo.h"
#include "llvm/CodeGen/GlobalISel/RegisterBank.h"
#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"

#define GET_TARGET_REGBANK_IMPL
#include "X86GenRegisterBank.inc"

using namespace llvm;

// This file will be TableGen'ed at some point.
#define GET_TARGET_REGBANK_INFO_IMPL
#include "X86GenRegisterBankInfo.def"

X86RegisterBankInfo::X86RegisterBankInfo(const TargetRegisterInfo &TRI)
    : X86GenRegisterBankInfo() {

  // Validate RegBank initialization.
  const RegisterBank &RBGPR = getRegBank(X86::GPRRegBankID);
  assert(&X86::GPRRegBank == &RBGPR && "Incorrect RegBanks initialization.");

  // The GPR register bank is fully defined by all the registers in
  // GR64 + its subclasses.
  assert(RBGPR.covers(*TRI.getRegClass(X86::GR64RegClassID)) &&
         "Subclass not added?");
  assert(RBGPR.getSize() == 64 && "GPRs should hold up to 64-bit");
}

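// Return the register bank that covers the given register class: the
// general-purpose classes (GR8..GR64) map to the GPR bank, the FP/vector
// classes (FR32X..VR512) map to the VECR bank.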
const RegisterBank &X86RegisterBankInfo::getRegBankFromRegClass(
    const TargetRegisterClass &RC) const {

  if (X86::GR8RegClass.hasSubClassEq(&RC) ||
      X86::GR16RegClass.hasSubClassEq(&RC) ||
      X86::GR32RegClass.hasSubClassEq(&RC) ||
      X86::GR64RegClass.hasSubClassEq(&RC))
    return getRegBank(X86::GPRRegBankID);

  if (X86::FR32XRegClass.hasSubClassEq(&RC) ||
      X86::FR64XRegClass.hasSubClassEq(&RC) ||
      X86::VR128XRegClass.hasSubClassEq(&RC) ||
      X86::VR256XRegClass.hasSubClassEq(&RC) ||
      X86::VR512RegClass.hasSubClassEq(&RC))
    return getRegBank(X86::VECRRegBankID);

  llvm_unreachable("Unsupported register kind yet.");
}

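// Select the partial mapping index for a single value of type Ty: non-FP
// scalars and pointers map to a GPR index of the matching width, FP scalars
// to an FP index, and vectors to a VEC index sized to the type. 128-bit
// scalars share the VEC128 index.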
X86GenRegisterBankInfo::PartialMappingIdx
X86GenRegisterBankInfo::getPartialMappingIdx(const LLT &Ty, bool isFP) {
  if ((Ty.isScalar() && !isFP) || Ty.isPointer()) {
    switch (Ty.getSizeInBits()) {
    case 1:
    case 8:   return PMI_GPR8;
    case 16:  return PMI_GPR16;
    case 32:  return PMI_GPR32;
    case 64:  return PMI_GPR64;
    case 128: return PMI_VEC128;
    default:
      llvm_unreachable("Unsupported register size.");
    }
  } else if (Ty.isScalar()) {
    switch (Ty.getSizeInBits()) {
    case 32:  return PMI_FP32;
    case 64:  return PMI_FP64;
    case 128: return PMI_VEC128;
    default:
      llvm_unreachable("Unsupported register size.");
    }
  } else {
    switch (Ty.getSizeInBits()) {
    case 128: return PMI_VEC128;
    case 256: return PMI_VEC256;
    case 512: return PMI_VEC512;
    default:
      llvm_unreachable("Unsupported register size.");
    }
  }

  return PMI_None;
}

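// Compute a partial mapping index for every operand of MI: non-register
// operands get PMI_None, register operands get the index matching their type
// and the requested bank kind (isFP).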
void X86RegisterBankInfo::getInstrPartialMappingIdxs(
    const MachineInstr &MI, const MachineRegisterInfo &MRI, const bool isFP,
    SmallVectorImpl<PartialMappingIdx> &OpRegBankIdx) {

  unsigned NumOperands = MI.getNumOperands();
  for (unsigned Idx = 0; Idx < NumOperands; ++Idx) {
    auto &MO = MI.getOperand(Idx);
    if (!MO.isReg())
      OpRegBankIdx[Idx] = PMI_None;
    else
      OpRegBankIdx[Idx] = getPartialMappingIdx(MRI.getType(MO.getReg()), isFP);
  }
}

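// Turn the per-operand partial mapping indices into ValueMappings. Returns
// false if any register operand has no valid mapping, so the caller can bail
// out on the whole instruction.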
bool X86RegisterBankInfo::getInstrValueMapping(
    const MachineInstr &MI,
    const SmallVectorImpl<PartialMappingIdx> &OpRegBankIdx,
    SmallVectorImpl<const ValueMapping *> &OpdsMapping) {

  unsigned NumOperands = MI.getNumOperands();
  for (unsigned Idx = 0; Idx < NumOperands; ++Idx) {
    if (!MI.getOperand(Idx).isReg())
      continue;

    auto Mapping = getValueMapping(OpRegBankIdx[Idx], 1);
    if (!Mapping->isValid())
      return false;

    OpdsMapping[Idx] = Mapping;
  }
  return true;
}

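// Map a three-operand instruction whose destination and sources all share the
// same type, placing every operand in the same bank (GPR or FP depending on
// isFP).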
const RegisterBankInfo::InstructionMapping &
X86RegisterBankInfo::getSameOperandsMapping(const MachineInstr &MI,
                                            bool isFP) const {
  const MachineFunction &MF = *MI.getParent()->getParent();
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  unsigned NumOperands = MI.getNumOperands();
  LLT Ty = MRI.getType(MI.getOperand(0).getReg());

  if (NumOperands != 3 || (Ty != MRI.getType(MI.getOperand(1).getReg())) ||
      (Ty != MRI.getType(MI.getOperand(2).getReg())))
    llvm_unreachable("Unsupported operand mapping yet.");

  auto Mapping = getValueMapping(getPartialMappingIdx(Ty, isFP), 3);
  return getInstructionMapping(DefaultMappingID, 1, Mapping, NumOperands);
}

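// Compute the operand mapping for a single instruction: reuse the default
// logic where it already gives a valid answer, otherwise derive the banks
// from the opcode and the operand types.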
const RegisterBankInfo::InstructionMapping &
X86RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
  const MachineFunction &MF = *MI.getParent()->getParent();
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  unsigned Opc = MI.getOpcode();

  // Try the default logic for non-generic instructions that are either copies
  // or already have some operands assigned to banks.
  if (!isPreISelGenericOpcode(Opc) || Opc == TargetOpcode::G_PHI) {
    const InstructionMapping &Mapping = getInstrMappingImpl(MI);
    if (Mapping.isValid())
      return Mapping;
  }

  switch (Opc) {
  case TargetOpcode::G_ADD:
  case TargetOpcode::G_SUB:
  case TargetOpcode::G_MUL:
    return getSameOperandsMapping(MI, false);
  case TargetOpcode::G_FADD:
  case TargetOpcode::G_FSUB:
  case TargetOpcode::G_FMUL:
  case TargetOpcode::G_FDIV:
    return getSameOperandsMapping(MI, true);
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_ASHR: {
    unsigned NumOperands = MI.getNumOperands();
    LLT Ty = MRI.getType(MI.getOperand(0).getReg());

    auto Mapping = getValueMapping(getPartialMappingIdx(Ty, false), 3);
    return getInstructionMapping(DefaultMappingID, 1, Mapping, NumOperands);
  }
  default:
    break;
  }

  unsigned NumOperands = MI.getNumOperands();
  SmallVector<PartialMappingIdx, 4> OpRegBankIdx(NumOperands);

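  // The remaining opcodes need their operand bank indices fine-tuned one by
  // one; anything not listed below falls through to the NotFP default.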
  switch (Opc) {
  case TargetOpcode::G_FPEXT:
  case TargetOpcode::G_FPTRUNC:
  case TargetOpcode::G_FCONSTANT:
    // Instructions with only floating-point operands (all scalars in VECRReg).
    getInstrPartialMappingIdxs(MI, MRI, /* isFP */ true, OpRegBankIdx);
    break;
  case TargetOpcode::G_SITOFP:
  case TargetOpcode::G_FPTOSI: {
    // Some of the floating-point instructions have mixed GPR and FP operands:
    // fine-tune the computed mapping.
    auto &Op0 = MI.getOperand(0);
    auto &Op1 = MI.getOperand(1);
    const LLT Ty0 = MRI.getType(Op0.getReg());
    const LLT Ty1 = MRI.getType(Op1.getReg());

    bool FirstArgIsFP = Opc == TargetOpcode::G_SITOFP;
    bool SecondArgIsFP = Opc == TargetOpcode::G_FPTOSI;
    OpRegBankIdx[0] = getPartialMappingIdx(Ty0, /* isFP */ FirstArgIsFP);
    OpRegBankIdx[1] = getPartialMappingIdx(Ty1, /* isFP */ SecondArgIsFP);
    break;
  }
  case TargetOpcode::G_FCMP: {
    LLT Ty1 = MRI.getType(MI.getOperand(2).getReg());
    LLT Ty2 = MRI.getType(MI.getOperand(3).getReg());

    assert(Ty1.getSizeInBits() == Ty2.getSizeInBits() &&
           "Mismatched operand sizes for G_FCMP");

    unsigned Size = Ty1.getSizeInBits();
    assert((Size == 32 || Size == 64) && "Unsupported size for G_FCMP");

    auto FpRegBank = getPartialMappingIdx(Ty1, /* isFP */ true);
    OpRegBankIdx = {PMI_GPR8,
                    /* Predicate */ PMI_None, FpRegBank, FpRegBank};
    break;
  }
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_ANYEXT: {
    auto &Op0 = MI.getOperand(0);
    auto &Op1 = MI.getOperand(1);
    const LLT Ty0 = MRI.getType(Op0.getReg());
    const LLT Ty1 = MRI.getType(Op1.getReg());

    bool isFPTrunc = (Ty0.getSizeInBits() == 32 || Ty0.getSizeInBits() == 64) &&
                     Ty1.getSizeInBits() == 128 && Opc == TargetOpcode::G_TRUNC;
    bool isFPAnyExt = Ty0.getSizeInBits() == 128 &&
                      (Ty1.getSizeInBits() == 32 || Ty1.getSizeInBits() == 64) &&
                      Opc == TargetOpcode::G_ANYEXT;

    getInstrPartialMappingIdxs(MI, MRI, /* isFP */ isFPTrunc || isFPAnyExt,
                               OpRegBankIdx);
    break;
  }
  default:
    // Track the bank of each register, use NotFP mapping (all scalars in GPRs).
    getInstrPartialMappingIdxs(MI, MRI, /* isFP */ false, OpRegBankIdx);
    break;
  }

  // Finally construct the computed mapping.
  SmallVector<const ValueMapping *, 8> OpdsMapping(NumOperands);
  if (!getInstrValueMapping(MI, OpRegBankIdx, OpdsMapping))
    return getInvalidInstructionMapping();

  return getInstructionMapping(DefaultMappingID, /* Cost */ 1,
                               getOperandsMapping(OpdsMapping), NumOperands);
}

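// Nothing X86-specific to do when applying a computed mapping; the default
// implementation is enough.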
void X86RegisterBankInfo::applyMappingImpl(
    const OperandsMapper &OpdMapper) const {
  return applyDefaultMapping(OpdMapper);
}

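// Advertise an alternative mapping for 32/64-bit loads, stores and implicit
// defs where the value lives in the FP/vector bank instead of the GPR bank,
// so RegBankSelect can pick whichever is cheaper.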
RegisterBankInfo::InstructionMappings
X86RegisterBankInfo::getInstrAlternativeMappings(const MachineInstr &MI) const {
  const MachineFunction &MF = *MI.getParent()->getParent();
  const TargetSubtargetInfo &STI = MF.getSubtarget();
  const TargetRegisterInfo &TRI = *STI.getRegisterInfo();
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  switch (MI.getOpcode()) {
  case TargetOpcode::G_LOAD:
  case TargetOpcode::G_STORE:
  case TargetOpcode::G_IMPLICIT_DEF: {
    // We are going to try to map 32/64-bit values to PMI_FP32/PMI_FP64.
    unsigned Size = getSizeInBits(MI.getOperand(0).getReg(), MRI, TRI);
    if (Size != 32 && Size != 64)
      break;

    unsigned NumOperands = MI.getNumOperands();

    // Track the bank of each register, use FP mapping (all scalars in VEC).
    SmallVector<PartialMappingIdx, 4> OpRegBankIdx(NumOperands);
    getInstrPartialMappingIdxs(MI, MRI, /* isFP */ true, OpRegBankIdx);

    // Finally construct the computed mapping.
    SmallVector<const ValueMapping *, 8> OpdsMapping(NumOperands);
    if (!getInstrValueMapping(MI, OpRegBankIdx, OpdsMapping))
      break;

    const RegisterBankInfo::InstructionMapping &Mapping = getInstructionMapping(
        /*ID*/ 1, /*Cost*/ 1, getOperandsMapping(OpdsMapping), NumOperands);
    InstructionMappings AltMappings;
    AltMappings.push_back(&Mapping);
    return AltMappings;
  }
  default:
    break;
  }
  return RegisterBankInfo::getInstrAlternativeMappings(MI);
}