//===-- X86Subtarget.cpp - X86 Subtarget Information ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the X86 specific subclass of TargetSubtargetInfo.
//
//===----------------------------------------------------------------------===//

#include "X86.h"

#include "X86CallLowering.h"
#include "X86LegalizerInfo.h"
#include "X86MacroFusion.h"
#include "X86RegisterBankInfo.h"
#include "X86Subtarget.h"
#include "MCTargetDesc/X86BaseInfo.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/Triple.h"
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

using namespace llvm;

#define DEBUG_TYPE "subtarget"

#define GET_SUBTARGETINFO_TARGET_DESC
#define GET_SUBTARGETINFO_CTOR
#include "X86GenSubtargetInfo.inc"

// Temporary option to control early if-conversion for x86 while adding machine
// models.
static cl::opt<bool>
X86EarlyIfConv("x86-early-ifcvt", cl::Hidden,
               cl::desc("Enable early if-conversion on X86"));

/// Classify a blockaddress reference for the current subtarget according to how
/// we should reference it in a non-pcrel context.
unsigned char X86Subtarget::classifyBlockAddressReference() const {
  return classifyLocalReference(nullptr);
}

/// Classify a global variable reference for the current subtarget according to
/// how we should reference it in a non-pcrel context.
unsigned char
X86Subtarget::classifyGlobalReference(const GlobalValue *GV) const {
  return classifyGlobalReference(GV, *GV->getParent());
}

unsigned char
X86Subtarget::classifyLocalReference(const GlobalValue *GV) const {
  // If we're not PIC, it's not very interesting.
  if (!isPositionIndependent())
    return X86II::MO_NO_FLAG;

  if (is64Bit()) {
    // 64-bit ELF PIC local references may use GOTOFF relocations.
    if (isTargetELF()) {
      switch (TM.getCodeModel()) {
      // 64-bit small code model is simple: All rip-relative.
      case CodeModel::Tiny:
        llvm_unreachable("Tiny codesize model not supported on X86");
      case CodeModel::Small:
      case CodeModel::Kernel:
        return X86II::MO_NO_FLAG;

      // The large PIC code model uses GOTOFF.
      case CodeModel::Large:
        return X86II::MO_GOTOFF;

      // Medium is a hybrid: RIP-rel for code, GOTOFF for DSO local data.
      case CodeModel::Medium:
        if (isa<Function>(GV))
          return X86II::MO_NO_FLAG; // All code is RIP-relative
        return X86II::MO_GOTOFF;    // Local symbols use GOTOFF.
      }
      llvm_unreachable("invalid code model");
    }

    // Otherwise, this is either a RIP-relative reference or a 64-bit movabsq,
    // both of which use MO_NO_FLAG.
    return X86II::MO_NO_FLAG;
  }

  // The COFF dynamic linker just patches the executable sections.
  if (isTargetCOFF())
    return X86II::MO_NO_FLAG;

  if (isTargetDarwin()) {
    // 32 bit macho has no relocation for a-b if a is undefined, even if
    // b is in the section that is being relocated.
    // This means we have to use a load even for GVs that are known to be
    // local to the dso.
    if (GV && (GV->isDeclarationForLinker() || GV->hasCommonLinkage()))
      return X86II::MO_DARWIN_NONLAZY_PIC_BASE;

    return X86II::MO_PIC_BASE_OFFSET;
  }

  return X86II::MO_GOTOFF;
}

unsigned char X86Subtarget::classifyGlobalReference(const GlobalValue *GV,
                                                    const Module &M) const {
  // The static large model never uses stubs.
  if (TM.getCodeModel() == CodeModel::Large && !isPositionIndependent())
    return X86II::MO_NO_FLAG;

  // Absolute symbols can be referenced directly.
  if (GV) {
    if (Optional<ConstantRange> CR = GV->getAbsoluteSymbolRange()) {
      // See if we can use the 8-bit immediate form. Note that some instructions
      // will sign extend the immediate operand, so to be conservative we only
      // accept the range [0,128).
      if (CR->getUnsignedMax().ult(128))
        return X86II::MO_ABS8;
      else
        return X86II::MO_NO_FLAG;
    }
  }

  if (TM.shouldAssumeDSOLocal(M, GV))
    return classifyLocalReference(GV);

  if (isTargetCOFF()) {
    if (GV->hasDLLImportStorageClass())
      return X86II::MO_DLLIMPORT;
    return X86II::MO_COFFSTUB;
  }
  // Some JIT users use *-win32-elf triples; these shouldn't use GOT tables.
  if (isOSWindows())
    return X86II::MO_NO_FLAG;

  if (is64Bit()) {
    // ELF supports a large, truly PIC code model with non-PC relative GOT
    // references. Other object file formats do not. Use the no-flag, 64-bit
    // reference for them.
    if (TM.getCodeModel() == CodeModel::Large)
      return isTargetELF() ? X86II::MO_GOT : X86II::MO_NO_FLAG;
    return X86II::MO_GOTPCREL;
  }

  if (isTargetDarwin()) {
    if (!isPositionIndependent())
      return X86II::MO_DARWIN_NONLAZY;
    return X86II::MO_DARWIN_NONLAZY_PIC_BASE;
  }

  return X86II::MO_GOT;
}

unsigned char
X86Subtarget::classifyGlobalFunctionReference(const GlobalValue *GV) const {
  return classifyGlobalFunctionReference(GV, *GV->getParent());
}

unsigned char
X86Subtarget::classifyGlobalFunctionReference(const GlobalValue *GV,
                                              const Module &M) const {
  if (TM.shouldAssumeDSOLocal(M, GV))
    return X86II::MO_NO_FLAG;

  // Functions on COFF can be non-DSO local for two reasons:
  // - They are marked dllimport
  // - They are extern_weak, and a stub is needed
  if (isTargetCOFF()) {
    if (GV->hasDLLImportStorageClass())
      return X86II::MO_DLLIMPORT;
    return X86II::MO_COFFSTUB;
  }

  const Function *F = dyn_cast_or_null<Function>(GV);

  if (isTargetELF()) {
    if (is64Bit() && F && (CallingConv::X86_RegCall == F->getCallingConv()))
      // According to psABI, PLT stub clobbers XMM8-XMM15.
      // In Regcall calling convention those registers are used for passing
      // parameters. Thus we need to prevent lazy binding in Regcall.
      return X86II::MO_GOTPCREL;
    // If PLT must be avoided then the call should be via GOTPCREL.
    if (((F && F->hasFnAttribute(Attribute::NonLazyBind)) ||
         (!F && M.getRtLibUseGOT())) &&
        is64Bit())
      return X86II::MO_GOTPCREL;
    return X86II::MO_PLT;
  }

  if (is64Bit()) {
    if (F && F->hasFnAttribute(Attribute::NonLazyBind))
      // If the function is marked as non-lazy, generate an indirect call
      // which loads from the GOT directly. This avoids runtime overhead
      // at the cost of eager binding (and one extra byte of encoding).
      return X86II::MO_GOTPCREL;
    return X86II::MO_NO_FLAG;
  }

  return X86II::MO_NO_FLAG;
}

/// Return true if the subtarget allows calls to immediate address.
bool X86Subtarget::isLegalToCallImmediateAddr() const {
  // FIXME: I386 PE/COFF supports PC relative calls using IMAGE_REL_I386_REL32
  // but WinCOFFObjectWriter::RecordRelocation cannot emit them. Once it does,
  // the following check for Win32 should be removed.
  if (In64BitMode || isTargetWin32())
    return false;
  return isTargetELF() || TM.getRelocationModel() == Reloc::Static;
}

void X86Subtarget::initSubtargetFeatures(StringRef CPU, StringRef FS) {
  std::string CPUName = CPU;
  if (CPUName.empty())
    CPUName = "generic";

  std::string FullFS = FS;
  if (In64BitMode) {
    // SSE2 should default to enabled in 64-bit mode, but can be turned off
    // explicitly.
    if (!FullFS.empty())
      FullFS = "+sse2," + FullFS;
    else
      FullFS = "+sse2";

    // If no CPU was specified, enable 64bit feature to satisfy later check.
    if (CPUName == "generic") {
      if (!FullFS.empty())
        FullFS = "+64bit," + FullFS;
      else
        FullFS = "+64bit";
    }
  }

  // LAHF/SAHF are always supported in non-64-bit mode.
  if (!In64BitMode) {
    if (!FullFS.empty())
      FullFS = "+sahf," + FullFS;
    else
      FullFS = "+sahf";
  }

  // Parse features string and set the CPU.
  ParseSubtargetFeatures(CPUName, FullFS);

  // All CPUs that implement SSE4.2 or SSE4A support unaligned accesses of
  // 16-bytes and under that are reasonably fast. These features were
  // introduced with Intel's Nehalem/Silvermont and AMD's Family10h
  // micro-architectures respectively.
  if (hasSSE42() || hasSSE4A())
    IsUAMem16Slow = false;

  // It's important to keep the MCSubtargetInfo feature bits in sync with
  // target data structure which is shared with MC code emitter, etc.
  if (In64BitMode)
    ToggleFeature(X86::Mode64Bit);
  else if (In32BitMode)
    ToggleFeature(X86::Mode32Bit);
  else if (In16BitMode)
    ToggleFeature(X86::Mode16Bit);
  else
    llvm_unreachable("Not 16-bit, 32-bit or 64-bit mode!");

  LLVM_DEBUG(dbgs() << "Subtarget features: SSELevel " << X86SSELevel
                    << ", 3DNowLevel " << X863DNowLevel << ", 64bit "
                    << HasX86_64 << "\n");
  if (In64BitMode && !HasX86_64)
    report_fatal_error("64-bit code requested on a subtarget that doesn't "
                       "support it!");

  // Stack alignment is 16 bytes on Darwin, Linux, kFreeBSD and Solaris (both
  // 32 and 64 bit) and for all 64-bit targets.
  if (StackAlignOverride)
    stackAlignment = *StackAlignOverride;
  else if (isTargetDarwin() || isTargetLinux() || isTargetSolaris() ||
           isTargetKFreeBSD() || In64BitMode)
    stackAlignment = Align(16);

  // Some CPUs have more overhead for gather. The specified overhead is relative
  // to the Load operation. "2" is the number provided by Intel architects. This
  // parameter is used for cost estimation of Gather Op and comparison with
  // other alternatives.
  // TODO: Remove the explicit hasAVX512()? That would mean we would only
  // enable gather with a -march.
  if (hasAVX512() || (hasAVX2() && hasFastGather()))
    GatherOverhead = 2;
  if (hasAVX512())
    ScatterOverhead = 2;

  // Consume the vector width attribute or apply any target specific limit.
  if (PreferVectorWidthOverride)
    PreferVectorWidth = PreferVectorWidthOverride;
  else if (Prefer128Bit)
    PreferVectorWidth = 128;
  else if (Prefer256Bit)
    PreferVectorWidth = 256;
}

X86Subtarget &X86Subtarget::initializeSubtargetDependencies(StringRef CPU,
                                                            StringRef FS) {
  initSubtargetFeatures(CPU, FS);
  return *this;
}

X86Subtarget::X86Subtarget(const Triple &TT, StringRef CPU, StringRef FS,
                           const X86TargetMachine &TM,
                           MaybeAlign StackAlignOverride,
                           unsigned PreferVectorWidthOverride,
                           unsigned RequiredVectorWidth)
    : X86GenSubtargetInfo(TT, CPU, FS), PICStyle(PICStyles::None), TM(TM),
      TargetTriple(TT), StackAlignOverride(StackAlignOverride),
      PreferVectorWidthOverride(PreferVectorWidthOverride),
      RequiredVectorWidth(RequiredVectorWidth),
      In64BitMode(TargetTriple.getArch() == Triple::x86_64),
      In32BitMode(TargetTriple.getArch() == Triple::x86 &&
                  TargetTriple.getEnvironment() != Triple::CODE16),
      In16BitMode(TargetTriple.getArch() == Triple::x86 &&
                  TargetTriple.getEnvironment() == Triple::CODE16),
      InstrInfo(initializeSubtargetDependencies(CPU, FS)), TLInfo(TM, *this),
      FrameLowering(*this, getStackAlignment()) {
  // Determine the PICStyle based on the target selected.
  if (!isPositionIndependent())
    setPICStyle(PICStyles::None);
  else if (is64Bit())
    setPICStyle(PICStyles::RIPRel);
  else if (isTargetCOFF())
    setPICStyle(PICStyles::None);
  else if (isTargetDarwin())
    setPICStyle(PICStyles::StubPIC);
  else if (isTargetELF())
    setPICStyle(PICStyles::GOT);

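  // Set up the GlobalISel components (call lowering, legalizer, register bank
  // info, and instruction selector) so GlobalISel can be used on this
  // subtarget when it is enabled.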
  CallLoweringInfo.reset(new X86CallLowering(*getTargetLowering()));
  Legalizer.reset(new X86LegalizerInfo(*this, TM));

  auto *RBI = new X86RegisterBankInfo(*getRegisterInfo());
  RegBankInfo.reset(RBI);
  InstSelector.reset(createX86InstructionSelector(TM, *this, *RBI));
}

const CallLowering *X86Subtarget::getCallLowering() const {
  return CallLoweringInfo.get();
}

InstructionSelector *X86Subtarget::getInstructionSelector() const {
  return InstSelector.get();
}

const LegalizerInfo *X86Subtarget::getLegalizerInfo() const {
  return Legalizer.get();
}

const RegisterBankInfo *X86Subtarget::getRegBankInfo() const {
  return RegBankInfo.get();
}

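// Early if-conversion speculates both sides of short branches and selects the
// result with conditional moves, so it is only enabled when the subtarget has
// CMOV and the x86-early-ifcvt flag above is set.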
bool X86Subtarget::enableEarlyIfConversion() const {
  return hasCMov() && X86EarlyIfConv;
}

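// Register the X86 macro-fusion DAG mutation so that fusible instruction
// pairs (e.g. a compare/test followed by a conditional branch) are kept
// adjacent by the post-RA scheduler.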
void X86Subtarget::getPostRAMutations(
    std::vector<std::unique_ptr<ScheduleDAGMutation>> &Mutations) const {
  Mutations.push_back(createX86MacroFusionDAGMutation());
}