//===-- X86Subtarget.cpp - X86 Subtarget Information ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the X86 specific subclass of TargetSubtargetInfo.
//
//===----------------------------------------------------------------------===//
13 #include "X86Subtarget.h"
14 #include "MCTargetDesc/X86BaseInfo.h"
15 #include "X86.h"
16 #include "X86CallLowering.h"
17 #include "X86LegalizerInfo.h"
18 #include "X86MacroFusion.h"
19 #include "X86RegisterBankInfo.h"
20 #include "X86TargetMachine.h"
21 #include "llvm/ADT/Triple.h"
22 #include "llvm/CodeGen/GlobalISel/CallLowering.h"
23 #include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
24 #include "llvm/IR/Attributes.h"
25 #include "llvm/IR/ConstantRange.h"
26 #include "llvm/IR/Function.h"
27 #include "llvm/IR/GlobalValue.h"
28 #include "llvm/Support/Casting.h"
29 #include "llvm/Support/CodeGen.h"
30 #include "llvm/Support/CommandLine.h"
31 #include "llvm/Support/Debug.h"
32 #include "llvm/Support/ErrorHandling.h"
33 #include "llvm/Support/raw_ostream.h"
34 #include "llvm/Target/TargetMachine.h"
36 #if defined(_MSC_VER)
37 #include <intrin.h>
38 #endif
40 using namespace llvm;
42 #define DEBUG_TYPE "subtarget"
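
// Pull in the TableGen-generated subtarget feature tables and the generated
// X86GenSubtargetInfo constructor selected by the GET_SUBTARGETINFO_* macros.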
#define GET_SUBTARGETINFO_TARGET_DESC
#define GET_SUBTARGETINFO_CTOR
#include "X86GenSubtargetInfo.inc"

// Temporary option to control early if-conversion for x86 while adding machine
// models.
static cl::opt<bool>
X86EarlyIfConv("x86-early-ifcvt", cl::Hidden,
               cl::desc("Enable early if-conversion on X86"));

/// Classify a blockaddress reference for the current subtarget according to how
/// we should reference it in a non-pcrel context.
unsigned char X86Subtarget::classifyBlockAddressReference() const {
  return classifyLocalReference(nullptr);
}

/// Classify a global variable reference for the current subtarget according to
/// how we should reference it in a non-pcrel context.
unsigned char
X86Subtarget::classifyGlobalReference(const GlobalValue *GV) const {
  return classifyGlobalReference(GV, *GV->getParent());
}

unsigned char
X86Subtarget::classifyLocalReference(const GlobalValue *GV) const {
  // If we're not PIC, it's not very interesting.
  if (!isPositionIndependent())
    return X86II::MO_NO_FLAG;

  if (is64Bit()) {
    // 64-bit ELF PIC local references may use GOTOFF relocations.
    if (isTargetELF()) {
      switch (TM.getCodeModel()) {
      // 64-bit small code model is simple: All rip-relative.
      case CodeModel::Tiny:
        llvm_unreachable("Tiny codesize model not supported on X86");
      case CodeModel::Small:
      case CodeModel::Kernel:
        return X86II::MO_NO_FLAG;

      // The large PIC code model uses GOTOFF.
      case CodeModel::Large:
        return X86II::MO_GOTOFF;

      // Medium is a hybrid: RIP-rel for code, GOTOFF for DSO local data.
      case CodeModel::Medium:
        // Constant pool and jump table handling pass a nullptr to this
        // function so we need to use isa_and_nonnull.
        if (isa_and_nonnull<Function>(GV))
          return X86II::MO_NO_FLAG; // All code is RIP-relative
        return X86II::MO_GOTOFF;    // Local symbols use GOTOFF.
      }
      llvm_unreachable("invalid code model");
    }

    // Otherwise, this is either a RIP-relative reference or a 64-bit movabsq,
    // both of which use MO_NO_FLAG.
    return X86II::MO_NO_FLAG;
  }

  // The COFF dynamic linker just patches the executable sections.
  if (isTargetCOFF())
    return X86II::MO_NO_FLAG;

  if (isTargetDarwin()) {
    // 32 bit macho has no relocation for a-b if a is undefined, even if
    // b is in the section that is being relocated.
    // This means we have to use a load even for GVs that are known to be
    // local to the dso.
    if (GV && (GV->isDeclarationForLinker() || GV->hasCommonLinkage()))
      return X86II::MO_DARWIN_NONLAZY_PIC_BASE;

    return X86II::MO_PIC_BASE_OFFSET;
  }

  return X86II::MO_GOTOFF;
}

unsigned char X86Subtarget::classifyGlobalReference(const GlobalValue *GV,
                                                    const Module &M) const {
  // The static large model never uses stubs.
  if (TM.getCodeModel() == CodeModel::Large && !isPositionIndependent())
    return X86II::MO_NO_FLAG;

  // Absolute symbols can be referenced directly.
  if (GV) {
    if (Optional<ConstantRange> CR = GV->getAbsoluteSymbolRange()) {
      // See if we can use the 8-bit immediate form. Note that some instructions
      // will sign extend the immediate operand, so to be conservative we only
      // accept the range [0,128).
      if (CR->getUnsignedMax().ult(128))
        return X86II::MO_ABS8;
      else
        return X86II::MO_NO_FLAG;
    }
  }

  if (TM.shouldAssumeDSOLocal(M, GV))
    return classifyLocalReference(GV);

  if (isTargetCOFF()) {
    if (GV->hasDLLImportStorageClass())
      return X86II::MO_DLLIMPORT;
    return X86II::MO_COFFSTUB;
  }
  // Some JIT users use *-win32-elf triples; these shouldn't use GOT tables.
  if (isOSWindows())
    return X86II::MO_NO_FLAG;

  if (is64Bit()) {
    // ELF supports a large, truly PIC code model with non-PC relative GOT
    // references. Other object file formats do not. Use the no-flag, 64-bit
    // reference for them.
    if (TM.getCodeModel() == CodeModel::Large)
      return isTargetELF() ? X86II::MO_GOT : X86II::MO_NO_FLAG;
    return X86II::MO_GOTPCREL;
  }

  if (isTargetDarwin()) {
    if (!isPositionIndependent())
      return X86II::MO_DARWIN_NONLAZY;
    return X86II::MO_DARWIN_NONLAZY_PIC_BASE;
  }

  // 32-bit ELF references GlobalAddress directly in static relocation model.
  // We cannot use MO_GOT because EBX may not be set up.
  if (TM.getRelocationModel() == Reloc::Static)
    return X86II::MO_NO_FLAG;
  return X86II::MO_GOT;
}

unsigned char
X86Subtarget::classifyGlobalFunctionReference(const GlobalValue *GV) const {
  return classifyGlobalFunctionReference(GV, *GV->getParent());
}

unsigned char
X86Subtarget::classifyGlobalFunctionReference(const GlobalValue *GV,
                                              const Module &M) const {
  if (TM.shouldAssumeDSOLocal(M, GV))
    return X86II::MO_NO_FLAG;

  // Functions on COFF can be non-DSO local for two reasons:
  // - They are marked dllimport
  // - They are extern_weak, and a stub is needed
  if (isTargetCOFF()) {
    if (GV->hasDLLImportStorageClass())
      return X86II::MO_DLLIMPORT;
    return X86II::MO_COFFSTUB;
  }

  const Function *F = dyn_cast_or_null<Function>(GV);

  if (isTargetELF()) {
    if (is64Bit() && F && (CallingConv::X86_RegCall == F->getCallingConv()))
      // According to psABI, PLT stub clobbers XMM8-XMM15.
      // In Regcall calling convention those registers are used for passing
      // parameters. Thus we need to prevent lazy binding in Regcall.
      return X86II::MO_GOTPCREL;
    // If PLT must be avoided then the call should be via GOTPCREL.
    if (((F && F->hasFnAttribute(Attribute::NonLazyBind)) ||
         (!F && M.getRtLibUseGOT())) &&
        is64Bit())
      return X86II::MO_GOTPCREL;
    // Reference ExternalSymbol directly in static relocation model.
    if (!is64Bit() && !GV && TM.getRelocationModel() == Reloc::Static)
      return X86II::MO_NO_FLAG;
    return X86II::MO_PLT;
  }

  if (is64Bit()) {
    if (F && F->hasFnAttribute(Attribute::NonLazyBind))
      // If the function is marked as non-lazy, generate an indirect call
      // which loads from the GOT directly. This avoids runtime overhead
      // at the cost of eager binding (and one extra byte of encoding).
      return X86II::MO_GOTPCREL;
    return X86II::MO_NO_FLAG;
  }

  return X86II::MO_NO_FLAG;
}

/// Return true if the subtarget allows calls to immediate address.
bool X86Subtarget::isLegalToCallImmediateAddr() const {
  // FIXME: I386 PE/COFF supports PC relative calls using IMAGE_REL_I386_REL32
  // but WinCOFFObjectWriter::RecordRelocation cannot emit them. Once it does,
  // the following check for Win32 should be removed.
  if (In64BitMode || isTargetWin32())
    return false;
  return isTargetELF() || TM.getRelocationModel() == Reloc::Static;
}

void X86Subtarget::initSubtargetFeatures(StringRef CPU, StringRef TuneCPU,
                                         StringRef FS) {
  if (CPU.empty())
    CPU = "generic";

  if (TuneCPU.empty())
    TuneCPU = "i586"; // FIXME: "generic" is more modern than llc tests expect.

  std::string FullFS = X86_MC::ParseX86Triple(TargetTriple);
  assert(!FullFS.empty() && "Failed to parse X86 triple");

  if (!FS.empty())
    FullFS = (Twine(FullFS) + "," + FS).str();

  // Parse features string and set the CPU.
  ParseSubtargetFeatures(CPU, TuneCPU, FullFS);

  // All CPUs that implement SSE4.2 or SSE4A support unaligned accesses of
  // 16-bytes and under that are reasonably fast. These features were
  // introduced with Intel's Nehalem/Silvermont and AMD's Family10h
  // micro-architectures respectively.
  if (hasSSE42() || hasSSE4A())
    IsUAMem16Slow = false;

  LLVM_DEBUG(dbgs() << "Subtarget features: SSELevel " << X86SSELevel
                    << ", 3DNowLevel " << X863DNowLevel << ", 64bit "
                    << HasX86_64 << "\n");
  if (In64BitMode && !HasX86_64)
    report_fatal_error("64-bit code requested on a subtarget that doesn't "
                       "support it!");

  // Stack alignment is 16 bytes on Darwin, Linux, kFreeBSD, NaCl, and for all
  // 64-bit targets. On Solaris (32-bit), stack alignment is 4 bytes
  // following the i386 psABI, while on Illumos it is always 16 bytes.
  if (StackAlignOverride)
    stackAlignment = *StackAlignOverride;
  else if (isTargetDarwin() || isTargetLinux() || isTargetKFreeBSD() ||
           isTargetNaCl() || In64BitMode)
    stackAlignment = Align(16);

  // Consume the vector width attribute or apply any target specific limit.
  if (PreferVectorWidthOverride)
    PreferVectorWidth = PreferVectorWidthOverride;
  else if (Prefer128Bit)
    PreferVectorWidth = 128;
  else if (Prefer256Bit)
    PreferVectorWidth = 256;
}

X86Subtarget &X86Subtarget::initializeSubtargetDependencies(StringRef CPU,
                                                            StringRef TuneCPU,
                                                            StringRef FS) {
  initSubtargetFeatures(CPU, TuneCPU, FS);
  return *this;
}

X86Subtarget::X86Subtarget(const Triple &TT, StringRef CPU, StringRef TuneCPU,
                           StringRef FS, const X86TargetMachine &TM,
                           MaybeAlign StackAlignOverride,
                           unsigned PreferVectorWidthOverride,
                           unsigned RequiredVectorWidth)
    : X86GenSubtargetInfo(TT, CPU, TuneCPU, FS),
      PICStyle(PICStyles::Style::None), TM(TM), TargetTriple(TT),
      StackAlignOverride(StackAlignOverride),
      PreferVectorWidthOverride(PreferVectorWidthOverride),
      RequiredVectorWidth(RequiredVectorWidth),
      InstrInfo(initializeSubtargetDependencies(CPU, TuneCPU, FS)),
      TLInfo(TM, *this), FrameLowering(*this, getStackAlignment()) {
  // Determine the PICStyle based on the target selected.
  if (!isPositionIndependent())
    setPICStyle(PICStyles::Style::None);
  else if (is64Bit())
    setPICStyle(PICStyles::Style::RIPRel);
  else if (isTargetCOFF())
    setPICStyle(PICStyles::Style::None);
  else if (isTargetDarwin())
    setPICStyle(PICStyles::Style::StubPIC);
  else if (isTargetELF())
    setPICStyle(PICStyles::Style::GOT);
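
  // Construct the GlobalISel support objects for this subtarget: call
  // lowering, IR legalization rules, register bank information, and the
  // instruction selector.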
  CallLoweringInfo.reset(new X86CallLowering(*getTargetLowering()));
  Legalizer.reset(new X86LegalizerInfo(*this, TM));

  auto *RBI = new X86RegisterBankInfo(*getRegisterInfo());
  RegBankInfo.reset(RBI);
  InstSelector.reset(createX86InstructionSelector(TM, *this, *RBI));
}
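
// Accessors for the GlobalISel objects created in the constructor above.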
const CallLowering *X86Subtarget::getCallLowering() const {
  return CallLoweringInfo.get();
}

InstructionSelector *X86Subtarget::getInstructionSelector() const {
  return InstSelector.get();
}

const LegalizerInfo *X86Subtarget::getLegalizerInfo() const {
  return Legalizer.get();
}

const RegisterBankInfo *X86Subtarget::getRegBankInfo() const {
  return RegBankInfo.get();
}

bool X86Subtarget::enableEarlyIfConversion() const {
  return hasCMov() && X86EarlyIfConv;
}

void X86Subtarget::getPostRAMutations(
    std::vector<std::unique_ptr<ScheduleDAGMutation>> &Mutations) const {
  Mutations.push_back(createX86MacroFusionDAGMutation());
}

bool X86Subtarget::isPositionIndependent() const {
  return TM.isPositionIndependent();
}