//===-- X86MCTargetDesc.cpp - X86 Target Descriptions ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides X86 specific target descriptions.
//
//===----------------------------------------------------------------------===//

#include "X86MCTargetDesc.h"
#include "TargetInfo/X86TargetInfo.h"
#include "X86ATTInstPrinter.h"
#include "X86BaseInfo.h"
#include "X86IntelInstPrinter.h"
#include "X86MCAsmInfo.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/Triple.h"
#include "llvm/DebugInfo/CodeView/CodeView.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCInstrAnalysis.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MachineLocation.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Host.h"
#include "llvm/Support/TargetRegistry.h"

#if _MSC_VER
#include <intrin.h>
#endif

using namespace llvm;

#define GET_REGINFO_MC_DESC
#include "X86GenRegisterInfo.inc"

#define GET_INSTRINFO_MC_DESC
#define GET_INSTRINFO_MC_HELPERS
#include "X86GenInstrInfo.inc"

#define GET_SUBTARGETINFO_MC_DESC
#include "X86GenSubtargetInfo.inc"

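// Translate the operating mode implied by the target triple (64-, 32-, or
// 16-bit) into the corresponding subtarget feature string; for example, an
// x86_64 triple yields "+64bit-mode,-32bit-mode,-16bit-mode".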
std::string X86_MC::ParseX86Triple(const Triple &TT) {
  std::string FS;
  if (TT.getArch() == Triple::x86_64)
    FS = "+64bit-mode,-32bit-mode,-16bit-mode";
  else if (TT.getEnvironment() != Triple::CODE16)
    FS = "-64bit-mode,+32bit-mode,-16bit-mode";
  else
    FS = "-64bit-mode,-32bit-mode,+16bit-mode";

  return FS;
}

unsigned X86_MC::getDwarfRegFlavour(const Triple &TT, bool isEH) {
  if (TT.getArch() == Triple::x86_64)
    return DWARFFlavour::X86_64;

  if (TT.isOSDarwin())
    return isEH ? DWARFFlavour::X86_32_DarwinEH : DWARFFlavour::X86_32_Generic;
  if (TT.isOSCygMing())
    // Unsupported for now; just a quick fallback.
    return DWARFFlavour::X86_32_Generic;
  return DWARFFlavour::X86_32_Generic;
}

void X86_MC::initLLVMToSEHAndCVRegMapping(MCRegisterInfo *MRI) {
  // FIXME: TableGen these.
  for (unsigned Reg = X86::NoRegister + 1; Reg < X86::NUM_TARGET_REGS; ++Reg) {
    unsigned SEH = MRI->getEncodingValue(Reg);
    MRI->mapLLVMRegToSEHReg(Reg, SEH);
  }

  // Mapping from CodeView to MC register id.
  static const struct {
    codeview::RegisterId CVReg;
    MCPhysReg Reg;
  } RegMap[] = {
      {codeview::RegisterId::AL, X86::AL},
      {codeview::RegisterId::CL, X86::CL},
      {codeview::RegisterId::DL, X86::DL},
      {codeview::RegisterId::BL, X86::BL},
      {codeview::RegisterId::AH, X86::AH},
      {codeview::RegisterId::CH, X86::CH},
      {codeview::RegisterId::DH, X86::DH},
      {codeview::RegisterId::BH, X86::BH},
      {codeview::RegisterId::AX, X86::AX},
      {codeview::RegisterId::CX, X86::CX},
      {codeview::RegisterId::DX, X86::DX},
      {codeview::RegisterId::BX, X86::BX},
      {codeview::RegisterId::SP, X86::SP},
      {codeview::RegisterId::BP, X86::BP},
      {codeview::RegisterId::SI, X86::SI},
      {codeview::RegisterId::DI, X86::DI},
      {codeview::RegisterId::EAX, X86::EAX},
      {codeview::RegisterId::ECX, X86::ECX},
      {codeview::RegisterId::EDX, X86::EDX},
      {codeview::RegisterId::EBX, X86::EBX},
      {codeview::RegisterId::ESP, X86::ESP},
      {codeview::RegisterId::EBP, X86::EBP},
      {codeview::RegisterId::ESI, X86::ESI},
      {codeview::RegisterId::EDI, X86::EDI},

      {codeview::RegisterId::EFLAGS, X86::EFLAGS},

      {codeview::RegisterId::ST0, X86::FP0},
      {codeview::RegisterId::ST1, X86::FP1},
      {codeview::RegisterId::ST2, X86::FP2},
      {codeview::RegisterId::ST3, X86::FP3},
      {codeview::RegisterId::ST4, X86::FP4},
      {codeview::RegisterId::ST5, X86::FP5},
      {codeview::RegisterId::ST6, X86::FP6},
      {codeview::RegisterId::ST7, X86::FP7},

      {codeview::RegisterId::MM0, X86::MM0},
      {codeview::RegisterId::MM1, X86::MM1},
      {codeview::RegisterId::MM2, X86::MM2},
      {codeview::RegisterId::MM3, X86::MM3},
      {codeview::RegisterId::MM4, X86::MM4},
      {codeview::RegisterId::MM5, X86::MM5},
      {codeview::RegisterId::MM6, X86::MM6},
      {codeview::RegisterId::MM7, X86::MM7},

      {codeview::RegisterId::XMM0, X86::XMM0},
      {codeview::RegisterId::XMM1, X86::XMM1},
      {codeview::RegisterId::XMM2, X86::XMM2},
      {codeview::RegisterId::XMM3, X86::XMM3},
      {codeview::RegisterId::XMM4, X86::XMM4},
      {codeview::RegisterId::XMM5, X86::XMM5},
      {codeview::RegisterId::XMM6, X86::XMM6},
      {codeview::RegisterId::XMM7, X86::XMM7},

      {codeview::RegisterId::XMM8, X86::XMM8},
      {codeview::RegisterId::XMM9, X86::XMM9},
      {codeview::RegisterId::XMM10, X86::XMM10},
      {codeview::RegisterId::XMM11, X86::XMM11},
      {codeview::RegisterId::XMM12, X86::XMM12},
      {codeview::RegisterId::XMM13, X86::XMM13},
      {codeview::RegisterId::XMM14, X86::XMM14},
      {codeview::RegisterId::XMM15, X86::XMM15},

      {codeview::RegisterId::SIL, X86::SIL},
      {codeview::RegisterId::DIL, X86::DIL},
      {codeview::RegisterId::BPL, X86::BPL},
      {codeview::RegisterId::SPL, X86::SPL},
      {codeview::RegisterId::RAX, X86::RAX},
      {codeview::RegisterId::RBX, X86::RBX},
      {codeview::RegisterId::RCX, X86::RCX},
      {codeview::RegisterId::RDX, X86::RDX},
      {codeview::RegisterId::RSI, X86::RSI},
      {codeview::RegisterId::RDI, X86::RDI},
      {codeview::RegisterId::RBP, X86::RBP},
      {codeview::RegisterId::RSP, X86::RSP},
      {codeview::RegisterId::R8, X86::R8},
      {codeview::RegisterId::R9, X86::R9},
      {codeview::RegisterId::R10, X86::R10},
      {codeview::RegisterId::R11, X86::R11},
      {codeview::RegisterId::R12, X86::R12},
      {codeview::RegisterId::R13, X86::R13},
      {codeview::RegisterId::R14, X86::R14},
      {codeview::RegisterId::R15, X86::R15},
      {codeview::RegisterId::R8B, X86::R8B},
      {codeview::RegisterId::R9B, X86::R9B},
      {codeview::RegisterId::R10B, X86::R10B},
      {codeview::RegisterId::R11B, X86::R11B},
      {codeview::RegisterId::R12B, X86::R12B},
      {codeview::RegisterId::R13B, X86::R13B},
      {codeview::RegisterId::R14B, X86::R14B},
      {codeview::RegisterId::R15B, X86::R15B},
      {codeview::RegisterId::R8W, X86::R8W},
      {codeview::RegisterId::R9W, X86::R9W},
      {codeview::RegisterId::R10W, X86::R10W},
      {codeview::RegisterId::R11W, X86::R11W},
      {codeview::RegisterId::R12W, X86::R12W},
      {codeview::RegisterId::R13W, X86::R13W},
      {codeview::RegisterId::R14W, X86::R14W},
      {codeview::RegisterId::R15W, X86::R15W},
      {codeview::RegisterId::R8D, X86::R8D},
      {codeview::RegisterId::R9D, X86::R9D},
      {codeview::RegisterId::R10D, X86::R10D},
      {codeview::RegisterId::R11D, X86::R11D},
      {codeview::RegisterId::R12D, X86::R12D},
      {codeview::RegisterId::R13D, X86::R13D},
      {codeview::RegisterId::R14D, X86::R14D},
      {codeview::RegisterId::R15D, X86::R15D},
      {codeview::RegisterId::AMD64_YMM0, X86::YMM0},
      {codeview::RegisterId::AMD64_YMM1, X86::YMM1},
      {codeview::RegisterId::AMD64_YMM2, X86::YMM2},
      {codeview::RegisterId::AMD64_YMM3, X86::YMM3},
      {codeview::RegisterId::AMD64_YMM4, X86::YMM4},
      {codeview::RegisterId::AMD64_YMM5, X86::YMM5},
      {codeview::RegisterId::AMD64_YMM6, X86::YMM6},
      {codeview::RegisterId::AMD64_YMM7, X86::YMM7},
      {codeview::RegisterId::AMD64_YMM8, X86::YMM8},
      {codeview::RegisterId::AMD64_YMM9, X86::YMM9},
      {codeview::RegisterId::AMD64_YMM10, X86::YMM10},
      {codeview::RegisterId::AMD64_YMM11, X86::YMM11},
      {codeview::RegisterId::AMD64_YMM12, X86::YMM12},
      {codeview::RegisterId::AMD64_YMM13, X86::YMM13},
      {codeview::RegisterId::AMD64_YMM14, X86::YMM14},
      {codeview::RegisterId::AMD64_YMM15, X86::YMM15},
      {codeview::RegisterId::AMD64_YMM16, X86::YMM16},
      {codeview::RegisterId::AMD64_YMM17, X86::YMM17},
      {codeview::RegisterId::AMD64_YMM18, X86::YMM18},
      {codeview::RegisterId::AMD64_YMM19, X86::YMM19},
      {codeview::RegisterId::AMD64_YMM20, X86::YMM20},
      {codeview::RegisterId::AMD64_YMM21, X86::YMM21},
      {codeview::RegisterId::AMD64_YMM22, X86::YMM22},
      {codeview::RegisterId::AMD64_YMM23, X86::YMM23},
      {codeview::RegisterId::AMD64_YMM24, X86::YMM24},
      {codeview::RegisterId::AMD64_YMM25, X86::YMM25},
      {codeview::RegisterId::AMD64_YMM26, X86::YMM26},
      {codeview::RegisterId::AMD64_YMM27, X86::YMM27},
      {codeview::RegisterId::AMD64_YMM28, X86::YMM28},
      {codeview::RegisterId::AMD64_YMM29, X86::YMM29},
      {codeview::RegisterId::AMD64_YMM30, X86::YMM30},
      {codeview::RegisterId::AMD64_YMM31, X86::YMM31},
      {codeview::RegisterId::AMD64_ZMM0, X86::ZMM0},
      {codeview::RegisterId::AMD64_ZMM1, X86::ZMM1},
      {codeview::RegisterId::AMD64_ZMM2, X86::ZMM2},
      {codeview::RegisterId::AMD64_ZMM3, X86::ZMM3},
      {codeview::RegisterId::AMD64_ZMM4, X86::ZMM4},
      {codeview::RegisterId::AMD64_ZMM5, X86::ZMM5},
      {codeview::RegisterId::AMD64_ZMM6, X86::ZMM6},
      {codeview::RegisterId::AMD64_ZMM7, X86::ZMM7},
      {codeview::RegisterId::AMD64_ZMM8, X86::ZMM8},
      {codeview::RegisterId::AMD64_ZMM9, X86::ZMM9},
      {codeview::RegisterId::AMD64_ZMM10, X86::ZMM10},
      {codeview::RegisterId::AMD64_ZMM11, X86::ZMM11},
      {codeview::RegisterId::AMD64_ZMM12, X86::ZMM12},
      {codeview::RegisterId::AMD64_ZMM13, X86::ZMM13},
      {codeview::RegisterId::AMD64_ZMM14, X86::ZMM14},
      {codeview::RegisterId::AMD64_ZMM15, X86::ZMM15},
      {codeview::RegisterId::AMD64_ZMM16, X86::ZMM16},
      {codeview::RegisterId::AMD64_ZMM17, X86::ZMM17},
      {codeview::RegisterId::AMD64_ZMM18, X86::ZMM18},
      {codeview::RegisterId::AMD64_ZMM19, X86::ZMM19},
      {codeview::RegisterId::AMD64_ZMM20, X86::ZMM20},
      {codeview::RegisterId::AMD64_ZMM21, X86::ZMM21},
      {codeview::RegisterId::AMD64_ZMM22, X86::ZMM22},
      {codeview::RegisterId::AMD64_ZMM23, X86::ZMM23},
      {codeview::RegisterId::AMD64_ZMM24, X86::ZMM24},
      {codeview::RegisterId::AMD64_ZMM25, X86::ZMM25},
      {codeview::RegisterId::AMD64_ZMM26, X86::ZMM26},
      {codeview::RegisterId::AMD64_ZMM27, X86::ZMM27},
      {codeview::RegisterId::AMD64_ZMM28, X86::ZMM28},
      {codeview::RegisterId::AMD64_ZMM29, X86::ZMM29},
      {codeview::RegisterId::AMD64_ZMM30, X86::ZMM30},
      {codeview::RegisterId::AMD64_ZMM31, X86::ZMM31},
      {codeview::RegisterId::AMD64_K0, X86::K0},
      {codeview::RegisterId::AMD64_K1, X86::K1},
      {codeview::RegisterId::AMD64_K2, X86::K2},
      {codeview::RegisterId::AMD64_K3, X86::K3},
      {codeview::RegisterId::AMD64_K4, X86::K4},
      {codeview::RegisterId::AMD64_K5, X86::K5},
      {codeview::RegisterId::AMD64_K6, X86::K6},
      {codeview::RegisterId::AMD64_K7, X86::K7},
      {codeview::RegisterId::AMD64_XMM16, X86::XMM16},
      {codeview::RegisterId::AMD64_XMM17, X86::XMM17},
      {codeview::RegisterId::AMD64_XMM18, X86::XMM18},
      {codeview::RegisterId::AMD64_XMM19, X86::XMM19},
      {codeview::RegisterId::AMD64_XMM20, X86::XMM20},
      {codeview::RegisterId::AMD64_XMM21, X86::XMM21},
      {codeview::RegisterId::AMD64_XMM22, X86::XMM22},
      {codeview::RegisterId::AMD64_XMM23, X86::XMM23},
      {codeview::RegisterId::AMD64_XMM24, X86::XMM24},
      {codeview::RegisterId::AMD64_XMM25, X86::XMM25},
      {codeview::RegisterId::AMD64_XMM26, X86::XMM26},
      {codeview::RegisterId::AMD64_XMM27, X86::XMM27},
      {codeview::RegisterId::AMD64_XMM28, X86::XMM28},
      {codeview::RegisterId::AMD64_XMM29, X86::XMM29},
      {codeview::RegisterId::AMD64_XMM30, X86::XMM30},
      {codeview::RegisterId::AMD64_XMM31, X86::XMM31},
  };
  for (unsigned I = 0; I < array_lengthof(RegMap); ++I)
    MRI->mapLLVMRegToCVReg(RegMap[I].Reg, static_cast<int>(RegMap[I].CVReg));
}

MCSubtargetInfo *X86_MC::createX86MCSubtargetInfo(const Triple &TT,
                                                  StringRef CPU, StringRef FS) {
  std::string ArchFS = X86_MC::ParseX86Triple(TT);
  if (!FS.empty()) {
    if (!ArchFS.empty())
      ArchFS = (Twine(ArchFS) + "," + FS).str();
    else
      ArchFS = FS;
  }

  std::string CPUName = CPU;
  if (CPUName.empty())
    CPUName = "generic";

  return createX86MCSubtargetInfoImpl(TT, CPUName, ArchFS);
}

static MCInstrInfo *createX86MCInstrInfo() {
  MCInstrInfo *X = new MCInstrInfo();
  InitX86MCInstrInfo(X);
  return X;
}

static MCRegisterInfo *createX86MCRegisterInfo(const Triple &TT) {
  unsigned RA = (TT.getArch() == Triple::x86_64)
                    ? X86::RIP  // Should have dwarf #16.
                    : X86::EIP; // Should have dwarf #8.

  MCRegisterInfo *X = new MCRegisterInfo();
  InitX86MCRegisterInfo(X, RA, X86_MC::getDwarfRegFlavour(TT, false),
                        X86_MC::getDwarfRegFlavour(TT, true), RA);
  X86_MC::initLLVMToSEHAndCVRegMapping(X);
  return X;
}

static MCAsmInfo *createX86MCAsmInfo(const MCRegisterInfo &MRI,
                                     const Triple &TheTriple) {
  bool is64Bit = TheTriple.getArch() == Triple::x86_64;

  MCAsmInfo *MAI;
  if (TheTriple.isOSBinFormatMachO()) {
    if (is64Bit)
      MAI = new X86_64MCAsmInfoDarwin(TheTriple);
    else
      MAI = new X86MCAsmInfoDarwin(TheTriple);
  } else if (TheTriple.isOSBinFormatELF()) {
    // Force the use of an ELF container.
    MAI = new X86ELFMCAsmInfo(TheTriple);
  } else if (TheTriple.isWindowsMSVCEnvironment() ||
             TheTriple.isWindowsCoreCLREnvironment()) {
    MAI = new X86MCAsmInfoMicrosoft(TheTriple);
  } else if (TheTriple.isOSCygMing() ||
             TheTriple.isWindowsItaniumEnvironment()) {
    MAI = new X86MCAsmInfoGNUCOFF(TheTriple);
  } else {
    // The default is ELF.
    MAI = new X86ELFMCAsmInfo(TheTriple);
  }

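  // At function entry the CFA is the stack pointer plus the size of the
  // pushed return address (esp+4 on 32-bit, rsp+8 on 64-bit), and the return
  // address itself is stored at CFA-4 / CFA-8. The two CFI directives below
  // encode exactly that initial state.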
  // Initialize the initial frame state.
  // Calculate the number of bytes used for storing the return address.
  int stackGrowth = is64Bit ? -8 : -4;

  // Initial state of the frame pointer is esp+stackGrowth.
  unsigned StackPtr = is64Bit ? X86::RSP : X86::ESP;
  MCCFIInstruction Inst = MCCFIInstruction::createDefCfa(
      nullptr, MRI.getDwarfRegNum(StackPtr, true), -stackGrowth);
  MAI->addInitialFrameState(Inst);

  // Add the return address to the move list.
  unsigned InstPtr = is64Bit ? X86::RIP : X86::EIP;
  MCCFIInstruction Inst2 = MCCFIInstruction::createOffset(
      nullptr, MRI.getDwarfRegNum(InstPtr, true), stackGrowth);
  MAI->addInitialFrameState(Inst2);

  return MAI;
}

static MCInstPrinter *createX86MCInstPrinter(const Triple &T,
                                             unsigned SyntaxVariant,
                                             const MCAsmInfo &MAI,
                                             const MCInstrInfo &MII,
                                             const MCRegisterInfo &MRI) {
  if (SyntaxVariant == 0)
    return new X86ATTInstPrinter(MAI, MII, MRI);
  if (SyntaxVariant == 1)
    return new X86IntelInstPrinter(MAI, MII, MRI);
  return nullptr;
}

static MCRelocationInfo *createX86MCRelocationInfo(const Triple &TheTriple,
                                                   MCContext &Ctx) {
  // Default to the stock relocation info.
  return llvm::createMCRelocationInfo(TheTriple, Ctx);
}

namespace llvm {
namespace X86_MC {

class X86MCInstrAnalysis : public MCInstrAnalysis {
  X86MCInstrAnalysis(const X86MCInstrAnalysis &) = delete;
  X86MCInstrAnalysis &operator=(const X86MCInstrAnalysis &) = delete;
  virtual ~X86MCInstrAnalysis() = default;

public:
  X86MCInstrAnalysis(const MCInstrInfo *MCII) : MCInstrAnalysis(MCII) {}

#define GET_STIPREDICATE_DECLS_FOR_MC_ANALYSIS
#include "X86GenSubtargetInfo.inc"

  bool clearsSuperRegisters(const MCRegisterInfo &MRI, const MCInst &Inst,
                            APInt &Mask) const override;
  std::vector<std::pair<uint64_t, uint64_t>>
  findPltEntries(uint64_t PltSectionVA, ArrayRef<uint8_t> PltContents,
                 uint64_t GotSectionVA,
                 const Triple &TargetTriple) const override;
};

#define GET_STIPREDICATE_DEFS_FOR_MC_ANALYSIS
#include "X86GenSubtargetInfo.inc"

bool X86MCInstrAnalysis::clearsSuperRegisters(const MCRegisterInfo &MRI,
                                              const MCInst &Inst,
                                              APInt &Mask) const {
  const MCInstrDesc &Desc = Info->get(Inst.getOpcode());
  unsigned NumDefs = Desc.getNumDefs();
  unsigned NumImplicitDefs = Desc.getNumImplicitDefs();
  assert(Mask.getBitWidth() == NumDefs + NumImplicitDefs &&
         "Unexpected number of bits in the mask!");

  bool HasVEX = (Desc.TSFlags & X86II::EncodingMask) == X86II::VEX;
  bool HasEVEX = (Desc.TSFlags & X86II::EncodingMask) == X86II::EVEX;
  bool HasXOP = (Desc.TSFlags & X86II::EncodingMask) == X86II::XOP;

  const MCRegisterClass &GR32RC = MRI.getRegClass(X86::GR32RegClassID);
  const MCRegisterClass &VR128XRC = MRI.getRegClass(X86::VR128XRegClassID);
  const MCRegisterClass &VR256XRC = MRI.getRegClass(X86::VR256XRegClassID);

  auto ClearsSuperReg = [=](unsigned RegID) {
    // On X86-64, a general purpose integer register is viewed as a 64-bit
    // register internal to the processor.
    // An update to the lower 32 bits of a 64 bit integer register is
    // architecturally defined to zero extend the upper 32 bits.
    if (GR32RC.contains(RegID))
      return true;

    // Early exit if this instruction has no vex/evex/xop prefix.
    if (!HasEVEX && !HasVEX && !HasXOP)
      return false;

    // All VEX and EVEX encoded instructions are defined to zero the high bits
    // of the destination register up to VLMAX (i.e. the maximum vector
    // register width pertaining to the instruction).
    // We assume the same behavior for XOP instructions too.
    return VR128XRC.contains(RegID) || VR256XRC.contains(RegID);
  };

  Mask.clearAllBits();
  for (unsigned I = 0, E = NumDefs; I < E; ++I) {
    const MCOperand &Op = Inst.getOperand(I);
    if (ClearsSuperReg(Op.getReg()))
      Mask.setBit(I);
  }

  for (unsigned I = 0, E = NumImplicitDefs; I < E; ++I) {
    const MCPhysReg Reg = Desc.getImplicitDefs()[I];
    if (ClearsSuperReg(Reg))
      Mask.setBit(NumDefs + I);
  }

  return Mask.getBoolValue();
}

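// The PLT parsers below only recognize the jmp that opens each PLT entry.
// On 32-bit x86 that is either "ff a3 <disp32>" (jmp *disp32(%ebx), where
// %ebx holds the .got.plt base in PIC code) or "ff 25 <addr32>" (jmp through
// an absolute address); on x86-64 it is "ff 25 <disp32>" (a RIP-relative
// jmp). Any other bytes are simply skipped.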
static std::vector<std::pair<uint64_t, uint64_t>>
findX86PltEntries(uint64_t PltSectionVA, ArrayRef<uint8_t> PltContents,
                  uint64_t GotPltSectionVA) {
  // Do a lightweight parsing of PLT entries.
  std::vector<std::pair<uint64_t, uint64_t>> Result;
  for (uint64_t Byte = 0, End = PltContents.size(); Byte + 6 < End; ) {
    // Recognize a jmp.
    if (PltContents[Byte] == 0xff && PltContents[Byte + 1] == 0xa3) {
      // The jmp instruction at the beginning of each PLT entry jumps to the
      // address of the base of the .got.plt section plus the immediate.
      uint32_t Imm = support::endian::read32le(PltContents.data() + Byte + 2);
      Result.push_back(
          std::make_pair(PltSectionVA + Byte, GotPltSectionVA + Imm));
      Byte += 6;
    } else if (PltContents[Byte] == 0xff && PltContents[Byte + 1] == 0x25) {
      // The jmp instruction at the beginning of each PLT entry jumps to the
      // immediate.
      uint32_t Imm = support::endian::read32le(PltContents.data() + Byte + 2);
      Result.push_back(std::make_pair(PltSectionVA + Byte, Imm));
      Byte += 6;
    } else
      Byte++;
  }
  return Result;
}

static std::vector<std::pair<uint64_t, uint64_t>>
findX86_64PltEntries(uint64_t PltSectionVA, ArrayRef<uint8_t> PltContents) {
  // Do a lightweight parsing of PLT entries.
  std::vector<std::pair<uint64_t, uint64_t>> Result;
  for (uint64_t Byte = 0, End = PltContents.size(); Byte + 6 < End; ) {
    // Recognize a jmp.
    if (PltContents[Byte] == 0xff && PltContents[Byte + 1] == 0x25) {
      // The jmp instruction at the beginning of each PLT entry jumps to the
      // address of the next instruction plus the immediate.
      uint32_t Imm = support::endian::read32le(PltContents.data() + Byte + 2);
      Result.push_back(
          std::make_pair(PltSectionVA + Byte, PltSectionVA + Byte + 6 + Imm));
      Byte += 6;
    } else
      Byte++;
  }
  return Result;
}

std::vector<std::pair<uint64_t, uint64_t>> X86MCInstrAnalysis::findPltEntries(
    uint64_t PltSectionVA, ArrayRef<uint8_t> PltContents,
    uint64_t GotPltSectionVA, const Triple &TargetTriple) const {
  switch (TargetTriple.getArch()) {
  case Triple::x86:
    return findX86PltEntries(PltSectionVA, PltContents, GotPltSectionVA);
  case Triple::x86_64:
    return findX86_64PltEntries(PltSectionVA, PltContents);
  default:
    return {};
  }
}

} // end of namespace X86_MC

} // end of namespace llvm

static MCInstrAnalysis *createX86MCInstrAnalysis(const MCInstrInfo *Info) {
  return new X86_MC::X86MCInstrAnalysis(Info);
}

// Force static initialization.
extern "C" void LLVMInitializeX86TargetMC() {
  for (Target *T : {&getTheX86_32Target(), &getTheX86_64Target()}) {
    // Register the MC asm info.
    RegisterMCAsmInfoFn X(*T, createX86MCAsmInfo);

    // Register the MC instruction info.
    TargetRegistry::RegisterMCInstrInfo(*T, createX86MCInstrInfo);

    // Register the MC register info.
    TargetRegistry::RegisterMCRegInfo(*T, createX86MCRegisterInfo);

    // Register the MC subtarget info.
    TargetRegistry::RegisterMCSubtargetInfo(*T,
                                            X86_MC::createX86MCSubtargetInfo);

    // Register the MC instruction analyzer.
    TargetRegistry::RegisterMCInstrAnalysis(*T, createX86MCInstrAnalysis);

    // Register the code emitter.
    TargetRegistry::RegisterMCCodeEmitter(*T, createX86MCCodeEmitter);

    // Register the obj target streamer.
    TargetRegistry::RegisterObjectTargetStreamer(*T,
                                                 createX86ObjectTargetStreamer);

    // Register the asm target streamer.
    TargetRegistry::RegisterAsmTargetStreamer(*T, createX86AsmTargetStreamer);

    TargetRegistry::RegisterCOFFStreamer(*T, createX86WinCOFFStreamer);

    // Register the MCInstPrinter.
    TargetRegistry::RegisterMCInstPrinter(*T, createX86MCInstPrinter);

    // Register the MC relocation info.
    TargetRegistry::RegisterMCRelocationInfo(*T, createX86MCRelocationInfo);
  }

  // Register the asm backend.
  TargetRegistry::RegisterMCAsmBackend(getTheX86_32Target(),
                                       createX86_32AsmBackend);
  TargetRegistry::RegisterMCAsmBackend(getTheX86_64Target(),
                                       createX86_64AsmBackend);
}

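// Map Reg to the sub- or super-register of the same register family that is
// Size bits wide, returning 0 when there is no such register. For Size == 8,
// High requests the legacy high-byte register (AH/CH/DH/BH).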
unsigned llvm::getX86SubSuperRegisterOrZero(unsigned Reg, unsigned Size,
                                            bool High) {
  switch (Size) {
  default: return 0;
  case 8:
    if (High) {
      switch (Reg) {
      default: return getX86SubSuperRegisterOrZero(Reg, 64);
      case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
        return X86::SI;
      case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
        return X86::DI;
      case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
        return X86::BP;
      case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
        return X86::SP;
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AH;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DH;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CH;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BH;
      }
    } else {
      switch (Reg) {
      default: return 0;
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AL;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DL;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CL;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BL;
      case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
        return X86::SIL;
      case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
        return X86::DIL;
      case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
        return X86::BPL;
      case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
        return X86::SPL;
      case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
        return X86::R8B;
      case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
        return X86::R9B;
      case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
        return X86::R10B;
      case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
        return X86::R11B;
      case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
        return X86::R12B;
      case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
        return X86::R13B;
      case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
        return X86::R14B;
      case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
        return X86::R15B;
      }
    }
  case 16:
    switch (Reg) {
    default: return 0;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::AX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::DX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::CX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::BX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::SI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::DI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::BP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::SP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8W;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9W;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10W;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11W;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12W;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13W;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14W;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15W;
    }
  case 32:
    switch (Reg) {
    default: return 0;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::EAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::EDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::ECX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::EBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::ESI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::EDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::EBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::ESP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8D;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9D;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10D;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11D;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12D;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13D;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14D;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15D;
    }
  case 64:
    switch (Reg) {
    default: return 0;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::RAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::RDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::RCX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::RBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::RSI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::RDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::RBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::RSP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15;
    }
  }
}

unsigned llvm::getX86SubSuperRegister(unsigned Reg, unsigned Size, bool High) {
  unsigned Res = getX86SubSuperRegisterOrZero(Reg, Size, High);
  assert(Res != 0 && "Unexpected register or VT");
  return Res;
}