//===-- X86MCTargetDesc.cpp - X86 Target Descriptions --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides X86 specific target descriptions.
//
//===----------------------------------------------------------------------===//

#include "X86MCTargetDesc.h"
#include "TargetInfo/X86TargetInfo.h"
#include "X86ATTInstPrinter.h"
#include "X86BaseInfo.h"
#include "X86IntelInstPrinter.h"
#include "X86MCAsmInfo.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/Triple.h"
#include "llvm/DebugInfo/CodeView/CodeView.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCInstrAnalysis.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MachineLocation.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Host.h"
#include "llvm/Support/TargetRegistry.h"

using namespace llvm;

#define GET_REGINFO_MC_DESC
#include "X86GenRegisterInfo.inc"

#define GET_INSTRINFO_MC_DESC
#define GET_INSTRINFO_MC_HELPERS
#include "X86GenInstrInfo.inc"

#define GET_SUBTARGETINFO_MC_DESC
#include "X86GenSubtargetInfo.inc"

std::string X86_MC::ParseX86Triple(const Triple &TT) {
  std::string FS;
  if (TT.getArch() == Triple::x86_64)
    FS = "+64bit-mode,-32bit-mode,-16bit-mode";
  else if (TT.getEnvironment() != Triple::CODE16)
    FS = "-64bit-mode,+32bit-mode,-16bit-mode";
  else
    FS = "-64bit-mode,-32bit-mode,+16bit-mode";
  return FS;
}
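
// Illustrative examples of the mode features produced above (not verbatim
// from the upstream file): an "x86_64-unknown-linux-gnu" triple yields
// "+64bit-mode,-32bit-mode,-16bit-mode", while an "i386-pc-linux-gnu" triple
// yields "-64bit-mode,+32bit-mode,-16bit-mode".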

unsigned X86_MC::getDwarfRegFlavour(const Triple &TT, bool isEH) {
  if (TT.getArch() == Triple::x86_64)
    return DWARFFlavour::X86_64;

  if (TT.isOSDarwin())
    return isEH ? DWARFFlavour::X86_32_DarwinEH : DWARFFlavour::X86_32_Generic;
  if (TT.isOSCygMing())
    // Unsupported by now, just quick fallback
    return DWARFFlavour::X86_32_Generic;
  return DWARFFlavour::X86_32_Generic;
}

bool X86_MC::hasLockPrefix(const MCInst &MI) {
  return MI.getFlags() & X86::IP_HAS_LOCK;
}

void X86_MC::initLLVMToSEHAndCVRegMapping(MCRegisterInfo *MRI) {
  // FIXME: TableGen these.
  for (unsigned Reg = X86::NoRegister + 1; Reg < X86::NUM_TARGET_REGS; ++Reg) {
    unsigned SEH = MRI->getEncodingValue(Reg);
    MRI->mapLLVMRegToSEHReg(Reg, SEH);
  }

  // Mapping from CodeView to MC register id.
  static const struct {
    codeview::RegisterId CVReg;
    MCPhysReg Reg;
  } RegMap[] = {
      {codeview::RegisterId::AL, X86::AL},
      {codeview::RegisterId::CL, X86::CL},
      {codeview::RegisterId::DL, X86::DL},
      {codeview::RegisterId::BL, X86::BL},
      {codeview::RegisterId::AH, X86::AH},
      {codeview::RegisterId::CH, X86::CH},
      {codeview::RegisterId::DH, X86::DH},
      {codeview::RegisterId::BH, X86::BH},
      {codeview::RegisterId::AX, X86::AX},
      {codeview::RegisterId::CX, X86::CX},
      {codeview::RegisterId::DX, X86::DX},
      {codeview::RegisterId::BX, X86::BX},
      {codeview::RegisterId::SP, X86::SP},
      {codeview::RegisterId::BP, X86::BP},
      {codeview::RegisterId::SI, X86::SI},
      {codeview::RegisterId::DI, X86::DI},
      {codeview::RegisterId::EAX, X86::EAX},
      {codeview::RegisterId::ECX, X86::ECX},
      {codeview::RegisterId::EDX, X86::EDX},
      {codeview::RegisterId::EBX, X86::EBX},
      {codeview::RegisterId::ESP, X86::ESP},
      {codeview::RegisterId::EBP, X86::EBP},
      {codeview::RegisterId::ESI, X86::ESI},
      {codeview::RegisterId::EDI, X86::EDI},

      {codeview::RegisterId::EFLAGS, X86::EFLAGS},

      {codeview::RegisterId::ST0, X86::FP0},
      {codeview::RegisterId::ST1, X86::FP1},
      {codeview::RegisterId::ST2, X86::FP2},
      {codeview::RegisterId::ST3, X86::FP3},
      {codeview::RegisterId::ST4, X86::FP4},
      {codeview::RegisterId::ST5, X86::FP5},
      {codeview::RegisterId::ST6, X86::FP6},
      {codeview::RegisterId::ST7, X86::FP7},

      {codeview::RegisterId::MM0, X86::MM0},
      {codeview::RegisterId::MM1, X86::MM1},
      {codeview::RegisterId::MM2, X86::MM2},
      {codeview::RegisterId::MM3, X86::MM3},
      {codeview::RegisterId::MM4, X86::MM4},
      {codeview::RegisterId::MM5, X86::MM5},
      {codeview::RegisterId::MM6, X86::MM6},
      {codeview::RegisterId::MM7, X86::MM7},

      {codeview::RegisterId::XMM0, X86::XMM0},
      {codeview::RegisterId::XMM1, X86::XMM1},
      {codeview::RegisterId::XMM2, X86::XMM2},
      {codeview::RegisterId::XMM3, X86::XMM3},
      {codeview::RegisterId::XMM4, X86::XMM4},
      {codeview::RegisterId::XMM5, X86::XMM5},
      {codeview::RegisterId::XMM6, X86::XMM6},
      {codeview::RegisterId::XMM7, X86::XMM7},

      {codeview::RegisterId::XMM8, X86::XMM8},
      {codeview::RegisterId::XMM9, X86::XMM9},
      {codeview::RegisterId::XMM10, X86::XMM10},
      {codeview::RegisterId::XMM11, X86::XMM11},
      {codeview::RegisterId::XMM12, X86::XMM12},
      {codeview::RegisterId::XMM13, X86::XMM13},
      {codeview::RegisterId::XMM14, X86::XMM14},
      {codeview::RegisterId::XMM15, X86::XMM15},

      {codeview::RegisterId::SIL, X86::SIL},
      {codeview::RegisterId::DIL, X86::DIL},
      {codeview::RegisterId::BPL, X86::BPL},
      {codeview::RegisterId::SPL, X86::SPL},
      {codeview::RegisterId::RAX, X86::RAX},
      {codeview::RegisterId::RBX, X86::RBX},
      {codeview::RegisterId::RCX, X86::RCX},
      {codeview::RegisterId::RDX, X86::RDX},
      {codeview::RegisterId::RSI, X86::RSI},
      {codeview::RegisterId::RDI, X86::RDI},
      {codeview::RegisterId::RBP, X86::RBP},
      {codeview::RegisterId::RSP, X86::RSP},
      {codeview::RegisterId::R8, X86::R8},
      {codeview::RegisterId::R9, X86::R9},
      {codeview::RegisterId::R10, X86::R10},
      {codeview::RegisterId::R11, X86::R11},
      {codeview::RegisterId::R12, X86::R12},
      {codeview::RegisterId::R13, X86::R13},
      {codeview::RegisterId::R14, X86::R14},
      {codeview::RegisterId::R15, X86::R15},
      {codeview::RegisterId::R8B, X86::R8B},
      {codeview::RegisterId::R9B, X86::R9B},
      {codeview::RegisterId::R10B, X86::R10B},
      {codeview::RegisterId::R11B, X86::R11B},
      {codeview::RegisterId::R12B, X86::R12B},
      {codeview::RegisterId::R13B, X86::R13B},
      {codeview::RegisterId::R14B, X86::R14B},
      {codeview::RegisterId::R15B, X86::R15B},
      {codeview::RegisterId::R8W, X86::R8W},
      {codeview::RegisterId::R9W, X86::R9W},
      {codeview::RegisterId::R10W, X86::R10W},
      {codeview::RegisterId::R11W, X86::R11W},
      {codeview::RegisterId::R12W, X86::R12W},
      {codeview::RegisterId::R13W, X86::R13W},
      {codeview::RegisterId::R14W, X86::R14W},
      {codeview::RegisterId::R15W, X86::R15W},
      {codeview::RegisterId::R8D, X86::R8D},
      {codeview::RegisterId::R9D, X86::R9D},
      {codeview::RegisterId::R10D, X86::R10D},
      {codeview::RegisterId::R11D, X86::R11D},
      {codeview::RegisterId::R12D, X86::R12D},
      {codeview::RegisterId::R13D, X86::R13D},
      {codeview::RegisterId::R14D, X86::R14D},
      {codeview::RegisterId::R15D, X86::R15D},
      {codeview::RegisterId::AMD64_YMM0, X86::YMM0},
      {codeview::RegisterId::AMD64_YMM1, X86::YMM1},
      {codeview::RegisterId::AMD64_YMM2, X86::YMM2},
      {codeview::RegisterId::AMD64_YMM3, X86::YMM3},
      {codeview::RegisterId::AMD64_YMM4, X86::YMM4},
      {codeview::RegisterId::AMD64_YMM5, X86::YMM5},
      {codeview::RegisterId::AMD64_YMM6, X86::YMM6},
      {codeview::RegisterId::AMD64_YMM7, X86::YMM7},
      {codeview::RegisterId::AMD64_YMM8, X86::YMM8},
      {codeview::RegisterId::AMD64_YMM9, X86::YMM9},
      {codeview::RegisterId::AMD64_YMM10, X86::YMM10},
      {codeview::RegisterId::AMD64_YMM11, X86::YMM11},
      {codeview::RegisterId::AMD64_YMM12, X86::YMM12},
      {codeview::RegisterId::AMD64_YMM13, X86::YMM13},
      {codeview::RegisterId::AMD64_YMM14, X86::YMM14},
      {codeview::RegisterId::AMD64_YMM15, X86::YMM15},
      {codeview::RegisterId::AMD64_YMM16, X86::YMM16},
      {codeview::RegisterId::AMD64_YMM17, X86::YMM17},
      {codeview::RegisterId::AMD64_YMM18, X86::YMM18},
      {codeview::RegisterId::AMD64_YMM19, X86::YMM19},
      {codeview::RegisterId::AMD64_YMM20, X86::YMM20},
      {codeview::RegisterId::AMD64_YMM21, X86::YMM21},
      {codeview::RegisterId::AMD64_YMM22, X86::YMM22},
      {codeview::RegisterId::AMD64_YMM23, X86::YMM23},
      {codeview::RegisterId::AMD64_YMM24, X86::YMM24},
      {codeview::RegisterId::AMD64_YMM25, X86::YMM25},
      {codeview::RegisterId::AMD64_YMM26, X86::YMM26},
      {codeview::RegisterId::AMD64_YMM27, X86::YMM27},
      {codeview::RegisterId::AMD64_YMM28, X86::YMM28},
      {codeview::RegisterId::AMD64_YMM29, X86::YMM29},
      {codeview::RegisterId::AMD64_YMM30, X86::YMM30},
      {codeview::RegisterId::AMD64_YMM31, X86::YMM31},
      {codeview::RegisterId::AMD64_ZMM0, X86::ZMM0},
      {codeview::RegisterId::AMD64_ZMM1, X86::ZMM1},
      {codeview::RegisterId::AMD64_ZMM2, X86::ZMM2},
      {codeview::RegisterId::AMD64_ZMM3, X86::ZMM3},
      {codeview::RegisterId::AMD64_ZMM4, X86::ZMM4},
      {codeview::RegisterId::AMD64_ZMM5, X86::ZMM5},
      {codeview::RegisterId::AMD64_ZMM6, X86::ZMM6},
      {codeview::RegisterId::AMD64_ZMM7, X86::ZMM7},
      {codeview::RegisterId::AMD64_ZMM8, X86::ZMM8},
      {codeview::RegisterId::AMD64_ZMM9, X86::ZMM9},
      {codeview::RegisterId::AMD64_ZMM10, X86::ZMM10},
      {codeview::RegisterId::AMD64_ZMM11, X86::ZMM11},
      {codeview::RegisterId::AMD64_ZMM12, X86::ZMM12},
      {codeview::RegisterId::AMD64_ZMM13, X86::ZMM13},
      {codeview::RegisterId::AMD64_ZMM14, X86::ZMM14},
      {codeview::RegisterId::AMD64_ZMM15, X86::ZMM15},
      {codeview::RegisterId::AMD64_ZMM16, X86::ZMM16},
      {codeview::RegisterId::AMD64_ZMM17, X86::ZMM17},
      {codeview::RegisterId::AMD64_ZMM18, X86::ZMM18},
      {codeview::RegisterId::AMD64_ZMM19, X86::ZMM19},
      {codeview::RegisterId::AMD64_ZMM20, X86::ZMM20},
      {codeview::RegisterId::AMD64_ZMM21, X86::ZMM21},
      {codeview::RegisterId::AMD64_ZMM22, X86::ZMM22},
      {codeview::RegisterId::AMD64_ZMM23, X86::ZMM23},
      {codeview::RegisterId::AMD64_ZMM24, X86::ZMM24},
      {codeview::RegisterId::AMD64_ZMM25, X86::ZMM25},
      {codeview::RegisterId::AMD64_ZMM26, X86::ZMM26},
      {codeview::RegisterId::AMD64_ZMM27, X86::ZMM27},
      {codeview::RegisterId::AMD64_ZMM28, X86::ZMM28},
      {codeview::RegisterId::AMD64_ZMM29, X86::ZMM29},
      {codeview::RegisterId::AMD64_ZMM30, X86::ZMM30},
      {codeview::RegisterId::AMD64_ZMM31, X86::ZMM31},
      {codeview::RegisterId::AMD64_K0, X86::K0},
      {codeview::RegisterId::AMD64_K1, X86::K1},
      {codeview::RegisterId::AMD64_K2, X86::K2},
      {codeview::RegisterId::AMD64_K3, X86::K3},
      {codeview::RegisterId::AMD64_K4, X86::K4},
      {codeview::RegisterId::AMD64_K5, X86::K5},
      {codeview::RegisterId::AMD64_K6, X86::K6},
      {codeview::RegisterId::AMD64_K7, X86::K7},
      {codeview::RegisterId::AMD64_XMM16, X86::XMM16},
      {codeview::RegisterId::AMD64_XMM17, X86::XMM17},
      {codeview::RegisterId::AMD64_XMM18, X86::XMM18},
      {codeview::RegisterId::AMD64_XMM19, X86::XMM19},
      {codeview::RegisterId::AMD64_XMM20, X86::XMM20},
      {codeview::RegisterId::AMD64_XMM21, X86::XMM21},
      {codeview::RegisterId::AMD64_XMM22, X86::XMM22},
      {codeview::RegisterId::AMD64_XMM23, X86::XMM23},
      {codeview::RegisterId::AMD64_XMM24, X86::XMM24},
      {codeview::RegisterId::AMD64_XMM25, X86::XMM25},
      {codeview::RegisterId::AMD64_XMM26, X86::XMM26},
      {codeview::RegisterId::AMD64_XMM27, X86::XMM27},
      {codeview::RegisterId::AMD64_XMM28, X86::XMM28},
      {codeview::RegisterId::AMD64_XMM29, X86::XMM29},
      {codeview::RegisterId::AMD64_XMM30, X86::XMM30},
      {codeview::RegisterId::AMD64_XMM31, X86::XMM31},
  };

  for (unsigned I = 0; I < array_lengthof(RegMap); ++I)
    MRI->mapLLVMRegToCVReg(RegMap[I].Reg, static_cast<int>(RegMap[I].CVReg));
}
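
// Illustrative background for the SEH mapping above: the Win64 unwind codes
// identify general purpose registers by their hardware encoding (RAX=0,
// RCX=1, ..., RBP=5, ..., R15=15), which is why getEncodingValue() can be
// used directly as the SEH register number.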

MCSubtargetInfo *X86_MC::createX86MCSubtargetInfo(const Triple &TT,
                                                  StringRef CPU, StringRef FS) {
  std::string ArchFS = X86_MC::ParseX86Triple(TT);
  if (!FS.empty())
    ArchFS = (Twine(ArchFS) + "," + FS).str();

  std::string CPUName = CPU;
  if (CPUName.empty())
    CPUName = "generic";

  return createX86MCSubtargetInfoImpl(TT, CPUName, ArchFS);
}
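
// Illustrative example (not verbatim from the upstream file): for an x86-64
// triple and FS == "+avx2", ArchFS becomes
// "+64bit-mode,-32bit-mode,-16bit-mode,+avx2", and an empty CPU string falls
// back to the "generic" CPU model.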

static MCInstrInfo *createX86MCInstrInfo() {
  MCInstrInfo *X = new MCInstrInfo();
  InitX86MCInstrInfo(X);
  return X;
}

static MCRegisterInfo *createX86MCRegisterInfo(const Triple &TT) {
  unsigned RA = (TT.getArch() == Triple::x86_64)
                    ? X86::RIP  // Should have dwarf #16.
                    : X86::EIP; // Should have dwarf #8.

  MCRegisterInfo *X = new MCRegisterInfo();
  InitX86MCRegisterInfo(X, RA, X86_MC::getDwarfRegFlavour(TT, false),
                        X86_MC::getDwarfRegFlavour(TT, true), RA);
  X86_MC::initLLVMToSEHAndCVRegMapping(X);
  return X;
}

static MCAsmInfo *createX86MCAsmInfo(const MCRegisterInfo &MRI,
                                     const Triple &TheTriple) {
  bool is64Bit = TheTriple.getArch() == Triple::x86_64;

  MCAsmInfo *MAI;
  if (TheTriple.isOSBinFormatMachO()) {
    if (is64Bit)
      MAI = new X86_64MCAsmInfoDarwin(TheTriple);
    else
      MAI = new X86MCAsmInfoDarwin(TheTriple);
  } else if (TheTriple.isOSBinFormatELF()) {
    // Force the use of an ELF container.
    MAI = new X86ELFMCAsmInfo(TheTriple);
  } else if (TheTriple.isWindowsMSVCEnvironment() ||
             TheTriple.isWindowsCoreCLREnvironment()) {
    MAI = new X86MCAsmInfoMicrosoft(TheTriple);
  } else if (TheTriple.isOSCygMing() ||
             TheTriple.isWindowsItaniumEnvironment()) {
    MAI = new X86MCAsmInfoGNUCOFF(TheTriple);
  } else {
    // The default is ELF.
    MAI = new X86ELFMCAsmInfo(TheTriple);
  }

  // Initialize initial frame state.
  // Calculate the number of bytes used for storing the return address.
  int stackGrowth = is64Bit ? -8 : -4;

  // Initial state of the frame pointer is esp+stackGrowth.
  unsigned StackPtr = is64Bit ? X86::RSP : X86::ESP;
  MCCFIInstruction Inst = MCCFIInstruction::createDefCfa(
      nullptr, MRI.getDwarfRegNum(StackPtr, true), -stackGrowth);
  MAI->addInitialFrameState(Inst);

  // Add the return address to the move list.
  unsigned InstPtr = is64Bit ? X86::RIP : X86::EIP;
  MCCFIInstruction Inst2 = MCCFIInstruction::createOffset(
      nullptr, MRI.getDwarfRegNum(InstPtr, true), stackGrowth);
  MAI->addInitialFrameState(Inst2);

  return MAI;
}
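
// Worked example (illustrative): on x86-64, stackGrowth is -8, so at function
// entry the CFA is defined as RSP + 8 and the return address (tracked via the
// RIP column) is recorded at CFA - 8, i.e. the slot the CALL just pushed.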

static MCInstPrinter *createX86MCInstPrinter(const Triple &T,
                                             unsigned SyntaxVariant,
                                             const MCAsmInfo &MAI,
                                             const MCInstrInfo &MII,
                                             const MCRegisterInfo &MRI) {
  if (SyntaxVariant == 0)
    return new X86ATTInstPrinter(MAI, MII, MRI);
  if (SyntaxVariant == 1)
    return new X86IntelInstPrinter(MAI, MII, MRI);
  return nullptr;
}

static MCRelocationInfo *createX86MCRelocationInfo(const Triple &TheTriple,
                                                   MCContext &Ctx) {
  // Default to the stock relocation info.
  return llvm::createMCRelocationInfo(TheTriple, Ctx);
}

namespace llvm {
namespace X86_MC {

class X86MCInstrAnalysis : public MCInstrAnalysis {
  X86MCInstrAnalysis(const X86MCInstrAnalysis &) = delete;
  X86MCInstrAnalysis &operator=(const X86MCInstrAnalysis &) = delete;
  virtual ~X86MCInstrAnalysis() = default;

public:
  X86MCInstrAnalysis(const MCInstrInfo *MCII) : MCInstrAnalysis(MCII) {}

#define GET_STIPREDICATE_DECLS_FOR_MC_ANALYSIS
#include "X86GenSubtargetInfo.inc"

  bool clearsSuperRegisters(const MCRegisterInfo &MRI, const MCInst &Inst,
                            APInt &Mask) const override;
  std::vector<std::pair<uint64_t, uint64_t>>
  findPltEntries(uint64_t PltSectionVA, ArrayRef<uint8_t> PltContents,
                 uint64_t GotSectionVA,
                 const Triple &TargetTriple) const override;
  Optional<uint64_t> evaluateMemoryOperandAddress(const MCInst &Inst,
                                                  uint64_t Addr,
                                                  uint64_t Size) const override;
};

#define GET_STIPREDICATE_DEFS_FOR_MC_ANALYSIS
#include "X86GenSubtargetInfo.inc"

bool X86MCInstrAnalysis::clearsSuperRegisters(const MCRegisterInfo &MRI,
                                              const MCInst &Inst,
                                              APInt &Mask) const {
  const MCInstrDesc &Desc = Info->get(Inst.getOpcode());
  unsigned NumDefs = Desc.getNumDefs();
  unsigned NumImplicitDefs = Desc.getNumImplicitDefs();
  assert(Mask.getBitWidth() == NumDefs + NumImplicitDefs &&
         "Unexpected number of bits in the mask!");

  bool HasVEX = (Desc.TSFlags & X86II::EncodingMask) == X86II::VEX;
  bool HasEVEX = (Desc.TSFlags & X86II::EncodingMask) == X86II::EVEX;
  bool HasXOP = (Desc.TSFlags & X86II::EncodingMask) == X86II::XOP;

  const MCRegisterClass &GR32RC = MRI.getRegClass(X86::GR32RegClassID);
  const MCRegisterClass &VR128XRC = MRI.getRegClass(X86::VR128XRegClassID);
  const MCRegisterClass &VR256XRC = MRI.getRegClass(X86::VR256XRegClassID);

  auto ClearsSuperReg = [=](unsigned RegID) {
    // On X86-64, a general purpose integer register is viewed as a 64-bit
    // register internal to the processor.
    // An update to the lower 32 bits of a 64 bit integer register is
    // architecturally defined to zero extend the upper 32 bits.
    if (GR32RC.contains(RegID))
      return true;

    // Early exit if this instruction has no vex/evex/xop prefix.
    if (!HasEVEX && !HasVEX && !HasXOP)
      return false;

    // All VEX and EVEX encoded instructions are defined to zero the high bits
    // of the destination register up to VLMAX (i.e. the maximum vector
    // register width pertaining to the instruction).
    // We assume the same behavior for XOP instructions too.
    return VR128XRC.contains(RegID) || VR256XRC.contains(RegID);
  };

  Mask.clearAllBits();
  for (unsigned I = 0, E = NumDefs; I < E; ++I) {
    const MCOperand &Op = Inst.getOperand(I);
    if (ClearsSuperReg(Op.getReg()))
      Mask.setBit(I);
  }

  for (unsigned I = 0, E = NumImplicitDefs; I < E; ++I) {
    const MCPhysReg Reg = Desc.getImplicitDefs()[I];
    if (ClearsSuperReg(Reg))
      Mask.setBit(NumDefs + I);
  }

  return Mask.getBoolValue();
}
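
// Illustrative examples of the rule above (not part of the upstream file):
// writing EAX (e.g. "mov eax, 1") zeroes RAX[63:32], and a VEX/EVEX encoded
// instruction with an XMM/YMM destination (e.g. "vaddps xmm0, xmm1, xmm2")
// zeroes the destination vector register above the written width, up to VLMAX.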

static std::vector<std::pair<uint64_t, uint64_t>>
findX86PltEntries(uint64_t PltSectionVA, ArrayRef<uint8_t> PltContents,
                  uint64_t GotPltSectionVA) {
  // Do a lightweight parsing of PLT entries.
  std::vector<std::pair<uint64_t, uint64_t>> Result;
  for (uint64_t Byte = 0, End = PltContents.size(); Byte + 6 < End; ) {
    if (PltContents[Byte] == 0xff && PltContents[Byte + 1] == 0xa3) {
      // The jmp instruction at the beginning of each PLT entry jumps to the
      // address of the base of the .got.plt section plus the immediate.
      uint32_t Imm = support::endian::read32le(PltContents.data() + Byte + 2);
      Result.push_back(
          std::make_pair(PltSectionVA + Byte, GotPltSectionVA + Imm));
      Byte += 6;
    } else if (PltContents[Byte] == 0xff && PltContents[Byte + 1] == 0x25) {
      // The jmp instruction at the beginning of each PLT entry jumps to the
      // absolute address given by the immediate.
      uint32_t Imm = support::endian::read32le(PltContents.data() + Byte + 2);
      Result.push_back(std::make_pair(PltSectionVA + Byte, Imm));
      Byte += 6;
    } else
      Byte++;
  }
  return Result;
}
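
// Encoding notes (illustrative): in a 32-bit PIC PLT an entry starts with
// "ff a3 <imm32>", i.e. "jmp *imm32(%ebx)" with %ebx holding the GOT base,
// while a non-PIC 32-bit PLT entry starts with "ff 25 <addr32>", an indirect
// jump through an absolute .got.plt slot address.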

static std::vector<std::pair<uint64_t, uint64_t>>
findX86_64PltEntries(uint64_t PltSectionVA, ArrayRef<uint8_t> PltContents) {
  // Do a lightweight parsing of PLT entries.
  std::vector<std::pair<uint64_t, uint64_t>> Result;
  for (uint64_t Byte = 0, End = PltContents.size(); Byte + 6 < End; ) {
    if (PltContents[Byte] == 0xff && PltContents[Byte + 1] == 0x25) {
      // The jmp instruction at the beginning of each PLT entry jumps to the
      // address of the next instruction plus the immediate.
      uint32_t Imm = support::endian::read32le(PltContents.data() + Byte + 2);
      Result.push_back(
          std::make_pair(PltSectionVA + Byte, PltSectionVA + Byte + 6 + Imm));
      Byte += 6;
    } else
      Byte++;
  }
  return Result;
}
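
// Encoding note (illustrative): on x86-64 a PLT entry starts with
// "ff 25 <rel32>", i.e. "jmp *rel32(%rip)", so the .got.plt slot address is
// the address of the 6-byte jmp plus 6 plus the little-endian rel32, which is
// exactly what the loop above computes.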

std::vector<std::pair<uint64_t, uint64_t>> X86MCInstrAnalysis::findPltEntries(
    uint64_t PltSectionVA, ArrayRef<uint8_t> PltContents,
    uint64_t GotPltSectionVA, const Triple &TargetTriple) const {
  switch (TargetTriple.getArch()) {
  case Triple::x86:
    return findX86PltEntries(PltSectionVA, PltContents, GotPltSectionVA);
  case Triple::x86_64:
    return findX86_64PltEntries(PltSectionVA, PltContents);
  default:
    return {};
  }
}

Optional<uint64_t> X86MCInstrAnalysis::evaluateMemoryOperandAddress(
    const MCInst &Inst, uint64_t Addr, uint64_t Size) const {
  const MCInstrDesc &MCID = Info->get(Inst.getOpcode());
  int MemOpStart = X86II::getMemoryOperandNo(MCID.TSFlags);
  if (MemOpStart == -1)
    return None;
  MemOpStart += X86II::getOperandBias(MCID);

  const MCOperand &SegReg = Inst.getOperand(MemOpStart + X86::AddrSegmentReg);
  const MCOperand &BaseReg = Inst.getOperand(MemOpStart + X86::AddrBaseReg);
  const MCOperand &IndexReg = Inst.getOperand(MemOpStart + X86::AddrIndexReg);
  const MCOperand &ScaleAmt = Inst.getOperand(MemOpStart + X86::AddrScaleAmt);
  const MCOperand &Disp = Inst.getOperand(MemOpStart + X86::AddrDisp);
  if (SegReg.getReg() != 0 || IndexReg.getReg() != 0 || ScaleAmt.getImm() != 1 ||
      !Disp.isImm())
    return None;

  // RIP-relative addressing.
  if (BaseReg.getReg() == X86::RIP)
    return Addr + Size + Disp.getImm();
  return None;
}
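
// Worked example for evaluateMemoryOperandAddress above (illustrative): a
// 7-byte RIP-relative "lea rax, [rip + 0x20]" decoded at Addr = 0x1000
// evaluates to 0x1000 + 7 + 0x20 = 0x1027, the address of the next
// instruction plus the displacement.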

} // end of namespace X86_MC

} // end of namespace llvm

static MCInstrAnalysis *createX86MCInstrAnalysis(const MCInstrInfo *Info) {
  return new X86_MC::X86MCInstrAnalysis(Info);
}

// Force static initialization.
extern "C" void LLVMInitializeX86TargetMC() {
  for (Target *T : {&getTheX86_32Target(), &getTheX86_64Target()}) {
    // Register the MC asm info.
    RegisterMCAsmInfoFn X(*T, createX86MCAsmInfo);

    // Register the MC instruction info.
    TargetRegistry::RegisterMCInstrInfo(*T, createX86MCInstrInfo);

    // Register the MC register info.
    TargetRegistry::RegisterMCRegInfo(*T, createX86MCRegisterInfo);

    // Register the MC subtarget info.
    TargetRegistry::RegisterMCSubtargetInfo(*T,
                                            X86_MC::createX86MCSubtargetInfo);

    // Register the MC instruction analyzer.
    TargetRegistry::RegisterMCInstrAnalysis(*T, createX86MCInstrAnalysis);

    // Register the code emitter.
    TargetRegistry::RegisterMCCodeEmitter(*T, createX86MCCodeEmitter);

    // Register the obj target streamer.
    TargetRegistry::RegisterObjectTargetStreamer(*T,
                                                 createX86ObjectTargetStreamer);

    // Register the asm target streamer.
    TargetRegistry::RegisterAsmTargetStreamer(*T, createX86AsmTargetStreamer);

    TargetRegistry::RegisterCOFFStreamer(*T, createX86WinCOFFStreamer);

    // Register the MCInstPrinter.
    TargetRegistry::RegisterMCInstPrinter(*T, createX86MCInstPrinter);

    // Register the MC relocation info.
    TargetRegistry::RegisterMCRelocationInfo(*T, createX86MCRelocationInfo);
  }

  // Register the asm backend.
  TargetRegistry::RegisterMCAsmBackend(getTheX86_32Target(),
                                       createX86_32AsmBackend);
  TargetRegistry::RegisterMCAsmBackend(getTheX86_64Target(),
                                       createX86_64AsmBackend);
}
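
// Illustrative client-side sketch (assumed typical TargetRegistry usage, not
// part of this file): after target-MC initialization, a tool would do roughly
//   std::string Err;
//   const Target *T =
//       TargetRegistry::lookupTarget("x86_64-unknown-linux-gnu", Err);
//   std::unique_ptr<MCRegisterInfo> MRI(T->createMCRegInfo(TripleName));
//   std::unique_ptr<MCInstrInfo> MII(T->createMCInstrInfo());
// to obtain the objects registered above.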

MCRegister llvm::getX86SubSuperRegisterOrZero(MCRegister Reg, unsigned Size,
                                              bool High) {
  switch (Size) {
  default: return X86::NoRegister;
  case 8:
    if (High) {
      switch (Reg.id()) {
      default: return getX86SubSuperRegisterOrZero(Reg, 64);
      case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
        return X86::SI;
      case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
        return X86::DI;
      case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
        return X86::BP;
      case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
        return X86::SP;
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AH;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DH;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CH;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BH;
      }
    } else {
      switch (Reg.id()) {
      default: return X86::NoRegister;
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AL;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DL;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CL;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BL;
      case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
        return X86::SIL;
      case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
        return X86::DIL;
      case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
        return X86::BPL;
      case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
        return X86::SPL;
      case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
        return X86::R8B;
      case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
        return X86::R9B;
      case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
        return X86::R10B;
      case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
        return X86::R11B;
      case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
        return X86::R12B;
      case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
        return X86::R13B;
      case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
        return X86::R14B;
      case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
        return X86::R15B;
      }
    }
  case 16:
    switch (Reg.id()) {
    default: return X86::NoRegister;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::AX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::DX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::CX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::BX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::SI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::DI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::BP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::SP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8W;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9W;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10W;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11W;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12W;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13W;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14W;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15W;
    }
  case 32:
    switch (Reg.id()) {
    default: return X86::NoRegister;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::EAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::EDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::ECX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::EBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::ESI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::EDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::EBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::ESP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8D;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9D;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10D;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11D;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12D;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13D;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14D;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15D;
    }
  case 64:
    switch (Reg.id()) {
    default: return X86::NoRegister;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::RAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::RDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::RCX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::RBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::RSI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::RDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::RBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::RSP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15;
    }
  }
}
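
// Usage examples (illustrative): getX86SubSuperRegisterOrZero(X86::RAX, 16)
// yields X86::AX, getX86SubSuperRegisterOrZero(X86::AL, 64) yields X86::RAX,
// and getX86SubSuperRegisterOrZero(X86::XMM0, 32) yields X86::NoRegister.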

MCRegister llvm::getX86SubSuperRegister(MCRegister Reg, unsigned Size,
                                        bool High) {
  MCRegister Res = getX86SubSuperRegisterOrZero(Reg, Size, High);
  assert(Res != X86::NoRegister && "Unexpected register or VT");
  return Res;
}