//===-- X86MCTargetDesc.cpp - X86 Target Descriptions --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides X86 specific target descriptions.
//
//===----------------------------------------------------------------------===//
#include "X86MCTargetDesc.h"
#include "TargetInfo/X86TargetInfo.h"
#include "X86ATTInstPrinter.h"
#include "X86BaseInfo.h"
#include "X86IntelInstPrinter.h"
#include "X86MCAsmInfo.h"
#include "X86TargetStreamer.h"
#include "llvm/ADT/APInt.h"
#include "llvm/DebugInfo/CodeView/CodeView.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCInstrAnalysis.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/TargetParser/Host.h"
#include "llvm/TargetParser/Triple.h"

using namespace llvm;

#define GET_REGINFO_MC_DESC
#include "X86GenRegisterInfo.inc"

#define GET_INSTRINFO_MC_DESC
#define GET_INSTRINFO_MC_HELPERS
#define ENABLE_INSTR_PREDICATE_VERIFIER
#include "X86GenInstrInfo.inc"

#define GET_SUBTARGETINFO_MC_DESC
#include "X86GenSubtargetInfo.inc"
std::string X86_MC::ParseX86Triple(const Triple &TT) {
  std::string FS;
  // SSE2 should default to enabled in 64-bit mode, but can be turned off
  // explicitly.
  if (TT.isArch64Bit())
    FS = "+64bit-mode,-32bit-mode,-16bit-mode,+sse2";
  else if (TT.getEnvironment() != Triple::CODE16)
    FS = "-64bit-mode,+32bit-mode,-16bit-mode";
  else
    FS = "-64bit-mode,-32bit-mode,+16bit-mode";
  return FS;
}

unsigned X86_MC::getDwarfRegFlavour(const Triple &TT, bool isEH) {
  if (TT.getArch() == Triple::x86_64)
    return DWARFFlavour::X86_64;

  if (TT.isOSDarwin())
    return isEH ? DWARFFlavour::X86_32_DarwinEH : DWARFFlavour::X86_32_Generic;
  if (TT.isOSCygMing())
    // Unsupported by now, just quick fallback
    return DWARFFlavour::X86_32_Generic;
  return DWARFFlavour::X86_32_Generic;
}

bool X86_MC::hasLockPrefix(const MCInst &MI) {
  return MI.getFlags() & X86::IP_HAS_LOCK;
}
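
// Note: IP_HAS_LOCK is an MCInst flag recorded when the instruction carries
// an explicit LOCK prefix, e.g. "lock addl $1, (%rdi)". It reflects how the
// instruction was written or decoded, not whether the opcode is lockable.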

static bool isMemOperand(const MCInst &MI, unsigned Op, unsigned RegClassID) {
  const MCOperand &Base = MI.getOperand(Op + X86::AddrBaseReg);
  const MCOperand &Index = MI.getOperand(Op + X86::AddrIndexReg);
  const MCRegisterClass &RC = X86MCRegisterClasses[RegClassID];

  return (Base.isReg() && Base.getReg() && RC.contains(Base.getReg())) ||
         (Index.isReg() && Index.getReg() && RC.contains(Index.getReg()));
}

bool X86_MC::is16BitMemOperand(const MCInst &MI, unsigned Op,
                               const MCSubtargetInfo &STI) {
  const MCOperand &Base = MI.getOperand(Op + X86::AddrBaseReg);
  const MCOperand &Index = MI.getOperand(Op + X86::AddrIndexReg);

  if (STI.hasFeature(X86::Is16Bit) && Base.isReg() && !Base.getReg() &&
      Index.isReg() && !Index.getReg())
    return true;
  return isMemOperand(MI, Op, X86::GR16RegClassID);
}
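
// Illustrative example: "movw (%bx,%si), %ax" uses GR16 base/index
// registers, so is16BitMemOperand() returns true; in 16-bit mode a bare
// displacement (no base, no index) is also treated as a 16-bit memory
// reference, which is what the early-return above handles.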

bool X86_MC::is32BitMemOperand(const MCInst &MI, unsigned Op) {
  const MCOperand &Base = MI.getOperand(Op + X86::AddrBaseReg);
  const MCOperand &Index = MI.getOperand(Op + X86::AddrIndexReg);
  if (Base.isReg() && Base.getReg() == X86::EIP) {
    assert(Index.isReg() && !Index.getReg() && "Invalid eip-based address");
    return true;
  }
  if (Index.isReg() && Index.getReg() == X86::EIZ)
    return true;
  return isMemOperand(MI, Op, X86::GR32RegClassID);
}
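
// Note: EIZ is a pseudo index register the assembler uses to force a SIB
// byte (e.g. "(%eax,%eiz,1)"); its presence marks the operand as 32-bit
// even though EIZ itself encodes "no index".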

#ifndef NDEBUG
bool X86_MC::is64BitMemOperand(const MCInst &MI, unsigned Op) {
  return isMemOperand(MI, Op, X86::GR64RegClassID);
}
#endif

bool X86_MC::needsAddressSizeOverride(const MCInst &MI,
                                      const MCSubtargetInfo &STI,
                                      int MemoryOperand, uint64_t TSFlags) {
  uint64_t AdSize = TSFlags & X86II::AdSizeMask;
  bool Is16BitMode = STI.hasFeature(X86::Is16Bit);
  bool Is32BitMode = STI.hasFeature(X86::Is32Bit);
  bool Is64BitMode = STI.hasFeature(X86::Is64Bit);
  if ((Is16BitMode && AdSize == X86II::AdSize32) ||
      (Is32BitMode && AdSize == X86II::AdSize16) ||
      (Is64BitMode && AdSize == X86II::AdSize32))
    return true;
  uint64_t Form = TSFlags & X86II::FormMask;
  switch (Form) {
  default:
    break;
  case X86II::RawFrmDstSrc: {
    MCRegister siReg = MI.getOperand(1).getReg();
    assert(((siReg == X86::SI && MI.getOperand(0).getReg() == X86::DI) ||
            (siReg == X86::ESI && MI.getOperand(0).getReg() == X86::EDI) ||
            (siReg == X86::RSI && MI.getOperand(0).getReg() == X86::RDI)) &&
           "SI and DI register sizes do not match");
    return (!Is32BitMode && siReg == X86::ESI) ||
           (Is32BitMode && siReg == X86::SI);
  }
  case X86II::RawFrmSrc: {
    MCRegister siReg = MI.getOperand(0).getReg();
    return (!Is32BitMode && siReg == X86::ESI) ||
           (Is32BitMode && siReg == X86::SI);
  }
  case X86II::RawFrmDst: {
    MCRegister siReg = MI.getOperand(0).getReg();
    return (!Is32BitMode && siReg == X86::EDI) ||
           (Is32BitMode && siReg == X86::DI);
  }
  }

  // Determine where the memory operand starts, if present.
  if (MemoryOperand < 0)
    return false;

  if (STI.hasFeature(X86::Is64Bit)) {
    assert(!is16BitMemOperand(MI, MemoryOperand, STI));
    return is32BitMemOperand(MI, MemoryOperand);
  }
  if (STI.hasFeature(X86::Is32Bit)) {
    assert(!is64BitMemOperand(MI, MemoryOperand));
    return is16BitMemOperand(MI, MemoryOperand, STI);
  }
  assert(STI.hasFeature(X86::Is16Bit));
  assert(!is64BitMemOperand(MI, MemoryOperand));
  return !is16BitMemOperand(MI, MemoryOperand, STI);
}
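
// Illustrative example: in 64-bit mode "movl (%eax), %ebx" uses a 32-bit
// base register, so the encoder must emit the 0x67 address-size override
// prefix; with a 64-bit base such as (%rax) no prefix is needed.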

void X86_MC::initLLVMToSEHAndCVRegMapping(MCRegisterInfo *MRI) {
  // FIXME: TableGen these.
  for (unsigned Reg = X86::NoRegister + 1; Reg < X86::NUM_TARGET_REGS; ++Reg) {
    unsigned SEH = MRI->getEncodingValue(Reg);
    MRI->mapLLVMRegToSEHReg(Reg, SEH);
  }

  // Mapping from CodeView to MC register id.
  static const struct {
    codeview::RegisterId CVReg;
    MCPhysReg Reg;
  } RegMap[] = {
      {codeview::RegisterId::AL, X86::AL},
      {codeview::RegisterId::CL, X86::CL},
      {codeview::RegisterId::DL, X86::DL},
      {codeview::RegisterId::BL, X86::BL},
      {codeview::RegisterId::AH, X86::AH},
      {codeview::RegisterId::CH, X86::CH},
      {codeview::RegisterId::DH, X86::DH},
      {codeview::RegisterId::BH, X86::BH},
      {codeview::RegisterId::AX, X86::AX},
      {codeview::RegisterId::CX, X86::CX},
      {codeview::RegisterId::DX, X86::DX},
      {codeview::RegisterId::BX, X86::BX},
      {codeview::RegisterId::SP, X86::SP},
      {codeview::RegisterId::BP, X86::BP},
      {codeview::RegisterId::SI, X86::SI},
      {codeview::RegisterId::DI, X86::DI},
      {codeview::RegisterId::EAX, X86::EAX},
      {codeview::RegisterId::ECX, X86::ECX},
      {codeview::RegisterId::EDX, X86::EDX},
      {codeview::RegisterId::EBX, X86::EBX},
      {codeview::RegisterId::ESP, X86::ESP},
      {codeview::RegisterId::EBP, X86::EBP},
      {codeview::RegisterId::ESI, X86::ESI},
      {codeview::RegisterId::EDI, X86::EDI},

      {codeview::RegisterId::EFLAGS, X86::EFLAGS},

      {codeview::RegisterId::ST0, X86::ST0},
      {codeview::RegisterId::ST1, X86::ST1},
      {codeview::RegisterId::ST2, X86::ST2},
      {codeview::RegisterId::ST3, X86::ST3},
      {codeview::RegisterId::ST4, X86::ST4},
      {codeview::RegisterId::ST5, X86::ST5},
      {codeview::RegisterId::ST6, X86::ST6},
      {codeview::RegisterId::ST7, X86::ST7},

      {codeview::RegisterId::ST0, X86::FP0},
      {codeview::RegisterId::ST1, X86::FP1},
      {codeview::RegisterId::ST2, X86::FP2},
      {codeview::RegisterId::ST3, X86::FP3},
      {codeview::RegisterId::ST4, X86::FP4},
      {codeview::RegisterId::ST5, X86::FP5},
      {codeview::RegisterId::ST6, X86::FP6},
      {codeview::RegisterId::ST7, X86::FP7},

      {codeview::RegisterId::MM0, X86::MM0},
      {codeview::RegisterId::MM1, X86::MM1},
      {codeview::RegisterId::MM2, X86::MM2},
      {codeview::RegisterId::MM3, X86::MM3},
      {codeview::RegisterId::MM4, X86::MM4},
      {codeview::RegisterId::MM5, X86::MM5},
      {codeview::RegisterId::MM6, X86::MM6},
      {codeview::RegisterId::MM7, X86::MM7},

      {codeview::RegisterId::XMM0, X86::XMM0},
      {codeview::RegisterId::XMM1, X86::XMM1},
      {codeview::RegisterId::XMM2, X86::XMM2},
      {codeview::RegisterId::XMM3, X86::XMM3},
      {codeview::RegisterId::XMM4, X86::XMM4},
      {codeview::RegisterId::XMM5, X86::XMM5},
      {codeview::RegisterId::XMM6, X86::XMM6},
      {codeview::RegisterId::XMM7, X86::XMM7},

      {codeview::RegisterId::XMM8, X86::XMM8},
      {codeview::RegisterId::XMM9, X86::XMM9},
      {codeview::RegisterId::XMM10, X86::XMM10},
      {codeview::RegisterId::XMM11, X86::XMM11},
      {codeview::RegisterId::XMM12, X86::XMM12},
      {codeview::RegisterId::XMM13, X86::XMM13},
      {codeview::RegisterId::XMM14, X86::XMM14},
      {codeview::RegisterId::XMM15, X86::XMM15},

      {codeview::RegisterId::SIL, X86::SIL},
      {codeview::RegisterId::DIL, X86::DIL},
      {codeview::RegisterId::BPL, X86::BPL},
      {codeview::RegisterId::SPL, X86::SPL},
      {codeview::RegisterId::RAX, X86::RAX},
      {codeview::RegisterId::RBX, X86::RBX},
      {codeview::RegisterId::RCX, X86::RCX},
      {codeview::RegisterId::RDX, X86::RDX},
      {codeview::RegisterId::RSI, X86::RSI},
      {codeview::RegisterId::RDI, X86::RDI},
      {codeview::RegisterId::RBP, X86::RBP},
      {codeview::RegisterId::RSP, X86::RSP},
      {codeview::RegisterId::R8, X86::R8},
      {codeview::RegisterId::R9, X86::R9},
      {codeview::RegisterId::R10, X86::R10},
      {codeview::RegisterId::R11, X86::R11},
      {codeview::RegisterId::R12, X86::R12},
      {codeview::RegisterId::R13, X86::R13},
      {codeview::RegisterId::R14, X86::R14},
      {codeview::RegisterId::R15, X86::R15},
      {codeview::RegisterId::R8B, X86::R8B},
      {codeview::RegisterId::R9B, X86::R9B},
      {codeview::RegisterId::R10B, X86::R10B},
      {codeview::RegisterId::R11B, X86::R11B},
      {codeview::RegisterId::R12B, X86::R12B},
      {codeview::RegisterId::R13B, X86::R13B},
      {codeview::RegisterId::R14B, X86::R14B},
      {codeview::RegisterId::R15B, X86::R15B},
      {codeview::RegisterId::R8W, X86::R8W},
      {codeview::RegisterId::R9W, X86::R9W},
      {codeview::RegisterId::R10W, X86::R10W},
      {codeview::RegisterId::R11W, X86::R11W},
      {codeview::RegisterId::R12W, X86::R12W},
      {codeview::RegisterId::R13W, X86::R13W},
      {codeview::RegisterId::R14W, X86::R14W},
      {codeview::RegisterId::R15W, X86::R15W},
      {codeview::RegisterId::R8D, X86::R8D},
      {codeview::RegisterId::R9D, X86::R9D},
      {codeview::RegisterId::R10D, X86::R10D},
      {codeview::RegisterId::R11D, X86::R11D},
      {codeview::RegisterId::R12D, X86::R12D},
      {codeview::RegisterId::R13D, X86::R13D},
      {codeview::RegisterId::R14D, X86::R14D},
      {codeview::RegisterId::R15D, X86::R15D},
      {codeview::RegisterId::AMD64_YMM0, X86::YMM0},
      {codeview::RegisterId::AMD64_YMM1, X86::YMM1},
      {codeview::RegisterId::AMD64_YMM2, X86::YMM2},
      {codeview::RegisterId::AMD64_YMM3, X86::YMM3},
      {codeview::RegisterId::AMD64_YMM4, X86::YMM4},
      {codeview::RegisterId::AMD64_YMM5, X86::YMM5},
      {codeview::RegisterId::AMD64_YMM6, X86::YMM6},
      {codeview::RegisterId::AMD64_YMM7, X86::YMM7},
      {codeview::RegisterId::AMD64_YMM8, X86::YMM8},
      {codeview::RegisterId::AMD64_YMM9, X86::YMM9},
      {codeview::RegisterId::AMD64_YMM10, X86::YMM10},
      {codeview::RegisterId::AMD64_YMM11, X86::YMM11},
      {codeview::RegisterId::AMD64_YMM12, X86::YMM12},
      {codeview::RegisterId::AMD64_YMM13, X86::YMM13},
      {codeview::RegisterId::AMD64_YMM14, X86::YMM14},
      {codeview::RegisterId::AMD64_YMM15, X86::YMM15},
      {codeview::RegisterId::AMD64_YMM16, X86::YMM16},
      {codeview::RegisterId::AMD64_YMM17, X86::YMM17},
      {codeview::RegisterId::AMD64_YMM18, X86::YMM18},
      {codeview::RegisterId::AMD64_YMM19, X86::YMM19},
      {codeview::RegisterId::AMD64_YMM20, X86::YMM20},
      {codeview::RegisterId::AMD64_YMM21, X86::YMM21},
      {codeview::RegisterId::AMD64_YMM22, X86::YMM22},
      {codeview::RegisterId::AMD64_YMM23, X86::YMM23},
      {codeview::RegisterId::AMD64_YMM24, X86::YMM24},
      {codeview::RegisterId::AMD64_YMM25, X86::YMM25},
      {codeview::RegisterId::AMD64_YMM26, X86::YMM26},
      {codeview::RegisterId::AMD64_YMM27, X86::YMM27},
      {codeview::RegisterId::AMD64_YMM28, X86::YMM28},
      {codeview::RegisterId::AMD64_YMM29, X86::YMM29},
      {codeview::RegisterId::AMD64_YMM30, X86::YMM30},
      {codeview::RegisterId::AMD64_YMM31, X86::YMM31},
      {codeview::RegisterId::AMD64_ZMM0, X86::ZMM0},
      {codeview::RegisterId::AMD64_ZMM1, X86::ZMM1},
      {codeview::RegisterId::AMD64_ZMM2, X86::ZMM2},
      {codeview::RegisterId::AMD64_ZMM3, X86::ZMM3},
      {codeview::RegisterId::AMD64_ZMM4, X86::ZMM4},
      {codeview::RegisterId::AMD64_ZMM5, X86::ZMM5},
      {codeview::RegisterId::AMD64_ZMM6, X86::ZMM6},
      {codeview::RegisterId::AMD64_ZMM7, X86::ZMM7},
      {codeview::RegisterId::AMD64_ZMM8, X86::ZMM8},
      {codeview::RegisterId::AMD64_ZMM9, X86::ZMM9},
      {codeview::RegisterId::AMD64_ZMM10, X86::ZMM10},
      {codeview::RegisterId::AMD64_ZMM11, X86::ZMM11},
      {codeview::RegisterId::AMD64_ZMM12, X86::ZMM12},
      {codeview::RegisterId::AMD64_ZMM13, X86::ZMM13},
      {codeview::RegisterId::AMD64_ZMM14, X86::ZMM14},
      {codeview::RegisterId::AMD64_ZMM15, X86::ZMM15},
      {codeview::RegisterId::AMD64_ZMM16, X86::ZMM16},
      {codeview::RegisterId::AMD64_ZMM17, X86::ZMM17},
      {codeview::RegisterId::AMD64_ZMM18, X86::ZMM18},
      {codeview::RegisterId::AMD64_ZMM19, X86::ZMM19},
      {codeview::RegisterId::AMD64_ZMM20, X86::ZMM20},
      {codeview::RegisterId::AMD64_ZMM21, X86::ZMM21},
      {codeview::RegisterId::AMD64_ZMM22, X86::ZMM22},
      {codeview::RegisterId::AMD64_ZMM23, X86::ZMM23},
      {codeview::RegisterId::AMD64_ZMM24, X86::ZMM24},
      {codeview::RegisterId::AMD64_ZMM25, X86::ZMM25},
      {codeview::RegisterId::AMD64_ZMM26, X86::ZMM26},
      {codeview::RegisterId::AMD64_ZMM27, X86::ZMM27},
      {codeview::RegisterId::AMD64_ZMM28, X86::ZMM28},
      {codeview::RegisterId::AMD64_ZMM29, X86::ZMM29},
      {codeview::RegisterId::AMD64_ZMM30, X86::ZMM30},
      {codeview::RegisterId::AMD64_ZMM31, X86::ZMM31},
      {codeview::RegisterId::AMD64_K0, X86::K0},
      {codeview::RegisterId::AMD64_K1, X86::K1},
      {codeview::RegisterId::AMD64_K2, X86::K2},
      {codeview::RegisterId::AMD64_K3, X86::K3},
      {codeview::RegisterId::AMD64_K4, X86::K4},
      {codeview::RegisterId::AMD64_K5, X86::K5},
      {codeview::RegisterId::AMD64_K6, X86::K6},
      {codeview::RegisterId::AMD64_K7, X86::K7},
      {codeview::RegisterId::AMD64_XMM16, X86::XMM16},
      {codeview::RegisterId::AMD64_XMM17, X86::XMM17},
      {codeview::RegisterId::AMD64_XMM18, X86::XMM18},
      {codeview::RegisterId::AMD64_XMM19, X86::XMM19},
      {codeview::RegisterId::AMD64_XMM20, X86::XMM20},
      {codeview::RegisterId::AMD64_XMM21, X86::XMM21},
      {codeview::RegisterId::AMD64_XMM22, X86::XMM22},
      {codeview::RegisterId::AMD64_XMM23, X86::XMM23},
      {codeview::RegisterId::AMD64_XMM24, X86::XMM24},
      {codeview::RegisterId::AMD64_XMM25, X86::XMM25},
      {codeview::RegisterId::AMD64_XMM26, X86::XMM26},
      {codeview::RegisterId::AMD64_XMM27, X86::XMM27},
      {codeview::RegisterId::AMD64_XMM28, X86::XMM28},
      {codeview::RegisterId::AMD64_XMM29, X86::XMM29},
      {codeview::RegisterId::AMD64_XMM30, X86::XMM30},
      {codeview::RegisterId::AMD64_XMM31, X86::XMM31},
  };

  for (const auto &I : RegMap)
    MRI->mapLLVMRegToCVReg(I.Reg, static_cast<int>(I.CVReg));
}
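
// Note: these tables let MCRegisterInfo translate LLVM registers for Windows
// consumers (SEH unwind info and CodeView debug info). A minimal usage
// sketch, assuming an initialized MCRegisterInfo:
//   int CVReg = MRI->getCodeViewRegNum(X86::EAX); // CodeView id for EAX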

MCSubtargetInfo *X86_MC::createX86MCSubtargetInfo(const Triple &TT,
                                                  StringRef CPU, StringRef FS) {
  std::string ArchFS = X86_MC::ParseX86Triple(TT);
  assert(!ArchFS.empty() && "Failed to parse X86 triple");
  if (!FS.empty())
    ArchFS = (Twine(ArchFS) + "," + FS).str();

  if (CPU.empty())
    CPU = "generic";

  size_t posNoEVEX512 = FS.rfind("-evex512");
  // Make sure we won't be cheated by "-avx512fp16".
  size_t posNoAVX512F =
      FS.ends_with("-avx512f") ? FS.size() - 8 : FS.rfind("-avx512f,");
  size_t posEVEX512 = FS.rfind("+evex512");
  size_t posAVX512F = FS.rfind("+avx512"); // Any AVX512XXX will enable AVX512F.

  if (posAVX512F != StringRef::npos &&
      (posNoAVX512F == StringRef::npos || posNoAVX512F < posAVX512F))
    if (posEVEX512 == StringRef::npos && posNoEVEX512 == StringRef::npos)
      ArchFS += ",+evex512";

  return createX86MCSubtargetInfoImpl(TT, CPU, /*TuneCPU*/ CPU, ArchFS);
}
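
// Illustrative example of the EVEX512 logic above: FS = "+avx512f" yields an
// ArchFS ending in ",+avx512f,+evex512" (512-bit vectors implicitly enabled
// for compatibility), while FS = "+avx512f,-evex512" is left untouched
// because the user made an explicit choice.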

static MCInstrInfo *createX86MCInstrInfo() {
  MCInstrInfo *X = new MCInstrInfo();
  InitX86MCInstrInfo(X);
  return X;
}

static MCRegisterInfo *createX86MCRegisterInfo(const Triple &TT) {
  unsigned RA = (TT.getArch() == Triple::x86_64)
                    ? X86::RIP  // Should have dwarf #16.
                    : X86::EIP; // Should have dwarf #8.

  MCRegisterInfo *X = new MCRegisterInfo();
  InitX86MCRegisterInfo(X, RA, X86_MC::getDwarfRegFlavour(TT, false),
                        X86_MC::getDwarfRegFlavour(TT, true), RA);
  X86_MC::initLLVMToSEHAndCVRegMapping(X);
  return X;
}

static MCAsmInfo *createX86MCAsmInfo(const MCRegisterInfo &MRI,
                                     const Triple &TheTriple,
                                     const MCTargetOptions &Options) {
  bool is64Bit = TheTriple.getArch() == Triple::x86_64;

  MCAsmInfo *MAI;
  if (TheTriple.isOSBinFormatMachO()) {
    if (is64Bit)
      MAI = new X86_64MCAsmInfoDarwin(TheTriple);
    else
      MAI = new X86MCAsmInfoDarwin(TheTriple);
  } else if (TheTriple.isOSBinFormatELF()) {
    // Force the use of an ELF container.
    MAI = new X86ELFMCAsmInfo(TheTriple);
  } else if (TheTriple.isWindowsMSVCEnvironment() ||
             TheTriple.isWindowsCoreCLREnvironment()) {
    if (Options.getAssemblyLanguage().equals_insensitive("masm"))
      MAI = new X86MCAsmInfoMicrosoftMASM(TheTriple);
    else
      MAI = new X86MCAsmInfoMicrosoft(TheTriple);
  } else if (TheTriple.isOSCygMing() ||
             TheTriple.isWindowsItaniumEnvironment()) {
    MAI = new X86MCAsmInfoGNUCOFF(TheTriple);
  } else if (TheTriple.isUEFI()) {
    MAI = new X86MCAsmInfoGNUCOFF(TheTriple);
  } else {
    // The default is ELF.
    MAI = new X86ELFMCAsmInfo(TheTriple);
  }

  // Initialize initial frame state.
  // Calculate amount of bytes used for return address storing
  int stackGrowth = is64Bit ? -8 : -4;

  // Initial state of the frame pointer is esp+stackGrowth.
  unsigned StackPtr = is64Bit ? X86::RSP : X86::ESP;
  MCCFIInstruction Inst = MCCFIInstruction::cfiDefCfa(
      nullptr, MRI.getDwarfRegNum(StackPtr, true), -stackGrowth);
  MAI->addInitialFrameState(Inst);

  // Add return address to move list
  unsigned InstPtr = is64Bit ? X86::RIP : X86::EIP;
  MCCFIInstruction Inst2 = MCCFIInstruction::createOffset(
      nullptr, MRI.getDwarfRegNum(InstPtr, true), stackGrowth);
  MAI->addInitialFrameState(Inst2);

  return MAI;
}
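
// Note: the initial CFI state encodes the situation on function entry: the
// CALL has pushed a return address, so the CFA is RSP+8 (ESP+4 on 32-bit)
// and the return address lives at CFA-8 (CFA-4).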

static MCInstPrinter *createX86MCInstPrinter(const Triple &T,
                                             unsigned SyntaxVariant,
                                             const MCAsmInfo &MAI,
                                             const MCInstrInfo &MII,
                                             const MCRegisterInfo &MRI) {
  if (SyntaxVariant == 0)
    return new X86ATTInstPrinter(MAI, MII, MRI);
  if (SyntaxVariant == 1)
    return new X86IntelInstPrinter(MAI, MII, MRI);
  return nullptr;
}

static MCRelocationInfo *createX86MCRelocationInfo(const Triple &TheTriple,
                                                   MCContext &Ctx) {
  // Default to the stock relocation info.
  return llvm::createMCRelocationInfo(TheTriple, Ctx);
}

namespace llvm {
namespace X86_MC {

class X86MCInstrAnalysis : public MCInstrAnalysis {
  X86MCInstrAnalysis(const X86MCInstrAnalysis &) = delete;
  X86MCInstrAnalysis &operator=(const X86MCInstrAnalysis &) = delete;
  virtual ~X86MCInstrAnalysis() = default;

public:
  X86MCInstrAnalysis(const MCInstrInfo *MCII) : MCInstrAnalysis(MCII) {}

#define GET_STIPREDICATE_DECLS_FOR_MC_ANALYSIS
#include "X86GenSubtargetInfo.inc"

  bool clearsSuperRegisters(const MCRegisterInfo &MRI, const MCInst &Inst,
                            APInt &Mask) const override;
  std::vector<std::pair<uint64_t, uint64_t>>
  findPltEntries(uint64_t PltSectionVA, ArrayRef<uint8_t> PltContents,
                 const Triple &TargetTriple) const override;

  bool evaluateBranch(const MCInst &Inst, uint64_t Addr, uint64_t Size,
                      uint64_t &Target) const override;
  std::optional<uint64_t>
  evaluateMemoryOperandAddress(const MCInst &Inst, const MCSubtargetInfo *STI,
                               uint64_t Addr, uint64_t Size) const override;
  std::optional<uint64_t>
  getMemoryOperandRelocationOffset(const MCInst &Inst,
                                   uint64_t Size) const override;
};

#define GET_STIPREDICATE_DEFS_FOR_MC_ANALYSIS
#include "X86GenSubtargetInfo.inc"

bool X86MCInstrAnalysis::clearsSuperRegisters(const MCRegisterInfo &MRI,
                                              const MCInst &Inst,
                                              APInt &Mask) const {
  const MCInstrDesc &Desc = Info->get(Inst.getOpcode());
  unsigned NumDefs = Desc.getNumDefs();
  unsigned NumImplicitDefs = Desc.implicit_defs().size();
  assert(Mask.getBitWidth() == NumDefs + NumImplicitDefs &&
         "Unexpected number of bits in the mask!");

  bool HasVEX = (Desc.TSFlags & X86II::EncodingMask) == X86II::VEX;
  bool HasEVEX = (Desc.TSFlags & X86II::EncodingMask) == X86II::EVEX;
  bool HasXOP = (Desc.TSFlags & X86II::EncodingMask) == X86II::XOP;

  const MCRegisterClass &GR32RC = MRI.getRegClass(X86::GR32RegClassID);
  const MCRegisterClass &VR128XRC = MRI.getRegClass(X86::VR128XRegClassID);
  const MCRegisterClass &VR256XRC = MRI.getRegClass(X86::VR256XRegClassID);

  auto ClearsSuperReg = [=](unsigned RegID) {
    // On X86-64, a general purpose integer register is viewed as a 64-bit
    // register internal to the processor.
    // An update to the lower 32 bits of a 64 bit integer register is
    // architecturally defined to zero extend the upper 32 bits.
    if (GR32RC.contains(RegID))
      return true;

    // Early exit if this instruction has no vex/evex/xop prefix.
    if (!HasEVEX && !HasVEX && !HasXOP)
      return false;

    // All VEX and EVEX encoded instructions are defined to zero the high bits
    // of the destination register up to VLMAX (i.e. the maximum vector register
    // width pertaining to the instruction).
    // We assume the same behavior for XOP instructions too.
    return VR128XRC.contains(RegID) || VR256XRC.contains(RegID);
  };

  Mask.clearAllBits();
  for (unsigned I = 0, E = NumDefs; I < E; ++I) {
    const MCOperand &Op = Inst.getOperand(I);
    if (ClearsSuperReg(Op.getReg()))
      Mask.setBit(I);
  }

  for (unsigned I = 0, E = NumImplicitDefs; I < E; ++I) {
    const MCPhysReg Reg = Desc.implicit_defs()[I];
    if (ClearsSuperReg(Reg))
      Mask.setBit(NumDefs + I);
  }

  return Mask.getBoolValue();
}
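
// Illustrative example: "xorl %eax, %eax" sets the mask bit for its EAX def
// because a 32-bit GPR write zero-extends into RAX; a VEX-encoded
// "vaddps %xmm1, %xmm2, %xmm0" sets the bit for XMM0 since VEX zeroes the
// destination above VLMAX (here, bits 128 and up of ZMM0).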

static std::vector<std::pair<uint64_t, uint64_t>>
findX86PltEntries(uint64_t PltSectionVA, ArrayRef<uint8_t> PltContents) {
  // Do a lightweight parsing of PLT entries.
  std::vector<std::pair<uint64_t, uint64_t>> Result;
  for (uint64_t Byte = 0, End = PltContents.size(); Byte + 6 < End;) {
    // Recognize a jmp.
    if (PltContents[Byte] == 0xff && PltContents[Byte + 1] == 0xa3) {
      // The jmp instruction at the beginning of each PLT entry jumps to the
      // address of the base of the .got.plt section plus the immediate.
      // Set the 1 << 32 bit to let ELFObjectFileBase::getPltEntries convert the
      // offset to an address. Imm may be a negative int32_t if the GOT entry is
      // at a lower address than the PLT.
      uint32_t Imm = support::endian::read32le(PltContents.data() + Byte + 2);
      Result.emplace_back(PltSectionVA + Byte, Imm | (uint64_t(1) << 32));
      Byte += 6;
    } else if (PltContents[Byte] == 0xff && PltContents[Byte + 1] == 0x25) {
      // The jmp instruction at the beginning of each PLT entry jumps to the
      // immediate.
      uint32_t Imm = support::endian::read32le(PltContents.data() + Byte + 2);
      Result.push_back(std::make_pair(PltSectionVA + Byte, Imm));
      Byte += 6;
    } else
      Byte++;
  }
  return Result;
}
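
// Illustrative example: a 32-bit PIC PLT entry starts with
//   ff a3 XX XX XX XX   jmp *imm32(%ebx)
// where %ebx holds the .got.plt base, so the pair records the entry VA and
// the GOT offset (tagged with bit 32 for later conversion to an address).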

static std::vector<std::pair<uint64_t, uint64_t>>
findX86_64PltEntries(uint64_t PltSectionVA, ArrayRef<uint8_t> PltContents) {
  // Do a lightweight parsing of PLT entries.
  std::vector<std::pair<uint64_t, uint64_t>> Result;
  for (uint64_t Byte = 0, End = PltContents.size(); Byte + 6 < End;) {
    // Recognize a jmp.
    if (PltContents[Byte] == 0xff && PltContents[Byte + 1] == 0x25) {
      // The jmp instruction at the beginning of each PLT entry jumps to the
      // address of the next instruction plus the immediate.
      uint32_t Imm = support::endian::read32le(PltContents.data() + Byte + 2);
      Result.push_back(
          std::make_pair(PltSectionVA + Byte, PltSectionVA + Byte + 6 + Imm));
      Byte += 6;
    } else
      Byte++;
  }
  return Result;
}
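
// Illustrative example: bytes "ff 25 fa 2f 00 00" at VA 0x401020 decode as
// "jmp *0x2ffa(%rip)"; the GOT entry address is 0x401020 + 6 + 0x2ffa =
// 0x404020, which is what gets recorded for the entry.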

std::vector<std::pair<uint64_t, uint64_t>>
X86MCInstrAnalysis::findPltEntries(uint64_t PltSectionVA,
                                   ArrayRef<uint8_t> PltContents,
                                   const Triple &TargetTriple) const {
  switch (TargetTriple.getArch()) {
  case Triple::x86:
    return findX86PltEntries(PltSectionVA, PltContents);
  case Triple::x86_64:
    return findX86_64PltEntries(PltSectionVA, PltContents);
  default:
    return {};
  }
}

bool X86MCInstrAnalysis::evaluateBranch(const MCInst &Inst, uint64_t Addr,
                                        uint64_t Size, uint64_t &Target) const {
  if (Inst.getNumOperands() == 0 ||
      Info->get(Inst.getOpcode()).operands()[0].OperandType !=
          MCOI::OPERAND_PCREL)
    return false;
  Target = Addr + Size + Inst.getOperand(0).getImm();
  return true;
}
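
// Illustrative example: a 2-byte "jne" at Addr 0x1000 with imm8 0x10
// branches to 0x1000 + 2 + 0x10 = 0x1012, since PC-relative targets are
// computed from the end of the instruction.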

std::optional<uint64_t> X86MCInstrAnalysis::evaluateMemoryOperandAddress(
    const MCInst &Inst, const MCSubtargetInfo *STI, uint64_t Addr,
    uint64_t Size) const {
  const MCInstrDesc &MCID = Info->get(Inst.getOpcode());
  int MemOpStart = X86II::getMemoryOperandNo(MCID.TSFlags);
  if (MemOpStart == -1)
    return std::nullopt;
  MemOpStart += X86II::getOperandBias(MCID);

  const MCOperand &SegReg = Inst.getOperand(MemOpStart + X86::AddrSegmentReg);
  const MCOperand &BaseReg = Inst.getOperand(MemOpStart + X86::AddrBaseReg);
  const MCOperand &IndexReg = Inst.getOperand(MemOpStart + X86::AddrIndexReg);
  const MCOperand &ScaleAmt = Inst.getOperand(MemOpStart + X86::AddrScaleAmt);
  const MCOperand &Disp = Inst.getOperand(MemOpStart + X86::AddrDisp);
  if (SegReg.getReg() || IndexReg.getReg() || ScaleAmt.getImm() != 1 ||
      !Disp.isImm())
    return std::nullopt;

  // RIP-relative addressing.
  if (BaseReg.getReg() == X86::RIP)
    return Addr + Size + Disp.getImm();

  return std::nullopt;
}
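
// Illustrative example: a 7-byte RIP-relative "movq 0x100(%rip), %rax" at
// Addr 0x2000 references 0x2000 + 7 + 0x100 = 0x2107; non-RIP bases are
// rejected because their runtime values are unknown here.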

std::optional<uint64_t>
X86MCInstrAnalysis::getMemoryOperandRelocationOffset(const MCInst &Inst,
                                                     uint64_t Size) const {
  if (Inst.getOpcode() != X86::LEA64r)
    return std::nullopt;
  const MCInstrDesc &MCID = Info->get(Inst.getOpcode());
  int MemOpStart = X86II::getMemoryOperandNo(MCID.TSFlags);
  if (MemOpStart == -1)
    return std::nullopt;
  MemOpStart += X86II::getOperandBias(MCID);
  const MCOperand &SegReg = Inst.getOperand(MemOpStart + X86::AddrSegmentReg);
  const MCOperand &BaseReg = Inst.getOperand(MemOpStart + X86::AddrBaseReg);
  const MCOperand &IndexReg = Inst.getOperand(MemOpStart + X86::AddrIndexReg);
  const MCOperand &ScaleAmt = Inst.getOperand(MemOpStart + X86::AddrScaleAmt);
  const MCOperand &Disp = Inst.getOperand(MemOpStart + X86::AddrDisp);
  // Must be a simple rip-relative address.
  if (BaseReg.getReg() != X86::RIP || SegReg.getReg() || IndexReg.getReg() ||
      ScaleAmt.getImm() != 1 || !Disp.isImm())
    return std::nullopt;
  // rip-relative ModR/M immediate is 32 bits.
  assert(Size > 4 && "invalid instruction size for rip-relative lea");
  return Size - 4;
}
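
// Note: for a RIP-relative LEA64r the disp32 is the last 4 bytes of the
// instruction, so the relocation applies at offset Size - 4 (3 for the
// common 7-byte encoding).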

} // end of namespace X86_MC

} // end of namespace llvm

static MCInstrAnalysis *createX86MCInstrAnalysis(const MCInstrInfo *Info) {
  return new X86_MC::X86MCInstrAnalysis(Info);
}

// Force static initialization.
extern "C" LLVM_C_ABI void LLVMInitializeX86TargetMC() {
  for (Target *T : {&getTheX86_32Target(), &getTheX86_64Target()}) {
    // Register the MC asm info.
    RegisterMCAsmInfoFn X(*T, createX86MCAsmInfo);

    // Register the MC instruction info.
    TargetRegistry::RegisterMCInstrInfo(*T, createX86MCInstrInfo);

    // Register the MC register info.
    TargetRegistry::RegisterMCRegInfo(*T, createX86MCRegisterInfo);

    // Register the MC subtarget info.
    TargetRegistry::RegisterMCSubtargetInfo(*T,
                                            X86_MC::createX86MCSubtargetInfo);

    // Register the MC instruction analyzer.
    TargetRegistry::RegisterMCInstrAnalysis(*T, createX86MCInstrAnalysis);

    // Register the code emitter.
    TargetRegistry::RegisterMCCodeEmitter(*T, createX86MCCodeEmitter);

    // Register the obj target streamer.
    TargetRegistry::RegisterObjectTargetStreamer(*T,
                                                 createX86ObjectTargetStreamer);

    // Register the asm target streamer.
    TargetRegistry::RegisterAsmTargetStreamer(*T, createX86AsmTargetStreamer);

    // Register the null streamer.
    TargetRegistry::RegisterNullTargetStreamer(*T, createX86NullTargetStreamer);

    TargetRegistry::RegisterCOFFStreamer(*T, createX86WinCOFFStreamer);
    TargetRegistry::RegisterELFStreamer(*T, createX86ELFStreamer);

    // Register the MCInstPrinter.
    TargetRegistry::RegisterMCInstPrinter(*T, createX86MCInstPrinter);

    // Register the MC relocation info.
    TargetRegistry::RegisterMCRelocationInfo(*T, createX86MCRelocationInfo);
  }

  // Register the asm backend.
  TargetRegistry::RegisterMCAsmBackend(getTheX86_32Target(),
                                       createX86_32AsmBackend);
  TargetRegistry::RegisterMCAsmBackend(getTheX86_64Target(),
                                       createX86_64AsmBackend);
}
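
// Note: clients must run this initializer (directly or via
// llvm::InitializeAllTargetMCs()) before looking up either X86 target in the
// registry, e.g. when constructing a disassembler or MCCodeEmitter.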

MCRegister llvm::getX86SubSuperRegister(MCRegister Reg, unsigned Size,
                                        bool High) {
#define DEFAULT_NOREG                                                          \
  default:                                                                     \
    return X86::NoRegister;
#define SUB_SUPER(R1, R2, R3, R4, R)                                           \
  case X86::R1:                                                                \
  case X86::R2:                                                                \
  case X86::R3:                                                                \
  case X86::R4:                                                                \
    return X86::R;
#define A_SUB_SUPER(R)                                                         \
  case X86::AH:                                                                \
    SUB_SUPER(AL, AX, EAX, RAX, R)
#define D_SUB_SUPER(R)                                                         \
  case X86::DH:                                                                \
    SUB_SUPER(DL, DX, EDX, RDX, R)
#define C_SUB_SUPER(R)                                                         \
  case X86::CH:                                                                \
    SUB_SUPER(CL, CX, ECX, RCX, R)
#define B_SUB_SUPER(R)                                                         \
  case X86::BH:                                                                \
    SUB_SUPER(BL, BX, EBX, RBX, R)
#define SI_SUB_SUPER(R) SUB_SUPER(SIL, SI, ESI, RSI, R)
#define DI_SUB_SUPER(R) SUB_SUPER(DIL, DI, EDI, RDI, R)
#define BP_SUB_SUPER(R) SUB_SUPER(BPL, BP, EBP, RBP, R)
#define SP_SUB_SUPER(R) SUB_SUPER(SPL, SP, ESP, RSP, R)
#define NO_SUB_SUPER(NO, REG)                                                  \
  SUB_SUPER(R##NO##B, R##NO##W, R##NO##D, R##NO, REG)
#define NO_SUB_SUPER_B(NO) NO_SUB_SUPER(NO, R##NO##B)
#define NO_SUB_SUPER_W(NO) NO_SUB_SUPER(NO, R##NO##W)
#define NO_SUB_SUPER_D(NO) NO_SUB_SUPER(NO, R##NO##D)
#define NO_SUB_SUPER_Q(NO) NO_SUB_SUPER(NO, R##NO)
  switch (Size) {
  default:
    llvm_unreachable("illegal register size");