//===-- X86MCCodeEmitter.cpp - Convert X86 code to machine code -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the X86MCCodeEmitter class.
//
//===----------------------------------------------------------------------===//
#include "MCTargetDesc/X86BaseInfo.h"
#include "MCTargetDesc/X86FixupKinds.h"
#include "MCTargetDesc/X86MCTargetDesc.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixup.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>
#include <cstdlib>

using namespace llvm;

#define DEBUG_TYPE "mccodeemitter"
namespace {

class X86MCCodeEmitter : public MCCodeEmitter {
  const MCInstrInfo &MCII;
  MCContext &Ctx;

public:
  X86MCCodeEmitter(const MCInstrInfo &mcii, MCContext &ctx)
      : MCII(mcii), Ctx(ctx) {}
  X86MCCodeEmitter(const X86MCCodeEmitter &) = delete;
  X86MCCodeEmitter &operator=(const X86MCCodeEmitter &) = delete;
  ~X86MCCodeEmitter() override = default;
  bool is64BitMode(const MCSubtargetInfo &STI) const {
    return STI.getFeatureBits()[X86::Mode64Bit];
  }

  bool is32BitMode(const MCSubtargetInfo &STI) const {
    return STI.getFeatureBits()[X86::Mode32Bit];
  }

  bool is16BitMode(const MCSubtargetInfo &STI) const {
    return STI.getFeatureBits()[X86::Mode16Bit];
  }

  /// Is16BitMemOperand - Return true if the specified instruction has
  /// a 16-bit memory operand. Op specifies the operand # of the memoperand.
  bool Is16BitMemOperand(const MCInst &MI, unsigned Op,
                         const MCSubtargetInfo &STI) const {
    const MCOperand &BaseReg = MI.getOperand(Op + X86::AddrBaseReg);
    const MCOperand &IndexReg = MI.getOperand(Op + X86::AddrIndexReg);
    const MCOperand &Disp = MI.getOperand(Op + X86::AddrDisp);

    if (is16BitMode(STI) && BaseReg.getReg() == 0 && Disp.isImm() &&
        Disp.getImm() < 0x10000)
      return true;
    if ((BaseReg.getReg() != 0 &&
         X86MCRegisterClasses[X86::GR16RegClassID].contains(
             BaseReg.getReg())) ||
        (IndexReg.getReg() != 0 &&
         X86MCRegisterClasses[X86::GR16RegClassID].contains(
             IndexReg.getReg())))
      return true;
    return false;
  }
  unsigned GetX86RegNum(const MCOperand &MO) const {
    return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg()) & 0x7;
  }

  unsigned getX86RegEncoding(const MCInst &MI, unsigned OpNum) const {
    return Ctx.getRegisterInfo()->getEncodingValue(
        MI.getOperand(OpNum).getReg());
  }

  // Does this register require a bit to be set in the REX prefix?
  bool isREXExtendedReg(const MCInst &MI, unsigned OpNum) const {
    return (getX86RegEncoding(MI, OpNum) >> 3) & 1;
  }
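  // Worked example (illustrative): the encoding value of %r8 is 8 (0b1000),
  // so bit 3 is set and any use of %r8 needs the corresponding REX bit set,
  // while %eax encodes as 0. GetX86RegNum keeps only the low three bits that
  // fit in a ModRM or SIB field.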
  void EmitByte(uint8_t C, unsigned &CurByte, raw_ostream &OS) const {
    OS << (char)C;
    ++CurByte;
  }

  void EmitConstant(uint64_t Val, unsigned Size, unsigned &CurByte,
                    raw_ostream &OS) const {
    // Output the constant in little endian byte order.
    for (unsigned i = 0; i != Size; ++i) {
      EmitByte(Val & 255, CurByte, OS);
      Val >>= 8;
    }
  }
  void EmitImmediate(const MCOperand &Disp, SMLoc Loc,
                     unsigned ImmSize, MCFixupKind FixupKind,
                     unsigned &CurByte, raw_ostream &OS,
                     SmallVectorImpl<MCFixup> &Fixups,
                     int ImmOffset = 0) const;

  static uint8_t ModRMByte(unsigned Mod, unsigned RegOpcode, unsigned RM) {
    assert(Mod < 4 && RegOpcode < 8 && RM < 8 && "ModRM Fields out of range!");
    return RM | (RegOpcode << 3) | (Mod << 6);
  }
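  // Worked example (illustrative): ModRMByte(3, 0, 1) packs Mod=0b11
  // (register-direct), reg/opcode=0b000, R/M=0b001 (%ecx) into
  // 0b11'000'001 == 0xC1, the ModRM byte of e.g. `rol $1, %ecx` (D1 C1).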
  void EmitRegModRMByte(const MCOperand &ModRMReg, unsigned RegOpcodeFld,
                        unsigned &CurByte, raw_ostream &OS) const {
    EmitByte(ModRMByte(3, RegOpcodeFld, GetX86RegNum(ModRMReg)), CurByte, OS);
  }

  void EmitSIBByte(unsigned SS, unsigned Index, unsigned Base,
                   unsigned &CurByte, raw_ostream &OS) const {
    // SIB byte is in the same format as the ModRMByte.
    EmitByte(ModRMByte(SS, Index, Base), CurByte, OS);
  }
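  // Worked example (illustrative): for a memory operand like (%eax,%ebx,4),
  // EmitSIBByte(2, 3, 0) emits 0b10'011'000 == 0x98: SS=0b10 selects scale 4,
  // Index=0b011 is %ebx, and Base=0b000 is %eax.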
  void emitMemModRMByte(const MCInst &MI, unsigned Op, unsigned RegOpcodeField,
                        uint64_t TSFlags, bool Rex, unsigned &CurByte,
                        raw_ostream &OS, SmallVectorImpl<MCFixup> &Fixups,
                        const MCSubtargetInfo &STI) const;

  void encodeInstruction(const MCInst &MI, raw_ostream &OS,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const override;

  void EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, int MemOperand,
                           const MCInst &MI, const MCInstrDesc &Desc,
                           raw_ostream &OS) const;

  void EmitSegmentOverridePrefix(unsigned &CurByte, unsigned SegOperand,
                                 const MCInst &MI, raw_ostream &OS) const;

  bool emitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, int MemOperand,
                        const MCInst &MI, const MCInstrDesc &Desc,
                        const MCSubtargetInfo &STI, raw_ostream &OS) const;

  uint8_t DetermineREXPrefix(const MCInst &MI, uint64_t TSFlags,
                             int MemOperand, const MCInstrDesc &Desc) const;

  bool isPCRel32Branch(const MCInst &MI) const;
};

} // end anonymous namespace

/// isDisp8 - Return true if this signed displacement fits in an 8-bit
/// sign-extended field.
static bool isDisp8(int Value) {
  return Value == (int8_t)Value;
}
/// isCDisp8 - Return true if this signed displacement fits in an 8-bit
/// compressed displacement field.
static bool isCDisp8(uint64_t TSFlags, int Value, int &CValue) {
  assert(((TSFlags & X86II::EncodingMask) == X86II::EVEX) &&
         "Compressed 8-bit displacement is only valid for EVEX inst.");

  unsigned CD8_Scale =
      (TSFlags & X86II::CD8_Scale_Mask) >> X86II::CD8_Scale_Shift;
  if (CD8_Scale == 0) {
    CValue = Value;
    return isDisp8(Value);
  }

  unsigned Mask = CD8_Scale - 1;
  assert((CD8_Scale & Mask) == 0 && "Invalid memory object size.");
  if (Value & Mask) // Unaligned offset
    return false;
  Value /= (int)CD8_Scale;
  bool Ret = (Value == (int8_t)Value);

  if (Ret)
    CValue = Value;
  return Ret;
}
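// Worked example (illustrative): for an EVEX instruction whose memory object
// is 64 bytes (CD8_Scale == 64), a displacement of 512 is 64-byte aligned and
// 512/64 == 8 fits in a signed byte, so CValue becomes 8 and only a single
// displacement byte is emitted; a displacement of 520 fails the alignment
// check and falls back to disp32.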
/// getImmFixupKind - Return the appropriate fixup kind to use for an immediate
/// in an instruction with the specified TSFlags.
static MCFixupKind getImmFixupKind(uint64_t TSFlags) {
  unsigned Size = X86II::getSizeOfImm(TSFlags);
  bool isPCRel = X86II::isImmPCRel(TSFlags);

  if (X86II::isImmSigned(TSFlags)) {
    switch (Size) {
    default: llvm_unreachable("Unsupported signed fixup size!");
    case 4: return MCFixupKind(X86::reloc_signed_4byte);
    }
  }
  return MCFixup::getKindForSize(Size, isPCRel);
}

/// Is32BitMemOperand - Return true if the specified instruction has
/// a 32-bit memory operand. Op specifies the operand # of the memoperand.
static bool Is32BitMemOperand(const MCInst &MI, unsigned Op) {
  const MCOperand &BaseReg = MI.getOperand(Op + X86::AddrBaseReg);
  const MCOperand &IndexReg = MI.getOperand(Op + X86::AddrIndexReg);

  if ((BaseReg.getReg() != 0 &&
       X86MCRegisterClasses[X86::GR32RegClassID].contains(BaseReg.getReg())) ||
      (IndexReg.getReg() != 0 &&
       X86MCRegisterClasses[X86::GR32RegClassID].contains(IndexReg.getReg())))
    return true;
  if (BaseReg.getReg() == X86::EIP) {
    assert(IndexReg.getReg() == 0 && "Invalid eip-based address.");
    return true;
  }
  if (IndexReg.getReg() == X86::EIZ)
    return true;
  return false;
}

/// Is64BitMemOperand - Return true if the specified instruction has
/// a 64-bit memory operand. Op specifies the operand # of the memoperand.
#ifndef NDEBUG
static bool Is64BitMemOperand(const MCInst &MI, unsigned Op) {
  const MCOperand &BaseReg = MI.getOperand(Op + X86::AddrBaseReg);
  const MCOperand &IndexReg = MI.getOperand(Op + X86::AddrIndexReg);

  if ((BaseReg.getReg() != 0 &&
       X86MCRegisterClasses[X86::GR64RegClassID].contains(BaseReg.getReg())) ||
      (IndexReg.getReg() != 0 &&
       X86MCRegisterClasses[X86::GR64RegClassID].contains(IndexReg.getReg())))
    return true;
  return false;
}
#endif

/// StartsWithGlobalOffsetTable - Check if this expression starts with
/// _GLOBAL_OFFSET_TABLE_ and if it is of the form
/// _GLOBAL_OFFSET_TABLE_-symbol. This is needed to support PIC on ELF
/// i386 as _GLOBAL_OFFSET_TABLE_ is magical. We check only simple cases that
/// are known to be used: _GLOBAL_OFFSET_TABLE_ by itself or at the start
/// of a binary expression.
enum GlobalOffsetTableExprKind {
  GOT_None,
  GOT_Normal,
  GOT_SymDiff
};

static GlobalOffsetTableExprKind
StartsWithGlobalOffsetTable(const MCExpr *Expr) {
  const MCExpr *RHS = nullptr;
  if (Expr->getKind() == MCExpr::Binary) {
    const MCBinaryExpr *BE = static_cast<const MCBinaryExpr *>(Expr);
    Expr = BE->getLHS();
    RHS = BE->getRHS();
  }

  if (Expr->getKind() != MCExpr::SymbolRef)
    return GOT_None;

  const MCSymbolRefExpr *Ref = static_cast<const MCSymbolRefExpr *>(Expr);
  const MCSymbol &S = Ref->getSymbol();
  if (S.getName() != "_GLOBAL_OFFSET_TABLE_")
    return GOT_None;
  if (RHS && RHS->getKind() == MCExpr::SymbolRef)
    return GOT_SymDiff;
  return GOT_Normal;
}

static bool HasSecRelSymbolRef(const MCExpr *Expr) {
  if (Expr->getKind() == MCExpr::SymbolRef) {
    const MCSymbolRefExpr *Ref = static_cast<const MCSymbolRefExpr *>(Expr);
    return Ref->getKind() == MCSymbolRefExpr::VK_SECREL;
  }
  return false;
}

bool X86MCCodeEmitter::isPCRel32Branch(const MCInst &MI) const {
  unsigned Opcode = MI.getOpcode();
  const MCInstrDesc &Desc = MCII.get(Opcode);
  if ((Opcode != X86::CALL64pcrel32 && Opcode != X86::JMP_4) ||
      getImmFixupKind(Desc.TSFlags) != FK_PCRel_4)
    return false;

  unsigned CurOp = X86II::getOperandBias(Desc);
  const MCOperand &Op = MI.getOperand(CurOp);
  if (!Op.isExpr())
    return false;

  const MCSymbolRefExpr *Ref = dyn_cast<MCSymbolRefExpr>(Op.getExpr());
  return Ref && Ref->getKind() == MCSymbolRefExpr::VK_None;
}
void X86MCCodeEmitter::
EmitImmediate(const MCOperand &DispOp, SMLoc Loc, unsigned Size,
              MCFixupKind FixupKind, unsigned &CurByte, raw_ostream &OS,
              SmallVectorImpl<MCFixup> &Fixups, int ImmOffset) const {
  const MCExpr *Expr = nullptr;
  if (DispOp.isImm()) {
    // If this is a simple integer displacement that doesn't require a
    // relocation, emit it now.
    if (FixupKind != FK_PCRel_1 &&
        FixupKind != FK_PCRel_2 &&
        FixupKind != FK_PCRel_4) {
      EmitConstant(DispOp.getImm() + ImmOffset, Size, CurByte, OS);
      return;
    }
    Expr = MCConstantExpr::create(DispOp.getImm(), Ctx);
  } else {
    Expr = DispOp.getExpr();
  }

  // If we have an immoffset, add it to the expression.
  if ((FixupKind == FK_Data_4 ||
       FixupKind == FK_Data_8 ||
       FixupKind == MCFixupKind(X86::reloc_signed_4byte))) {
    GlobalOffsetTableExprKind Kind = StartsWithGlobalOffsetTable(Expr);
    if (Kind != GOT_None) {
      assert(ImmOffset == 0);

      if (Size == 8) {
        FixupKind = MCFixupKind(X86::reloc_global_offset_table8);
      } else {
        assert(Size == 4);
        FixupKind = MCFixupKind(X86::reloc_global_offset_table);
      }

      if (Kind == GOT_Normal)
        ImmOffset = CurByte;
    } else if (Expr->getKind() == MCExpr::SymbolRef) {
      if (HasSecRelSymbolRef(Expr)) {
        FixupKind = MCFixupKind(FK_SecRel_4);
      }
    } else if (Expr->getKind() == MCExpr::Binary) {
      const MCBinaryExpr *Bin = static_cast<const MCBinaryExpr *>(Expr);
      if (HasSecRelSymbolRef(Bin->getLHS()) ||
          HasSecRelSymbolRef(Bin->getRHS())) {
        FixupKind = MCFixupKind(FK_SecRel_4);
      }
    }
  }

  // If the fixup is pc-relative, we need to bias the value to be relative to
  // the start of the field, not the end of the field.
  if (FixupKind == FK_PCRel_4 ||
      FixupKind == MCFixupKind(X86::reloc_riprel_4byte) ||
      FixupKind == MCFixupKind(X86::reloc_riprel_4byte_movq_load) ||
      FixupKind == MCFixupKind(X86::reloc_riprel_4byte_relax) ||
      FixupKind == MCFixupKind(X86::reloc_riprel_4byte_relax_rex) ||
      FixupKind == MCFixupKind(X86::reloc_branch_4byte_pcrel)) {
    ImmOffset -= 4;
    // If this is a pc-relative load off _GLOBAL_OFFSET_TABLE_:
    //   leaq _GLOBAL_OFFSET_TABLE_(%rip), %r15
    // this needs to be a GOTPC32 relocation.
    if (StartsWithGlobalOffsetTable(Expr) != GOT_None)
      FixupKind = MCFixupKind(X86::reloc_global_offset_table);
  }
  if (FixupKind == FK_PCRel_2)
    ImmOffset -= 2;
  if (FixupKind == FK_PCRel_1)
    ImmOffset -= 1;

  if (ImmOffset)
    Expr = MCBinaryExpr::createAdd(Expr, MCConstantExpr::create(ImmOffset, Ctx),
                                   Ctx);

  // Emit a symbolic constant as a fixup and 4 zeros.
  Fixups.push_back(MCFixup::create(CurByte, Expr, FixupKind, Loc));
  EmitConstant(0, Size, CurByte, OS);
}
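// Worked example (illustrative): for `call foo` the hardware resolves the
// 4-byte pc-relative field relative to the end of the field (the next
// instruction), while the fixup above is recorded at the start of the field,
// so the -4 adjustment folds that difference into the relocation addend.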
void X86MCCodeEmitter::emitMemModRMByte(const MCInst &MI, unsigned Op,
                                        unsigned RegOpcodeField,
                                        uint64_t TSFlags, bool Rex,
                                        unsigned &CurByte, raw_ostream &OS,
                                        SmallVectorImpl<MCFixup> &Fixups,
                                        const MCSubtargetInfo &STI) const {
  const MCOperand &Disp = MI.getOperand(Op + X86::AddrDisp);
  const MCOperand &Base = MI.getOperand(Op + X86::AddrBaseReg);
  const MCOperand &Scale = MI.getOperand(Op + X86::AddrScaleAmt);
  const MCOperand &IndexReg = MI.getOperand(Op + X86::AddrIndexReg);
  unsigned BaseReg = Base.getReg();
  bool HasEVEX = (TSFlags & X86II::EncodingMask) == X86II::EVEX;

  // Handle %rip relative addressing.
  if (BaseReg == X86::RIP ||
      BaseReg == X86::EIP) { // [disp32+rIP] in X86-64 mode
    assert(is64BitMode(STI) && "Rip-relative addressing requires 64-bit mode");
    assert(IndexReg.getReg() == 0 && "Invalid rip-relative address");
    EmitByte(ModRMByte(0, RegOpcodeField, 5), CurByte, OS);

    unsigned Opcode = MI.getOpcode();
    // movq loads are handled with a special relocation form which allows the
    // linker to eliminate some loads for GOT references which end up in the
    // same linkage unit.
    unsigned FixupKind = [=]() {
      switch (Opcode) {
      default:
        return X86::reloc_riprel_4byte;
      case X86::MOV64rm:
        assert(Rex);
        return X86::reloc_riprel_4byte_movq_load;
      case X86::CALL64m:
      case X86::JMP64m:
      case X86::TAILJMPm64:
      case X86::TEST64mr:
      case X86::ADC64rm:
      case X86::ADD64rm:
      case X86::AND64rm:
      case X86::CMP64rm:
      case X86::OR64rm:
      case X86::SBB64rm:
      case X86::SUB64rm:
      case X86::XOR64rm:
        return Rex ? X86::reloc_riprel_4byte_relax_rex
                   : X86::reloc_riprel_4byte_relax;
      }
    }();

    // rip-relative addressing is actually relative to the *next* instruction.
    // Since an immediate can follow the mod/rm byte for an instruction, this
    // means that we need to bias the displacement field of the instruction with
    // the size of the immediate field. If we have this case, add it into the
    // expression to emit.
    // Note: rip-relative addressing using immediate displacement values should
    // not be adjusted, assuming it was the user's intent.
    int ImmSize = !Disp.isImm() && X86II::hasImm(TSFlags)
                      ? X86II::getSizeOfImm(TSFlags)
                      : 0;

    EmitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(FixupKind),
                  CurByte, OS, Fixups, -ImmSize);
    return;
  }
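  // Worked example (illustrative): `movl $1, foo(%rip)` emits ModRM 0x05
  // ([RIP+disp32]); because a 4-byte immediate follows the displacement, the
  // fixup is biased by -ImmSize so the target is computed relative to the end
  // of the whole instruction.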
  unsigned BaseRegNo = BaseReg ? GetX86RegNum(Base) : -1U;

  // 16-bit addressing forms of the ModR/M byte have a different encoding for
  // the R/M field and are far more limited in which registers can be used.
  if (Is16BitMemOperand(MI, Op, STI)) {
    if (BaseReg) {
      // For 32-bit addressing, the row and column values in Table 2-2 are
      // basically the same. It's AX/CX/DX/BX/SP/BP/SI/DI in that order, with
      // some special cases. And GetX86RegNum reflects that numbering.
      // For 16-bit addressing it's more fun, as shown in the SDM Vol 2A,
      // Table 2-1 "16-Bit Addressing Forms with the ModR/M byte". We can only
      // use SI/DI/BP/BX, which have "row" values 4-7 in no particular order,
      // while values 0-3 indicate the allowed combinations (base+index) of
      // those: 0 for BX+SI, 1 for BX+DI, 2 for BP+SI, 3 for BP+DI.
      //
      // R16Table[] is a lookup from the normal RegNo, to the row values from
      // Table 2-1 for 16-bit addressing modes. Where zero means disallowed.
      static const unsigned R16Table[] = { 0, 0, 0, 7, 0, 6, 4, 5 };
      unsigned RMfield = R16Table[BaseRegNo];

      assert(RMfield && "invalid 16-bit base register");

      if (IndexReg.getReg()) {
        unsigned IndexReg16 = R16Table[GetX86RegNum(IndexReg)];

        assert(IndexReg16 && "invalid 16-bit index register");
        // We must have one of SI/DI (4,5), and one of BP/BX (6,7).
        assert(((IndexReg16 ^ RMfield) & 2) &&
               "invalid 16-bit base/index register combination");
        assert(Scale.getImm() == 1 &&
               "invalid scale for 16-bit memory reference");

        // Allow base/index to appear in either order (although GAS doesn't).
        if (IndexReg16 & 2)
          RMfield = (RMfield & 1) | ((7 - IndexReg16) << 1);
        else
          RMfield = (IndexReg16 & 1) | ((7 - RMfield) << 1);
      }

      if (Disp.isImm() && isDisp8(Disp.getImm())) {
        if (Disp.getImm() == 0 && RMfield != 6) {
          // There is no displacement; just the register.
          EmitByte(ModRMByte(0, RegOpcodeField, RMfield), CurByte, OS);
          return;
        }
        // Use the [REG]+disp8 form, including for [BP] which cannot be encoded.
        EmitByte(ModRMByte(1, RegOpcodeField, RMfield), CurByte, OS);
        EmitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, CurByte, OS, Fixups);
        return;
      }
      // This is the [REG]+disp16 case.
      EmitByte(ModRMByte(2, RegOpcodeField, RMfield), CurByte, OS);
    } else {
      // There is no BaseReg; this is the plain [disp16] case.
      EmitByte(ModRMByte(0, RegOpcodeField, 6), CurByte, OS);
    }

    // Emit 16-bit displacement for plain disp16 or [REG]+disp16 cases.
    EmitImmediate(Disp, MI.getLoc(), 2, FK_Data_2, CurByte, OS, Fixups);
    return;
  }
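  // Worked example (illustrative): in 16-bit mode `movw %ax, (%bx,%si)`
  // encodes as 89 00 (Mod=00, reg=%ax, R/M=000 selecting [BX+SI]), while
  // [BP] has no Mod=00 form and is emitted as Mod=01, R/M=110 with a zero
  // disp8.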
  // Determine whether a SIB byte is needed.
  // If no BaseReg, issue a RIP relative instruction only if the MCE can
  // resolve addresses on-the-fly, otherwise use SIB (Intel Manual 2A, table
  // 2-7) and absolute references.

  if (// The SIB byte must be used if there is an index register.
      IndexReg.getReg() == 0 &&
      // The SIB byte must be used if the base is ESP/RSP/R12, all of which
      // encode to an R/M value of 4, which indicates that a SIB byte is
      // required.
      BaseRegNo != N86::ESP &&
      // If there is no base register and we're in 64-bit mode, we need a SIB
      // byte to emit an addr that is just 'disp32' (the non-RIP relative form).
      (!is64BitMode(STI) || BaseReg != 0)) {

    if (BaseReg == 0) { // [disp32] in X86-32 mode
      EmitByte(ModRMByte(0, RegOpcodeField, 5), CurByte, OS);
      EmitImmediate(Disp, MI.getLoc(), 4, FK_Data_4, CurByte, OS, Fixups);
      return;
    }

    // If the base is not EBP/ESP and there is no displacement, use simple
    // indirect register encoding, this handles addresses like [EAX]. The
    // encoding for [EBP] with no displacement means [disp32] so we handle it
    // by emitting a displacement of 0 below.
    if (BaseRegNo != N86::EBP) {
      if (Disp.isImm() && Disp.getImm() == 0) {
        EmitByte(ModRMByte(0, RegOpcodeField, BaseRegNo), CurByte, OS);
        return;
      }

      // If the displacement is @tlscall, treat it as a zero.
      if (Disp.isExpr()) {
        auto *Sym = dyn_cast<MCSymbolRefExpr>(Disp.getExpr());
        if (Sym && Sym->getKind() == MCSymbolRefExpr::VK_TLSCALL) {
          // This is exclusively used by call *a@tlscall(base). The relocation
          // (R_386_TLSCALL or R_X86_64_TLSCALL) applies to the beginning.
          Fixups.push_back(MCFixup::create(0, Sym, FK_NONE, MI.getLoc()));
          EmitByte(ModRMByte(0, RegOpcodeField, BaseRegNo), CurByte, OS);
          return;
        }
      }
    }

    // Otherwise, if the displacement fits in a byte, encode as [REG+disp8].
    if (Disp.isImm()) {
      if (!HasEVEX && isDisp8(Disp.getImm())) {
        EmitByte(ModRMByte(1, RegOpcodeField, BaseRegNo), CurByte, OS);
        EmitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, CurByte, OS, Fixups);
        return;
      }
      // Try EVEX compressed 8-bit displacement first; if failed, fall back to
      // 32-bit displacement.
      int CDisp8 = 0;
      if (HasEVEX && isCDisp8(TSFlags, Disp.getImm(), CDisp8)) {
        EmitByte(ModRMByte(1, RegOpcodeField, BaseRegNo), CurByte, OS);
        EmitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, CurByte, OS, Fixups,
                      CDisp8 - Disp.getImm());
        return;
      }
    }

    // Otherwise, emit the most general non-SIB encoding: [REG+disp32]
    EmitByte(ModRMByte(2, RegOpcodeField, BaseRegNo), CurByte, OS);
    unsigned Opcode = MI.getOpcode();
    unsigned FixupKind = Opcode == X86::MOV32rm ? X86::reloc_signed_4byte_relax
                                                : X86::reloc_signed_4byte;
    EmitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(FixupKind), CurByte, OS,
                  Fixups);
    return;
  }
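  // Worked example (illustrative): `movl %ecx, 16(%rax)` fits in disp8 and
  // encodes as 89 48 10 (Mod=01, reg=%ecx, R/M=%rax, disp8=0x10); with a
  // displacement of 0x1000 it falls through to the Mod=10 disp32 form,
  // 89 88 00 10 00 00.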
  // We need a SIB byte, so start by outputting the ModR/M byte first
  assert(IndexReg.getReg() != X86::ESP &&
         IndexReg.getReg() != X86::RSP && "Cannot use ESP as index reg!");

  bool ForceDisp32 = false;
  bool ForceDisp8 = false;
  int CDisp8 = 0;
  int ImmOffset = 0;
  if (BaseReg == 0) {
    // If there is no base register, we emit the special case SIB byte with
    // MOD=0, BASE=5, to JUST get the index, scale, and displacement.
    EmitByte(ModRMByte(0, RegOpcodeField, 4), CurByte, OS);
    ForceDisp32 = true;
  } else if (!Disp.isImm()) {
    // Emit the normal disp32 encoding.
    EmitByte(ModRMByte(2, RegOpcodeField, 4), CurByte, OS);
    ForceDisp32 = true;
  } else if (Disp.getImm() == 0 &&
             // Base reg can't be anything that ends up with '5' as the base
             // reg, it is the magic [*] nomenclature that indicates no base.
             BaseRegNo != N86::EBP) {
    // Emit no displacement ModR/M byte
    EmitByte(ModRMByte(0, RegOpcodeField, 4), CurByte, OS);
  } else if (!HasEVEX && isDisp8(Disp.getImm())) {
    // Emit the disp8 encoding.
    EmitByte(ModRMByte(1, RegOpcodeField, 4), CurByte, OS);
    ForceDisp8 = true; // Make sure to force 8 bit disp if Base=EBP
  } else if (HasEVEX && isCDisp8(TSFlags, Disp.getImm(), CDisp8)) {
    // Emit the disp8 encoding.
    EmitByte(ModRMByte(1, RegOpcodeField, 4), CurByte, OS);
    ForceDisp8 = true; // Make sure to force 8 bit disp if Base=EBP
    ImmOffset = CDisp8 - Disp.getImm();
  } else {
    // Emit the normal disp32 encoding.
    EmitByte(ModRMByte(2, RegOpcodeField, 4), CurByte, OS);
  }

  // Calculate what the SS field value should be...
  static const unsigned SSTable[] = { ~0U, 0, 1, ~0U, 2, ~0U, ~0U, ~0U, 3 };
  unsigned SS = SSTable[Scale.getImm()];

  if (BaseReg == 0) {
    // Handle the SIB byte for the case where there is no base, see Intel
    // Manual 2A, table 2-7. The displacement has already been output.
    unsigned IndexRegNo;
    if (IndexReg.getReg())
      IndexRegNo = GetX86RegNum(IndexReg);
    else // Examples: [ESP+1*<noreg>+4] or [scaled idx]+disp32 (MOD=0,BASE=5)
      IndexRegNo = 4;
    EmitSIBByte(SS, IndexRegNo, 5, CurByte, OS);
  } else {
    unsigned IndexRegNo;
    if (IndexReg.getReg())
      IndexRegNo = GetX86RegNum(IndexReg);
    else
      IndexRegNo = 4; // For example [ESP+1*<noreg>+4]
    EmitSIBByte(SS, IndexRegNo, GetX86RegNum(Base), CurByte, OS);
  }

  // Do we need to output a displacement?
  if (ForceDisp8)
    EmitImmediate(Disp, MI.getLoc(), 1, FK_Data_1, CurByte, OS, Fixups,
                  ImmOffset);
  else if (ForceDisp32 || Disp.getImm() != 0)
    EmitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(X86::reloc_signed_4byte),
                  CurByte, OS, Fixups);
}
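// Worked example (illustrative): `leal (%eax,%ebx,4), %ecx` takes the SIB
// path and encodes as 8D 0C 98: ModRM 0x0C is Mod=00/reg=%ecx/R/M=100 (SIB
// follows), and SIB 0x98 packs SS=0b10 (scale 4), Index=%ebx, Base=%eax.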
/// EmitVEXOpcodePrefix - AVX instructions are encoded using an opcode prefix
/// called VEX.
void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
                                           int MemOperand, const MCInst &MI,
                                           const MCInstrDesc &Desc,
                                           raw_ostream &OS) const {
  assert(!(TSFlags & X86II::LOCK) && "Can't have LOCK VEX.");

  uint64_t Encoding = TSFlags & X86II::EncodingMask;
  bool HasEVEX_K = TSFlags & X86II::EVEX_K;
  bool HasVEX_4V = TSFlags & X86II::VEX_4V;
  bool HasEVEX_RC = TSFlags & X86II::EVEX_RC;

  // VEX_R: opcode extension equivalent to REX.R in
  // 1's complement (inverted) form
  //
  //  1: Same as REX_R=0 (must be 1 in 32-bit mode)
  //  0: Same as REX_R=1 (64 bit mode only)
  uint8_t VEX_R = 0x1;
  uint8_t EVEX_R2 = 0x1;

  // VEX_X: equivalent to REX.X, only used when a
  // register is used for index in SIB Byte.
  //
  //  1: Same as REX.X=0 (must be 1 in 32-bit mode)
  //  0: Same as REX.X=1 (64-bit mode only)
  uint8_t VEX_X = 0x1;

  // VEX_B:
  //
  //  1: Same as REX_B=0 (ignored in 32-bit mode)
  //  0: Same as REX_B=1 (64 bit mode only)
  uint8_t VEX_B = 0x1;

  // VEX_W: opcode specific (use like REX.W, or used for
  // opcode extension, or ignored, depending on the opcode byte)
  uint8_t VEX_W = (TSFlags & X86II::VEX_W) ? 1 : 0;

  // VEX_5M (VEX m-mmmmm field):
  //
  //  0b00000: Reserved for future use
  //  0b00001: implied 0F leading opcode
  //  0b00010: implied 0F 38 leading opcode bytes
  //  0b00011: implied 0F 3A leading opcode bytes
  //  0b00100-0b11111: Reserved for future use
  //  0b01000: XOP map select - 08h instructions with imm byte
  //  0b01001: XOP map select - 09h instructions with no imm byte
  //  0b01010: XOP map select - 0Ah instructions with imm dword
  uint8_t VEX_5M;
  switch (TSFlags & X86II::OpMapMask) {
  default: llvm_unreachable("Invalid prefix!");
  case X86II::TB:   VEX_5M = 0x1; break; // 0F
  case X86II::T8:   VEX_5M = 0x2; break; // 0F 38
  case X86II::TA:   VEX_5M = 0x3; break; // 0F 3A
  case X86II::XOP8: VEX_5M = 0x8; break;
  case X86II::XOP9: VEX_5M = 0x9; break;
  case X86II::XOPA: VEX_5M = 0xA; break;
  }

  // VEX_4V (VEX vvvv field): a register specifier
  // (in 1's complement form) or 1111 if unused.
  uint8_t VEX_4V = 0xf;
  uint8_t EVEX_V2 = 0x1;

  // EVEX_L2/VEX_L (Vector Length):
  //
  //  L2 L
  //   0 0: scalar or 128-bit vector
  //   0 1: 256-bit vector
  //   1 0: 512-bit vector
  uint8_t VEX_L = (TSFlags & X86II::VEX_L) ? 1 : 0;
  uint8_t EVEX_L2 = (TSFlags & X86II::EVEX_L2) ? 1 : 0;

  // VEX_PP: opcode extension providing equivalent
  // functionality of a SIMD prefix
  //
  //  0b00: None
  //  0b01: 66
  //  0b10: F3
  //  0b11: F2
  uint8_t VEX_PP = 0;
  switch (TSFlags & X86II::OpPrefixMask) {
  case X86II::PD: VEX_PP = 0x1; break; // 66
  case X86II::XS: VEX_PP = 0x2; break; // F3
  case X86II::XD: VEX_PP = 0x3; break; // F2
  }

  // EVEX_U
  uint8_t EVEX_U = 1; // Always '1' so far

  // EVEX_z
  uint8_t EVEX_z = (HasEVEX_K && (TSFlags & X86II::EVEX_Z)) ? 1 : 0;

  // EVEX_b
  uint8_t EVEX_b = (TSFlags & X86II::EVEX_B) ? 1 : 0;

  // EVEX_rc
  uint8_t EVEX_rc = 0;

  // EVEX_aaa
  uint8_t EVEX_aaa = 0;

  bool EncodeRC = false;

  // Classify VEX_B, VEX_4V, VEX_R, VEX_X
  unsigned NumOps = Desc.getNumOperands();
  unsigned CurOp = X86II::getOperandBias(Desc);

  switch (TSFlags & X86II::FormMask) {
  default: llvm_unreachable("Unexpected form in EmitVEXOpcodePrefix!");
  case X86II::RawFrm:
    break;
  case X86II::MRMDestMem: {
    // MRMDestMem instructions forms:
    //  MemAddr, src1(ModR/M)
    //  MemAddr, src1(VEX_4V), src2(ModR/M)
    //  MemAddr, src1(ModR/M), imm8
    //
    unsigned BaseRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrBaseReg);
    VEX_B = ~(BaseRegEnc >> 3) & 1;
    unsigned IndexRegEnc = getX86RegEncoding(MI, MemOperand+X86::AddrIndexReg);
    VEX_X = ~(IndexRegEnc >> 3) & 1;
    if (!HasVEX_4V) // Only needed with VSIB which don't use VVVV.
      EVEX_V2 = ~(IndexRegEnc >> 4) & 1;

    CurOp += X86::AddrNumOperands;

    if (HasEVEX_K)
      EVEX_aaa = getX86RegEncoding(MI, CurOp++);

    if (HasVEX_4V) {
      unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
      VEX_4V = ~VRegEnc & 0xf;
      EVEX_V2 = ~(VRegEnc >> 4) & 1;
    }

    unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
    VEX_R = ~(RegEnc >> 3) & 1;
    EVEX_R2 = ~(RegEnc >> 4) & 1;
    break;
  }
  case X86II::MRMSrcMem: {
    // MRMSrcMem instructions forms:
    //  src1(ModR/M), MemAddr
    //  src1(ModR/M), src2(VEX_4V), MemAddr
    //  src1(ModR/M), MemAddr, imm8
    //  src1(ModR/M), MemAddr, src2(Imm[7:4])
    //
    //  FMA4:
    //  dst(ModR/M.reg), src1(VEX_4V), src2(ModR/M), src3(Imm[7:4])
    unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
    VEX_R = ~(RegEnc >> 3) & 1;
    EVEX_R2 = ~(RegEnc >> 4) & 1;

    if (HasEVEX_K)
      EVEX_aaa = getX86RegEncoding(MI, CurOp++);

    if (HasVEX_4V) {
      unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
      VEX_4V = ~VRegEnc & 0xf;
      EVEX_V2 = ~(VRegEnc >> 4) & 1;
    }

    unsigned BaseRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrBaseReg);
    VEX_B = ~(BaseRegEnc >> 3) & 1;
    unsigned IndexRegEnc = getX86RegEncoding(MI, MemOperand+X86::AddrIndexReg);
    VEX_X = ~(IndexRegEnc >> 3) & 1;
    if (!HasVEX_4V) // Only needed with VSIB which don't use VVVV.
      EVEX_V2 = ~(IndexRegEnc >> 4) & 1;

    break;
  }
  case X86II::MRMSrcMem4VOp3: {
    // Instruction format for 4VOp3:
    //   src1(ModR/M), MemAddr, src3(VEX_4V)
    unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
    VEX_R = ~(RegEnc >> 3) & 1;

    unsigned BaseRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrBaseReg);
    VEX_B = ~(BaseRegEnc >> 3) & 1;
    unsigned IndexRegEnc = getX86RegEncoding(MI, MemOperand+X86::AddrIndexReg);
    VEX_X = ~(IndexRegEnc >> 3) & 1;

    VEX_4V = ~getX86RegEncoding(MI, CurOp + X86::AddrNumOperands) & 0xf;
    break;
  }
  case X86II::MRMSrcMemOp4: {
    //  dst(ModR/M.reg), src1(VEX_4V), src2(Imm[7:4]), src3(ModR/M),
    unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
    VEX_R = ~(RegEnc >> 3) & 1;

    unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
    VEX_4V = ~VRegEnc & 0xf;

    unsigned BaseRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrBaseReg);
    VEX_B = ~(BaseRegEnc >> 3) & 1;
    unsigned IndexRegEnc = getX86RegEncoding(MI, MemOperand+X86::AddrIndexReg);
    VEX_X = ~(IndexRegEnc >> 3) & 1;
    break;
  }
  case X86II::MRM0m: case X86II::MRM1m:
  case X86II::MRM2m: case X86II::MRM3m:
  case X86II::MRM4m: case X86II::MRM5m:
  case X86II::MRM6m: case X86II::MRM7m: {
    // MRM[0-9]m instructions forms:
    //  MemAddr
    //  src1(VEX_4V), MemAddr
    if (HasVEX_4V) {
      unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
      VEX_4V = ~VRegEnc & 0xf;
      EVEX_V2 = ~(VRegEnc >> 4) & 1;
    }

    if (HasEVEX_K)
      EVEX_aaa = getX86RegEncoding(MI, CurOp++);

    unsigned BaseRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrBaseReg);
    VEX_B = ~(BaseRegEnc >> 3) & 1;
    unsigned IndexRegEnc = getX86RegEncoding(MI, MemOperand+X86::AddrIndexReg);
    VEX_X = ~(IndexRegEnc >> 3) & 1;
    break;
  }
  case X86II::MRMSrcReg: {
    // MRMSrcReg instructions forms:
    //  dst(ModR/M), src1(VEX_4V), src2(ModR/M), src3(Imm[7:4])
    //  dst(ModR/M), src1(ModR/M)
    //  dst(ModR/M), src1(ModR/M), imm8
    //
    //  FMA4:
    //  dst(ModR/M.reg), src1(VEX_4V), src2(Imm[7:4]), src3(ModR/M),
    unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
    VEX_R = ~(RegEnc >> 3) & 1;
    EVEX_R2 = ~(RegEnc >> 4) & 1;

    if (HasEVEX_K)
      EVEX_aaa = getX86RegEncoding(MI, CurOp++);

    if (HasVEX_4V) {
      unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
      VEX_4V = ~VRegEnc & 0xf;
      EVEX_V2 = ~(VRegEnc >> 4) & 1;
    }

    RegEnc = getX86RegEncoding(MI, CurOp++);
    VEX_B = ~(RegEnc >> 3) & 1;
    VEX_X = ~(RegEnc >> 4) & 1;

    if (EVEX_b) {
      if (HasEVEX_RC) {
        unsigned RcOperand = NumOps - 1;
        assert(RcOperand >= CurOp);
        EVEX_rc = MI.getOperand(RcOperand).getImm();
        assert(EVEX_rc <= 3 && "Invalid rounding control!");
      }
      EncodeRC = true;
    }
    break;
  }
  case X86II::MRMSrcReg4VOp3: {
    // Instruction format for 4VOp3:
    //   src1(ModR/M), src2(ModR/M), src3(VEX_4V)
    unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
    VEX_R = ~(RegEnc >> 3) & 1;

    RegEnc = getX86RegEncoding(MI, CurOp++);
    VEX_B = ~(RegEnc >> 3) & 1;

    VEX_4V = ~getX86RegEncoding(MI, CurOp++) & 0xf;
    break;
  }
  case X86II::MRMSrcRegOp4: {
    //  dst(ModR/M.reg), src1(VEX_4V), src2(Imm[7:4]), src3(ModR/M),
    unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
    VEX_R = ~(RegEnc >> 3) & 1;

    unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
    VEX_4V = ~VRegEnc & 0xf;

    // Skip second register source (encoded in Imm[7:4])
    ++CurOp;

    RegEnc = getX86RegEncoding(MI, CurOp++);
    VEX_B = ~(RegEnc >> 3) & 1;
    VEX_X = ~(RegEnc >> 4) & 1;
    break;
  }
  case X86II::MRMDestReg: {
    // MRMDestReg instructions forms:
    //  dst(ModR/M), src(ModR/M)
    //  dst(ModR/M), src(ModR/M), imm8
    //  dst(ModR/M), src1(VEX_4V), src2(ModR/M)
    unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
    VEX_B = ~(RegEnc >> 3) & 1;
    VEX_X = ~(RegEnc >> 4) & 1;

    if (HasEVEX_K)
      EVEX_aaa = getX86RegEncoding(MI, CurOp++);

    if (HasVEX_4V) {
      unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
      VEX_4V = ~VRegEnc & 0xf;
      EVEX_V2 = ~(VRegEnc >> 4) & 1;
    }

    RegEnc = getX86RegEncoding(MI, CurOp++);
    VEX_R = ~(RegEnc >> 3) & 1;
    EVEX_R2 = ~(RegEnc >> 4) & 1;
    if (EVEX_b)
      EncodeRC = true;
    break;
  }
  case X86II::MRM0r: case X86II::MRM1r:
  case X86II::MRM2r: case X86II::MRM3r:
  case X86II::MRM4r: case X86II::MRM5r:
  case X86II::MRM6r: case X86II::MRM7r: {
    // MRM0r-MRM7r instructions forms:
    //  dst(VEX_4V), src(ModR/M), imm8
    if (HasVEX_4V) {
      unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
      VEX_4V = ~VRegEnc & 0xf;
      EVEX_V2 = ~(VRegEnc >> 4) & 1;
    }
    if (HasEVEX_K)
      EVEX_aaa = getX86RegEncoding(MI, CurOp++);

    unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
    VEX_B = ~(RegEnc >> 3) & 1;
    VEX_X = ~(RegEnc >> 4) & 1;
    break;
  }
  }
== X86II::VEX
|| Encoding
== X86II::XOP
) {
978 // VEX opcode prefix can have 2 or 3 bytes
981 // +-----+ +--------------+ +-------------------+
982 // | C4h | | RXB | m-mmmm | | W | vvvv | L | pp |
983 // +-----+ +--------------+ +-------------------+
985 // +-----+ +-------------------+
986 // | C5h | | R | vvvv | L | pp |
987 // +-----+ +-------------------+
989 // XOP uses a similar prefix:
990 // +-----+ +--------------+ +-------------------+
991 // | 8Fh | | RXB | m-mmmm | | W | vvvv | L | pp |
992 // +-----+ +--------------+ +-------------------+
993 uint8_t LastByte
= VEX_PP
| (VEX_L
<< 2) | (VEX_4V
<< 3);
995 // Can we use the 2 byte VEX prefix?
996 if (!(MI
.getFlags() & X86::IP_USE_VEX3
) &&
997 Encoding
== X86II::VEX
&& VEX_B
&& VEX_X
&& !VEX_W
&& (VEX_5M
== 1)) {
998 EmitByte(0xC5, CurByte
, OS
);
999 EmitByte(LastByte
| (VEX_R
<< 7), CurByte
, OS
);
1003 // 3 byte VEX prefix
1004 EmitByte(Encoding
== X86II::XOP
? 0x8F : 0xC4, CurByte
, OS
);
1005 EmitByte(VEX_R
<< 7 | VEX_X
<< 6 | VEX_B
<< 5 | VEX_5M
, CurByte
, OS
);
1006 EmitByte(LastByte
| (VEX_W
<< 7), CurByte
, OS
);
1008 assert(Encoding
== X86II::EVEX
&& "unknown encoding!");
1009 // EVEX opcode prefix can have 4 bytes
1011 // +-----+ +--------------+ +-------------------+ +------------------------+
1012 // | 62h | | RXBR' | 00mm | | W | vvvv | U | pp | | z | L'L | b | v' | aaa |
1013 // +-----+ +--------------+ +-------------------+ +------------------------+
1014 assert((VEX_5M
& 0x3) == VEX_5M
1015 && "More than 2 significant bits in VEX.m-mmmm fields for EVEX!");
1017 EmitByte(0x62, CurByte
, OS
);
1018 EmitByte((VEX_R
<< 7) |
1022 VEX_5M
, CurByte
, OS
);
1023 EmitByte((VEX_W
<< 7) |
1026 VEX_PP
, CurByte
, OS
);
1028 EmitByte((EVEX_z
<< 7) |
1032 EVEX_aaa
, CurByte
, OS
);
1034 EmitByte((EVEX_z
<< 7) |
1039 EVEX_aaa
, CurByte
, OS
);
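// Worked example (illustrative): `vaddps %xmm2, %xmm1, %xmm0` lives in the
// 0F map with no extended registers, so the 2-byte form C5 F0 58 C2 is used
// (F0 packs R=1, vvvv=~0001 for %xmm1, L=0, pp=00). `vfmadd213ps %xmm2,
// %xmm1, %xmm0` needs the 0F38 map and therefore the 3-byte form
// C4 E2 71 A8 C2, where E2 packs R/X/B=111 with m-mmmm=00010 and 71 packs
// W=0, vvvv=~0001, L=0, pp=01 (66).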
/// DetermineREXPrefix - Determine if the MCInst has to be encoded with an
/// X86-64 REX prefix which specifies 1) 64-bit instructions, 2) non-default
/// operand size, and 3) use of X86-64 extended registers.
uint8_t X86MCCodeEmitter::DetermineREXPrefix(const MCInst &MI, uint64_t TSFlags,
                                             int MemOperand,
                                             const MCInstrDesc &Desc) const {
  uint8_t REX = 0;
  bool UsesHighByteReg = false;

  if (TSFlags & X86II::REX_W)
    REX |= 1 << 3; // set REX.W

  if (MI.getNumOperands() == 0) return REX;

  unsigned NumOps = MI.getNumOperands();
  unsigned CurOp = X86II::getOperandBias(Desc);

  // If it accesses SPL, BPL, SIL, or DIL, then it requires a 0x40 REX prefix.
  for (unsigned i = CurOp; i != NumOps; ++i) {
    const MCOperand &MO = MI.getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == X86::AH || Reg == X86::BH || Reg == X86::CH || Reg == X86::DH)
      UsesHighByteReg = true;
    if (X86II::isX86_64NonExtLowByteReg(Reg))
      // FIXME: The caller of DetermineREXPrefix slaps this prefix onto anything
      // that returns non-zero.
      REX |= 0x40; // REX fixed encoding prefix
  }

  switch (TSFlags & X86II::FormMask) {
  case X86II::AddRegFrm:
    REX |= isREXExtendedReg(MI, CurOp++) << 0; // REX.B
    break;
  case X86II::MRMSrcReg:
  case X86II::MRMSrcRegCC:
    REX |= isREXExtendedReg(MI, CurOp++) << 2; // REX.R
    REX |= isREXExtendedReg(MI, CurOp++) << 0; // REX.B
    break;
  case X86II::MRMSrcMem:
  case X86II::MRMSrcMemCC:
    REX |= isREXExtendedReg(MI, CurOp++) << 2; // REX.R
    REX |= isREXExtendedReg(MI, MemOperand + X86::AddrBaseReg) << 0; // REX.B
    REX |= isREXExtendedReg(MI, MemOperand + X86::AddrIndexReg) << 1; // REX.X
    CurOp += X86::AddrNumOperands;
    break;
  case X86II::MRMDestReg:
    REX |= isREXExtendedReg(MI, CurOp++) << 0; // REX.B
    REX |= isREXExtendedReg(MI, CurOp++) << 2; // REX.R
    break;
  case X86II::MRMDestMem:
    REX |= isREXExtendedReg(MI, MemOperand + X86::AddrBaseReg) << 0; // REX.B
    REX |= isREXExtendedReg(MI, MemOperand + X86::AddrIndexReg) << 1; // REX.X
    CurOp += X86::AddrNumOperands;
    REX |= isREXExtendedReg(MI, CurOp++) << 2; // REX.R
    break;
  case X86II::MRMXmCC: case X86II::MRMXm:
  case X86II::MRM0m: case X86II::MRM1m:
  case X86II::MRM2m: case X86II::MRM3m:
  case X86II::MRM4m: case X86II::MRM5m:
  case X86II::MRM6m: case X86II::MRM7m:
    REX |= isREXExtendedReg(MI, MemOperand + X86::AddrBaseReg) << 0; // REX.B
    REX |= isREXExtendedReg(MI, MemOperand + X86::AddrIndexReg) << 1; // REX.X
    break;
  case X86II::MRMXrCC: case X86II::MRMXr:
  case X86II::MRM0r: case X86II::MRM1r:
  case X86II::MRM2r: case X86II::MRM3r:
  case X86II::MRM4r: case X86II::MRM5r:
  case X86II::MRM6r: case X86II::MRM7r:
    REX |= isREXExtendedReg(MI, CurOp++) << 0; // REX.B
    break;
  }

  if (REX && UsesHighByteReg)
    report_fatal_error("Cannot encode high byte register in REX-prefixed instruction");

  return REX;
}
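// Worked example (illustrative): `movq %rax, %rcx` only needs REX.W and
// encodes as 48 89 C1, while `addl %eax, %r8d` sets REX.B for the extended
// ModRM.rm register and encodes as 41 01 C0.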
/// EmitSegmentOverridePrefix - Emit segment override opcode prefix as needed
void X86MCCodeEmitter::EmitSegmentOverridePrefix(unsigned &CurByte,
                                                 unsigned SegOperand,
                                                 const MCInst &MI,
                                                 raw_ostream &OS) const {
  // Check for explicit segment override on memory operand.
  switch (MI.getOperand(SegOperand).getReg()) {
  default: llvm_unreachable("Unknown segment register!");
  case 0: break;
  case X86::CS: EmitByte(0x2E, CurByte, OS); break;
  case X86::SS: EmitByte(0x36, CurByte, OS); break;
  case X86::DS: EmitByte(0x3E, CurByte, OS); break;
  case X86::ES: EmitByte(0x26, CurByte, OS); break;
  case X86::FS: EmitByte(0x64, CurByte, OS); break;
  case X86::GS: EmitByte(0x65, CurByte, OS); break;
  }
}
/// Emit all instruction prefixes prior to the opcode.
///
/// MemOperand is the operand # of the start of a memory operand if present. If
/// not present, it is -1.
///
/// Returns true if a REX prefix was used.
bool X86MCCodeEmitter::emitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
                                        int MemOperand, const MCInst &MI,
                                        const MCInstrDesc &Desc,
                                        const MCSubtargetInfo &STI,
                                        raw_ostream &OS) const {
  bool Ret = false;
  // Emit the operand size opcode prefix as needed.
  if ((TSFlags & X86II::OpSizeMask) == (is16BitMode(STI) ? X86II::OpSize32
                                                         : X86II::OpSize16))
    EmitByte(0x66, CurByte, OS);

  // Emit the LOCK opcode prefix.
  if (TSFlags & X86II::LOCK || MI.getFlags() & X86::IP_HAS_LOCK)
    EmitByte(0xF0, CurByte, OS);

  // Emit the NOTRACK opcode prefix.
  if (TSFlags & X86II::NOTRACK || MI.getFlags() & X86::IP_HAS_NOTRACK)
    EmitByte(0x3E, CurByte, OS);

  switch (TSFlags & X86II::OpPrefixMask) {
  case X86II::PD: // 66
    EmitByte(0x66, CurByte, OS);
    break;
  case X86II::XS: // F3
    EmitByte(0xF3, CurByte, OS);
    break;
  case X86II::XD: // F2
    EmitByte(0xF2, CurByte, OS);
    break;
  }

  // Handle REX prefix.
  // FIXME: Can this come before F2 etc to simplify emission?
  if (is64BitMode(STI)) {
    if (uint8_t REX = DetermineREXPrefix(MI, TSFlags, MemOperand, Desc)) {
      EmitByte(0x40 | REX, CurByte, OS);
      Ret = true;
    }
  } else {
    assert(!(TSFlags & X86II::REX_W) && "REX.W requires 64bit mode.");
  }

  // 0x0F escape code must be emitted just before the opcode.
  switch (TSFlags & X86II::OpMapMask) {
  case X86II::TB:        // Two-byte opcode map
  case X86II::T8:        // 0F 38
  case X86II::TA:        // 0F 3A
  case X86II::ThreeDNow: // 0F 0F, second 0F emitted by caller.
    EmitByte(0x0F, CurByte, OS);
    break;
  }

  switch (TSFlags & X86II::OpMapMask) {
  case X86II::T8: // 0F 38
    EmitByte(0x38, CurByte, OS);
    break;
  case X86II::TA: // 0F 3A
    EmitByte(0x3A, CurByte, OS);
    break;
  }
  return Ret;
}
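// Worked example (illustrative): assembling `addw %ax, %bx` in 32-bit mode
// requires the 0x66 operand-size prefix, so the bytes are 66 01 C3; the same
// instruction assembled in 16-bit mode omits the prefix.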
void X86MCCodeEmitter::
encodeInstruction(const MCInst &MI, raw_ostream &OS,
                  SmallVectorImpl<MCFixup> &Fixups,
                  const MCSubtargetInfo &STI) const {
  unsigned Opcode = MI.getOpcode();
  const MCInstrDesc &Desc = MCII.get(Opcode);
  uint64_t TSFlags = Desc.TSFlags;
  unsigned Flags = MI.getFlags();

  // Pseudo instructions don't get encoded.
  if ((TSFlags & X86II::FormMask) == X86II::Pseudo)
    return;

  unsigned NumOps = Desc.getNumOperands();
  unsigned CurOp = X86II::getOperandBias(Desc);

  // Keep track of the current byte being emitted.
  unsigned CurByte = 0;

  // Encoding type for this instruction.
  uint64_t Encoding = TSFlags & X86II::EncodingMask;

  // Does it use the VEX.VVVV field?
  bool HasVEX_4V = TSFlags & X86II::VEX_4V;
  bool HasVEX_I8Reg = (TSFlags & X86II::ImmMask) == X86II::Imm8Reg;

  // Does it use the EVEX.aaa field?
  bool HasEVEX_K = TSFlags & X86II::EVEX_K;
  bool HasEVEX_RC = TSFlags & X86II::EVEX_RC;

  // Used if a register is encoded in 7:4 of immediate.
  unsigned I8RegNum = 0;

  // Determine where the memory operand starts, if present.
  int MemoryOperand = X86II::getMemoryOperandNo(TSFlags);
  if (MemoryOperand != -1) MemoryOperand += CurOp;

  // Emit segment override opcode prefix as needed.
  if (MemoryOperand >= 0)
    EmitSegmentOverridePrefix(CurByte, MemoryOperand + X86::AddrSegmentReg,
                              MI, OS);

  // Emit the repeat opcode prefix as needed.
  if (TSFlags & X86II::REP || Flags & X86::IP_HAS_REPEAT)
    EmitByte(0xF3, CurByte, OS);
  if (Flags & X86::IP_HAS_REPEAT_NE)
    EmitByte(0xF2, CurByte, OS);

  // Emit the address size opcode prefix as needed.
  bool need_address_override;
  uint64_t AdSize = TSFlags & X86II::AdSizeMask;
  if ((is16BitMode(STI) && AdSize == X86II::AdSize32) ||
      (is32BitMode(STI) && AdSize == X86II::AdSize16) ||
      (is64BitMode(STI) && AdSize == X86II::AdSize32)) {
    need_address_override = true;
  } else if (MemoryOperand < 0) {
    need_address_override = false;
  } else if (is64BitMode(STI)) {
    assert(!Is16BitMemOperand(MI, MemoryOperand, STI));
    need_address_override = Is32BitMemOperand(MI, MemoryOperand);
  } else if (is32BitMode(STI)) {
    assert(!Is64BitMemOperand(MI, MemoryOperand));
    need_address_override = Is16BitMemOperand(MI, MemoryOperand, STI);
  } else {
    assert(is16BitMode(STI));
    assert(!Is64BitMemOperand(MI, MemoryOperand));
    need_address_override = !Is16BitMemOperand(MI, MemoryOperand, STI);
  }

  if (need_address_override)
    EmitByte(0x67, CurByte, OS);

  bool Rex = false;
  if (Encoding == 0)
    Rex = emitOpcodePrefix(TSFlags, CurByte, MemoryOperand, MI, Desc, STI, OS);
  else
    EmitVEXOpcodePrefix(TSFlags, CurByte, MemoryOperand, MI, Desc, OS);

  uint8_t BaseOpcode = X86II::getBaseOpcodeFor(TSFlags);

  if ((TSFlags & X86II::OpMapMask) == X86II::ThreeDNow)
    BaseOpcode = 0x0F; // Weird 3DNow! encoding.

  unsigned OpcodeOffset = 0;

  uint64_t Form = TSFlags & X86II::FormMask;
  switch (Form) {
  default: errs() << "FORM: " << Form << "\n";
    llvm_unreachable("Unknown FormMask value in X86MCCodeEmitter!");
  case X86II::Pseudo:
    llvm_unreachable("Pseudo instruction shouldn't be emitted");
  case X86II::RawFrmDstSrc: {
    unsigned siReg = MI.getOperand(1).getReg();
    assert(((siReg == X86::SI && MI.getOperand(0).getReg() == X86::DI) ||
            (siReg == X86::ESI && MI.getOperand(0).getReg() == X86::EDI) ||
            (siReg == X86::RSI && MI.getOperand(0).getReg() == X86::RDI)) &&
           "SI and DI register sizes do not match");
    // Emit segment override opcode prefix as needed (not for %ds).
    if (MI.getOperand(2).getReg() != X86::DS)
      EmitSegmentOverridePrefix(CurByte, 2, MI, OS);
    // Emit AdSize prefix as needed.
    if ((!is32BitMode(STI) && siReg == X86::ESI) ||
        (is32BitMode(STI) && siReg == X86::SI))
      EmitByte(0x67, CurByte, OS);
    CurOp += 3; // Consume operands.
    EmitByte(BaseOpcode, CurByte, OS);
    break;
  }
  case X86II::RawFrmSrc: {
    unsigned siReg = MI.getOperand(0).getReg();
    // Emit segment override opcode prefix as needed (not for %ds).
    if (MI.getOperand(1).getReg() != X86::DS)
      EmitSegmentOverridePrefix(CurByte, 1, MI, OS);
    // Emit AdSize prefix as needed.
    if ((!is32BitMode(STI) && siReg == X86::ESI) ||
        (is32BitMode(STI) && siReg == X86::SI))
      EmitByte(0x67, CurByte, OS);
    CurOp += 2; // Consume operands.
    EmitByte(BaseOpcode, CurByte, OS);
    break;
  }
  case X86II::RawFrmDst: {
    unsigned siReg = MI.getOperand(0).getReg();
    // Emit AdSize prefix as needed.
    if ((!is32BitMode(STI) && siReg == X86::EDI) ||
        (is32BitMode(STI) && siReg == X86::DI))
      EmitByte(0x67, CurByte, OS);
    ++CurOp; // Consume operand.
    EmitByte(BaseOpcode, CurByte, OS);
    break;
  }
  case X86II::AddCCFrm: {
    // This will be added to the opcode in the fallthrough.
    OpcodeOffset = MI.getOperand(NumOps - 1).getImm();
    assert(OpcodeOffset < 16 && "Unexpected opcode offset!");
    --NumOps; // Drop the operand from the end.
    LLVM_FALLTHROUGH;
  case X86II::RawFrm:
    EmitByte(BaseOpcode + OpcodeOffset, CurByte, OS);

    if (!is64BitMode(STI) || !isPCRel32Branch(MI))
      break;

    const MCOperand &Op = MI.getOperand(CurOp++);
    EmitImmediate(Op, MI.getLoc(), X86II::getSizeOfImm(TSFlags),
                  MCFixupKind(X86::reloc_branch_4byte_pcrel), CurByte, OS,
                  Fixups);
    break;
  }
  case X86II::RawFrmMemOffs:
    // Emit segment override opcode prefix as needed.
    EmitSegmentOverridePrefix(CurByte, 1, MI, OS);
    EmitByte(BaseOpcode, CurByte, OS);
    EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
                  X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
                  CurByte, OS, Fixups);
    ++CurOp; // skip segment operand
    break;
  case X86II::RawFrmImm8:
    EmitByte(BaseOpcode, CurByte, OS);
    EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
                  X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
                  CurByte, OS, Fixups);
    EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(), 1, FK_Data_1, CurByte,
                  OS, Fixups);
    break;
  case X86II::RawFrmImm16:
    EmitByte(BaseOpcode, CurByte, OS);
    EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
                  X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
                  CurByte, OS, Fixups);
    EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(), 2, FK_Data_2, CurByte,
                  OS, Fixups);
    break;
  case X86II::AddRegFrm:
    EmitByte(BaseOpcode + GetX86RegNum(MI.getOperand(CurOp++)), CurByte, OS);
    break;

  case X86II::MRMDestReg: {
    EmitByte(BaseOpcode, CurByte, OS);
    unsigned SrcRegNum = CurOp + 1;

    if (HasEVEX_K) // Skip writemask
      ++SrcRegNum;

    if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
      ++SrcRegNum;

    EmitRegModRMByte(MI.getOperand(CurOp),
                     GetX86RegNum(MI.getOperand(SrcRegNum)), CurByte, OS);
    CurOp = SrcRegNum + 1;
    break;
  }
  case X86II::MRMDestMem: {
    EmitByte(BaseOpcode, CurByte, OS);
    unsigned SrcRegNum = CurOp + X86::AddrNumOperands;

    if (HasEVEX_K) // Skip writemask
      ++SrcRegNum;

    if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
      ++SrcRegNum;

    emitMemModRMByte(MI, CurOp, GetX86RegNum(MI.getOperand(SrcRegNum)), TSFlags,
                     Rex, CurByte, OS, Fixups, STI);
    CurOp = SrcRegNum + 1;
    break;
  }
  case X86II::MRMSrcReg: {
    EmitByte(BaseOpcode, CurByte, OS);
    unsigned SrcRegNum = CurOp + 1;

    if (HasEVEX_K) // Skip writemask
      ++SrcRegNum;

    if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
      ++SrcRegNum;

    EmitRegModRMByte(MI.getOperand(SrcRegNum),
                     GetX86RegNum(MI.getOperand(CurOp)), CurByte, OS);
    CurOp = SrcRegNum + 1;
    if (HasVEX_I8Reg)
      I8RegNum = getX86RegEncoding(MI, CurOp++);
    // do not count the rounding control operand
    if (HasEVEX_RC)
      --NumOps;
    break;
  }
  case X86II::MRMSrcReg4VOp3: {
    EmitByte(BaseOpcode, CurByte, OS);
    unsigned SrcRegNum = CurOp + 1;

    EmitRegModRMByte(MI.getOperand(SrcRegNum),
                     GetX86RegNum(MI.getOperand(CurOp)), CurByte, OS);
    CurOp = SrcRegNum + 1;
    ++CurOp; // Encoded in VEX.VVVV
    break;
  }
  case X86II::MRMSrcRegOp4: {
    EmitByte(BaseOpcode, CurByte, OS);
    unsigned SrcRegNum = CurOp + 1;

    // Skip 1st src (which is encoded in VEX_VVVV)
    ++SrcRegNum;

    // Capture 2nd src (which is encoded in Imm[7:4])
    assert(HasVEX_I8Reg && "MRMSrcRegOp4 should imply VEX_I8Reg");
    I8RegNum = getX86RegEncoding(MI, SrcRegNum++);

    EmitRegModRMByte(MI.getOperand(SrcRegNum),
                     GetX86RegNum(MI.getOperand(CurOp)), CurByte, OS);
    CurOp = SrcRegNum + 1;
    break;
  }
  case X86II::MRMSrcRegCC: {
    unsigned FirstOp = CurOp++;
    unsigned SecondOp = CurOp++;

    unsigned CC = MI.getOperand(CurOp++).getImm();
    EmitByte(BaseOpcode + CC, CurByte, OS);

    EmitRegModRMByte(MI.getOperand(SecondOp),
                     GetX86RegNum(MI.getOperand(FirstOp)), CurByte, OS);
    break;
  }
  case X86II::MRMSrcMem: {
    unsigned FirstMemOp = CurOp + 1;

    if (HasEVEX_K) // Skip writemask
      ++FirstMemOp;

    if (HasVEX_4V)
      ++FirstMemOp; // Skip the register source (which is encoded in VEX_VVVV).

    EmitByte(BaseOpcode, CurByte, OS);

    emitMemModRMByte(MI, FirstMemOp, GetX86RegNum(MI.getOperand(CurOp)),
                     TSFlags, Rex, CurByte, OS, Fixups, STI);
    CurOp = FirstMemOp + X86::AddrNumOperands;
    if (HasVEX_I8Reg)
      I8RegNum = getX86RegEncoding(MI, CurOp++);
    break;
  }
  case X86II::MRMSrcMem4VOp3: {
    unsigned FirstMemOp = CurOp + 1;

    EmitByte(BaseOpcode, CurByte, OS);

    emitMemModRMByte(MI, FirstMemOp, GetX86RegNum(MI.getOperand(CurOp)),
                     TSFlags, Rex, CurByte, OS, Fixups, STI);
    CurOp = FirstMemOp + X86::AddrNumOperands;
    ++CurOp; // Encoded in VEX.VVVV.
    break;
  }
  case X86II::MRMSrcMemOp4: {
    unsigned FirstMemOp = CurOp + 1;

    ++FirstMemOp; // Skip the register source (which is encoded in VEX_VVVV).

    // Capture second register source (encoded in Imm[7:4])
    assert(HasVEX_I8Reg && "MRMSrcRegOp4 should imply VEX_I8Reg");
    I8RegNum = getX86RegEncoding(MI, FirstMemOp++);

    EmitByte(BaseOpcode, CurByte, OS);

    emitMemModRMByte(MI, FirstMemOp, GetX86RegNum(MI.getOperand(CurOp)),
                     TSFlags, Rex, CurByte, OS, Fixups, STI);
    CurOp = FirstMemOp + X86::AddrNumOperands;
    break;
  }
  case X86II::MRMSrcMemCC: {
    unsigned RegOp = CurOp++;
    unsigned FirstMemOp = CurOp;
    CurOp = FirstMemOp + X86::AddrNumOperands;

    unsigned CC = MI.getOperand(CurOp++).getImm();
    EmitByte(BaseOpcode + CC, CurByte, OS);

    emitMemModRMByte(MI, FirstMemOp, GetX86RegNum(MI.getOperand(RegOp)),
                     TSFlags, Rex, CurByte, OS, Fixups, STI);
    break;
  }

  case X86II::MRMXrCC: {
    unsigned RegOp = CurOp++;

    unsigned CC = MI.getOperand(CurOp++).getImm();
    EmitByte(BaseOpcode + CC, CurByte, OS);
    EmitRegModRMByte(MI.getOperand(RegOp), 0, CurByte, OS);
    break;
  }

  case X86II::MRMXr:
  case X86II::MRM0r: case X86II::MRM1r:
  case X86II::MRM2r: case X86II::MRM3r:
  case X86II::MRM4r: case X86II::MRM5r:
  case X86II::MRM6r: case X86II::MRM7r:
    if (HasVEX_4V) // Skip the register dst (which is encoded in VEX_VVVV).
      ++CurOp;
    if (HasEVEX_K) // Skip writemask
      ++CurOp;
    EmitByte(BaseOpcode, CurByte, OS);
    EmitRegModRMByte(MI.getOperand(CurOp++),
                     (Form == X86II::MRMXr) ? 0 : Form - X86II::MRM0r,
                     CurByte, OS);
    break;

  case X86II::MRMXmCC: {
    unsigned FirstMemOp = CurOp;
    CurOp = FirstMemOp + X86::AddrNumOperands;

    unsigned CC = MI.getOperand(CurOp++).getImm();
    EmitByte(BaseOpcode + CC, CurByte, OS);

    emitMemModRMByte(MI, FirstMemOp, 0, TSFlags, Rex, CurByte, OS, Fixups, STI);
    break;
  }

  case X86II::MRMXm:
  case X86II::MRM0m: case X86II::MRM1m:
  case X86II::MRM2m: case X86II::MRM3m:
  case X86II::MRM4m: case X86II::MRM5m:
  case X86II::MRM6m: case X86II::MRM7m:
    if (HasVEX_4V) // Skip the register dst (which is encoded in VEX_VVVV).
      ++CurOp;
    if (HasEVEX_K) // Skip writemask
      ++CurOp;
    EmitByte(BaseOpcode, CurByte, OS);
    emitMemModRMByte(MI, CurOp,
                     (Form == X86II::MRMXm) ? 0 : Form - X86II::MRM0m, TSFlags,
                     Rex, CurByte, OS, Fixups, STI);
    CurOp += X86::AddrNumOperands;
    break;

  case X86II::MRM_C0: case X86II::MRM_C1: case X86II::MRM_C2:
  case X86II::MRM_C3: case X86II::MRM_C4: case X86II::MRM_C5:
  case X86II::MRM_C6: case X86II::MRM_C7: case X86II::MRM_C8:
  case X86II::MRM_C9: case X86II::MRM_CA: case X86II::MRM_CB:
  case X86II::MRM_CC: case X86II::MRM_CD: case X86II::MRM_CE:
  case X86II::MRM_CF: case X86II::MRM_D0: case X86II::MRM_D1:
  case X86II::MRM_D2: case X86II::MRM_D3: case X86II::MRM_D4:
  case X86II::MRM_D5: case X86II::MRM_D6: case X86II::MRM_D7:
  case X86II::MRM_D8: case X86II::MRM_D9: case X86II::MRM_DA:
  case X86II::MRM_DB: case X86II::MRM_DC: case X86II::MRM_DD:
  case X86II::MRM_DE: case X86II::MRM_DF: case X86II::MRM_E0:
  case X86II::MRM_E1: case X86II::MRM_E2: case X86II::MRM_E3:
  case X86II::MRM_E4: case X86II::MRM_E5: case X86II::MRM_E6:
  case X86II::MRM_E7: case X86II::MRM_E8: case X86II::MRM_E9:
  case X86II::MRM_EA: case X86II::MRM_EB: case X86II::MRM_EC:
  case X86II::MRM_ED: case X86II::MRM_EE: case X86II::MRM_EF:
  case X86II::MRM_F0: case X86II::MRM_F1: case X86II::MRM_F2:
  case X86II::MRM_F3: case X86II::MRM_F4: case X86II::MRM_F5:
  case X86II::MRM_F6: case X86II::MRM_F7: case X86II::MRM_F8:
  case X86II::MRM_F9: case X86II::MRM_FA: case X86II::MRM_FB:
  case X86II::MRM_FC: case X86II::MRM_FD: case X86II::MRM_FE:
  case X86II::MRM_FF:
    EmitByte(BaseOpcode, CurByte, OS);
    EmitByte(0xC0 + Form - X86II::MRM_C0, CurByte, OS);
    break;
  }
  if (HasVEX_I8Reg) {
    // The last source register of a 4 operand instruction in AVX is encoded
    // in bits[7:4] of an immediate byte.
    assert(I8RegNum < 16 && "Register encoding out of range");
    I8RegNum <<= 4;
    if (CurOp != NumOps) {
      unsigned Val = MI.getOperand(CurOp++).getImm();
      assert(Val < 16 && "Immediate operand value out of range");
      I8RegNum |= Val;
    }
    EmitImmediate(MCOperand::createImm(I8RegNum), MI.getLoc(), 1, FK_Data_1,
                  CurByte, OS, Fixups);
  } else {
    // If there is a remaining operand, it must be a trailing immediate. Emit it
    // according to the right size for the instruction. Some instructions
    // (SSE4a extrq and insertq) have two trailing immediates.
    while (CurOp != NumOps && NumOps - CurOp <= 2) {
      EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
                    X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
                    CurByte, OS, Fixups);
    }
  }

  if ((TSFlags & X86II::OpMapMask) == X86II::ThreeDNow)
    EmitByte(X86II::getBaseOpcodeFor(TSFlags), CurByte, OS);

#ifndef NDEBUG
  // FIXME: Verify.
  if (/*!Desc.isVariadic() &&*/ CurOp != NumOps) {
    errs() << "Cannot encode all operands of: ";
    MI.dump();
    errs() << '\n';
    abort();
  }
#endif
}

MCCodeEmitter *llvm::createX86MCCodeEmitter(const MCInstrInfo &MCII,
                                            const MCRegisterInfo &MRI,
                                            MCContext &Ctx) {
  return new X86MCCodeEmitter(MCII, Ctx);
}