1 //===-- X86AsmBackend.cpp - X86 Assembler Backend -------------------------===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 #include "MCTargetDesc/X86BaseInfo.h"
10 #include "MCTargetDesc/X86FixupKinds.h"
11 #include "llvm/ADT/StringSwitch.h"
12 #include "llvm/BinaryFormat/ELF.h"
13 #include "llvm/BinaryFormat/MachO.h"
14 #include "llvm/MC/MCAsmBackend.h"
15 #include "llvm/MC/MCELFObjectWriter.h"
16 #include "llvm/MC/MCExpr.h"
17 #include "llvm/MC/MCFixupKindInfo.h"
18 #include "llvm/MC/MCInst.h"
19 #include "llvm/MC/MCMachObjectWriter.h"
20 #include "llvm/MC/MCObjectWriter.h"
21 #include "llvm/MC/MCRegisterInfo.h"
22 #include "llvm/MC/MCSectionMachO.h"
23 #include "llvm/MC/MCSubtargetInfo.h"
24 #include "llvm/Support/ErrorHandling.h"
25 #include "llvm/Support/raw_ostream.h"
28 static unsigned getFixupKindLog2Size(unsigned Kind
) {
31 llvm_unreachable("invalid fixup kind!");
41 case X86::reloc_riprel_4byte
:
42 case X86::reloc_riprel_4byte_relax
:
43 case X86::reloc_riprel_4byte_relax_rex
:
44 case X86::reloc_riprel_4byte_movq_load
:
45 case X86::reloc_signed_4byte
:
46 case X86::reloc_signed_4byte_relax
:
47 case X86::reloc_global_offset_table
:
48 case X86::reloc_branch_4byte_pcrel
:
55 case X86::reloc_global_offset_table8
:
62 class X86ELFObjectWriter
: public MCELFObjectTargetWriter
{
64 X86ELFObjectWriter(bool is64Bit
, uint8_t OSABI
, uint16_t EMachine
,
65 bool HasRelocationAddend
, bool foobar
)
66 : MCELFObjectTargetWriter(is64Bit
, OSABI
, EMachine
, HasRelocationAddend
) {}
69 class X86AsmBackend
: public MCAsmBackend
{
70 const MCSubtargetInfo
&STI
;
72 X86AsmBackend(const Target
&T
, const MCSubtargetInfo
&STI
)
73 : MCAsmBackend(support::little
), STI(STI
) {}
75 unsigned getNumFixupKinds() const override
{
76 return X86::NumTargetFixupKinds
;
79 const MCFixupKindInfo
&getFixupKindInfo(MCFixupKind Kind
) const override
{
80 const static MCFixupKindInfo Infos
[X86::NumTargetFixupKinds
] = {
81 {"reloc_riprel_4byte", 0, 32, MCFixupKindInfo::FKF_IsPCRel
},
82 {"reloc_riprel_4byte_movq_load", 0, 32, MCFixupKindInfo::FKF_IsPCRel
},
83 {"reloc_riprel_4byte_relax", 0, 32, MCFixupKindInfo::FKF_IsPCRel
},
84 {"reloc_riprel_4byte_relax_rex", 0, 32, MCFixupKindInfo::FKF_IsPCRel
},
85 {"reloc_signed_4byte", 0, 32, 0},
86 {"reloc_signed_4byte_relax", 0, 32, 0},
87 {"reloc_global_offset_table", 0, 32, 0},
88 {"reloc_global_offset_table8", 0, 64, 0},
89 {"reloc_branch_4byte_pcrel", 0, 32, MCFixupKindInfo::FKF_IsPCRel
},
92 if (Kind
< FirstTargetFixupKind
)
93 return MCAsmBackend::getFixupKindInfo(Kind
);
95 assert(unsigned(Kind
- FirstTargetFixupKind
) < getNumFixupKinds() &&
97 assert(Infos
[Kind
- FirstTargetFixupKind
].Name
&& "Empty fixup name!");
98 return Infos
[Kind
- FirstTargetFixupKind
];
101 void applyFixup(const MCAssembler
&Asm
, const MCFixup
&Fixup
,
102 const MCValue
&Target
, MutableArrayRef
<char> Data
,
103 uint64_t Value
, bool IsResolved
,
104 const MCSubtargetInfo
*STI
) const override
{
105 unsigned Size
= 1 << getFixupKindLog2Size(Fixup
.getKind());
107 assert(Fixup
.getOffset() + Size
<= Data
.size() && "Invalid fixup offset!");
109 // Check that uppper bits are either all zeros or all ones.
110 // Specifically ignore overflow/underflow as long as the leakage is
111 // limited to the lower bits. This is to remain compatible with
113 assert(isIntN(Size
* 8 + 1, Value
) &&
114 "Value does not fit in the Fixup field");
116 for (unsigned i
= 0; i
!= Size
; ++i
)
117 Data
[Fixup
.getOffset() + i
] = uint8_t(Value
>> (i
* 8));
120 bool mayNeedRelaxation(const MCInst
&Inst
,
121 const MCSubtargetInfo
&STI
) const override
;
123 bool fixupNeedsRelaxation(const MCFixup
&Fixup
, uint64_t Value
,
124 const MCRelaxableFragment
*DF
,
125 const MCAsmLayout
&Layout
) const override
;
127 void relaxInstruction(const MCInst
&Inst
, const MCSubtargetInfo
&STI
,
128 MCInst
&Res
) const override
;
130 bool writeNopData(raw_ostream
&OS
, uint64_t Count
) const override
;
132 } // end anonymous namespace
134 static unsigned getRelaxedOpcodeBranch(const MCInst
&Inst
, bool is16BitMode
) {
135 unsigned Op
= Inst
.getOpcode();
140 return (is16BitMode
) ? X86::JAE_2
: X86::JAE_4
;
142 return (is16BitMode
) ? X86::JA_2
: X86::JA_4
;
144 return (is16BitMode
) ? X86::JBE_2
: X86::JBE_4
;
146 return (is16BitMode
) ? X86::JB_2
: X86::JB_4
;
148 return (is16BitMode
) ? X86::JE_2
: X86::JE_4
;
150 return (is16BitMode
) ? X86::JGE_2
: X86::JGE_4
;
152 return (is16BitMode
) ? X86::JG_2
: X86::JG_4
;
154 return (is16BitMode
) ? X86::JLE_2
: X86::JLE_4
;
156 return (is16BitMode
) ? X86::JL_2
: X86::JL_4
;
158 return (is16BitMode
) ? X86::JMP_2
: X86::JMP_4
;
160 return (is16BitMode
) ? X86::JNE_2
: X86::JNE_4
;
162 return (is16BitMode
) ? X86::JNO_2
: X86::JNO_4
;
164 return (is16BitMode
) ? X86::JNP_2
: X86::JNP_4
;
166 return (is16BitMode
) ? X86::JNS_2
: X86::JNS_4
;
168 return (is16BitMode
) ? X86::JO_2
: X86::JO_4
;
170 return (is16BitMode
) ? X86::JP_2
: X86::JP_4
;
172 return (is16BitMode
) ? X86::JS_2
: X86::JS_4
;
176 static unsigned getRelaxedOpcodeArith(const MCInst
&Inst
) {
177 unsigned Op
= Inst
.getOpcode();
183 case X86::IMUL16rri8
: return X86::IMUL16rri
;
184 case X86::IMUL16rmi8
: return X86::IMUL16rmi
;
185 case X86::IMUL32rri8
: return X86::IMUL32rri
;
186 case X86::IMUL32rmi8
: return X86::IMUL32rmi
;
187 case X86::IMUL64rri8
: return X86::IMUL64rri32
;
188 case X86::IMUL64rmi8
: return X86::IMUL64rmi32
;
191 case X86::AND16ri8
: return X86::AND16ri
;
192 case X86::AND16mi8
: return X86::AND16mi
;
193 case X86::AND32ri8
: return X86::AND32ri
;
194 case X86::AND32mi8
: return X86::AND32mi
;
195 case X86::AND64ri8
: return X86::AND64ri32
;
196 case X86::AND64mi8
: return X86::AND64mi32
;
199 case X86::OR16ri8
: return X86::OR16ri
;
200 case X86::OR16mi8
: return X86::OR16mi
;
201 case X86::OR32ri8
: return X86::OR32ri
;
202 case X86::OR32mi8
: return X86::OR32mi
;
203 case X86::OR64ri8
: return X86::OR64ri32
;
204 case X86::OR64mi8
: return X86::OR64mi32
;
207 case X86::XOR16ri8
: return X86::XOR16ri
;
208 case X86::XOR16mi8
: return X86::XOR16mi
;
209 case X86::XOR32ri8
: return X86::XOR32ri
;
210 case X86::XOR32mi8
: return X86::XOR32mi
;
211 case X86::XOR64ri8
: return X86::XOR64ri32
;
212 case X86::XOR64mi8
: return X86::XOR64mi32
;
215 case X86::ADD16ri8
: return X86::ADD16ri
;
216 case X86::ADD16mi8
: return X86::ADD16mi
;
217 case X86::ADD32ri8
: return X86::ADD32ri
;
218 case X86::ADD32mi8
: return X86::ADD32mi
;
219 case X86::ADD64ri8
: return X86::ADD64ri32
;
220 case X86::ADD64mi8
: return X86::ADD64mi32
;
223 case X86::ADC16ri8
: return X86::ADC16ri
;
224 case X86::ADC16mi8
: return X86::ADC16mi
;
225 case X86::ADC32ri8
: return X86::ADC32ri
;
226 case X86::ADC32mi8
: return X86::ADC32mi
;
227 case X86::ADC64ri8
: return X86::ADC64ri32
;
228 case X86::ADC64mi8
: return X86::ADC64mi32
;
231 case X86::SUB16ri8
: return X86::SUB16ri
;
232 case X86::SUB16mi8
: return X86::SUB16mi
;
233 case X86::SUB32ri8
: return X86::SUB32ri
;
234 case X86::SUB32mi8
: return X86::SUB32mi
;
235 case X86::SUB64ri8
: return X86::SUB64ri32
;
236 case X86::SUB64mi8
: return X86::SUB64mi32
;
239 case X86::SBB16ri8
: return X86::SBB16ri
;
240 case X86::SBB16mi8
: return X86::SBB16mi
;
241 case X86::SBB32ri8
: return X86::SBB32ri
;
242 case X86::SBB32mi8
: return X86::SBB32mi
;
243 case X86::SBB64ri8
: return X86::SBB64ri32
;
244 case X86::SBB64mi8
: return X86::SBB64mi32
;
247 case X86::CMP16ri8
: return X86::CMP16ri
;
248 case X86::CMP16mi8
: return X86::CMP16mi
;
249 case X86::CMP32ri8
: return X86::CMP32ri
;
250 case X86::CMP32mi8
: return X86::CMP32mi
;
251 case X86::CMP64ri8
: return X86::CMP64ri32
;
252 case X86::CMP64mi8
: return X86::CMP64mi32
;
255 case X86::PUSH32i8
: return X86::PUSHi32
;
256 case X86::PUSH16i8
: return X86::PUSHi16
;
257 case X86::PUSH64i8
: return X86::PUSH64i32
;
261 static unsigned getRelaxedOpcode(const MCInst
&Inst
, bool is16BitMode
) {
262 unsigned R
= getRelaxedOpcodeArith(Inst
);
263 if (R
!= Inst
.getOpcode())
265 return getRelaxedOpcodeBranch(Inst
, is16BitMode
);
268 bool X86AsmBackend::mayNeedRelaxation(const MCInst
&Inst
,
269 const MCSubtargetInfo
&STI
) const {
270 // Branches can always be relaxed in either mode.
271 if (getRelaxedOpcodeBranch(Inst
, false) != Inst
.getOpcode())
274 // Check if this instruction is ever relaxable.
275 if (getRelaxedOpcodeArith(Inst
) == Inst
.getOpcode())
279 // Check if the relaxable operand has an expression. For the current set of
280 // relaxable instructions, the relaxable operand is always the last operand.
281 unsigned RelaxableOp
= Inst
.getNumOperands() - 1;
282 if (Inst
.getOperand(RelaxableOp
).isExpr())
288 bool X86AsmBackend::fixupNeedsRelaxation(const MCFixup
&Fixup
,
290 const MCRelaxableFragment
*DF
,
291 const MCAsmLayout
&Layout
) const {
292 // Relax if the value is too big for a (signed) i8.
293 return int64_t(Value
) != int64_t(int8_t(Value
));
296 // FIXME: Can tblgen help at all here to verify there aren't other instructions
298 void X86AsmBackend::relaxInstruction(const MCInst
&Inst
,
299 const MCSubtargetInfo
&STI
,
301 // The only relaxations X86 does is from a 1byte pcrel to a 4byte pcrel.
302 bool is16BitMode
= STI
.getFeatureBits()[X86::Mode16Bit
];
303 unsigned RelaxedOp
= getRelaxedOpcode(Inst
, is16BitMode
);
305 if (RelaxedOp
== Inst
.getOpcode()) {
306 SmallString
<256> Tmp
;
307 raw_svector_ostream
OS(Tmp
);
308 Inst
.dump_pretty(OS
);
310 report_fatal_error("unexpected instruction to relax: " + OS
.str());
314 Res
.setOpcode(RelaxedOp
);
317 /// Write a sequence of optimal nops to the output, covering \p Count
319 /// \return - true on success, false on failure
320 bool X86AsmBackend::writeNopData(raw_ostream
&OS
, uint64_t Count
) const {
321 static const char Nops
[10][11] = {
330 // nopl 0(%[re]ax,%[re]ax,1)
331 "\x0f\x1f\x44\x00\x00",
332 // nopw 0(%[re]ax,%[re]ax,1)
333 "\x66\x0f\x1f\x44\x00\x00",
335 "\x0f\x1f\x80\x00\x00\x00\x00",
336 // nopl 0L(%[re]ax,%[re]ax,1)
337 "\x0f\x1f\x84\x00\x00\x00\x00\x00",
338 // nopw 0L(%[re]ax,%[re]ax,1)
339 "\x66\x0f\x1f\x84\x00\x00\x00\x00\x00",
340 // nopw %cs:0L(%[re]ax,%[re]ax,1)
341 "\x66\x2e\x0f\x1f\x84\x00\x00\x00\x00\x00",
344 // This CPU doesn't support long nops. If needed add more.
345 // FIXME: We could generated something better than plain 0x90.
346 if (!STI
.getFeatureBits()[X86::FeatureNOPL
]) {
347 for (uint64_t i
= 0; i
< Count
; ++i
)
352 // 15-bytes is the longest single NOP instruction, but 10-bytes is
353 // commonly the longest that can be efficiently decoded.
354 uint64_t MaxNopLength
= 10;
355 if (STI
.getFeatureBits()[X86::ProcIntelSLM
])
357 else if (STI
.getFeatureBits()[X86::FeatureFast15ByteNOP
])
359 else if (STI
.getFeatureBits()[X86::FeatureFast11ByteNOP
])
362 // Emit as many MaxNopLength NOPs as needed, then emit a NOP of the remaining
365 const uint8_t ThisNopLength
= (uint8_t) std::min(Count
, MaxNopLength
);
366 const uint8_t Prefixes
= ThisNopLength
<= 10 ? 0 : ThisNopLength
- 10;
367 for (uint8_t i
= 0; i
< Prefixes
; i
++)
369 const uint8_t Rest
= ThisNopLength
- Prefixes
;
371 OS
.write(Nops
[Rest
- 1], Rest
);
372 Count
-= ThisNopLength
;
373 } while (Count
!= 0);
382 class ELFX86AsmBackend
: public X86AsmBackend
{
385 ELFX86AsmBackend(const Target
&T
, uint8_t OSABI
, const MCSubtargetInfo
&STI
)
386 : X86AsmBackend(T
, STI
), OSABI(OSABI
) {}
389 class ELFX86_32AsmBackend
: public ELFX86AsmBackend
{
391 ELFX86_32AsmBackend(const Target
&T
, uint8_t OSABI
,
392 const MCSubtargetInfo
&STI
)
393 : ELFX86AsmBackend(T
, OSABI
, STI
) {}
395 std::unique_ptr
<MCObjectTargetWriter
>
396 createObjectTargetWriter() const override
{
397 return createX86ELFObjectWriter(/*IsELF64*/ false, OSABI
, ELF::EM_386
);
401 class ELFX86_X32AsmBackend
: public ELFX86AsmBackend
{
403 ELFX86_X32AsmBackend(const Target
&T
, uint8_t OSABI
,
404 const MCSubtargetInfo
&STI
)
405 : ELFX86AsmBackend(T
, OSABI
, STI
) {}
407 std::unique_ptr
<MCObjectTargetWriter
>
408 createObjectTargetWriter() const override
{
409 return createX86ELFObjectWriter(/*IsELF64*/ false, OSABI
,
414 class ELFX86_IAMCUAsmBackend
: public ELFX86AsmBackend
{
416 ELFX86_IAMCUAsmBackend(const Target
&T
, uint8_t OSABI
,
417 const MCSubtargetInfo
&STI
)
418 : ELFX86AsmBackend(T
, OSABI
, STI
) {}
420 std::unique_ptr
<MCObjectTargetWriter
>
421 createObjectTargetWriter() const override
{
422 return createX86ELFObjectWriter(/*IsELF64*/ false, OSABI
,
427 class ELFX86_64AsmBackend
: public ELFX86AsmBackend
{
429 ELFX86_64AsmBackend(const Target
&T
, uint8_t OSABI
,
430 const MCSubtargetInfo
&STI
)
431 : ELFX86AsmBackend(T
, OSABI
, STI
) {}
433 std::unique_ptr
<MCObjectTargetWriter
>
434 createObjectTargetWriter() const override
{
435 return createX86ELFObjectWriter(/*IsELF64*/ true, OSABI
, ELF::EM_X86_64
);
439 class WindowsX86AsmBackend
: public X86AsmBackend
{
443 WindowsX86AsmBackend(const Target
&T
, bool is64Bit
,
444 const MCSubtargetInfo
&STI
)
445 : X86AsmBackend(T
, STI
)
449 Optional
<MCFixupKind
> getFixupKind(StringRef Name
) const override
{
450 return StringSwitch
<Optional
<MCFixupKind
>>(Name
)
451 .Case("dir32", FK_Data_4
)
452 .Case("secrel32", FK_SecRel_4
)
453 .Case("secidx", FK_SecRel_2
)
454 .Default(MCAsmBackend::getFixupKind(Name
));
457 std::unique_ptr
<MCObjectTargetWriter
>
458 createObjectTargetWriter() const override
{
459 return createX86WinCOFFObjectWriter(Is64Bit
);
namespace CU {

/// Compact unwind encoding values.
enum CompactUnwindEncodings {
  /// [RE]BP based frame where [RE]BP is pushed on the stack immediately after
  /// the return address, then [RE]SP is moved to [RE]BP.
  UNWIND_MODE_BP_FRAME                   = 0x01000000,

  /// A frameless function with a small constant stack size.
  UNWIND_MODE_STACK_IMMD                 = 0x02000000,

  /// A frameless function with a large constant stack size.
  UNWIND_MODE_STACK_IND                  = 0x03000000,

  /// No compact unwind encoding is available.
  UNWIND_MODE_DWARF                      = 0x04000000,

  /// Mask for encoding the frame registers.
  UNWIND_BP_FRAME_REGISTERS              = 0x00007FFF,

  /// Mask for encoding the frameless registers.
  UNWIND_FRAMELESS_STACK_REG_PERMUTATION = 0x000003FF
};

} // end CU namespace
489 class DarwinX86AsmBackend
: public X86AsmBackend
{
490 const MCRegisterInfo
&MRI
;
492 /// Number of registers that can be saved in a compact unwind encoding.
493 enum { CU_NUM_SAVED_REGS
= 6 };
495 mutable unsigned SavedRegs
[CU_NUM_SAVED_REGS
];
498 unsigned OffsetSize
; ///< Offset of a "push" instruction.
499 unsigned MoveInstrSize
; ///< Size of a "move" instruction.
500 unsigned StackDivide
; ///< Amount to adjust stack size by.
502 /// Size of a "push" instruction for the given register.
503 unsigned PushInstrSize(unsigned Reg
) const {
523 /// Implementation of algorithm to generate the compact unwind encoding
524 /// for the CFI instructions.
526 generateCompactUnwindEncodingImpl(ArrayRef
<MCCFIInstruction
> Instrs
) const {
527 if (Instrs
.empty()) return 0;
529 // Reset the saved registers.
530 unsigned SavedRegIdx
= 0;
531 memset(SavedRegs
, 0, sizeof(SavedRegs
));
535 // Encode that we are using EBP/RBP as the frame pointer.
536 uint32_t CompactUnwindEncoding
= 0;
538 unsigned SubtractInstrIdx
= Is64Bit
? 3 : 2;
539 unsigned InstrOffset
= 0;
540 unsigned StackAdjust
= 0;
541 unsigned StackSize
= 0;
542 unsigned NumDefCFAOffsets
= 0;
544 for (unsigned i
= 0, e
= Instrs
.size(); i
!= e
; ++i
) {
545 const MCCFIInstruction
&Inst
= Instrs
[i
];
547 switch (Inst
.getOperation()) {
549 // Any other CFI directives indicate a frame that we aren't prepared
550 // to represent via compact unwind, so just bail out.
552 case MCCFIInstruction::OpDefCfaRegister
: {
553 // Defines a frame pointer. E.g.
557 // .cfi_def_cfa_register %rbp
561 // If the frame pointer is other than esp/rsp, we do not have a way to
562 // generate a compact unwinding representation, so bail out.
563 if (MRI
.getLLVMRegNum(Inst
.getRegister(), true) !=
564 (Is64Bit
? X86::RBP
: X86::EBP
))
568 memset(SavedRegs
, 0, sizeof(SavedRegs
));
571 InstrOffset
+= MoveInstrSize
;
574 case MCCFIInstruction::OpDefCfaOffset
: {
575 // Defines a new offset for the CFA. E.g.
581 // .cfi_def_cfa_offset 16
587 // .cfi_def_cfa_offset 80
589 StackSize
= std::abs(Inst
.getOffset()) / StackDivide
;
593 case MCCFIInstruction::OpOffset
: {
594 // Defines a "push" of a callee-saved register. E.g.
602 // .cfi_offset %rbx, -40
603 // .cfi_offset %r14, -32
604 // .cfi_offset %r15, -24
606 if (SavedRegIdx
== CU_NUM_SAVED_REGS
)
607 // If there are too many saved registers, we cannot use a compact
609 return CU::UNWIND_MODE_DWARF
;
611 unsigned Reg
= MRI
.getLLVMRegNum(Inst
.getRegister(), true);
612 SavedRegs
[SavedRegIdx
++] = Reg
;
613 StackAdjust
+= OffsetSize
;
614 InstrOffset
+= PushInstrSize(Reg
);
620 StackAdjust
/= StackDivide
;
623 if ((StackAdjust
& 0xFF) != StackAdjust
)
624 // Offset was too big for a compact unwind encoding.
625 return CU::UNWIND_MODE_DWARF
;
627 // Get the encoding of the saved registers when we have a frame pointer.
628 uint32_t RegEnc
= encodeCompactUnwindRegistersWithFrame();
629 if (RegEnc
== ~0U) return CU::UNWIND_MODE_DWARF
;
631 CompactUnwindEncoding
|= CU::UNWIND_MODE_BP_FRAME
;
632 CompactUnwindEncoding
|= (StackAdjust
& 0xFF) << 16;
633 CompactUnwindEncoding
|= RegEnc
& CU::UNWIND_BP_FRAME_REGISTERS
;
635 SubtractInstrIdx
+= InstrOffset
;
638 if ((StackSize
& 0xFF) == StackSize
) {
639 // Frameless stack with a small stack size.
640 CompactUnwindEncoding
|= CU::UNWIND_MODE_STACK_IMMD
;
642 // Encode the stack size.
643 CompactUnwindEncoding
|= (StackSize
& 0xFF) << 16;
645 if ((StackAdjust
& 0x7) != StackAdjust
)
646 // The extra stack adjustments are too big for us to handle.
647 return CU::UNWIND_MODE_DWARF
;
649 // Frameless stack with an offset too large for us to encode compactly.
650 CompactUnwindEncoding
|= CU::UNWIND_MODE_STACK_IND
;
652 // Encode the offset to the nnnnnn value in the 'subl $nnnnnn, ESP'
654 CompactUnwindEncoding
|= (SubtractInstrIdx
& 0xFF) << 16;
656 // Encode any extra stack adjustments (done via push instructions).
657 CompactUnwindEncoding
|= (StackAdjust
& 0x7) << 13;
660 // Encode the number of registers saved. (Reverse the list first.)
661 std::reverse(&SavedRegs
[0], &SavedRegs
[SavedRegIdx
]);
662 CompactUnwindEncoding
|= (SavedRegIdx
& 0x7) << 10;
664 // Get the encoding of the saved registers when we don't have a frame
666 uint32_t RegEnc
= encodeCompactUnwindRegistersWithoutFrame(SavedRegIdx
);
667 if (RegEnc
== ~0U) return CU::UNWIND_MODE_DWARF
;
669 // Encode the register encoding.
670 CompactUnwindEncoding
|=
671 RegEnc
& CU::UNWIND_FRAMELESS_STACK_REG_PERMUTATION
;
674 return CompactUnwindEncoding
;
678 /// Get the compact unwind number for a given register. The number
679 /// corresponds to the enum lists in compact_unwind_encoding.h.
680 int getCompactUnwindRegNum(unsigned Reg
) const {
681 static const MCPhysReg CU32BitRegs
[7] = {
682 X86::EBX
, X86::ECX
, X86::EDX
, X86::EDI
, X86::ESI
, X86::EBP
, 0
684 static const MCPhysReg CU64BitRegs
[] = {
685 X86::RBX
, X86::R12
, X86::R13
, X86::R14
, X86::R15
, X86::RBP
, 0
687 const MCPhysReg
*CURegs
= Is64Bit
? CU64BitRegs
: CU32BitRegs
;
688 for (int Idx
= 1; *CURegs
; ++CURegs
, ++Idx
)
695 /// Return the registers encoded for a compact encoding with a frame
697 uint32_t encodeCompactUnwindRegistersWithFrame() const {
698 // Encode the registers in the order they were saved --- 3-bits per
699 // register. The list of saved registers is assumed to be in reverse
700 // order. The registers are numbered from 1 to CU_NUM_SAVED_REGS.
702 for (int i
= 0, Idx
= 0; i
!= CU_NUM_SAVED_REGS
; ++i
) {
703 unsigned Reg
= SavedRegs
[i
];
706 int CURegNum
= getCompactUnwindRegNum(Reg
);
707 if (CURegNum
== -1) return ~0U;
709 // Encode the 3-bit register number in order, skipping over 3-bits for
711 RegEnc
|= (CURegNum
& 0x7) << (Idx
++ * 3);
714 assert((RegEnc
& 0x3FFFF) == RegEnc
&&
715 "Invalid compact register encoding!");
719 /// Create the permutation encoding used with frameless stacks. It is
720 /// passed the number of registers to be saved and an array of the registers
722 uint32_t encodeCompactUnwindRegistersWithoutFrame(unsigned RegCount
) const {
723 // The saved registers are numbered from 1 to 6. In order to encode the
724 // order in which they were saved, we re-number them according to their
725 // place in the register order. The re-numbering is relative to the last
726 // re-numbered register. E.g., if we have registers {6, 2, 4, 5} saved in
736 for (unsigned i
= 0; i
< RegCount
; ++i
) {
737 int CUReg
= getCompactUnwindRegNum(SavedRegs
[i
]);
738 if (CUReg
== -1) return ~0U;
739 SavedRegs
[i
] = CUReg
;
743 std::reverse(&SavedRegs
[0], &SavedRegs
[CU_NUM_SAVED_REGS
]);
745 uint32_t RenumRegs
[CU_NUM_SAVED_REGS
];
746 for (unsigned i
= CU_NUM_SAVED_REGS
- RegCount
; i
< CU_NUM_SAVED_REGS
; ++i
){
747 unsigned Countless
= 0;
748 for (unsigned j
= CU_NUM_SAVED_REGS
- RegCount
; j
< i
; ++j
)
749 if (SavedRegs
[j
] < SavedRegs
[i
])
752 RenumRegs
[i
] = SavedRegs
[i
] - Countless
- 1;
755 // Take the renumbered values and encode them into a 10-bit number.
756 uint32_t permutationEncoding
= 0;
759 permutationEncoding
|= 120 * RenumRegs
[0] + 24 * RenumRegs
[1]
760 + 6 * RenumRegs
[2] + 2 * RenumRegs
[3]
764 permutationEncoding
|= 120 * RenumRegs
[1] + 24 * RenumRegs
[2]
765 + 6 * RenumRegs
[3] + 2 * RenumRegs
[4]
769 permutationEncoding
|= 60 * RenumRegs
[2] + 12 * RenumRegs
[3]
770 + 3 * RenumRegs
[4] + RenumRegs
[5];
773 permutationEncoding
|= 20 * RenumRegs
[3] + 4 * RenumRegs
[4]
777 permutationEncoding
|= 5 * RenumRegs
[4] + RenumRegs
[5];
780 permutationEncoding
|= RenumRegs
[5];
784 assert((permutationEncoding
& 0x3FF) == permutationEncoding
&&
785 "Invalid compact register encoding!");
786 return permutationEncoding
;
790 DarwinX86AsmBackend(const Target
&T
, const MCRegisterInfo
&MRI
,
791 const MCSubtargetInfo
&STI
, bool Is64Bit
)
792 : X86AsmBackend(T
, STI
), MRI(MRI
), Is64Bit(Is64Bit
) {
793 memset(SavedRegs
, 0, sizeof(SavedRegs
));
794 OffsetSize
= Is64Bit
? 8 : 4;
795 MoveInstrSize
= Is64Bit
? 3 : 2;
796 StackDivide
= Is64Bit
? 8 : 4;
800 class DarwinX86_32AsmBackend
: public DarwinX86AsmBackend
{
802 DarwinX86_32AsmBackend(const Target
&T
, const MCRegisterInfo
&MRI
,
803 const MCSubtargetInfo
&STI
)
804 : DarwinX86AsmBackend(T
, MRI
, STI
, false) {}
806 std::unique_ptr
<MCObjectTargetWriter
>
807 createObjectTargetWriter() const override
{
808 return createX86MachObjectWriter(/*Is64Bit=*/false,
809 MachO::CPU_TYPE_I386
,
810 MachO::CPU_SUBTYPE_I386_ALL
);
813 /// Generate the compact unwind encoding for the CFI instructions.
814 uint32_t generateCompactUnwindEncoding(
815 ArrayRef
<MCCFIInstruction
> Instrs
) const override
{
816 return generateCompactUnwindEncodingImpl(Instrs
);
820 class DarwinX86_64AsmBackend
: public DarwinX86AsmBackend
{
821 const MachO::CPUSubTypeX86 Subtype
;
823 DarwinX86_64AsmBackend(const Target
&T
, const MCRegisterInfo
&MRI
,
824 const MCSubtargetInfo
&STI
, MachO::CPUSubTypeX86 st
)
825 : DarwinX86AsmBackend(T
, MRI
, STI
, true), Subtype(st
) {}
827 std::unique_ptr
<MCObjectTargetWriter
>
828 createObjectTargetWriter() const override
{
829 return createX86MachObjectWriter(/*Is64Bit=*/true, MachO::CPU_TYPE_X86_64
,
833 /// Generate the compact unwind encoding for the CFI instructions.
834 uint32_t generateCompactUnwindEncoding(
835 ArrayRef
<MCCFIInstruction
> Instrs
) const override
{
836 return generateCompactUnwindEncodingImpl(Instrs
);
840 } // end anonymous namespace
842 MCAsmBackend
*llvm::createX86_32AsmBackend(const Target
&T
,
843 const MCSubtargetInfo
&STI
,
844 const MCRegisterInfo
&MRI
,
845 const MCTargetOptions
&Options
) {
846 const Triple
&TheTriple
= STI
.getTargetTriple();
847 if (TheTriple
.isOSBinFormatMachO())
848 return new DarwinX86_32AsmBackend(T
, MRI
, STI
);
850 if (TheTriple
.isOSWindows() && TheTriple
.isOSBinFormatCOFF())
851 return new WindowsX86AsmBackend(T
, false, STI
);
853 uint8_t OSABI
= MCELFObjectTargetWriter::getOSABI(TheTriple
.getOS());
855 if (TheTriple
.isOSIAMCU())
856 return new ELFX86_IAMCUAsmBackend(T
, OSABI
, STI
);
858 return new ELFX86_32AsmBackend(T
, OSABI
, STI
);
861 MCAsmBackend
*llvm::createX86_64AsmBackend(const Target
&T
,
862 const MCSubtargetInfo
&STI
,
863 const MCRegisterInfo
&MRI
,
864 const MCTargetOptions
&Options
) {
865 const Triple
&TheTriple
= STI
.getTargetTriple();
866 if (TheTriple
.isOSBinFormatMachO()) {
867 MachO::CPUSubTypeX86 CS
=
868 StringSwitch
<MachO::CPUSubTypeX86
>(TheTriple
.getArchName())
869 .Case("x86_64h", MachO::CPU_SUBTYPE_X86_64_H
)
870 .Default(MachO::CPU_SUBTYPE_X86_64_ALL
);
871 return new DarwinX86_64AsmBackend(T
, MRI
, STI
, CS
);
874 if (TheTriple
.isOSWindows() && TheTriple
.isOSBinFormatCOFF())
875 return new WindowsX86AsmBackend(T
, true, STI
);
877 uint8_t OSABI
= MCELFObjectTargetWriter::getOSABI(TheTriple
.getOS());
879 if (TheTriple
.getEnvironment() == Triple::GNUX32
)
880 return new ELFX86_X32AsmBackend(T
, OSABI
, STI
);
881 return new ELFX86_64AsmBackend(T
, OSABI
, STI
);