//===-- X86AsmBackend.cpp - X86 Assembler Backend -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "MCTargetDesc/X86BaseInfo.h"
#include "MCTargetDesc/X86FixupKinds.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCMachObjectWriter.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
static unsigned getFixupKindSize(unsigned Kind) {
  switch (Kind) {
  default:
    llvm_unreachable("invalid fixup kind!");
  case X86::reloc_riprel_4byte:
  case X86::reloc_riprel_4byte_relax:
  case X86::reloc_riprel_4byte_relax_rex:
  case X86::reloc_riprel_4byte_movq_load:
  case X86::reloc_signed_4byte:
  case X86::reloc_signed_4byte_relax:
  case X86::reloc_global_offset_table:
  case X86::reloc_branch_4byte_pcrel:
    return 4;
  case X86::reloc_global_offset_table8:
    return 8;
  }
}
namespace {

class X86ELFObjectWriter : public MCELFObjectTargetWriter {
public:
  X86ELFObjectWriter(bool is64Bit, uint8_t OSABI, uint16_t EMachine,
                     bool HasRelocationAddend)
      : MCELFObjectTargetWriter(is64Bit, OSABI, EMachine,
                                HasRelocationAddend) {}
};
class X86AsmBackend : public MCAsmBackend {
  const MCSubtargetInfo &STI;

public:
  X86AsmBackend(const Target &T, const MCSubtargetInfo &STI)
      : MCAsmBackend(support::little), STI(STI) {}

  unsigned getNumFixupKinds() const override {
    return X86::NumTargetFixupKinds;
  }

  Optional<MCFixupKind> getFixupKind(StringRef Name) const override;
  const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override {
    const static MCFixupKindInfo Infos[X86::NumTargetFixupKinds] = {
        {"reloc_riprel_4byte", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
        {"reloc_riprel_4byte_movq_load", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
        {"reloc_riprel_4byte_relax", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
        {"reloc_riprel_4byte_relax_rex", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
        {"reloc_signed_4byte", 0, 32, 0},
        {"reloc_signed_4byte_relax", 0, 32, 0},
        {"reloc_global_offset_table", 0, 32, 0},
        {"reloc_global_offset_table8", 0, 64, 0},
        {"reloc_branch_4byte_pcrel", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
    };
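    // Note on the table above: each MCFixupKindInfo entry is {Name,
    // TargetOffset (in bits), TargetSize (in bits), Flags}; every X86 target
    // fixup starts at bit 0 and is 32 or 64 bits wide, with FKF_IsPCRel
    // marking the PC-relative kinds.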
    if (Kind < FirstTargetFixupKind)
      return MCAsmBackend::getFixupKindInfo(Kind);

    assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
           "Invalid kind!");
    assert(Infos[Kind - FirstTargetFixupKind].Name && "Empty fixup name!");
    return Infos[Kind - FirstTargetFixupKind];
  }
  bool shouldForceRelocation(const MCAssembler &Asm, const MCFixup &Fixup,
                             const MCValue &Target) override;
  void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                  const MCValue &Target, MutableArrayRef<char> Data,
                  uint64_t Value, bool IsResolved,
                  const MCSubtargetInfo *STI) const override {
    unsigned Size = getFixupKindSize(Fixup.getKind());

    assert(Fixup.getOffset() + Size <= Data.size() && "Invalid fixup offset!");

    // Check that upper bits are either all zeros or all ones.
    // Specifically ignore overflow/underflow as long as the leakage is
    // limited to the lower bits. This is to remain compatible with
    // other assemblers.
    assert((Size == 0 || isIntN(Size * 8 + 1, Value)) &&
           "Value does not fit in the Fixup field");
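    // Illustrative check of the assertion above (example values, not from the
    // original source): for a 1-byte fixup, isIntN(9, Value) accepts anything
    // in [-256, 255], so 0xFF and a sign-extended -1 both pass, while 256
    // would fail.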
    for (unsigned i = 0; i != Size; ++i)
      Data[Fixup.getOffset() + i] = uint8_t(Value >> (i * 8));
  }
  bool mayNeedRelaxation(const MCInst &Inst,
                         const MCSubtargetInfo &STI) const override;

  bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
                            const MCRelaxableFragment *DF,
                            const MCAsmLayout &Layout) const override;

  void relaxInstruction(const MCInst &Inst, const MCSubtargetInfo &STI,
                        MCInst &Res) const override;

  bool writeNopData(raw_ostream &OS, uint64_t Count) const override;
};
} // end anonymous namespace
static unsigned getRelaxedOpcodeBranch(const MCInst &Inst, bool is16BitMode) {
  unsigned Op = Inst.getOpcode();
  switch (Op) {
  default:
    return Op;
  case X86::JCC_1:
    return (is16BitMode) ? X86::JCC_2 : X86::JCC_4;
  case X86::JMP_1:
    return (is16BitMode) ? X86::JMP_2 : X86::JMP_4;
  }
}
static unsigned getRelaxedOpcodeArith(const MCInst &Inst) {
  unsigned Op = Inst.getOpcode();
  switch (Op) {
  default:
    return Op;

    // IMUL
  case X86::IMUL16rri8: return X86::IMUL16rri;
  case X86::IMUL16rmi8: return X86::IMUL16rmi;
  case X86::IMUL32rri8: return X86::IMUL32rri;
  case X86::IMUL32rmi8: return X86::IMUL32rmi;
  case X86::IMUL64rri8: return X86::IMUL64rri32;
  case X86::IMUL64rmi8: return X86::IMUL64rmi32;

    // AND
  case X86::AND16ri8: return X86::AND16ri;
  case X86::AND16mi8: return X86::AND16mi;
  case X86::AND32ri8: return X86::AND32ri;
  case X86::AND32mi8: return X86::AND32mi;
  case X86::AND64ri8: return X86::AND64ri32;
  case X86::AND64mi8: return X86::AND64mi32;

    // OR
  case X86::OR16ri8: return X86::OR16ri;
  case X86::OR16mi8: return X86::OR16mi;
  case X86::OR32ri8: return X86::OR32ri;
  case X86::OR32mi8: return X86::OR32mi;
  case X86::OR64ri8: return X86::OR64ri32;
  case X86::OR64mi8: return X86::OR64mi32;

    // XOR
  case X86::XOR16ri8: return X86::XOR16ri;
  case X86::XOR16mi8: return X86::XOR16mi;
  case X86::XOR32ri8: return X86::XOR32ri;
  case X86::XOR32mi8: return X86::XOR32mi;
  case X86::XOR64ri8: return X86::XOR64ri32;
  case X86::XOR64mi8: return X86::XOR64mi32;

    // ADD
  case X86::ADD16ri8: return X86::ADD16ri;
  case X86::ADD16mi8: return X86::ADD16mi;
  case X86::ADD32ri8: return X86::ADD32ri;
  case X86::ADD32mi8: return X86::ADD32mi;
  case X86::ADD64ri8: return X86::ADD64ri32;
  case X86::ADD64mi8: return X86::ADD64mi32;

    // ADC
  case X86::ADC16ri8: return X86::ADC16ri;
  case X86::ADC16mi8: return X86::ADC16mi;
  case X86::ADC32ri8: return X86::ADC32ri;
  case X86::ADC32mi8: return X86::ADC32mi;
  case X86::ADC64ri8: return X86::ADC64ri32;
  case X86::ADC64mi8: return X86::ADC64mi32;

    // SUB
  case X86::SUB16ri8: return X86::SUB16ri;
  case X86::SUB16mi8: return X86::SUB16mi;
  case X86::SUB32ri8: return X86::SUB32ri;
  case X86::SUB32mi8: return X86::SUB32mi;
  case X86::SUB64ri8: return X86::SUB64ri32;
  case X86::SUB64mi8: return X86::SUB64mi32;

    // SBB
  case X86::SBB16ri8: return X86::SBB16ri;
  case X86::SBB16mi8: return X86::SBB16mi;
  case X86::SBB32ri8: return X86::SBB32ri;
  case X86::SBB32mi8: return X86::SBB32mi;
  case X86::SBB64ri8: return X86::SBB64ri32;
  case X86::SBB64mi8: return X86::SBB64mi32;

    // CMP
  case X86::CMP16ri8: return X86::CMP16ri;
  case X86::CMP16mi8: return X86::CMP16mi;
  case X86::CMP32ri8: return X86::CMP32ri;
  case X86::CMP32mi8: return X86::CMP32mi;
  case X86::CMP64ri8: return X86::CMP64ri32;
  case X86::CMP64mi8: return X86::CMP64mi32;

    // PUSH
  case X86::PUSH32i8: return X86::PUSHi32;
  case X86::PUSH16i8: return X86::PUSHi16;
  case X86::PUSH64i8: return X86::PUSH64i32;
  }
}
static unsigned getRelaxedOpcode(const MCInst &Inst, bool is16BitMode) {
  unsigned R = getRelaxedOpcodeArith(Inst);
  if (R != Inst.getOpcode())
    return R;
  return getRelaxedOpcodeBranch(Inst, is16BitMode);
}
Optional<MCFixupKind> X86AsmBackend::getFixupKind(StringRef Name) const {
  if (STI.getTargetTriple().isOSBinFormatELF()) {
    if (STI.getTargetTriple().getArch() == Triple::x86_64) {
      if (Name == "R_X86_64_NONE")
        return FK_NONE;
    } else {
      if (Name == "R_386_NONE")
        return FK_NONE;
    }
  }
  return MCAsmBackend::getFixupKind(Name);
}
bool X86AsmBackend::shouldForceRelocation(const MCAssembler &,
                                          const MCFixup &Fixup,
                                          const MCValue &) {
  return Fixup.getKind() == FK_NONE;
}
bool X86AsmBackend::mayNeedRelaxation(const MCInst &Inst,
                                      const MCSubtargetInfo &STI) const {
  // Branches can always be relaxed in either mode.
  if (getRelaxedOpcodeBranch(Inst, false) != Inst.getOpcode())
    return true;

  // Check if this instruction is ever relaxable.
  if (getRelaxedOpcodeArith(Inst) == Inst.getOpcode())
    return false;

  // Check if the relaxable operand has an expression. For the current set of
  // relaxable instructions, the relaxable operand is always the last operand.
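  // Hypothetical example (not from the original comments): "andl $sym, %eax"
  // encoded as the imm8 form AND32ri8 carries a symbolic MCExpr immediate
  // whose value is unknown here, so it must be reported as relaxable and may
  // later grow into AND32ri.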
  unsigned RelaxableOp = Inst.getNumOperands() - 1;
  if (Inst.getOperand(RelaxableOp).isExpr())
    return true;

  return false;
}
bool X86AsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
                                         const MCRelaxableFragment *DF,
                                         const MCAsmLayout &Layout) const {
  // Relax if the value is too big for a (signed) i8.
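  // Illustrative values (mine, not from the original source): a resolved
  // displacement of 100 still fits in a signed i8 and needs no relaxation,
  // while 200 or -200 does not and forces the 32-bit form.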
  return !isInt<8>(Value);
}
// FIXME: Can tblgen help at all here to verify there aren't other instructions
// we can relax?
void X86AsmBackend::relaxInstruction(const MCInst &Inst,
                                     const MCSubtargetInfo &STI,
                                     MCInst &Res) const {
  // The only relaxation X86 does is from a 1-byte pcrel to a 4-byte pcrel.
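  // For example (per the opcode tables above): JMP_1 relaxes to JMP_4 and
  // ADD64ri8 relaxes to ADD64ri32, widening the immediate/displacement from
  // 8 to 32 bits without changing the operation.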
  bool is16BitMode = STI.getFeatureBits()[X86::Mode16Bit];
  unsigned RelaxedOp = getRelaxedOpcode(Inst, is16BitMode);

  if (RelaxedOp == Inst.getOpcode()) {
    SmallString<256> Tmp;
    raw_svector_ostream OS(Tmp);
    Inst.dump_pretty(OS);
    report_fatal_error("unexpected instruction to relax: " + OS.str());
  }

  Res = Inst;
  Res.setOpcode(RelaxedOp);
}
/// Write a sequence of optimal nops to the output, covering \p Count
/// bytes.
/// \return - true on success, false on failure
bool X86AsmBackend::writeNopData(raw_ostream &OS, uint64_t Count) const {
  static const char Nops[10][11] = {
    // nop
    "\x90",
    // xchg %ax,%ax
    "\x66\x90",
    // nopl (%[re]ax)
    "\x0f\x1f\x00",
    // nopl 0(%[re]ax)
    "\x0f\x1f\x40\x00",
    // nopl 0(%[re]ax,%[re]ax,1)
    "\x0f\x1f\x44\x00\x00",
    // nopw 0(%[re]ax,%[re]ax,1)
    "\x66\x0f\x1f\x44\x00\x00",
    // nopl 0L(%[re]ax)
    "\x0f\x1f\x80\x00\x00\x00\x00",
    // nopl 0L(%[re]ax,%[re]ax,1)
    "\x0f\x1f\x84\x00\x00\x00\x00\x00",
    // nopw 0L(%[re]ax,%[re]ax,1)
    "\x66\x0f\x1f\x84\x00\x00\x00\x00\x00",
    // nopw %cs:0L(%[re]ax,%[re]ax,1)
    "\x66\x2e\x0f\x1f\x84\x00\x00\x00\x00\x00",
  };
  // This CPU doesn't support long nops. If needed add more.
  // FIXME: We could generate something better than plain 0x90.
  if (!STI.getFeatureBits()[X86::FeatureNOPL]) {
    for (uint64_t i = 0; i < Count; ++i)
      OS << '\x90';
    return true;
  }

  // 15 bytes is the longest single NOP instruction, but 10 bytes is
  // commonly the longest that can be efficiently decoded.
  uint64_t MaxNopLength = 10;
  if (STI.getFeatureBits()[X86::ProcIntelSLM])
    MaxNopLength = 7;
  else if (STI.getFeatureBits()[X86::FeatureFast15ByteNOP])
    MaxNopLength = 15;
  else if (STI.getFeatureBits()[X86::FeatureFast11ByteNOP])
    MaxNopLength = 11;

  // Emit as many MaxNopLength NOPs as needed, then emit a NOP of the remaining
  // length.
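  // Illustrative walk-through (example numbers, not from the original
  // source): with Count == 24 and MaxNopLength == 15, the first pass below
  // writes five 0x66 prefixes plus the 10-byte nop (15 bytes), and the second
  // pass writes the plain 9-byte nop, covering all 24 bytes.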
  do {
    const uint8_t ThisNopLength = (uint8_t) std::min(Count, MaxNopLength);
    const uint8_t Prefixes = ThisNopLength <= 10 ? 0 : ThisNopLength - 10;
    for (uint8_t i = 0; i < Prefixes; i++)
      OS << '\x66';
    const uint8_t Rest = ThisNopLength - Prefixes;
    if (Rest != 0)
      OS.write(Nops[Rest - 1], Rest);
    Count -= ThisNopLength;
  } while (Count != 0);

  return true;
}
namespace {

class ELFX86AsmBackend : public X86AsmBackend {
public:
  uint8_t OSABI;
  ELFX86AsmBackend(const Target &T, uint8_t OSABI, const MCSubtargetInfo &STI)
      : X86AsmBackend(T, STI), OSABI(OSABI) {}
};
class ELFX86_32AsmBackend : public ELFX86AsmBackend {
public:
  ELFX86_32AsmBackend(const Target &T, uint8_t OSABI,
                      const MCSubtargetInfo &STI)
      : ELFX86AsmBackend(T, OSABI, STI) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createX86ELFObjectWriter(/*IsELF64*/ false, OSABI, ELF::EM_386);
  }
};
class ELFX86_X32AsmBackend : public ELFX86AsmBackend {
public:
  ELFX86_X32AsmBackend(const Target &T, uint8_t OSABI,
                       const MCSubtargetInfo &STI)
      : ELFX86AsmBackend(T, OSABI, STI) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createX86ELFObjectWriter(/*IsELF64*/ false, OSABI,
                                    ELF::EM_X86_64);
  }
};
class ELFX86_IAMCUAsmBackend : public ELFX86AsmBackend {
public:
  ELFX86_IAMCUAsmBackend(const Target &T, uint8_t OSABI,
                         const MCSubtargetInfo &STI)
      : ELFX86AsmBackend(T, OSABI, STI) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createX86ELFObjectWriter(/*IsELF64*/ false, OSABI,
                                    ELF::EM_IAMCU);
  }
};
class ELFX86_64AsmBackend : public ELFX86AsmBackend {
public:
  ELFX86_64AsmBackend(const Target &T, uint8_t OSABI,
                      const MCSubtargetInfo &STI)
      : ELFX86AsmBackend(T, OSABI, STI) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createX86ELFObjectWriter(/*IsELF64*/ true, OSABI, ELF::EM_X86_64);
  }
};
class WindowsX86AsmBackend : public X86AsmBackend {
  bool Is64Bit;

public:
  WindowsX86AsmBackend(const Target &T, bool is64Bit,
                       const MCSubtargetInfo &STI)
      : X86AsmBackend(T, STI), Is64Bit(is64Bit) {}

  Optional<MCFixupKind> getFixupKind(StringRef Name) const override {
    return StringSwitch<Optional<MCFixupKind>>(Name)
        .Case("dir32", FK_Data_4)
        .Case("secrel32", FK_SecRel_4)
        .Case("secidx", FK_SecRel_2)
        .Default(MCAsmBackend::getFixupKind(Name));
  }

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createX86WinCOFFObjectWriter(Is64Bit);
  }
};
namespace CU {

/// Compact unwind encoding values.
enum CompactUnwindEncodings {
  /// [RE]BP based frame where [RE]BP is pushed on the stack immediately after
  /// the return address, then [RE]SP is moved to [RE]BP.
  UNWIND_MODE_BP_FRAME = 0x01000000,

  /// A frameless function with a small constant stack size.
  UNWIND_MODE_STACK_IMMD = 0x02000000,

  /// A frameless function with a large constant stack size.
  UNWIND_MODE_STACK_IND = 0x03000000,

  /// No compact unwind encoding is available.
  UNWIND_MODE_DWARF = 0x04000000,

  /// Mask for encoding the frame registers.
  UNWIND_BP_FRAME_REGISTERS = 0x00007FFF,

  /// Mask for encoding the frameless registers.
  UNWIND_FRAMELESS_STACK_REG_PERMUTATION = 0x000003FF
};
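// Rough layout of the 32-bit encoding as assembled below (my reading of the
// values above, not an authoritative reference): the mode selector occupies
// the high byte (0x01-0x04 shifted into bits 24+); a BP-frame encoding keeps
// its stack adjust in bits 16-23 and the saved-register list in the low 15
// bits; a frameless encoding keeps the stack size (or subl-instruction
// offset) in bits 16-23, extra adjustments in bits 13-15, the register count
// in bits 10-12, and the permutation in the low 10 bits.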
} // end CU namespace
class DarwinX86AsmBackend : public X86AsmBackend {
  const MCRegisterInfo &MRI;

  /// Number of registers that can be saved in a compact unwind encoding.
  enum { CU_NUM_SAVED_REGS = 6 };

  mutable unsigned SavedRegs[CU_NUM_SAVED_REGS];
  bool Is64Bit;

  unsigned OffsetSize;     ///< Offset of a "push" instruction.
  unsigned MoveInstrSize;  ///< Size of a "move" instruction.
  unsigned StackDivide;    ///< Amount to adjust stack size by.

  /// Size of a "push" instruction for the given register.
  unsigned PushInstrSize(unsigned Reg) const {
    // Pushes of the REX-prefixed registers R12-R15 take two bytes; pushes of
    // the other saved registers encode in a single byte.
    switch (Reg) {
    case X86::EBX:
    case X86::ECX:
    case X86::EDX:
    case X86::EDI:
    case X86::ESI:
    case X86::EBP:
    case X86::RBX:
    case X86::RBP:
      return 1;
    case X86::R12:
    case X86::R13:
    case X86::R14:
    case X86::R15:
      return 2;
    }
    return 1;
  }
  /// Implementation of algorithm to generate the compact unwind encoding
  /// for the CFI instructions.
  uint32_t
  generateCompactUnwindEncodingImpl(ArrayRef<MCCFIInstruction> Instrs) const {
    if (Instrs.empty()) return 0;

    // Reset the saved registers.
    unsigned SavedRegIdx = 0;
    memset(SavedRegs, 0, sizeof(SavedRegs));

    bool HasFP = false;

    // Encode that we are using EBP/RBP as the frame pointer.
    uint32_t CompactUnwindEncoding = 0;

    unsigned SubtractInstrIdx = Is64Bit ? 3 : 2;
    unsigned InstrOffset = 0;
    unsigned StackAdjust = 0;
    unsigned StackSize = 0;
    unsigned NumDefCFAOffsets = 0;
    for (unsigned i = 0, e = Instrs.size(); i != e; ++i) {
      const MCCFIInstruction &Inst = Instrs[i];

      switch (Inst.getOperation()) {
      default:
        // Any other CFI directives indicate a frame that we aren't prepared
        // to represent via compact unwind, so just bail out.
        return CU::UNWIND_MODE_DWARF;
      case MCCFIInstruction::OpDefCfaRegister: {
        // Defines a frame pointer. E.g.
        //
        //     .cfi_def_cfa_register %rbp
        //
        HasFP = true;

        // If the frame pointer is other than esp/rsp, we do not have a way to
        // generate a compact unwinding representation, so bail out.
        if (*MRI.getLLVMRegNum(Inst.getRegister(), true) !=
            (Is64Bit ? X86::RBP : X86::EBP))
          return CU::UNWIND_MODE_DWARF;

        // Reset the saved-register counts.
        memset(SavedRegs, 0, sizeof(SavedRegs));
        SavedRegIdx = 0;

        InstrOffset += MoveInstrSize;
        break;
      }
      case MCCFIInstruction::OpDefCfaOffset: {
        // Defines a new offset for the CFA. E.g.
        //
        //     .cfi_def_cfa_offset 16
        //     .cfi_def_cfa_offset 80
        //
        StackSize = std::abs(Inst.getOffset()) / StackDivide;
        ++NumDefCFAOffsets;
        break;
      }
      case MCCFIInstruction::OpOffset: {
        // Defines a "push" of a callee-saved register. E.g.
        //
        //     .cfi_offset %rbx, -40
        //     .cfi_offset %r14, -32
        //     .cfi_offset %r15, -24
        //
        if (SavedRegIdx == CU_NUM_SAVED_REGS)
          // If there are too many saved registers, we cannot use a compact
          // encoding.
          return CU::UNWIND_MODE_DWARF;

        unsigned Reg = *MRI.getLLVMRegNum(Inst.getRegister(), true);
        SavedRegs[SavedRegIdx++] = Reg;
        StackAdjust += OffsetSize;
        InstrOffset += PushInstrSize(Reg);
        break;
      }
      }
    }
    StackAdjust /= StackDivide;

    if (HasFP) {
      if ((StackAdjust & 0xFF) != StackAdjust)
        // Offset was too big for a compact unwind encoding.
        return CU::UNWIND_MODE_DWARF;

      // Get the encoding of the saved registers when we have a frame pointer.
      uint32_t RegEnc = encodeCompactUnwindRegistersWithFrame();
      if (RegEnc == ~0U) return CU::UNWIND_MODE_DWARF;

      CompactUnwindEncoding |= CU::UNWIND_MODE_BP_FRAME;
      CompactUnwindEncoding |= (StackAdjust & 0xFF) << 16;
      CompactUnwindEncoding |= RegEnc & CU::UNWIND_BP_FRAME_REGISTERS;
    } else {
      SubtractInstrIdx += InstrOffset;

      if ((StackSize & 0xFF) == StackSize) {
        // Frameless stack with a small stack size.
        CompactUnwindEncoding |= CU::UNWIND_MODE_STACK_IMMD;

        // Encode the stack size.
        CompactUnwindEncoding |= (StackSize & 0xFF) << 16;
      } else {
        if ((StackAdjust & 0x7) != StackAdjust)
          // The extra stack adjustments are too big for us to handle.
          return CU::UNWIND_MODE_DWARF;

        // Frameless stack with an offset too large for us to encode compactly.
        CompactUnwindEncoding |= CU::UNWIND_MODE_STACK_IND;

        // Encode the offset to the nnnnnn value in the 'subl $nnnnnn, ESP'
        // instruction.
        CompactUnwindEncoding |= (SubtractInstrIdx & 0xFF) << 16;

        // Encode any extra stack adjustments (done via push instructions).
        CompactUnwindEncoding |= (StackAdjust & 0x7) << 13;
      }
      // Encode the number of registers saved. (Reverse the list first.)
      std::reverse(&SavedRegs[0], &SavedRegs[SavedRegIdx]);
      CompactUnwindEncoding |= (SavedRegIdx & 0x7) << 10;

      // Get the encoding of the saved registers when we don't have a frame
      // pointer.
      uint32_t RegEnc = encodeCompactUnwindRegistersWithoutFrame(SavedRegIdx);
      if (RegEnc == ~0U) return CU::UNWIND_MODE_DWARF;

      // Encode the register encoding.
      CompactUnwindEncoding |=
          RegEnc & CU::UNWIND_FRAMELESS_STACK_REG_PERMUTATION;
    }

    return CompactUnwindEncoding;
  }
  /// Get the compact unwind number for a given register. The number
  /// corresponds to the enum lists in compact_unwind_encoding.h.
  int getCompactUnwindRegNum(unsigned Reg) const {
    static const MCPhysReg CU32BitRegs[7] = {
        X86::EBX, X86::ECX, X86::EDX, X86::EDI, X86::ESI, X86::EBP, 0
    };
    static const MCPhysReg CU64BitRegs[] = {
        X86::RBX, X86::R12, X86::R13, X86::R14, X86::R15, X86::RBP, 0
    };
    const MCPhysReg *CURegs = Is64Bit ? CU64BitRegs : CU32BitRegs;
    for (int Idx = 1; *CURegs; ++CURegs, ++Idx)
      if (*CURegs == Reg)
        return Idx;

    return -1;
  }
  /// Return the registers encoded for a compact encoding with a frame
  /// pointer.
  uint32_t encodeCompactUnwindRegistersWithFrame() const {
    // Encode the registers in the order they were saved --- 3-bits per
    // register. The list of saved registers is assumed to be in reverse
    // order. The registers are numbered from 1 to CU_NUM_SAVED_REGS.
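    // Illustrative example (mine, not from the original comments): if %rbx,
    // %r12 and %r14 were pushed, they arrive here in reverse order, their CU
    // numbers are 4, 2 and 1, and they are packed from bit 0 upward as
    // 4 | (2 << 3) | (1 << 6) == 0x54.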
    uint32_t RegEnc = 0;
    for (int i = 0, Idx = 0; i != CU_NUM_SAVED_REGS; ++i) {
      unsigned Reg = SavedRegs[i];
      if (Reg == 0) break;

      int CURegNum = getCompactUnwindRegNum(Reg);
      if (CURegNum == -1) return ~0U;

      // Encode the 3-bit register number in order, skipping over 3-bits for
      // each register.
      RegEnc |= (CURegNum & 0x7) << (Idx++ * 3);
    }

    assert((RegEnc & 0x3FFFF) == RegEnc &&
           "Invalid compact register encoding!");
    return RegEnc;
  }
  /// Create the permutation encoding used with frameless stacks. It is
  /// passed the number of registers to be saved and an array of the registers
  /// saved.
  uint32_t encodeCompactUnwindRegistersWithoutFrame(unsigned RegCount) const {
    // The saved registers are numbered from 1 to 6. In order to encode the
    // order in which they were saved, we re-number them according to their
    // place in the register order. The re-numbering is relative to the last
    // re-numbered register. E.g., if we have registers {6, 2, 4, 5} saved in
    // that order:
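    //
    // Walk-through of the code below for that example (my own numbers, not
    // the original table): assuming those values are the CU register numbers
    // in push order, the caller's reverse plus the whole-array reverse below
    // leave them at indices 2..5 of SavedRegs; the renumbering loop turns
    // {6, 2, 4, 5} into {5, 1, 2, 2} (each value minus the count of smaller
    // values already seen, minus one), and the RegCount == 4 case packs them
    // as 60*5 + 12*1 + 3*2 + 2 == 320, which fits in the 10-bit permutation
    // field.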
    for (unsigned i = 0; i < RegCount; ++i) {
      int CUReg = getCompactUnwindRegNum(SavedRegs[i]);
      if (CUReg == -1) return ~0U;
      SavedRegs[i] = CUReg;
    }

    // Reverse the list.
    std::reverse(&SavedRegs[0], &SavedRegs[CU_NUM_SAVED_REGS]);
    uint32_t RenumRegs[CU_NUM_SAVED_REGS];
    for (unsigned i = CU_NUM_SAVED_REGS - RegCount; i < CU_NUM_SAVED_REGS;
         ++i) {
      unsigned Countless = 0;
      for (unsigned j = CU_NUM_SAVED_REGS - RegCount; j < i; ++j)
        if (SavedRegs[j] < SavedRegs[i])
          ++Countless;

      RenumRegs[i] = SavedRegs[i] - Countless - 1;
    }
    // Take the renumbered values and encode them into a 10-bit number.
    uint32_t permutationEncoding = 0;
    switch (RegCount) {
    case 6:
      permutationEncoding |= 120 * RenumRegs[0] + 24 * RenumRegs[1]
                             + 6 * RenumRegs[2] + 2 * RenumRegs[3]
                             + RenumRegs[4];
      break;
    case 5:
      permutationEncoding |= 120 * RenumRegs[1] + 24 * RenumRegs[2]
                             + 6 * RenumRegs[3] + 2 * RenumRegs[4]
                             + RenumRegs[5];
      break;
    case 4:
      permutationEncoding |= 60 * RenumRegs[2] + 12 * RenumRegs[3]
                             + 3 * RenumRegs[4] + RenumRegs[5];
      break;
    case 3:
      permutationEncoding |= 20 * RenumRegs[3] + 4 * RenumRegs[4]
                             + RenumRegs[5];
      break;
    case 2:
      permutationEncoding |= 5 * RenumRegs[4] + RenumRegs[5];
      break;
    case 1:
      permutationEncoding |= RenumRegs[5];
      break;
    }

    assert((permutationEncoding & 0x3FF) == permutationEncoding &&
           "Invalid compact register encoding!");
    return permutationEncoding;
  }
public:
  DarwinX86AsmBackend(const Target &T, const MCRegisterInfo &MRI,
                      const MCSubtargetInfo &STI, bool Is64Bit)
      : X86AsmBackend(T, STI), MRI(MRI), Is64Bit(Is64Bit) {
    memset(SavedRegs, 0, sizeof(SavedRegs));
    OffsetSize = Is64Bit ? 8 : 4;
    MoveInstrSize = Is64Bit ? 3 : 2;
    StackDivide = Is64Bit ? 8 : 4;
  }
};
class DarwinX86_32AsmBackend : public DarwinX86AsmBackend {
public:
  DarwinX86_32AsmBackend(const Target &T, const MCRegisterInfo &MRI,
                         const MCSubtargetInfo &STI)
      : DarwinX86AsmBackend(T, MRI, STI, false) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createX86MachObjectWriter(/*Is64Bit=*/false,
                                     MachO::CPU_TYPE_I386,
                                     MachO::CPU_SUBTYPE_I386_ALL);
  }

  /// Generate the compact unwind encoding for the CFI instructions.
  uint32_t generateCompactUnwindEncoding(
      ArrayRef<MCCFIInstruction> Instrs) const override {
    return generateCompactUnwindEncodingImpl(Instrs);
  }
};
class DarwinX86_64AsmBackend : public DarwinX86AsmBackend {
  const MachO::CPUSubTypeX86 Subtype;

public:
  DarwinX86_64AsmBackend(const Target &T, const MCRegisterInfo &MRI,
                         const MCSubtargetInfo &STI, MachO::CPUSubTypeX86 st)
      : DarwinX86AsmBackend(T, MRI, STI, true), Subtype(st) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createX86MachObjectWriter(/*Is64Bit=*/true, MachO::CPU_TYPE_X86_64,
                                     Subtype);
  }

  /// Generate the compact unwind encoding for the CFI instructions.
  uint32_t generateCompactUnwindEncoding(
      ArrayRef<MCCFIInstruction> Instrs) const override {
    return generateCompactUnwindEncodingImpl(Instrs);
  }
};

} // end anonymous namespace
MCAsmBackend *llvm::createX86_32AsmBackend(const Target &T,
                                           const MCSubtargetInfo &STI,
                                           const MCRegisterInfo &MRI,
                                           const MCTargetOptions &Options) {
  const Triple &TheTriple = STI.getTargetTriple();
  if (TheTriple.isOSBinFormatMachO())
    return new DarwinX86_32AsmBackend(T, MRI, STI);

  if (TheTriple.isOSWindows() && TheTriple.isOSBinFormatCOFF())
    return new WindowsX86AsmBackend(T, false, STI);

  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());

  if (TheTriple.isOSIAMCU())
    return new ELFX86_IAMCUAsmBackend(T, OSABI, STI);

  return new ELFX86_32AsmBackend(T, OSABI, STI);
}
MCAsmBackend *llvm::createX86_64AsmBackend(const Target &T,
                                           const MCSubtargetInfo &STI,
                                           const MCRegisterInfo &MRI,
                                           const MCTargetOptions &Options) {
  const Triple &TheTriple = STI.getTargetTriple();
  if (TheTriple.isOSBinFormatMachO()) {
    MachO::CPUSubTypeX86 CS =
        StringSwitch<MachO::CPUSubTypeX86>(TheTriple.getArchName())
            .Case("x86_64h", MachO::CPU_SUBTYPE_X86_64_H)
            .Default(MachO::CPU_SUBTYPE_X86_64_ALL);
    return new DarwinX86_64AsmBackend(T, MRI, STI, CS);
  }

  if (TheTriple.isOSWindows() && TheTriple.isOSBinFormatCOFF())
    return new WindowsX86AsmBackend(T, true, STI);

  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());

  if (TheTriple.getEnvironment() == Triple::GNUX32)
    return new ELFX86_X32AsmBackend(T, OSABI, STI);
  return new ELFX86_64AsmBackend(T, OSABI, STI);
}