//===-- AArch64AsmBackend.cpp - AArch64 Assembler Backend -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AArch64FixupKinds.h"
#include "MCTargetDesc/AArch64MCExpr.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/MC/MCValue.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/TargetParser/Triple.h"

using namespace llvm;
namespace {

class AArch64AsmBackend : public MCAsmBackend {
  static const unsigned PCRelFlagVal =
      MCFixupKindInfo::FKF_IsAlignedDownTo32Bits | MCFixupKindInfo::FKF_IsPCRel;

protected:
  Triple TheTriple;

public:
  AArch64AsmBackend(const Target &T, const Triple &TT, bool IsLittleEndian)
      : MCAsmBackend(IsLittleEndian ? llvm::endianness::little
                                    : llvm::endianness::big),
        TheTriple(TT) {}
  unsigned getNumFixupKinds() const override {
    return AArch64::NumTargetFixupKinds;
  }

  std::optional<MCFixupKind> getFixupKind(StringRef Name) const override;
  const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override {
    const static MCFixupKindInfo Infos[AArch64::NumTargetFixupKinds] = {
        // This table *must* be in the order that the fixup_* kinds are defined
        // in AArch64FixupKinds.h.
        //
        // Name                              Offset (bits) Size (bits) Flags
        {"fixup_aarch64_pcrel_adr_imm21", 0, 32, PCRelFlagVal},
        {"fixup_aarch64_pcrel_adrp_imm21", 0, 32, PCRelFlagVal},
        {"fixup_aarch64_add_imm12", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale1", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale2", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale4", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale8", 10, 12, 0},
        {"fixup_aarch64_ldst_imm12_scale16", 10, 12, 0},
        {"fixup_aarch64_ldr_pcrel_imm19", 5, 19, PCRelFlagVal},
        {"fixup_aarch64_movw", 5, 16, 0},
        {"fixup_aarch64_pcrel_branch9", 5, 9, PCRelFlagVal},
        {"fixup_aarch64_pcrel_branch14", 5, 14, PCRelFlagVal},
        {"fixup_aarch64_pcrel_branch16", 5, 16, PCRelFlagVal},
        {"fixup_aarch64_pcrel_branch19", 5, 19, PCRelFlagVal},
        {"fixup_aarch64_pcrel_branch26", 0, 26, PCRelFlagVal},
        {"fixup_aarch64_pcrel_call26", 0, 26, PCRelFlagVal}};
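
    // In each entry above, Offset is the bit position of the fixup field
    // within the 32-bit instruction word and Size is its width; e.g. the
    // imm12 fixups occupy bits 21:10 of an ADD/LDR/STR encoding.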

    // Fixup kinds from .reloc directive are like R_AARCH64_NONE. They do not
    // require any extra processing.
    if (Kind >= FirstLiteralRelocationKind)
      return MCAsmBackend::getFixupKindInfo(FK_NONE);

    if (Kind < FirstTargetFixupKind)
      return MCAsmBackend::getFixupKindInfo(Kind);

    assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
           "Invalid kind!");
    return Infos[Kind - FirstTargetFixupKind];
  }
  void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                  const MCValue &Target, MutableArrayRef<char> Data,
                  uint64_t Value, bool IsResolved,
                  const MCSubtargetInfo *STI) const override;

  bool fixupNeedsRelaxation(const MCFixup &Fixup,
                            uint64_t Value) const override;
  void relaxInstruction(MCInst &Inst,
                        const MCSubtargetInfo &STI) const override;
  bool writeNopData(raw_ostream &OS, uint64_t Count,
                    const MCSubtargetInfo *STI) const override;

  unsigned getFixupKindContainereSizeInBytes(unsigned Kind) const;

  bool shouldForceRelocation(const MCAssembler &Asm, const MCFixup &Fixup,
                             const MCValue &Target,
                             const MCSubtargetInfo *STI) override;
};

} // end anonymous namespace

/// The number of bytes the fixup may change.
static unsigned getFixupKindNumBytes(unsigned Kind) {
  switch (Kind) {
  default:
    llvm_unreachable("Unknown fixup kind!");

  case FK_Data_1:
    return 1;

  case FK_Data_2:
  case FK_SecRel_2:
    return 2;

  case AArch64::fixup_aarch64_movw:
  case AArch64::fixup_aarch64_pcrel_branch9:
  case AArch64::fixup_aarch64_pcrel_branch14:
  case AArch64::fixup_aarch64_pcrel_branch16:
  case AArch64::fixup_aarch64_add_imm12:
  case AArch64::fixup_aarch64_ldst_imm12_scale1:
  case AArch64::fixup_aarch64_ldst_imm12_scale2:
  case AArch64::fixup_aarch64_ldst_imm12_scale4:
  case AArch64::fixup_aarch64_ldst_imm12_scale8:
  case AArch64::fixup_aarch64_ldst_imm12_scale16:
  case AArch64::fixup_aarch64_ldr_pcrel_imm19:
  case AArch64::fixup_aarch64_pcrel_branch19:
    return 3;

  case AArch64::fixup_aarch64_pcrel_adr_imm21:
  case AArch64::fixup_aarch64_pcrel_adrp_imm21:
  case AArch64::fixup_aarch64_pcrel_branch26:
  case AArch64::fixup_aarch64_pcrel_call26:
  case FK_Data_4:
  case FK_SecRel_4:
    return 4;

  case FK_Data_8:
    return 8;
  }
}

static unsigned AdrImmBits(unsigned Value) {
  unsigned lo2 = Value & 0x3;
  unsigned hi19 = (Value & 0x1ffffc) >> 2;
  return (hi19 << 5) | (lo2 << 29);
}
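
// ADR/ADRP scatter their 21-bit immediate across the instruction word: the
// low two bits go to immlo (bits 30:29) and the remaining nineteen to immhi
// (bits 23:5), which is exactly the shuffle AdrImmBits performs. For example,
// an offset of 0x7 yields hi19 = 1, lo2 = 3, i.e. (1 << 5) | (3 << 29).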

static uint64_t adjustFixupValue(const MCFixup &Fixup, const MCValue &Target,
                                 uint64_t Value, MCContext &Ctx,
                                 const Triple &TheTriple, bool IsResolved) {
  int64_t SignedValue = static_cast<int64_t>(Value);
  switch (Fixup.getTargetKind()) {
  default:
    llvm_unreachable("Unknown fixup kind!");
  case AArch64::fixup_aarch64_pcrel_adr_imm21:
    if (!isInt<21>(SignedValue))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    return AdrImmBits(Value & 0x1fffffULL);
  case AArch64::fixup_aarch64_pcrel_adrp_imm21:
    assert(!IsResolved);
    if (TheTriple.isOSBinFormatCOFF()) {
      if (!isInt<21>(SignedValue))
        Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
      return AdrImmBits(Value & 0x1fffffULL);
    }
    return AdrImmBits((Value & 0x1fffff000ULL) >> 12);
  case AArch64::fixup_aarch64_ldr_pcrel_imm19:
  case AArch64::fixup_aarch64_pcrel_branch19:
    // Signed 19-bit immediate which gets multiplied by 4
    if (!isInt<21>(SignedValue))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    // Low two bits are not encoded.
    return (Value >> 2) & 0x7ffff;
  case AArch64::fixup_aarch64_add_imm12:
  case AArch64::fixup_aarch64_ldst_imm12_scale1:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff;
    // Unsigned 12-bit immediate
    if (!isUInt<12>(Value))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    return Value;
  case AArch64::fixup_aarch64_ldst_imm12_scale2:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0x1ffe;
    // Unsigned 12-bit immediate which gets multiplied by 2
    if (!isUInt<13>(Value))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x1)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 2-byte aligned");
    return Value >> 1;
  case AArch64::fixup_aarch64_ldst_imm12_scale4:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0x3ffc;
    // Unsigned 12-bit immediate which gets multiplied by 4
    if (!isUInt<14>(Value))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 4-byte aligned");
    return Value >> 2;
  case AArch64::fixup_aarch64_ldst_imm12_scale8:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0x7ff8;
    // Unsigned 12-bit immediate which gets multiplied by 8
    if (!isUInt<15>(Value))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0x7)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 8-byte aligned");
    return Value >> 3;
  case AArch64::fixup_aarch64_ldst_imm12_scale16:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved)
      Value &= 0xfff0;
    // Unsigned 12-bit immediate which gets multiplied by 16
    if (!isUInt<16>(Value))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    if (Value & 0xf)
      Ctx.reportError(Fixup.getLoc(), "fixup must be 16-byte aligned");
    return Value >> 4;
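
  // The imm12 cases above all share one shape: the byte offset must be a
  // multiple of the access size, and only offset/size fits in the encoded
  // 12-bit field (e.g. a byte offset of 0x50 with scale16 encodes as 0x5).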
  case AArch64::fixup_aarch64_movw: {
    AArch64MCExpr::VariantKind RefKind =
        static_cast<AArch64MCExpr::VariantKind>(Target.getRefKind());
    if (AArch64MCExpr::getSymbolLoc(RefKind) != AArch64MCExpr::VK_ABS &&
        AArch64MCExpr::getSymbolLoc(RefKind) != AArch64MCExpr::VK_SABS) {
      if (!RefKind) {
        // The fixup is an expression
        if (SignedValue > 0xFFFF || SignedValue < -0xFFFF)
          Ctx.reportError(Fixup.getLoc(),
                          "fixup value out of range [-0xFFFF, 0xFFFF]");

        // Invert the negative immediate because it will feed into a MOVN.
        if (SignedValue < 0)
          SignedValue = ~SignedValue;
        Value = static_cast<uint64_t>(SignedValue);
      } else
        // VK_GOTTPREL, VK_TPREL, VK_DTPREL are movw fixups, but they can't
        // ever be resolved in the assembler.
        Ctx.reportError(Fixup.getLoc(),
                        "relocation for a thread-local variable points to an "
                        "absolute symbol");
      return Value;
    }

    if (!IsResolved) {
      // FIXME: Figure out when this can actually happen, and verify our
      // behaviour.
      Ctx.reportError(Fixup.getLoc(), "unresolved movw fixup not yet "
                                      "implemented");
      return Value;
    }

    if (AArch64MCExpr::getSymbolLoc(RefKind) == AArch64MCExpr::VK_SABS) {
      switch (AArch64MCExpr::getAddressFrag(RefKind)) {
      case AArch64MCExpr::VK_G0:
        break;
      case AArch64MCExpr::VK_G1:
        SignedValue = SignedValue >> 16;
        break;
      case AArch64MCExpr::VK_G2:
        SignedValue = SignedValue >> 32;
        break;
      case AArch64MCExpr::VK_G3:
        SignedValue = SignedValue >> 48;
        break;
      default:
        llvm_unreachable("Variant kind doesn't correspond to fixup");
      }

    } else {
      switch (AArch64MCExpr::getAddressFrag(RefKind)) {
      case AArch64MCExpr::VK_G0:
        break;
      case AArch64MCExpr::VK_G1:
        Value = Value >> 16;
        break;
      case AArch64MCExpr::VK_G2:
        Value = Value >> 32;
        break;
      case AArch64MCExpr::VK_G3:
        Value = Value >> 48;
        break;
      default:
        llvm_unreachable("Variant kind doesn't correspond to fixup");
      }
    }

    if (RefKind & AArch64MCExpr::VK_NC) {
      Value &= 0xFFFF;
    } else if (AArch64MCExpr::getSymbolLoc(RefKind) == AArch64MCExpr::VK_SABS) {
      if (SignedValue > 0xFFFF || SignedValue < -0xFFFF)
        Ctx.reportError(Fixup.getLoc(), "fixup value out of range");

      // Invert the negative immediate because it will feed into a MOVN.
      if (SignedValue < 0)
        SignedValue = ~SignedValue;
      Value = static_cast<uint64_t>(SignedValue);
    } else if (Value > 0xFFFF) {
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    }
    return Value;
  }
  case AArch64::fixup_aarch64_pcrel_branch9:
    // Signed 11-bit label (9 bits encoded plus 2 implicit low bits).
    if (!isInt<11>(SignedValue))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    // Low two bits are not encoded (4-byte alignment assumed).
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    return (Value >> 2) & 0x1ff;
  case AArch64::fixup_aarch64_pcrel_branch14:
    // Signed 16-bit immediate
    if (!isInt<16>(SignedValue))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    // Low two bits are not encoded (4-byte alignment assumed).
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    return (Value >> 2) & 0x3fff;
  case AArch64::fixup_aarch64_pcrel_branch16:
    // Unsigned PC-relative offset, so invert the negative immediate.
    SignedValue = -SignedValue;
    Value = static_cast<uint64_t>(SignedValue);
    // Check valid 18-bit unsigned range.
    if (SignedValue < 0 || SignedValue > ((1 << 18) - 1))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    // Low two bits are not encoded (4-byte alignment assumed).
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    return (Value >> 2) & 0xffff;
  case AArch64::fixup_aarch64_pcrel_branch26:
  case AArch64::fixup_aarch64_pcrel_call26:
    if (TheTriple.isOSBinFormatCOFF() && !IsResolved && SignedValue != 0) {
      // MSVC link.exe and lld do not support this relocation type
      // with a non-zero offset.
      Ctx.reportError(Fixup.getLoc(),
                      "cannot perform a PC-relative fixup with a non-zero "
                      "symbol offset");
    }
    // Signed 28-bit immediate
    if (!isInt<28>(SignedValue))
      Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
    // Low two bits are not encoded (4-byte alignment assumed).
    if (Value & 0x3)
      Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
    return (Value >> 2) & 0x3ffffff;
  case FK_Data_1:
  case FK_Data_2:
  case FK_Data_4:
  case FK_Data_8:
  case FK_SecRel_2:
  case FK_SecRel_4:
    return Value;
  }
}
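
// Note that Ctx.reportError() does not abort assembly on the spot: each case
// above still returns a (possibly truncated) value, so further errors can be
// diagnosed in the same run before the assembler finally fails.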

std::optional<MCFixupKind>
AArch64AsmBackend::getFixupKind(StringRef Name) const {
  if (!TheTriple.isOSBinFormatELF())
    return std::nullopt;

  unsigned Type = llvm::StringSwitch<unsigned>(Name)
#define ELF_RELOC(X, Y)  .Case(#X, Y)
#include "llvm/BinaryFormat/ELFRelocs/AArch64.def"
#undef ELF_RELOC
                      .Case("BFD_RELOC_NONE", ELF::R_AARCH64_NONE)
                      .Case("BFD_RELOC_16", ELF::R_AARCH64_ABS16)
                      .Case("BFD_RELOC_32", ELF::R_AARCH64_ABS32)
                      .Case("BFD_RELOC_64", ELF::R_AARCH64_ABS64)
                      .Default(-1u);
  if (Type == -1u)
    return std::nullopt;
  return static_cast<MCFixupKind>(FirstLiteralRelocationKind + Type);
}
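
// This is what lets a `.reloc` directive name an ELF relocation directly,
// e.g. `.reloc ., R_AARCH64_NONE`: the returned kind is offset past
// FirstLiteralRelocationKind and passed through with no target processing.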

/// getFixupKindContainereSizeInBytes - The number of bytes of the
/// container involved in big endian, or 0 if the item is little endian.
unsigned
AArch64AsmBackend::getFixupKindContainereSizeInBytes(unsigned Kind) const {
  if (Endian == llvm::endianness::little)
    return 0;

  switch (Kind) {
  default:
    llvm_unreachable("Unknown fixup kind!");

  case FK_Data_1:
    return 1;
  case FK_Data_2:
    return 2;
  case FK_Data_4:
    return 4;
  case FK_Data_8:
    return 8;

  case AArch64::fixup_aarch64_movw:
  case AArch64::fixup_aarch64_pcrel_branch9:
  case AArch64::fixup_aarch64_pcrel_branch14:
  case AArch64::fixup_aarch64_pcrel_branch16:
  case AArch64::fixup_aarch64_add_imm12:
  case AArch64::fixup_aarch64_ldst_imm12_scale1:
  case AArch64::fixup_aarch64_ldst_imm12_scale2:
  case AArch64::fixup_aarch64_ldst_imm12_scale4:
  case AArch64::fixup_aarch64_ldst_imm12_scale8:
  case AArch64::fixup_aarch64_ldst_imm12_scale16:
  case AArch64::fixup_aarch64_ldr_pcrel_imm19:
  case AArch64::fixup_aarch64_pcrel_branch19:
  case AArch64::fixup_aarch64_pcrel_adr_imm21:
  case AArch64::fixup_aarch64_pcrel_adrp_imm21:
  case AArch64::fixup_aarch64_pcrel_branch26:
  case AArch64::fixup_aarch64_pcrel_call26:
    // Instructions are always little endian
    return 0;
  }
}
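
// In other words, even on big-endian targets (aarch64_be) only data fixups
// get byte-swapped containers; instruction words are always stored
// little-endian, so their fixups apply without swapping.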

void AArch64AsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                                   const MCValue &Target,
                                   MutableArrayRef<char> Data, uint64_t Value,
                                   bool IsResolved,
                                   const MCSubtargetInfo *STI) const {
  if (Fixup.getTargetKind() == FK_Data_8 && TheTriple.isOSBinFormatELF()) {
    auto RefKind = static_cast<AArch64MCExpr::VariantKind>(Target.getRefKind());
    AArch64MCExpr::VariantKind SymLoc = AArch64MCExpr::getSymbolLoc(RefKind);
    if (SymLoc == AArch64AuthMCExpr::VK_AUTH ||
        SymLoc == AArch64AuthMCExpr::VK_AUTHADDR) {
      assert(Value == 0);
      const auto *Expr = cast<AArch64AuthMCExpr>(Fixup.getValue());
      Value = (uint64_t(Expr->getDiscriminator()) << 32) |
              (uint64_t(Expr->getKey()) << 60) |
              (uint64_t(Expr->hasAddressDiversity()) << 63);
    }
  }

  if (!Value)
    return; // Doesn't change encoding.
  unsigned Kind = Fixup.getKind();
  if (Kind >= FirstLiteralRelocationKind)
    return;
  unsigned NumBytes = getFixupKindNumBytes(Kind);
  MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind());
  MCContext &Ctx = Asm.getContext();
  int64_t SignedValue = static_cast<int64_t>(Value);
  // Apply any target-specific value adjustments.
  Value = adjustFixupValue(Fixup, Target, Value, Ctx, TheTriple, IsResolved);

  // Shift the value into position.
  Value <<= Info.TargetOffset;

  unsigned Offset = Fixup.getOffset();
  assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");

  // Used to point to big endian bytes.
  unsigned FulleSizeInBytes = getFixupKindContainereSizeInBytes(Fixup.getKind());

  // For each byte of the fragment that the fixup touches, mask in the
  // bits from the fixup value.
  if (FulleSizeInBytes == 0) {
    // Handle as little-endian
    for (unsigned i = 0; i != NumBytes; ++i) {
      Data[Offset + i] |= uint8_t((Value >> (i * 8)) & 0xff);
    }
  } else {
    // Handle as big-endian
    assert((Offset + FulleSizeInBytes) <= Data.size() && "Invalid fixup size!");
    assert(NumBytes <= FulleSizeInBytes && "Invalid fixup size!");
    for (unsigned i = 0; i != NumBytes; ++i) {
      unsigned Idx = FulleSizeInBytes - 1 - i;
      Data[Offset + Idx] |= uint8_t((Value >> (i * 8)) & 0xff);
    }
  }

  // FIXME: getFixupKindInfo() and getFixupKindNumBytes() could be fixed to
  // handle this more cleanly. This may affect the output of -show-mc-encoding.
  AArch64MCExpr::VariantKind RefKind =
      static_cast<AArch64MCExpr::VariantKind>(Target.getRefKind());
  if (AArch64MCExpr::getSymbolLoc(RefKind) == AArch64MCExpr::VK_SABS ||
      (!RefKind && Fixup.getTargetKind() == AArch64::fixup_aarch64_movw)) {
    // If the immediate is negative, generate MOVN else MOVZ.
    // (Bit 30 = 0) ==> MOVN, (Bit 30 = 1) ==> MOVZ.
    if (SignedValue < 0)
      Data[Offset + 3] &= ~(1 << 6);
    else
      Data[Offset + 3] |= (1 << 6);
  }
}

bool AArch64AsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup,
                                             uint64_t Value) const {
  // FIXME: This isn't correct for AArch64. Just moving the "generic" logic
  // into the targets for now.

  // Relax if the value is too big for a (signed) i8.
  return int64_t(Value) != int64_t(int8_t(Value));
}

void AArch64AsmBackend::relaxInstruction(MCInst &Inst,
                                         const MCSubtargetInfo &STI) const {
  llvm_unreachable("AArch64AsmBackend::relaxInstruction() unimplemented");
}

bool AArch64AsmBackend::writeNopData(raw_ostream &OS, uint64_t Count,
                                     const MCSubtargetInfo *STI) const {
  // If the count is not 4-byte aligned, we must be writing data into the text
  // section (otherwise we have unaligned instructions, and thus have far
  // bigger problems), so just write zeros instead.
  OS.write_zeros(Count % 4);

  // We are properly aligned, so write NOPs as requested.
  Count /= 4;
  for (uint64_t i = 0; i != Count; ++i)
    OS.write("\x1f\x20\x03\xd5", 4);
  return true;
}
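
// The bytes 1f 20 03 d5 are the little-endian encoding of 0xd503201f, the
// AArch64 NOP (HINT #0) instruction.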

bool AArch64AsmBackend::shouldForceRelocation(const MCAssembler &Asm,
                                              const MCFixup &Fixup,
                                              const MCValue &Target,
                                              const MCSubtargetInfo *STI) {
  unsigned Kind = Fixup.getKind();
  if (Kind >= FirstLiteralRelocationKind)
    return true;

  // The ADRP instruction adds some multiple of 0x1000 to the current PC &
  // ~0xfff. This means that the required offset to reach a symbol can vary by
  // up to one step depending on where the ADRP is in memory. For example:
  //
  //     ADRP x0, there
  //  there:
  //
  // If the ADRP occurs at address 0xffc then "there" will be at 0x1000 and
  // we'll need that as an offset. At any other address "there" will be in the
  // same page as the ADRP and the instruction should encode 0x0. Assuming the
  // section isn't 0x1000-aligned, we therefore need to delegate this decision
  // to the linker -- a relocation!
  if (Kind == AArch64::fixup_aarch64_pcrel_adrp_imm21)
    return true;
  return false;
}

namespace {

namespace CU {

/// Compact unwind encoding values.
enum CompactUnwindEncodings {
  /// A "frameless" leaf function, where no non-volatile registers are
  /// saved. The return address remains in LR throughout the function.
  UNWIND_ARM64_MODE_FRAMELESS = 0x02000000,

  /// No compact unwind encoding available. Instead the low 23 bits of
  /// the compact unwind encoding are the offset of the DWARF FDE in the
  /// __eh_frame section. This mode is never used in object files. It is only
  /// generated by the linker in final linked images, which have only DWARF
  /// unwind info for a function.
  UNWIND_ARM64_MODE_DWARF = 0x03000000,

  /// This is a standard arm64 prologue where FP/LR are immediately
  /// pushed on the stack, then SP is copied to FP. If there are any
  /// non-volatile registers saved, they are copied into the stack frame in
  /// pairs in a contiguous range right below the saved FP/LR pair. Any subset
  /// of the five X pairs and four D pairs can be saved, but the memory layout
  /// must be in register number order.
  UNWIND_ARM64_MODE_FRAME = 0x04000000,

  /// Frame register pair encodings.
  UNWIND_ARM64_FRAME_X19_X20_PAIR = 0x00000001,
  UNWIND_ARM64_FRAME_X21_X22_PAIR = 0x00000002,
  UNWIND_ARM64_FRAME_X23_X24_PAIR = 0x00000004,
  UNWIND_ARM64_FRAME_X25_X26_PAIR = 0x00000008,
  UNWIND_ARM64_FRAME_X27_X28_PAIR = 0x00000010,
  UNWIND_ARM64_FRAME_D8_D9_PAIR = 0x00000100,
  UNWIND_ARM64_FRAME_D10_D11_PAIR = 0x00000200,
  UNWIND_ARM64_FRAME_D12_D13_PAIR = 0x00000400,
  UNWIND_ARM64_FRAME_D14_D15_PAIR = 0x00000800
};

} // end CU namespace
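
// A complete encoding is the bitwise OR of one mode with its fields; e.g. a
// frame-based function that also saves x19/x20 and d8/d9 would produce
// UNWIND_ARM64_MODE_FRAME | UNWIND_ARM64_FRAME_X19_X20_PAIR |
// UNWIND_ARM64_FRAME_D8_D9_PAIR = 0x04000101.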

// FIXME: This should be in a separate file.
class DarwinAArch64AsmBackend : public AArch64AsmBackend {
  const MCRegisterInfo &MRI;

  /// Encode compact unwind stack adjustment for frameless functions.
  /// See UNWIND_ARM64_FRAMELESS_STACK_SIZE_MASK in compact_unwind_encoding.h.
  /// The stack size always needs to be 16 byte aligned.
  uint32_t encodeStackAdjustment(uint32_t StackSize) const {
    return (StackSize / 16) << 12;
  }
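
  // For example, a 64-byte frameless frame encodes as (64 / 16) << 12 =
  // 0x4000, landing in the UNWIND_ARM64_FRAMELESS_STACK_SIZE_MASK field.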

public:
  DarwinAArch64AsmBackend(const Target &T, const Triple &TT,
                          const MCRegisterInfo &MRI)
      : AArch64AsmBackend(T, TT, /*IsLittleEndian*/ true), MRI(MRI) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    uint32_t CPUType = cantFail(MachO::getCPUType(TheTriple));
    uint32_t CPUSubType = cantFail(MachO::getCPUSubType(TheTriple));
    return createAArch64MachObjectWriter(CPUType, CPUSubType,
                                         TheTriple.isArch32Bit());
  }

  /// Generate the compact unwind encoding from the CFI directives.
  uint64_t generateCompactUnwindEncoding(const MCDwarfFrameInfo *FI,
                                         const MCContext *Ctxt) const override {
    ArrayRef<MCCFIInstruction> Instrs = FI->Instructions;
    if (Instrs.empty())
      return CU::UNWIND_ARM64_MODE_FRAMELESS;
    if (!isDarwinCanonicalPersonality(FI->Personality) &&
        !Ctxt->emitCompactUnwindNonCanonical())
      return CU::UNWIND_ARM64_MODE_DWARF;

    bool HasFP = false;
    uint64_t StackSize = 0;

    uint64_t CompactUnwindEncoding = 0;
    int64_t CurOffset = 0;
    for (size_t i = 0, e = Instrs.size(); i != e; ++i) {
      const MCCFIInstruction &Inst = Instrs[i];

      switch (Inst.getOperation()) {
      default:
        // Cannot handle this directive: bail out.
        return CU::UNWIND_ARM64_MODE_DWARF;
      case MCCFIInstruction::OpDefCfa: {
        // Defines a frame pointer.
        MCRegister XReg =
            getXRegFromWReg(*MRI.getLLVMRegNum(Inst.getRegister(), true));

        // Other CFA registers than FP are not supported by compact unwind.
        // Fallback on DWARF.
        // FIXME: When opt-remarks are supported in MC, add a remark to notify
        // the user.
        if (XReg != AArch64::FP)
          return CU::UNWIND_ARM64_MODE_DWARF;

        if (i + 2 >= e)
          return CU::UNWIND_ARM64_MODE_DWARF;

        const MCCFIInstruction &LRPush = Instrs[++i];
        if (LRPush.getOperation() != MCCFIInstruction::OpOffset)
          return CU::UNWIND_ARM64_MODE_DWARF;
        const MCCFIInstruction &FPPush = Instrs[++i];
        if (FPPush.getOperation() != MCCFIInstruction::OpOffset)
          return CU::UNWIND_ARM64_MODE_DWARF;

        if (FPPush.getOffset() + 8 != LRPush.getOffset())
          return CU::UNWIND_ARM64_MODE_DWARF;
        CurOffset = FPPush.getOffset();

        MCRegister LRReg = *MRI.getLLVMRegNum(LRPush.getRegister(), true);
        MCRegister FPReg = *MRI.getLLVMRegNum(FPPush.getRegister(), true);

        LRReg = getXRegFromWReg(LRReg);
        FPReg = getXRegFromWReg(FPReg);

        if (LRReg != AArch64::LR || FPReg != AArch64::FP)
          return CU::UNWIND_ARM64_MODE_DWARF;

        // Indicate that the function has a frame.
        CompactUnwindEncoding |= CU::UNWIND_ARM64_MODE_FRAME;
        HasFP = true;
        break;
      }
      case MCCFIInstruction::OpDefCfaOffset: {
        if (StackSize != 0)
          return CU::UNWIND_ARM64_MODE_DWARF;
        StackSize = std::abs(Inst.getOffset());
        break;
      }
      case MCCFIInstruction::OpOffset: {
        // Registers are saved in pairs. We expect there to be two consecutive
        // `.cfi_offset' instructions with the appropriate registers specified.
        MCRegister Reg1 = *MRI.getLLVMRegNum(Inst.getRegister(), true);
        if (i + 1 == e)
          return CU::UNWIND_ARM64_MODE_DWARF;

        if (CurOffset != 0 && Inst.getOffset() != CurOffset - 8)
          return CU::UNWIND_ARM64_MODE_DWARF;
        CurOffset = Inst.getOffset();

        const MCCFIInstruction &Inst2 = Instrs[++i];
        if (Inst2.getOperation() != MCCFIInstruction::OpOffset)
          return CU::UNWIND_ARM64_MODE_DWARF;
        MCRegister Reg2 = *MRI.getLLVMRegNum(Inst2.getRegister(), true);

        if (Inst2.getOffset() != CurOffset - 8)
          return CU::UNWIND_ARM64_MODE_DWARF;
        CurOffset = Inst2.getOffset();

        // N.B. The encodings must be in register number order, and the X
        // registers before the D registers.

        // X19/X20 pair = 0x00000001,
        // X21/X22 pair = 0x00000002,
        // X23/X24 pair = 0x00000004,
        // X25/X26 pair = 0x00000008,
        // X27/X28 pair = 0x00000010
        Reg1 = getXRegFromWReg(Reg1);
        Reg2 = getXRegFromWReg(Reg2);

        if (Reg1 == AArch64::X19 && Reg2 == AArch64::X20 &&
            (CompactUnwindEncoding & 0xF1E) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X19_X20_PAIR;
        else if (Reg1 == AArch64::X21 && Reg2 == AArch64::X22 &&
                 (CompactUnwindEncoding & 0xF1C) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X21_X22_PAIR;
        else if (Reg1 == AArch64::X23 && Reg2 == AArch64::X24 &&
                 (CompactUnwindEncoding & 0xF18) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X23_X24_PAIR;
        else if (Reg1 == AArch64::X25 && Reg2 == AArch64::X26 &&
                 (CompactUnwindEncoding & 0xF10) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X25_X26_PAIR;
        else if (Reg1 == AArch64::X27 && Reg2 == AArch64::X28 &&
                 (CompactUnwindEncoding & 0xF00) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X27_X28_PAIR;
        else {
          Reg1 = getDRegFromBReg(Reg1);
          Reg2 = getDRegFromBReg(Reg2);

          // D8/D9 pair   = 0x00000100,
          // D10/D11 pair = 0x00000200,
          // D12/D13 pair = 0x00000400,
          // D14/D15 pair = 0x00000800
          if (Reg1 == AArch64::D8 && Reg2 == AArch64::D9 &&
              (CompactUnwindEncoding & 0xE00) == 0)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D8_D9_PAIR;
          else if (Reg1 == AArch64::D10 && Reg2 == AArch64::D11 &&
                   (CompactUnwindEncoding & 0xC00) == 0)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D10_D11_PAIR;
          else if (Reg1 == AArch64::D12 && Reg2 == AArch64::D13 &&
                   (CompactUnwindEncoding & 0x800) == 0)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D12_D13_PAIR;
          else if (Reg1 == AArch64::D14 && Reg2 == AArch64::D15)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D14_D15_PAIR;
          else
            // A pair was pushed which we cannot handle.
            return CU::UNWIND_ARM64_MODE_DWARF;
        }

        break;
      }
      }
    }

    if (!HasFP) {
      // With compact unwind info we can only represent stack adjustments of up
      // to 65520 bytes.
      if (StackSize > 65520)
        return CU::UNWIND_ARM64_MODE_DWARF;

      CompactUnwindEncoding |= CU::UNWIND_ARM64_MODE_FRAMELESS;
      CompactUnwindEncoding |= encodeStackAdjustment(StackSize);
    }

    return CompactUnwindEncoding;
  }
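
  // For reference, a canonical frame prologue emits CFI along the lines of
  //   .cfi_def_cfa w29, 16
  //   .cfi_offset w30, -8
  //   .cfi_offset w29, -16
  // which the OpDefCfa case above folds into UNWIND_ARM64_MODE_FRAME.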
};

} // end anonymous namespace

namespace {

class ELFAArch64AsmBackend : public AArch64AsmBackend {
public:
  uint8_t OSABI;
  bool IsILP32;

  ELFAArch64AsmBackend(const Target &T, const Triple &TT, uint8_t OSABI,
                       bool IsLittleEndian, bool IsILP32)
      : AArch64AsmBackend(T, TT, IsLittleEndian), OSABI(OSABI),
        IsILP32(IsILP32) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createAArch64ELFObjectWriter(OSABI, IsILP32);
  }
};

} // end anonymous namespace

namespace {

class COFFAArch64AsmBackend : public AArch64AsmBackend {
public:
  COFFAArch64AsmBackend(const Target &T, const Triple &TheTriple)
      : AArch64AsmBackend(T, TheTriple, /*IsLittleEndian*/ true) {}

  std::unique_ptr<MCObjectTargetWriter>
  createObjectTargetWriter() const override {
    return createAArch64WinCOFFObjectWriter(TheTriple);
  }
};

} // end anonymous namespace

MCAsmBackend *llvm::createAArch64leAsmBackend(const Target &T,
                                              const MCSubtargetInfo &STI,
                                              const MCRegisterInfo &MRI,
                                              const MCTargetOptions &Options) {
  const Triple &TheTriple = STI.getTargetTriple();
  if (TheTriple.isOSBinFormatMachO()) {
    return new DarwinAArch64AsmBackend(T, TheTriple, MRI);
  }

  if (TheTriple.isOSBinFormatCOFF())
    return new COFFAArch64AsmBackend(T, TheTriple);

  assert(TheTriple.isOSBinFormatELF() && "Invalid target");

  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
  bool IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
  return new ELFAArch64AsmBackend(T, TheTriple, OSABI, /*IsLittleEndian=*/true,
                                  IsILP32);
}

MCAsmBackend *llvm::createAArch64beAsmBackend(const Target &T,
                                              const MCSubtargetInfo &STI,
                                              const MCRegisterInfo &MRI,
                                              const MCTargetOptions &Options) {
  const Triple &TheTriple = STI.getTargetTriple();
  assert(TheTriple.isOSBinFormatELF() &&
         "Big endian is only supported for ELF targets!");
  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
  bool IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
  return new ELFAArch64AsmBackend(T, TheTriple, OSABI, /*IsLittleEndian=*/false,
                                  IsILP32);
}