//===-- ARMAsmBackend.cpp - ARM Assembler Backend -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/ARMAsmBackend.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMAsmBackendDarwin.h"
#include "MCTargetDesc/ARMAsmBackendELF.h"
#include "MCTargetDesc/ARMAsmBackendWinCOFF.h"
#include "MCTargetDesc/ARMFixupKinds.h"
#include "MCTargetDesc/ARMMCTargetDesc.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDirectives.h"
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

namespace {
class ARMELFObjectWriter : public MCELFObjectTargetWriter {
public:
  ARMELFObjectWriter(uint8_t OSABI)
      : MCELFObjectTargetWriter(/*Is64Bit*/ false, OSABI, ELF::EM_ARM,
                                /*HasRelocationAddend*/ false) {}
};
} // end anonymous namespace

std::optional<MCFixupKind> ARMAsmBackend::getFixupKind(StringRef Name) const {
  return std::nullopt;
}

std::optional<MCFixupKind>
ARMAsmBackendELF::getFixupKind(StringRef Name) const {
  unsigned Type = llvm::StringSwitch<unsigned>(Name)
#define ELF_RELOC(X, Y) .Case(#X, Y)
#include "llvm/BinaryFormat/ELFRelocs/ARM.def"
#undef ELF_RELOC
                      .Case("BFD_RELOC_NONE", ELF::R_ARM_NONE)
                      .Case("BFD_RELOC_8", ELF::R_ARM_ABS8)
                      .Case("BFD_RELOC_16", ELF::R_ARM_ABS16)
                      .Case("BFD_RELOC_32", ELF::R_ARM_ABS32)
                      .Default(-1u);
  if (Type == -1u)
    return std::nullopt;
  return static_cast<MCFixupKind>(FirstLiteralRelocationKind + Type);
}

const MCFixupKindInfo &ARMAsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
  const static MCFixupKindInfo InfosLE[ARM::NumTargetFixupKinds] = {
      // This table *must* be in the order that the fixup_* kinds are defined in
      // ARMFixupKinds.h.
      //
      // Name                      Offset (bits) Size (bits)     Flags
      {"fixup_arm_ldst_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_ldst_pcrel_12", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_pcrel_10_unscaled", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_pcrel_10", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_pcrel_10", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_pcrel_9", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_pcrel_9", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_ldst_abs_12", 0, 32, 0},
      {"fixup_thumb_adr_pcrel_10", 0, 8,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_adr_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_adr_pcrel_12", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_condbranch", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_uncondbranch", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_condbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_uncondbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_br", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_uncondbl", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_condbl", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_blx", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_bl", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_blx", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_thumb_cb", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_cp", 0, 8,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_thumb_bcc", 0, 8, MCFixupKindInfo::FKF_IsPCRel},
      // movw / movt: 16-bits immediate but scattered into two chunks 0 - 12, 16
      // - 19.
      {"fixup_arm_movt_hi16", 0, 20, 0},
      {"fixup_arm_movw_lo16", 0, 20, 0},
      {"fixup_t2_movt_hi16", 0, 20, 0},
      {"fixup_t2_movw_lo16", 0, 20, 0},
      {"fixup_arm_thumb_upper_8_15", 0, 8, 0},
      {"fixup_arm_thumb_upper_0_7", 0, 8, 0},
      {"fixup_arm_thumb_lower_8_15", 0, 8, 0},
      {"fixup_arm_thumb_lower_0_7", 0, 8, 0},
      {"fixup_arm_mod_imm", 0, 12, 0},
      {"fixup_t2_so_imm", 0, 26, 0},
      {"fixup_bf_branch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_bf_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_bfl_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_bfc_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_bfcsel_else_target", 0, 32, 0},
      {"fixup_wls", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_le", 0, 32, MCFixupKindInfo::FKF_IsPCRel}};
  const static MCFixupKindInfo InfosBE[ARM::NumTargetFixupKinds] = {
      // This table *must* be in the order that the fixup_* kinds are defined in
      // ARMFixupKinds.h.
      //
      // Name                      Offset (bits) Size (bits)     Flags
      {"fixup_arm_ldst_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_ldst_pcrel_12", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_pcrel_10_unscaled", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_pcrel_10", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_pcrel_10", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_pcrel_9", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_pcrel_9", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_ldst_abs_12", 0, 32, 0},
      {"fixup_thumb_adr_pcrel_10", 8, 8,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_adr_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_adr_pcrel_12", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_condbranch", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_uncondbranch", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_condbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_uncondbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_br", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_uncondbl", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_condbl", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_blx", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_bl", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_blx", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_thumb_cb", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_cp", 8, 8,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_thumb_bcc", 8, 8, MCFixupKindInfo::FKF_IsPCRel},
      // movw / movt: 16-bits immediate but scattered into two chunks 0 - 12, 16
      // - 19.
      {"fixup_arm_movt_hi16", 12, 20, 0},
      {"fixup_arm_movw_lo16", 12, 20, 0},
      {"fixup_t2_movt_hi16", 12, 20, 0},
      {"fixup_t2_movw_lo16", 12, 20, 0},
      {"fixup_arm_thumb_upper_8_15", 24, 8, 0},
      {"fixup_arm_thumb_upper_0_7", 24, 8, 0},
      {"fixup_arm_thumb_lower_8_15", 24, 8, 0},
      {"fixup_arm_thumb_lower_0_7", 24, 8, 0},
      {"fixup_arm_mod_imm", 20, 12, 0},
      {"fixup_t2_so_imm", 26, 6, 0},
      {"fixup_bf_branch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_bf_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_bfl_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_bfc_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_bfcsel_else_target", 0, 32, 0},
      {"fixup_wls", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_le", 0, 32, MCFixupKindInfo::FKF_IsPCRel}};
  // Fixup kinds from .reloc directive are like R_ARM_NONE. They do not require
  // any extra processing.
  if (Kind >= FirstLiteralRelocationKind)
    return MCAsmBackend::getFixupKindInfo(FK_NONE);

  if (Kind < FirstTargetFixupKind)
    return MCAsmBackend::getFixupKindInfo(Kind);

  assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
         "Invalid kind!");
  return (Endian == llvm::endianness::little
              ? InfosLE
              : InfosBE)[Kind - FirstTargetFixupKind];
}

void ARMAsmBackend::handleAssemblerFlag(MCAssemblerFlag Flag) {
  switch (Flag) {
  default:
    break;
  case MCAF_Code16:
    setIsThumb(true);
    break;
  case MCAF_Code32:
    setIsThumb(false);
    break;
  }
}

unsigned ARMAsmBackend::getRelaxedOpcode(unsigned Op,
                                         const MCSubtargetInfo &STI) const {
  bool HasThumb2 = STI.hasFeature(ARM::FeatureThumb2);
  bool HasV8MBaselineOps = STI.hasFeature(ARM::HasV8MBaselineOps);

  switch (Op) {
  default:
    return Op;
  case ARM::tBcc:
    return HasThumb2 ? (unsigned)ARM::t2Bcc : Op;
  case ARM::tLDRpci:
    return HasThumb2 ? (unsigned)ARM::t2LDRpci : Op;
  case ARM::tADR:
    return HasThumb2 ? (unsigned)ARM::t2ADR : Op;
  case ARM::tB:
    return HasV8MBaselineOps ? (unsigned)ARM::t2B : Op;
  case ARM::tCBZ:
    return ARM::tHINT;
  case ARM::tCBNZ:
    return ARM::tHINT;
  }
}

bool ARMAsmBackend::mayNeedRelaxation(const MCInst &Inst,
                                      const MCSubtargetInfo &STI) const {
  if (getRelaxedOpcode(Inst.getOpcode(), STI) != Inst.getOpcode())
    return true;
  return false;
}

static const char *checkPCRelOffset(uint64_t Value, int64_t Min, int64_t Max) {
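  // The raw fixup value is measured from the start of the instruction, so
  // subtracting 4 gives the displacement relative to the PC the instruction
  // reads (the instruction address plus 4).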
  int64_t Offset = int64_t(Value) - 4;
  if (Offset < Min || Offset > Max)
    return "out of range pc-relative fixup value";
  return nullptr;
}

const char *ARMAsmBackend::reasonForFixupRelaxation(const MCFixup &Fixup,
                                                    uint64_t Value) const {
  switch (Fixup.getTargetKind()) {
  case ARM::fixup_arm_thumb_br: {
    // Relaxing tB to t2B. tB has a signed 12-bit displacement with the
    // low bit being an implied zero. There's an implied +4 offset for the
    // branch, so we adjust the other way here to determine what's
    // encodable.
    //
    // Relax if the value is too big for the signed 12-bit displacement.
    int64_t Offset = int64_t(Value) - 4;
    if (Offset > 2046 || Offset < -2048)
      return "out of range pc-relative fixup value";
    break;
  }
  case ARM::fixup_arm_thumb_bcc: {
    // Relaxing tBcc to t2Bcc. tBcc has a signed 9-bit displacement with the
    // low bit being an implied zero. There's an implied +4 offset for the
    // branch, so we adjust the other way here to determine what's
    // encodable.
    //
    // Relax if the value is too big for a (signed) i8.
    int64_t Offset = int64_t(Value) - 4;
    if (Offset > 254 || Offset < -256)
      return "out of range pc-relative fixup value";
    break;
  }
  case ARM::fixup_thumb_adr_pcrel_10:
  case ARM::fixup_arm_thumb_cp: {
    // If the immediate is negative, greater than 1020, or not a multiple
    // of four, the wide version of the instruction must be used.
    int64_t Offset = int64_t(Value) - 4;
    if (Offset & 3)
      return "misaligned pc-relative fixup value";
    else if (Offset > 1020 || Offset < 0)
      return "out of range pc-relative fixup value";
    break;
  }
  case ARM::fixup_arm_thumb_cb: {
    // If we have a Thumb CBZ or CBNZ instruction and its target is the next
    // instruction it is actually out of range for the instruction.
    // It will be changed to a NOP.
    int64_t Offset = (Value & ~1);
    if (Offset == 2)
      return "will be converted to nop";
    break;
  }
  case ARM::fixup_bf_branch:
    return checkPCRelOffset(Value, 0, 30);
  case ARM::fixup_bf_target:
    return checkPCRelOffset(Value, -0x10000, +0xfffe);
  case ARM::fixup_bfl_target:
    return checkPCRelOffset(Value, -0x40000, +0x3fffe);
  case ARM::fixup_bfc_target:
    return checkPCRelOffset(Value, -0x1000, +0xffe);
  case ARM::fixup_wls:
    return checkPCRelOffset(Value, 0, +0xffe);
  case ARM::fixup_le:
    // The offset field in the LE and LETP instructions is an 11-bit
    // value shifted left by 1 (i.e. 0,2,4,...,4094), and it is
    // interpreted as a negative offset from the value read from pc,
    // i.e. from instruction_address+4.
    //
    // So an LE instruction can in principle address the instruction
    // immediately after itself, or (not very usefully) the address
    // half way through the 4-byte LE.
    return checkPCRelOffset(Value, -0xffe, 0);
  case ARM::fixup_bfcsel_else_target: {
    if (Value != 2 && Value != 4)
      return "out of range label-relative fixup value";
    break;
  }

  default:
    llvm_unreachable("Unexpected fixup kind in reasonForFixupRelaxation()!");
  }
  return nullptr;
}

bool ARMAsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup,
                                         uint64_t Value) const {
  return reasonForFixupRelaxation(Fixup, Value);
}

void ARMAsmBackend::relaxInstruction(MCInst &Inst,
                                     const MCSubtargetInfo &STI) const {
  unsigned RelaxedOp = getRelaxedOpcode(Inst.getOpcode(), STI);

  // Return a diagnostic if we get here w/ a bogus instruction.
  if (RelaxedOp == Inst.getOpcode()) {
    SmallString<256> Tmp;
    raw_svector_ostream OS(Tmp);
    Inst.dump_pretty(OS);
    OS << "\n";
    report_fatal_error("unexpected instruction to relax: " + OS.str());
  }

  // If we are changing Thumb CBZ or CBNZ instruction to a NOP, aka tHINT, we
  // have to change the operands too.
  if ((Inst.getOpcode() == ARM::tCBZ || Inst.getOpcode() == ARM::tCBNZ) &&
      RelaxedOp == ARM::tHINT) {
    MCInst Res;
    Res.setOpcode(RelaxedOp);
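    // tHINT #0 is a NOP; the remaining operands are the standard
    // always-execute predicate (condition code AL, encoded as 14, plus a null
    // condition register).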
    Res.addOperand(MCOperand::createImm(0));
    Res.addOperand(MCOperand::createImm(14));
    Res.addOperand(MCOperand::createReg(0));
    Inst = std::move(Res);
    return;
  }

  // The rest of instructions we're relaxing have the same operands.
  // We just need to update to the proper opcode.
  Inst.setOpcode(RelaxedOp);
}

bool ARMAsmBackend::writeNopData(raw_ostream &OS, uint64_t Count,
                                 const MCSubtargetInfo *STI) const {
  const uint16_t Thumb1_16bitNopEncoding = 0x46c0; // using MOV r8,r8
  const uint16_t Thumb2_16bitNopEncoding = 0xbf00; // NOP
  const uint32_t ARMv4_NopEncoding = 0xe1a00000;   // using MOV r0,r0
  const uint32_t ARMv6T2_NopEncoding = 0xe320f000; // NOP
  if (isThumb()) {
    const uint16_t nopEncoding =
        hasNOP(STI) ? Thumb2_16bitNopEncoding : Thumb1_16bitNopEncoding;
    uint64_t NumNops = Count / 2;
    for (uint64_t i = 0; i != NumNops; ++i)
      support::endian::write(OS, nopEncoding, Endian);
    if (Count & 1)
      OS << '\0';
    return true;
  }
  // ARM mode
  const uint32_t nopEncoding =
      hasNOP(STI) ? ARMv6T2_NopEncoding : ARMv4_NopEncoding;
  uint64_t NumNops = Count / 4;
  for (uint64_t i = 0; i != NumNops; ++i)
    support::endian::write(OS, nopEncoding, Endian);
  // FIXME: should this function return false when unable to write exactly
  // 'Count' bytes with NOP encodings?
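  // For example, Count == 7 in ARM mode emits one 4-byte NOP and then the
  // three padding bytes from 'case 3' below.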
  switch (Count % 4) {
  default:
    break; // No leftover bytes to write
  case 1:
    OS << '\0';
    break;
  case 2:
    OS.write("\0\0", 2);
    break;
  case 3:
    OS.write("\0\0\xa0", 3);
    break;
  }

  return true;
}

static uint32_t swapHalfWords(uint32_t Value, bool IsLittleEndian) {
  if (IsLittleEndian) {
    // Note that the halfwords are stored high first and low second in thumb;
    // so we need to swap the fixup value here to map properly.
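    // For example, with IsLittleEndian == true, 0x11223344 becomes 0x33441122.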
    uint32_t Swapped = (Value & 0xFFFF0000) >> 16;
    Swapped |= (Value & 0x0000FFFF) << 16;
    return Swapped;
  } else
    return Value;
}

static uint32_t joinHalfWords(uint32_t FirstHalf, uint32_t SecondHalf,
                              bool IsLittleEndian) {
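  // Combine the two 16-bit instruction halves into the 32-bit value that gets
  // ORed into the encoding. For little-endian output the first halfword ends
  // up in the low 16 bits, e.g. joinHalfWords(0x1122, 0x3344, true) yields
  // 0x33441122.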
  uint32_t Value;

  if (IsLittleEndian) {
    Value = (SecondHalf & 0xFFFF) << 16;
    Value |= (FirstHalf & 0xFFFF);
  } else {
    Value = (SecondHalf & 0xFFFF);
    Value |= (FirstHalf & 0xFFFF) << 16;
  }

  return Value;
}

unsigned ARMAsmBackend::adjustFixupValue(const MCAssembler &Asm,
                                         const MCFixup &Fixup,
                                         const MCValue &Target, uint64_t Value,
                                         bool IsResolved, MCContext &Ctx,
                                         const MCSubtargetInfo* STI) const {
  unsigned Kind = Fixup.getKind();
  int64_t Addend = Target.getConstant();

  // For MOVW/MOVT Instructions, the fixup value must already be within a
  // signed 16bit range.
  if ((Kind == ARM::fixup_arm_movw_lo16 || Kind == ARM::fixup_arm_movt_hi16 ||
       Kind == ARM::fixup_t2_movw_lo16 || Kind == ARM::fixup_t2_movt_hi16) &&
      (Addend < minIntN(16) || Addend > maxIntN(16))) {
    Ctx.reportError(Fixup.getLoc(), "Relocation Not In Range");
    return 0;
  }

  // MachO tries to make .o files that look vaguely pre-linked, so for MOVW/MOVT
  // and .word relocations they put the Thumb bit into the addend if possible.
  // Other relocation types don't want this bit though (branches couldn't encode
  // it if it *was* present, and no other relocations exist) and it can
  // interfere with checking valid expressions.
  if (const MCSymbolRefExpr *A = Target.getSymA()) {
    if (A->hasSubsectionsViaSymbols() && Asm.isThumbFunc(&A->getSymbol()) &&
        A->getSymbol().isExternal() &&
        (Kind == FK_Data_4 || Kind == ARM::fixup_arm_movw_lo16 ||
         Kind == ARM::fixup_arm_movt_hi16 || Kind == ARM::fixup_t2_movw_lo16 ||
         Kind == ARM::fixup_t2_movt_hi16))
      Value |= 1;
  }

  switch (Kind) {
  default:
    return 0;
  case FK_Data_1:
  case FK_Data_2:
  case FK_Data_4:
    return Value;
  case FK_SecRel_2:
    return Value;
  case FK_SecRel_4:
    return Value;
  case ARM::fixup_arm_movt_hi16:
    assert(STI != nullptr);
    if (IsResolved || !STI->getTargetTriple().isOSBinFormatELF())
      Value >>= 16;
    [[fallthrough]];
  case ARM::fixup_arm_movw_lo16: {
    unsigned Hi4 = (Value & 0xF000) >> 12;
    unsigned Lo12 = Value & 0x0FFF;
    // inst{19-16} = Hi4;
    // inst{11-0} = Lo12;
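    // e.g. Value == 0x1234 gives Hi4 == 0x1 and Lo12 == 0x234, i.e. 0x10234.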
    Value = (Hi4 << 16) | (Lo12);
    return Value;
  }
  case ARM::fixup_t2_movt_hi16:
    assert(STI != nullptr);
    if (IsResolved || !STI->getTargetTriple().isOSBinFormatELF())
      Value >>= 16;
    [[fallthrough]];
  case ARM::fixup_t2_movw_lo16: {
    unsigned Hi4 = (Value & 0xF000) >> 12;
    unsigned i = (Value & 0x800) >> 11;
    unsigned Mid3 = (Value & 0x700) >> 8;
    unsigned Lo8 = Value & 0x0FF;
    // inst{19-16} = Hi4;
    // inst{26} = i;
    // inst{14-12} = Mid3;
    // inst{7-0} = Lo8;
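    // e.g. Value == 0x1234 gives Hi4 == 0x1, i == 0, Mid3 == 0x2 and
    // Lo8 == 0x34, i.e. 0x12034 before the halfwords are swapped.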
    Value = (Hi4 << 16) | (i << 26) | (Mid3 << 12) | (Lo8);
    return swapHalfWords(Value, Endian == llvm::endianness::little);
  }
  case ARM::fixup_arm_thumb_upper_8_15:
    if (IsResolved || !STI->getTargetTriple().isOSBinFormatELF())
      return (Value & 0xff000000) >> 24;
    return Value & 0xff;
  case ARM::fixup_arm_thumb_upper_0_7:
    if (IsResolved || !STI->getTargetTriple().isOSBinFormatELF())
      return (Value & 0x00ff0000) >> 16;
    return Value & 0xff;
  case ARM::fixup_arm_thumb_lower_8_15:
    if (IsResolved || !STI->getTargetTriple().isOSBinFormatELF())
      return (Value & 0x0000ff00) >> 8;
    return Value & 0xff;
  case ARM::fixup_arm_thumb_lower_0_7:
    return Value & 0x000000ff;
  case ARM::fixup_arm_ldst_pcrel_12:
    // ARM PC-relative values are offset by 8.
    Value -= 4;
    [[fallthrough]];
  case ARM::fixup_t2_ldst_pcrel_12:
    // Offset by 4, adjusted by two due to the half-word ordering of thumb.
    Value -= 4;
    [[fallthrough]];
  case ARM::fixup_arm_ldst_abs_12: {
    bool isAdd = true;
    if ((int64_t)Value < 0) {
      Value = -Value;
      isAdd = false;
    }
    if (Value >= 4096) {
      Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
      return 0;
    }
    Value |= isAdd << 23;

    // Same addressing mode as fixup_arm_pcrel_10,
    // but with 16-bit halfwords swapped.
    if (Kind == ARM::fixup_t2_ldst_pcrel_12)
      return swapHalfWords(Value, Endian == llvm::endianness::little);

    return Value;
  }
  case ARM::fixup_arm_adr_pcrel_12: {
    // ARM PC-relative values are offset by 8.
    Value -= 8;
    unsigned opc = 4; // bits {24-21}. Default to add: 0b0100
    if ((int64_t)Value < 0) {
      Value = -Value;
      opc = 2; // 0b0010
    }
    if (ARM_AM::getSOImmVal(Value) == -1) {
      Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
      return 0;
    }
    // Encode the immediate and shift the opcode into place.
    return ARM_AM::getSOImmVal(Value) | (opc << 21);
  }

  case ARM::fixup_t2_adr_pcrel_12: {
    Value -= 4;
    unsigned opc = 0;
    if ((int64_t)Value < 0) {
      Value = -Value;
      opc = 5;
    }

    uint32_t out = (opc << 21);
    out |= (Value & 0x800) << 15;
    out |= (Value & 0x700) << 4;
    out |= (Value & 0x0FF);

    return swapHalfWords(out, Endian == llvm::endianness::little);
  }

  case ARM::fixup_arm_condbranch:
  case ARM::fixup_arm_uncondbranch:
  case ARM::fixup_arm_uncondbl:
  case ARM::fixup_arm_condbl:
  case ARM::fixup_arm_blx:
    // Check that the relocation value is legal.
    Value -= 8;
    if (!isInt<26>(Value)) {
      Ctx.reportError(Fixup.getLoc(), "Relocation out of range");
      return 0;
    }
    // Alignment differs for blx. Because we are switching to thumb ISA, we use
    // 16-bit alignment. Otherwise, use 32-bit.
    if ((Kind == ARM::fixup_arm_blx && Value % 2 != 0) ||
        (Kind != ARM::fixup_arm_blx && Value % 4 != 0)) {
      Ctx.reportError(Fixup.getLoc(), "Relocation not aligned");
      return 0;
    }

    // These values don't encode the low two bits since they're always zero.
    // Offset by 8 just as above.
    if (const MCSymbolRefExpr *SRE =
            dyn_cast<MCSymbolRefExpr>(Fixup.getValue()))
      if (SRE->getKind() == MCSymbolRefExpr::VK_TLSCALL)
        return 0;
    return 0xffffff & (Value >> 2);
  case ARM::fixup_t2_uncondbranch: {
    if (STI->getTargetTriple().isOSBinFormatCOFF() && !IsResolved &&
        Value != 4) {
      // MSVC link.exe and lld do not support this relocation type
      // with a non-zero offset. ("Value" is offset by 4 at this point.)
      Ctx.reportError(Fixup.getLoc(),
                      "cannot perform a PC-relative fixup with a non-zero "
                      "symbol offset");
    }
    Value = Value - 4;
    if (!isInt<25>(Value)) {
      Ctx.reportError(Fixup.getLoc(), "Relocation out of range");
      return 0;
    }

    Value >>= 1; // Low bit is not encoded.

    uint32_t out = 0;
    bool I = Value & 0x800000;
    bool J1 = Value & 0x400000;
    bool J2 = Value & 0x200000;
    J1 ^= I;
    J2 ^= I;

    out |= I << 26;                 // S bit
    out |= !J1 << 13;               // J1 bit
    out |= !J2 << 11;               // J2 bit
    out |= (Value & 0x1FF800) << 5; // imm10 field
    out |= (Value & 0x0007FF);      // imm11 field

    return swapHalfWords(out, Endian == llvm::endianness::little);
  }
  case ARM::fixup_t2_condbranch: {
    Value = Value - 4;
    if (!isInt<21>(Value)) {
      Ctx.reportError(Fixup.getLoc(), "Relocation out of range");
      return 0;
    }

    Value >>= 1; // Low bit is not encoded.

    uint64_t out = 0;
    out |= (Value & 0x80000) << 7; // S bit
    out |= (Value & 0x40000) >> 7; // J2 bit
    out |= (Value & 0x20000) >> 4; // J1 bit
    out |= (Value & 0x1F800) << 5; // imm6 field
    out |= (Value & 0x007FF);      // imm11 field

    return swapHalfWords(out, Endian == llvm::endianness::little);
  }
  case ARM::fixup_arm_thumb_bl: {
    if (!isInt<25>(Value - 4) ||
        (!STI->hasFeature(ARM::FeatureThumb2) &&
         !STI->hasFeature(ARM::HasV8MBaselineOps) &&
         !STI->hasFeature(ARM::HasV6MOps) &&
         !isInt<23>(Value - 4))) {
      Ctx.reportError(Fixup.getLoc(), "Relocation out of range");
      return 0;
    }
    if (STI->getTargetTriple().isOSBinFormatCOFF() && !IsResolved &&
        Value != 4) {
      // MSVC link.exe and lld do not support this relocation type
      // with a non-zero offset. ("Value" is offset by 4 at this point.)
      Ctx.reportError(Fixup.getLoc(),
                      "cannot perform a PC-relative fixup with a non-zero "
                      "symbol offset");
    }

    // The value doesn't encode the low bit (always zero) and is offset by
    // four. The 32-bit immediate value is encoded as
    //   imm32 = SignExtend(S:I1:I2:imm10:imm11:0)
    // where I1 = NOT(J1 ^ S) and I2 = NOT(J2 ^ S).
    // The value is encoded into disjoint bit positions in the destination
    // opcode. x = unchanged, I = immediate value bit, S = sign extension bit,
    // J = either J1 or J2 bit
    //
    //   BL:  xxxxxSIIIIIIIIII xxJxJIIIIIIIIIII
    //
    // Note that the halfwords are stored high first, low second; so we need
    // to transpose the fixup value here to map properly.
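    // For a short forward branch the sign and I bits of the offset are all
    // zero, so both J1 and J2 end up set in the second halfword.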
    uint32_t offset = (Value - 4) >> 1;
    uint32_t signBit = (offset & 0x800000) >> 23;
    uint32_t I1Bit = (offset & 0x400000) >> 22;
    uint32_t J1Bit = (I1Bit ^ 0x1) ^ signBit;
    uint32_t I2Bit = (offset & 0x200000) >> 21;
    uint32_t J2Bit = (I2Bit ^ 0x1) ^ signBit;
    uint32_t imm10Bits = (offset & 0x1FF800) >> 11;
    uint32_t imm11Bits = (offset & 0x000007FF);

    uint32_t FirstHalf = (((uint16_t)signBit << 10) | (uint16_t)imm10Bits);
    uint32_t SecondHalf = (((uint16_t)J1Bit << 13) | ((uint16_t)J2Bit << 11) |
                           (uint16_t)imm11Bits);
    return joinHalfWords(FirstHalf, SecondHalf,
                         Endian == llvm::endianness::little);
  }
  case ARM::fixup_arm_thumb_blx: {
    if (STI->getTargetTriple().isOSBinFormatCOFF() && !IsResolved &&
        Value != 4) {
      // MSVC link.exe and lld do not support this relocation type
      // with a non-zero offset. ("Value" is offset by 4 at this point.)
      Ctx.reportError(Fixup.getLoc(),
                      "cannot perform a PC-relative fixup with a non-zero "
                      "symbol offset");
    }
    // The value doesn't encode the low two bits (always zero) and is offset by
    // four (see fixup_arm_thumb_cp). The 32-bit immediate value is encoded as
    //   imm32 = SignExtend(S:I1:I2:imm10H:imm10L:00)
    // where I1 = NOT(J1 ^ S) and I2 = NOT(J2 ^ S).
    // The value is encoded into disjoint bit positions in the destination
    // opcode. x = unchanged, I = immediate value bit, S = sign extension bit,
    // J = either J1 or J2 bit, 0 = zero.
    //
    //   BLX: xxxxxSIIIIIIIIII xxJxJIIIIIIIIII0
    //
    // Note that the halfwords are stored high first, low second; so we need
    // to transpose the fixup value here to map properly.
    if (Value % 4 != 0) {
      Ctx.reportError(Fixup.getLoc(), "misaligned ARM call destination");
      return 0;
    }

    uint32_t offset = (Value - 4) >> 2;
    if (const MCSymbolRefExpr *SRE =
            dyn_cast<MCSymbolRefExpr>(Fixup.getValue()))
      if (SRE->getKind() == MCSymbolRefExpr::VK_TLSCALL)
        offset = 0;
    uint32_t signBit = (offset & 0x400000) >> 22;
    uint32_t I1Bit = (offset & 0x200000) >> 21;
    uint32_t J1Bit = (I1Bit ^ 0x1) ^ signBit;
    uint32_t I2Bit = (offset & 0x100000) >> 20;
    uint32_t J2Bit = (I2Bit ^ 0x1) ^ signBit;
    uint32_t imm10HBits = (offset & 0xFFC00) >> 10;
    uint32_t imm10LBits = (offset & 0x3FF);

    uint32_t FirstHalf = (((uint16_t)signBit << 10) | (uint16_t)imm10HBits);
    uint32_t SecondHalf = (((uint16_t)J1Bit << 13) | ((uint16_t)J2Bit << 11) |
                           ((uint16_t)imm10LBits) << 1);
    return joinHalfWords(FirstHalf, SecondHalf,
                         Endian == llvm::endianness::little);
  }
  case ARM::fixup_thumb_adr_pcrel_10:
  case ARM::fixup_arm_thumb_cp:
    // On CPUs supporting Thumb2, this will be relaxed to an ldr.w, otherwise we
    // could have an error on our hands.
    assert(STI != nullptr);
    if (!STI->hasFeature(ARM::FeatureThumb2) && IsResolved) {
      const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
      if (FixupDiagnostic) {
        Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
        return 0;
      }
    }
    // Offset by 4, and don't encode the low two bits.
    return ((Value - 4) >> 2) & 0xff;
  case ARM::fixup_arm_thumb_cb: {
    // CB instructions can only branch to offsets in [4, 126] in multiples of 2
    // so ensure that the raw value LSB is zero and it lies in [2, 130].
    // An offset of 2 will be relaxed to a NOP.
    if ((int64_t)Value < 2 || Value > 0x82 || Value & 1) {
      Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
      return 0;
    }
    // Offset by 4 and don't encode the lower bit, which is always 0.
    // FIXME: diagnose if no Thumb2
    uint32_t Binary = (Value - 4) >> 1;
    return ((Binary & 0x20) << 4) | ((Binary & 0x1f) << 3);
  }
  case ARM::fixup_arm_thumb_br:
    // Offset by 4 and don't encode the lower bit, which is always 0.
    assert(STI != nullptr);
    if (!STI->hasFeature(ARM::FeatureThumb2) &&
        !STI->hasFeature(ARM::HasV8MBaselineOps)) {
      const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
      if (FixupDiagnostic) {
        Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
        return 0;
      }
    }
    return ((Value - 4) >> 1) & 0x7ff;
  case ARM::fixup_arm_thumb_bcc:
    // Offset by 4 and don't encode the lower bit, which is always 0.
    assert(STI != nullptr);
    if (!STI->hasFeature(ARM::FeatureThumb2)) {
      const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
      if (FixupDiagnostic) {
        Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
        return 0;
      }
    }
    return ((Value - 4) >> 1) & 0xff;
  case ARM::fixup_arm_pcrel_10_unscaled: {
    Value = Value - 8; // ARM fixups offset by an additional word and don't
                       // need to adjust for the half-word ordering.
    bool isAdd = true;
    if ((int64_t)Value < 0) {
      Value = -Value;
      isAdd = false;
    }
    // The value has the low 4 bits encoded in [3:0] and the high 4 in [11:8].
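    // e.g. Value == 0xab is encoded as 0xa0b before the add/subtract bit is
    // ORed in.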
    if (Value >= 256) {
      Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
      return 0;
    }
    Value = (Value & 0xf) | ((Value & 0xf0) << 4);
    return Value | (isAdd << 23);
  }
  case ARM::fixup_arm_pcrel_10:
    Value = Value - 4; // ARM fixups offset by an additional word and don't
                       // need to adjust for the half-word ordering.
    [[fallthrough]];
  case ARM::fixup_t2_pcrel_10: {
    // Offset by 4, adjusted by two due to the half-word ordering of thumb.
    Value = Value - 4;
    bool isAdd = true;
    if ((int64_t)Value < 0) {
      Value = -Value;
      isAdd = false;
    }
    // These values don't encode the low two bits since they're always zero.
    Value >>= 2;
    if (Value >= 256) {
      Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
      return 0;
    }
    Value |= isAdd << 23;

    // Same addressing mode as fixup_arm_pcrel_10, but with 16-bit halfwords
    // swapped.
    if (Kind == ARM::fixup_t2_pcrel_10)
      return swapHalfWords(Value, Endian == llvm::endianness::little);

    return Value;
  }
  case ARM::fixup_arm_pcrel_9:
    Value = Value - 4; // ARM fixups offset by an additional word and don't
                       // need to adjust for the half-word ordering.
    [[fallthrough]];
  case ARM::fixup_t2_pcrel_9: {
    // Offset by 4, adjusted by two due to the half-word ordering of thumb.
    Value = Value - 4;
    bool isAdd = true;
    if ((int64_t)Value < 0) {
      Value = -Value;
      isAdd = false;
    }
    // These values don't encode the low bit since it's always zero.
    if (Value & 1) {
      Ctx.reportError(Fixup.getLoc(), "invalid value for this fixup");
      return 0;
    }
    Value >>= 1;
    if (Value >= 256) {
      Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
      return 0;
    }
    Value |= isAdd << 23;

    // Same addressing mode as fixup_arm_pcrel_9, but with 16-bit halfwords
    // swapped.
    if (Kind == ARM::fixup_t2_pcrel_9)
      return swapHalfWords(Value, Endian == llvm::endianness::little);

    return Value;
  }
  case ARM::fixup_arm_mod_imm:
    Value = ARM_AM::getSOImmVal(Value);
    if (Value >> 12) {
      Ctx.reportError(Fixup.getLoc(), "out of range immediate fixup value");
      return 0;
    }
    return Value;
  case ARM::fixup_t2_so_imm: {
    Value = ARM_AM::getT2SOImmVal(Value);
    if ((int64_t)Value < 0) {
      Ctx.reportError(Fixup.getLoc(), "out of range immediate fixup value");
      return 0;
    }
    // Value will contain a 12-bit value broken up into a 4-bit shift in bits
    // 11:8 and the 8-bit immediate in 0:7. The instruction has the immediate
    // in 0:7. The 4-bit shift is split up into i:imm3 where i is placed at bit
    // 10 of the upper half-word and imm3 is placed at 14:12 of the lower
    // half-word.
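    // e.g. an encoded value of 0x1ab is split into i == 0, imm3 == 0b001 and
    // imm8 == 0xab, giving 0x10ab before the halfword swap.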
    uint64_t EncValue = 0;
    EncValue |= (Value & 0x800) << 15;
    EncValue |= (Value & 0x700) << 4;
    EncValue |= (Value & 0xff);
    return swapHalfWords(EncValue, Endian == llvm::endianness::little);
  }
  case ARM::fixup_bf_branch: {
    const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
    if (FixupDiagnostic) {
      Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
      return 0;
    }
    uint32_t out = (((Value - 4) >> 1) & 0xf) << 23;
    return swapHalfWords(out, Endian == llvm::endianness::little);
  }
  case ARM::fixup_bf_target:
  case ARM::fixup_bfl_target:
  case ARM::fixup_bfc_target: {
    const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
    if (FixupDiagnostic) {
      Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
      return 0;
    }
    uint32_t out = 0;
    uint32_t HighBitMask = (Kind == ARM::fixup_bf_target ? 0xf800 :
                            Kind == ARM::fixup_bfl_target ? 0x3f800 : 0x800);
    out |= (((Value - 4) >> 1) & 0x1) << 11;
    out |= (((Value - 4) >> 1) & 0x7fe);
    out |= (((Value - 4) >> 1) & HighBitMask) << 5;
    return swapHalfWords(out, Endian == llvm::endianness::little);
  }
  case ARM::fixup_bfcsel_else_target: {
    // If this is a fixup of a branch future's else target then it should be a
    // constant MCExpr representing the distance between the branch targeted
    // and the instruction after that same branch.
    Value = Target.getConstant();

    const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
    if (FixupDiagnostic) {
      Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
      return 0;
    }
    uint32_t out = ((Value >> 2) & 1) << 17;
    return swapHalfWords(out, Endian == llvm::endianness::little);
  }
  case ARM::fixup_wls:
  case ARM::fixup_le: {
    const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
    if (FixupDiagnostic) {
      Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
      return 0;
    }
    uint64_t real_value = Value - 4;
    uint32_t out = 0;
    if (Kind == ARM::fixup_le)
      real_value = -real_value;
    out |= ((real_value >> 1) & 0x1) << 11;
    out |= ((real_value >> 1) & 0x7fe);
    return swapHalfWords(out, Endian == llvm::endianness::little);
  }
  }
}

bool ARMAsmBackend::shouldForceRelocation(const MCAssembler &Asm,
                                          const MCFixup &Fixup,
                                          const MCValue &Target,
                                          const MCSubtargetInfo *STI) {
  const MCSymbolRefExpr *A = Target.getSymA();
  const MCSymbol *Sym = A ? &A->getSymbol() : nullptr;
  const unsigned FixupKind = Fixup.getKind();
  if (FixupKind >= FirstLiteralRelocationKind)
    return true;
  if (FixupKind == ARM::fixup_arm_thumb_bl) {
    assert(Sym && "How did we resolve this?");

    // If the symbol is external the linker will handle it.
    // FIXME: Should we handle it as an optimization?

    // If the symbol is out of range, produce a relocation and hope the
    // linker can handle it. GNU AS produces an error in this case.
    if (Sym->isExternal())
      return true;
  }
  // Create relocations for unconditional branches to function symbols with
  // different execution mode in ELF binaries.
  if (Sym && Sym->isELF()) {
    unsigned Type = cast<MCSymbolELF>(Sym)->getType();
    if ((Type == ELF::STT_FUNC || Type == ELF::STT_GNU_IFUNC)) {
      if (Asm.isThumbFunc(Sym) && (FixupKind == ARM::fixup_arm_uncondbranch))
        return true;
      if (!Asm.isThumbFunc(Sym) && (FixupKind == ARM::fixup_arm_thumb_br ||
                                    FixupKind == ARM::fixup_arm_thumb_bl ||
                                    FixupKind == ARM::fixup_t2_condbranch ||
                                    FixupKind == ARM::fixup_t2_uncondbranch))
        return true;
    }
  }
  // We must always generate a relocation for BL/BLX instructions if we have
  // a symbol to reference, as the linker relies on knowing the destination
  // symbol's thumb-ness to get interworking right.
  if (A && (FixupKind == ARM::fixup_arm_thumb_blx ||
            FixupKind == ARM::fixup_arm_blx ||
            FixupKind == ARM::fixup_arm_uncondbl ||
            FixupKind == ARM::fixup_arm_condbl))
    return true;
  return false;
}

/// getFixupKindNumBytes - The number of bytes the fixup may change.
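/// (For example, the 24-bit ARM branch immediates below only touch three of
/// the instruction's four bytes.)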
static unsigned getFixupKindNumBytes(unsigned Kind) {
  switch (Kind) {
  default:
    llvm_unreachable("Unknown fixup kind!");

  case FK_Data_1:
  case ARM::fixup_arm_thumb_bcc:
  case ARM::fixup_arm_thumb_cp:
  case ARM::fixup_thumb_adr_pcrel_10:
  case ARM::fixup_arm_thumb_upper_8_15:
  case ARM::fixup_arm_thumb_upper_0_7:
  case ARM::fixup_arm_thumb_lower_8_15:
  case ARM::fixup_arm_thumb_lower_0_7:
    return 1;

  case FK_Data_2:
  case ARM::fixup_arm_thumb_br:
  case ARM::fixup_arm_thumb_cb:
  case ARM::fixup_arm_mod_imm:
    return 2;

  case ARM::fixup_arm_pcrel_10_unscaled:
  case ARM::fixup_arm_ldst_pcrel_12:
  case ARM::fixup_arm_pcrel_10:
  case ARM::fixup_arm_pcrel_9:
  case ARM::fixup_arm_ldst_abs_12:
  case ARM::fixup_arm_adr_pcrel_12:
  case ARM::fixup_arm_uncondbl:
  case ARM::fixup_arm_condbl:
  case ARM::fixup_arm_blx:
  case ARM::fixup_arm_condbranch:
  case ARM::fixup_arm_uncondbranch:
    return 3;

  case FK_Data_4:
  case ARM::fixup_t2_ldst_pcrel_12:
  case ARM::fixup_t2_condbranch:
  case ARM::fixup_t2_uncondbranch:
  case ARM::fixup_t2_pcrel_10:
  case ARM::fixup_t2_pcrel_9:
  case ARM::fixup_t2_adr_pcrel_12:
  case ARM::fixup_arm_thumb_bl:
  case ARM::fixup_arm_thumb_blx:
  case ARM::fixup_arm_movt_hi16:
  case ARM::fixup_arm_movw_lo16:
  case ARM::fixup_t2_movt_hi16:
  case ARM::fixup_t2_movw_lo16:
  case ARM::fixup_t2_so_imm:
  case ARM::fixup_bf_branch:
  case ARM::fixup_bf_target:
  case ARM::fixup_bfl_target:
  case ARM::fixup_bfc_target:
  case ARM::fixup_bfcsel_else_target:
  case ARM::fixup_wls:
  case ARM::fixup_le:
    return 4;

  case FK_SecRel_2:
    return 2;
  case FK_SecRel_4:
    return 4;
  }
}

/// getFixupKindContainerSizeBytes - The number of bytes of the
/// container involved in big endian.
static unsigned getFixupKindContainerSizeBytes(unsigned Kind) {
  switch (Kind) {
  default:
    llvm_unreachable("Unknown fixup kind!");

  case FK_Data_1:
    return 1;
  case FK_Data_2:
    return 2;
  case FK_Data_4:
    return 4;

  case ARM::fixup_arm_thumb_bcc:
  case ARM::fixup_arm_thumb_cp:
  case ARM::fixup_thumb_adr_pcrel_10:
  case ARM::fixup_arm_thumb_br:
  case ARM::fixup_arm_thumb_cb:
  case ARM::fixup_arm_thumb_upper_8_15:
  case ARM::fixup_arm_thumb_upper_0_7:
  case ARM::fixup_arm_thumb_lower_8_15:
  case ARM::fixup_arm_thumb_lower_0_7:
    // Instruction size is 2 bytes.
    return 2;

  case ARM::fixup_arm_pcrel_10_unscaled:
  case ARM::fixup_arm_ldst_pcrel_12:
  case ARM::fixup_arm_pcrel_10:
  case ARM::fixup_arm_pcrel_9:
  case ARM::fixup_arm_adr_pcrel_12:
  case ARM::fixup_arm_uncondbl:
  case ARM::fixup_arm_condbl:
  case ARM::fixup_arm_blx:
  case ARM::fixup_arm_condbranch:
  case ARM::fixup_arm_uncondbranch:
  case ARM::fixup_t2_ldst_pcrel_12:
  case ARM::fixup_t2_condbranch:
  case ARM::fixup_t2_uncondbranch:
  case ARM::fixup_t2_pcrel_10:
  case ARM::fixup_t2_pcrel_9:
  case ARM::fixup_t2_adr_pcrel_12:
  case ARM::fixup_arm_thumb_bl:
  case ARM::fixup_arm_thumb_blx:
  case ARM::fixup_arm_movt_hi16:
  case ARM::fixup_arm_movw_lo16:
  case ARM::fixup_t2_movt_hi16:
  case ARM::fixup_t2_movw_lo16:
  case ARM::fixup_arm_mod_imm:
  case ARM::fixup_t2_so_imm:
  case ARM::fixup_bf_branch:
  case ARM::fixup_bf_target:
  case ARM::fixup_bfl_target:
  case ARM::fixup_bfc_target:
  case ARM::fixup_bfcsel_else_target:
  case ARM::fixup_wls:
  case ARM::fixup_le:
    // Instruction size is 4 bytes.
    return 4;
  }
}

void ARMAsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                               const MCValue &Target,
                               MutableArrayRef<char> Data, uint64_t Value,
                               bool IsResolved,
                               const MCSubtargetInfo* STI) const {
  unsigned Kind = Fixup.getKind();
  if (Kind >= FirstLiteralRelocationKind)
    return;
  MCContext &Ctx = Asm.getContext();
  Value = adjustFixupValue(Asm, Fixup, Target, Value, IsResolved, Ctx, STI);
  if (!Value)
    return; // Doesn't change encoding.
  const unsigned NumBytes = getFixupKindNumBytes(Kind);

  unsigned Offset = Fixup.getOffset();
  assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");

  // Used to point to big endian bytes.
  unsigned FullSizeBytes;
  if (Endian == llvm::endianness::big) {
    FullSizeBytes = getFixupKindContainerSizeBytes(Kind);
    assert((Offset + FullSizeBytes) <= Data.size() && "Invalid fixup size!");
    assert(NumBytes <= FullSizeBytes && "Invalid fixup size!");
  }

  // For each byte of the fragment that the fixup touches, mask in the bits from
  // the fixup value. The Value has been "split up" into the appropriate
  // bitfields above.
  for (unsigned i = 0; i != NumBytes; ++i) {
    unsigned Idx =
        Endian == llvm::endianness::little ? i : (FullSizeBytes - 1 - i);
    Data[Offset + Idx] |= uint8_t((Value >> (i * 8)) & 0xff);
  }
}

namespace CU {

/// Compact unwind encoding values.
enum CompactUnwindEncodings {
  UNWIND_ARM_MODE_MASK               = 0x0F000000,
  UNWIND_ARM_MODE_FRAME              = 0x01000000,
  UNWIND_ARM_MODE_FRAME_D            = 0x02000000,
  UNWIND_ARM_MODE_DWARF              = 0x04000000,

  UNWIND_ARM_FRAME_STACK_ADJUST_MASK = 0x00C00000,

  UNWIND_ARM_FRAME_FIRST_PUSH_R4     = 0x00000001,
  UNWIND_ARM_FRAME_FIRST_PUSH_R5     = 0x00000002,
  UNWIND_ARM_FRAME_FIRST_PUSH_R6     = 0x00000004,

  UNWIND_ARM_FRAME_SECOND_PUSH_R8    = 0x00000008,
  UNWIND_ARM_FRAME_SECOND_PUSH_R9    = 0x00000010,
  UNWIND_ARM_FRAME_SECOND_PUSH_R10   = 0x00000020,
  UNWIND_ARM_FRAME_SECOND_PUSH_R11   = 0x00000040,
  UNWIND_ARM_FRAME_SECOND_PUSH_R12   = 0x00000080,

  UNWIND_ARM_FRAME_D_REG_COUNT_MASK  = 0x00000F00,

  UNWIND_ARM_DWARF_SECTION_OFFSET    = 0x00FFFFFF
};

} // end CU namespace

/// Generate compact unwind encoding for the function based on the CFI
/// instructions. If the CFI instructions describe a frame that cannot be
/// encoded in compact unwind, the method returns UNWIND_ARM_MODE_DWARF which
/// tells the runtime to fallback and unwind using dwarf.
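///
/// For example, a prologue that pushes {r4, r5, r6, r7, lr}, sets up r7 as the
/// frame pointer and makes no further stack adjustment is encoded as
/// UNWIND_ARM_MODE_FRAME | UNWIND_ARM_FRAME_FIRST_PUSH_R6 |
/// UNWIND_ARM_FRAME_FIRST_PUSH_R5 | UNWIND_ARM_FRAME_FIRST_PUSH_R4
/// (0x01000007).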
uint64_t ARMAsmBackendDarwin::generateCompactUnwindEncoding(
    const MCDwarfFrameInfo *FI, const MCContext *Ctxt) const {
  DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs() << "generateCU()\n");
  // Only armv7k uses CFI based unwinding.
  if (Subtype != MachO::CPU_SUBTYPE_ARM_V7K)
    return 0;
  // No .cfi directives means no frame.
  ArrayRef<MCCFIInstruction> Instrs = FI->Instructions;
  if (Instrs.empty())
    return 0;
  if (!isDarwinCanonicalPersonality(FI->Personality) &&
      !Ctxt->emitCompactUnwindNonCanonical())
    return CU::UNWIND_ARM_MODE_DWARF;

  // Start off assuming CFA is at SP+0.
  MCRegister CFARegister = ARM::SP;
  int CFARegisterOffset = 0;
  // Mark savable registers as initially unsaved.
  DenseMap<MCRegister, int> RegOffsets;
  int FloatRegCount = 0;
  // Process each .cfi directive and build up compact unwind info.
  for (const MCCFIInstruction &Inst : Instrs) {
    MCRegister Reg;
    switch (Inst.getOperation()) {
    case MCCFIInstruction::OpDefCfa: // DW_CFA_def_cfa
      CFARegisterOffset = Inst.getOffset();
      CFARegister = *MRI.getLLVMRegNum(Inst.getRegister(), true);
      break;
    case MCCFIInstruction::OpDefCfaOffset: // DW_CFA_def_cfa_offset
      CFARegisterOffset = Inst.getOffset();
      break;
    case MCCFIInstruction::OpDefCfaRegister: // DW_CFA_def_cfa_register
      CFARegister = *MRI.getLLVMRegNum(Inst.getRegister(), true);
      break;
    case MCCFIInstruction::OpOffset: // DW_CFA_offset
      Reg = *MRI.getLLVMRegNum(Inst.getRegister(), true);
      if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
        RegOffsets[Reg] = Inst.getOffset();
      else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
        RegOffsets[Reg] = Inst.getOffset();
        ++FloatRegCount;
      } else {
        DEBUG_WITH_TYPE("compact-unwind",
                        llvm::dbgs() << ".cfi_offset on unknown register="
                                     << Inst.getRegister() << "\n");
        return CU::UNWIND_ARM_MODE_DWARF;
      }
      break;
    case MCCFIInstruction::OpRelOffset: // DW_CFA_advance_loc
      // Ignore
      break;
    default:
      // Directive not convertible to compact unwind, bail out.
      DEBUG_WITH_TYPE("compact-unwind",
                      llvm::dbgs()
                          << "CFI directive not compatible with compact "
                             "unwind encoding, opcode="
                          << uint8_t(Inst.getOperation()) << "\n");
      return CU::UNWIND_ARM_MODE_DWARF;
      break;
    }
  }

  // If no frame set up, return no unwind info.
  if ((CFARegister == ARM::SP) && (CFARegisterOffset == 0))
    return 0;

  // Verify standard frame (lr/r7) was used.
  if (CFARegister != ARM::R7) {
    DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs() << "frame register is "
                                                   << CFARegister
                                                   << " instead of r7\n");
    return CU::UNWIND_ARM_MODE_DWARF;
  }
  int StackAdjust = CFARegisterOffset - 8;
  if (RegOffsets.lookup(ARM::LR) != (-4 - StackAdjust)) {
    DEBUG_WITH_TYPE(
        "compact-unwind",
        llvm::dbgs() << "LR not saved as standard frame, StackAdjust="
                     << StackAdjust
                     << ", CFARegisterOffset=" << CFARegisterOffset
                     << ", lr save at offset=" << RegOffsets[ARM::LR] << "\n");
    return CU::UNWIND_ARM_MODE_DWARF;
  }
  if (RegOffsets.lookup(ARM::R7) != (-8 - StackAdjust)) {
    DEBUG_WITH_TYPE("compact-unwind",
                    llvm::dbgs() << "r7 not saved as standard frame\n");
    return CU::UNWIND_ARM_MODE_DWARF;
  }
  uint32_t CompactUnwindEncoding = CU::UNWIND_ARM_MODE_FRAME;

  // If var-args are used, there may be a stack adjust required.
  switch (StackAdjust) {
  case 0:
    break;
  case 4:
    CompactUnwindEncoding |= 0x00400000;
    break;
  case 8:
    CompactUnwindEncoding |= 0x00800000;
    break;
  case 12:
    CompactUnwindEncoding |= 0x00C00000;
    break;
  default:
    DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs()
                                          << ".cfi_def_cfa stack adjust ("
                                          << StackAdjust << ") out of range\n");
    return CU::UNWIND_ARM_MODE_DWARF;
  }

  // If r6 is saved, it must be right below r7.
  static struct {
    unsigned Reg;
    unsigned Encoding;
  } GPRCSRegs[] = {{ARM::R6, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R6},
                   {ARM::R5, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R5},
                   {ARM::R4, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R4},
                   {ARM::R12, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R12},
                   {ARM::R11, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R11},
                   {ARM::R10, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R10},
                   {ARM::R9, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R9},
                   {ARM::R8, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R8}};

  int CurOffset = -8 - StackAdjust;
  for (auto CSReg : GPRCSRegs) {
    auto Offset = RegOffsets.find(CSReg.Reg);
    if (Offset == RegOffsets.end())
      continue;

    int RegOffset = Offset->second;
    if (RegOffset != CurOffset - 4) {
      DEBUG_WITH_TYPE("compact-unwind",
                      llvm::dbgs() << MRI.getName(CSReg.Reg) << " saved at "
                                   << RegOffset << " but only supported at "
                                   << CurOffset << "\n");
      return CU::UNWIND_ARM_MODE_DWARF;
    }
    CompactUnwindEncoding |= CSReg.Encoding;
    CurOffset -= 4;
  }

  // If no floats saved, we are done.
  if (FloatRegCount == 0)
    return CompactUnwindEncoding;

  // Switch mode to include D register saving.
  CompactUnwindEncoding &= ~CU::UNWIND_ARM_MODE_MASK;
  CompactUnwindEncoding |= CU::UNWIND_ARM_MODE_FRAME_D;

  // FIXME: supporting more than 4 saved D-registers compactly would be trivial,
  // but needs coordination with the linker and libunwind.
  if (FloatRegCount > 4) {
    DEBUG_WITH_TYPE("compact-unwind",
                    llvm::dbgs() << "unsupported number of D registers saved ("
                                 << FloatRegCount << ")\n");
    return CU::UNWIND_ARM_MODE_DWARF;
  }

  // Floating point registers must either be saved sequentially, or we defer to
  // DWARF. No gaps allowed here so check that each saved d-register is
  // precisely where it should be.
  static MCPhysReg FPRCSRegs[] = {ARM::D8, ARM::D10, ARM::D12, ARM::D14};
  for (int Idx = FloatRegCount - 1; Idx >= 0; --Idx) {
    auto Offset = RegOffsets.find(FPRCSRegs[Idx]);
    if (Offset == RegOffsets.end()) {
      DEBUG_WITH_TYPE("compact-unwind",
                      llvm::dbgs() << FloatRegCount << " D-regs saved, but "
                                   << MRI.getName(FPRCSRegs[Idx])
                                   << " not saved\n");
      return CU::UNWIND_ARM_MODE_DWARF;
    } else if (Offset->second != CurOffset - 8) {
      DEBUG_WITH_TYPE("compact-unwind",
                      llvm::dbgs() << FloatRegCount << " D-regs saved, but "
                                   << MRI.getName(FPRCSRegs[Idx])
                                   << " saved at " << Offset->second
                                   << ", expected at " << CurOffset - 8
                                   << "\n");
      return CU::UNWIND_ARM_MODE_DWARF;
    }
    CurOffset -= 8;
  }

  return CompactUnwindEncoding | ((FloatRegCount - 1) << 8);
}

static MCAsmBackend *createARMAsmBackend(const Target &T,
                                         const MCSubtargetInfo &STI,
                                         const MCRegisterInfo &MRI,
                                         const MCTargetOptions &Options,
                                         llvm::endianness Endian) {
  const Triple &TheTriple = STI.getTargetTriple();
  switch (TheTriple.getObjectFormat()) {
  default:
    llvm_unreachable("unsupported object format");
  case Triple::MachO:
    return new ARMAsmBackendDarwin(T, STI, MRI);
  case Triple::COFF:
    assert(TheTriple.isOSWindows() && "non-Windows ARM COFF is not supported");
    return new ARMAsmBackendWinCOFF(T, STI.getTargetTriple().isThumb());
  case Triple::ELF:
    assert(TheTriple.isOSBinFormatELF() && "using ELF for non-ELF target");
    uint8_t OSABI = Options.FDPIC
                        ? ELF::ELFOSABI_ARM_FDPIC
                        : MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
    return new ARMAsmBackendELF(T, STI.getTargetTriple().isThumb(), OSABI,
                                Endian);
  }
}

MCAsmBackend *llvm::createARMLEAsmBackend(const Target &T,
                                          const MCSubtargetInfo &STI,
                                          const MCRegisterInfo &MRI,
                                          const MCTargetOptions &Options) {
  return createARMAsmBackend(T, STI, MRI, Options, llvm::endianness::little);
}

MCAsmBackend *llvm::createARMBEAsmBackend(const Target &T,
                                          const MCSubtargetInfo &STI,
                                          const MCRegisterInfo &MRI,
                                          const MCTargetOptions &Options) {
  return createARMAsmBackend(T, STI, MRI, Options, llvm::endianness::big);
}