[Alignment][NFC] Use Align with TargetLowering::setMinFunctionAlignment
llvm-core.git: lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp
//===-- ARMAsmBackend.cpp - ARM Assembler Backend -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/ARMAsmBackend.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMAsmBackendDarwin.h"
#include "MCTargetDesc/ARMAsmBackendELF.h"
#include "MCTargetDesc/ARMAsmBackendWinCOFF.h"
#include "MCTargetDesc/ARMFixupKinds.h"
#include "MCTargetDesc/ARMMCTargetDesc.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDirectives.h"
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCValue.h"
#include "llvm/MC/MCAsmLayout.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/TargetParser.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

namespace {
class ARMELFObjectWriter : public MCELFObjectTargetWriter {
public:
  ARMELFObjectWriter(uint8_t OSABI)
      : MCELFObjectTargetWriter(/*Is64Bit*/ false, OSABI, ELF::EM_ARM,
                                /*HasRelocationAddend*/ false) {}
};
} // end anonymous namespace

Optional<MCFixupKind> ARMAsmBackend::getFixupKind(StringRef Name) const {
  if (STI.getTargetTriple().isOSBinFormatELF() && Name == "R_ARM_NONE")
    return FK_NONE;

  return MCAsmBackend::getFixupKind(Name);
}

const MCFixupKindInfo &ARMAsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
  const static MCFixupKindInfo InfosLE[ARM::NumTargetFixupKinds] = {
      // This table *must* be in the order that the fixup_* kinds are defined in
      // ARMFixupKinds.h.
      //
      // Name                      Offset (bits) Size (bits)     Flags
      {"fixup_arm_ldst_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_ldst_pcrel_12", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_pcrel_10_unscaled", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_pcrel_10", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_pcrel_10", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_pcrel_9", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_pcrel_9", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_thumb_adr_pcrel_10", 0, 8,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_adr_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_adr_pcrel_12", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_condbranch", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_uncondbranch", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_condbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_uncondbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_br", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_uncondbl", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_condbl", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_blx", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_bl", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_blx", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_thumb_cb", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_cp", 0, 8,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_thumb_bcc", 0, 8, MCFixupKindInfo::FKF_IsPCRel},
      // movw / movt: 16-bit immediate but scattered into two chunks 0 - 12,
      // 16 - 19.
      {"fixup_arm_movt_hi16", 0, 20, 0},
      {"fixup_arm_movw_lo16", 0, 20, 0},
      {"fixup_t2_movt_hi16", 0, 20, 0},
      {"fixup_t2_movw_lo16", 0, 20, 0},
      {"fixup_arm_mod_imm", 0, 12, 0},
      {"fixup_t2_so_imm", 0, 26, 0},
      {"fixup_bf_branch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_bf_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_bfl_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_bfc_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_bfcsel_else_target", 0, 32, 0},
      {"fixup_wls", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_le", 0, 32, MCFixupKindInfo::FKF_IsPCRel}
  };
  const static MCFixupKindInfo InfosBE[ARM::NumTargetFixupKinds] = {
      // This table *must* be in the order that the fixup_* kinds are defined in
      // ARMFixupKinds.h.
      //
      // Name                      Offset (bits) Size (bits)     Flags
      {"fixup_arm_ldst_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_ldst_pcrel_12", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_pcrel_10_unscaled", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_pcrel_10", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_pcrel_10", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_pcrel_9", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_pcrel_9", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_thumb_adr_pcrel_10", 8, 8,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_adr_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_adr_pcrel_12", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_condbranch", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_uncondbranch", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_condbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_uncondbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_br", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_uncondbl", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_condbl", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_blx", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_bl", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_blx", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_thumb_cb", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_cp", 8, 8,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_thumb_bcc", 8, 8, MCFixupKindInfo::FKF_IsPCRel},
      // movw / movt: 16-bit immediate but scattered into two chunks 0 - 12,
      // 16 - 19.
      {"fixup_arm_movt_hi16", 12, 20, 0},
      {"fixup_arm_movw_lo16", 12, 20, 0},
      {"fixup_t2_movt_hi16", 12, 20, 0},
      {"fixup_t2_movw_lo16", 12, 20, 0},
      {"fixup_arm_mod_imm", 20, 12, 0},
      {"fixup_t2_so_imm", 26, 6, 0},
      {"fixup_bf_branch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_bf_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_bfl_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_bfc_target", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_bfcsel_else_target", 0, 32, 0},
      {"fixup_wls", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_le", 0, 32, MCFixupKindInfo::FKF_IsPCRel}
  };

  if (Kind < FirstTargetFixupKind)
    return MCAsmBackend::getFixupKindInfo(Kind);

  assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
         "Invalid kind!");
  return (Endian == support::little ? InfosLE
                                    : InfosBE)[Kind - FirstTargetFixupKind];
}

void ARMAsmBackend::handleAssemblerFlag(MCAssemblerFlag Flag) {
  switch (Flag) {
  default:
    break;
  case MCAF_Code16:
    setIsThumb(true);
    break;
  case MCAF_Code32:
    setIsThumb(false);
    break;
  }
}

unsigned ARMAsmBackend::getRelaxedOpcode(unsigned Op,
                                         const MCSubtargetInfo &STI) const {
  bool HasThumb2 = STI.getFeatureBits()[ARM::FeatureThumb2];
  bool HasV8MBaselineOps = STI.getFeatureBits()[ARM::HasV8MBaselineOps];

  switch (Op) {
  default:
    return Op;
  case ARM::tBcc:
    return HasThumb2 ? (unsigned)ARM::t2Bcc : Op;
  case ARM::tLDRpci:
    return HasThumb2 ? (unsigned)ARM::t2LDRpci : Op;
  case ARM::tADR:
    return HasThumb2 ? (unsigned)ARM::t2ADR : Op;
  case ARM::tB:
    return HasV8MBaselineOps ? (unsigned)ARM::t2B : Op;
  case ARM::tCBZ:
    return ARM::tHINT;
  case ARM::tCBNZ:
    return ARM::tHINT;
  }
}

bool ARMAsmBackend::mayNeedRelaxation(const MCInst &Inst,
                                      const MCSubtargetInfo &STI) const {
  if (getRelaxedOpcode(Inst.getOpcode(), STI) != Inst.getOpcode())
    return true;
  return false;
}

static const char *checkPCRelOffset(uint64_t Value, int64_t Min, int64_t Max) {
  int64_t Offset = int64_t(Value) - 4;
  if (Offset < Min || Offset > Max)
    return "out of range pc-relative fixup value";
  return nullptr;
}

const char *ARMAsmBackend::reasonForFixupRelaxation(const MCFixup &Fixup,
                                                    uint64_t Value) const {
  switch (Fixup.getTargetKind()) {
  case ARM::fixup_arm_thumb_br: {
    // Relaxing tB to t2B. tB has a signed 12-bit displacement with the
    // low bit being an implied zero. There's an implied +4 offset for the
    // branch, so we adjust the other way here to determine what's
    // encodable.

    // Relax if the value does not fit in the signed 12-bit range.
    int64_t Offset = int64_t(Value) - 4;
    if (Offset > 2046 || Offset < -2048)
      return "out of range pc-relative fixup value";
    break;
  }
  case ARM::fixup_arm_thumb_bcc: {
    // Relaxing tBcc to t2Bcc. tBcc has a signed 9-bit displacement with the
    // low bit being an implied zero. There's an implied +4 offset for the
    // branch, so we adjust the other way here to determine what's
    // encodable.

    // Relax if the value is too big for a (signed) i8.
    int64_t Offset = int64_t(Value) - 4;
    if (Offset > 254 || Offset < -256)
      return "out of range pc-relative fixup value";
    break;
  }
  case ARM::fixup_thumb_adr_pcrel_10:
  case ARM::fixup_arm_thumb_cp: {
    // If the immediate is negative, greater than 1020, or not a multiple
    // of four, the wide version of the instruction must be used.
    int64_t Offset = int64_t(Value) - 4;
    if (Offset & 3)
      return "misaligned pc-relative fixup value";
    else if (Offset > 1020 || Offset < 0)
      return "out of range pc-relative fixup value";
    break;
  }
  case ARM::fixup_arm_thumb_cb: {
    // If we have a Thumb CBZ or CBNZ instruction and its target is the next
    // instruction, it is actually out of range for the instruction.
    // It will be changed to a NOP.
    int64_t Offset = (Value & ~1);
    if (Offset == 2)
      return "will be converted to nop";
    break;
  }
  case ARM::fixup_bf_branch:
    return checkPCRelOffset(Value, 0, 30);
  case ARM::fixup_bf_target:
    return checkPCRelOffset(Value, -0x10000, +0xfffe);
  case ARM::fixup_bfl_target:
    return checkPCRelOffset(Value, -0x40000, +0x3fffe);
  case ARM::fixup_bfc_target:
    return checkPCRelOffset(Value, -0x1000, +0xffe);
  case ARM::fixup_wls:
    return checkPCRelOffset(Value, 0, +0xffe);
  case ARM::fixup_le:
    // The offset field in the LE and LETP instructions is an 11-bit
    // value shifted left by 2 (i.e. 0,2,4,...,4094), and it is
    // interpreted as a negative offset from the value read from pc,
    // i.e. from instruction_address+4.
    //
    // So an LE instruction can in principle address the instruction
    // immediately after itself, or (not very usefully) the address
    // half way through the 4-byte LE.
    return checkPCRelOffset(Value, -0xffe, 0);
  case ARM::fixup_bfcsel_else_target: {
    if (Value != 2 && Value != 4)
      return "out of range label-relative fixup value";
    break;
  }

  default:
    llvm_unreachable("Unexpected fixup kind in reasonForFixupRelaxation()!");
  }
  return nullptr;
}

bool ARMAsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
                                         const MCRelaxableFragment *DF,
                                         const MCAsmLayout &Layout) const {
  return reasonForFixupRelaxation(Fixup, Value);
}

void ARMAsmBackend::relaxInstruction(const MCInst &Inst,
                                     const MCSubtargetInfo &STI,
                                     MCInst &Res) const {
  unsigned RelaxedOp = getRelaxedOpcode(Inst.getOpcode(), STI);

  // Sanity check w/ diagnostic if we get here w/ a bogus instruction.
  if (RelaxedOp == Inst.getOpcode()) {
    SmallString<256> Tmp;
    raw_svector_ostream OS(Tmp);
    Inst.dump_pretty(OS);
    OS << "\n";
    report_fatal_error("unexpected instruction to relax: " + OS.str());
  }

  // If we are changing a Thumb CBZ or CBNZ instruction to a NOP, aka tHINT, we
  // have to change the operands too.
  if ((Inst.getOpcode() == ARM::tCBZ || Inst.getOpcode() == ARM::tCBNZ) &&
      RelaxedOp == ARM::tHINT) {
    Res.setOpcode(RelaxedOp);
    Res.addOperand(MCOperand::createImm(0));
    Res.addOperand(MCOperand::createImm(14));
    Res.addOperand(MCOperand::createReg(0));
    return;
  }

  // The rest of the instructions we're relaxing have the same operands.
  // We just need to update to the proper opcode.
  Res = Inst;
  Res.setOpcode(RelaxedOp);
}

bool ARMAsmBackend::writeNopData(raw_ostream &OS, uint64_t Count) const {
  const uint16_t Thumb1_16bitNopEncoding = 0x46c0; // using MOV r8,r8
  const uint16_t Thumb2_16bitNopEncoding = 0xbf00; // NOP
  const uint32_t ARMv4_NopEncoding = 0xe1a00000;   // using MOV r0,r0
  const uint32_t ARMv6T2_NopEncoding = 0xe320f000; // NOP
  if (isThumb()) {
    const uint16_t nopEncoding =
        hasNOP() ? Thumb2_16bitNopEncoding : Thumb1_16bitNopEncoding;
    uint64_t NumNops = Count / 2;
    for (uint64_t i = 0; i != NumNops; ++i)
      support::endian::write(OS, nopEncoding, Endian);
    if (Count & 1)
      OS << '\0';
    return true;
  }
  // ARM mode
  const uint32_t nopEncoding =
      hasNOP() ? ARMv6T2_NopEncoding : ARMv4_NopEncoding;
  uint64_t NumNops = Count / 4;
  for (uint64_t i = 0; i != NumNops; ++i)
    support::endian::write(OS, nopEncoding, Endian);
  // FIXME: should this function return false when unable to write exactly
  // 'Count' bytes with NOP encodings?
  switch (Count % 4) {
  default:
    break; // No leftover bytes to write
  case 1:
    OS << '\0';
    break;
  case 2:
    OS.write("\0\0", 2);
    break;
  case 3:
    OS.write("\0\0\xa0", 3);
    break;
  }

  return true;
}

static uint32_t swapHalfWords(uint32_t Value, bool IsLittleEndian) {
  if (IsLittleEndian) {
    // Note that the halfwords are stored high first and low second in thumb;
    // so we need to swap the fixup value here to map properly.
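    // Illustration only: if the encoder produced 0xAAAABBBB (first halfword
    // 0xAAAA, second halfword 0xBBBB), returning 0xBBBBAAAA means a byte-wise
    // little-endian write places 0xAAAA at the lower address, matching the
    // halfword order of the Thumb2 instruction stream.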
    uint32_t Swapped = (Value & 0xFFFF0000) >> 16;
    Swapped |= (Value & 0x0000FFFF) << 16;
    return Swapped;
  } else
    return Value;
}

static uint32_t joinHalfWords(uint32_t FirstHalf, uint32_t SecondHalf,
                              bool IsLittleEndian) {
  uint32_t Value;

  if (IsLittleEndian) {
    Value = (SecondHalf & 0xFFFF) << 16;
    Value |= (FirstHalf & 0xFFFF);
  } else {
    Value = (SecondHalf & 0xFFFF);
    Value |= (FirstHalf & 0xFFFF) << 16;
  }

  return Value;
}

unsigned ARMAsmBackend::adjustFixupValue(const MCAssembler &Asm,
                                         const MCFixup &Fixup,
                                         const MCValue &Target, uint64_t Value,
                                         bool IsResolved, MCContext &Ctx,
                                         const MCSubtargetInfo* STI) const {
  unsigned Kind = Fixup.getKind();

  // MachO tries to make .o files that look vaguely pre-linked, so for MOVW/MOVT
  // and .word relocations they put the Thumb bit into the addend if possible.
  // Other relocation types don't want this bit though (branches couldn't encode
  // it if it *was* present, and no other relocations exist) and it can
  // interfere with checking valid expressions.
  if (const MCSymbolRefExpr *A = Target.getSymA()) {
    if (A->hasSubsectionsViaSymbols() && Asm.isThumbFunc(&A->getSymbol()) &&
        A->getSymbol().isExternal() &&
        (Kind == FK_Data_4 || Kind == ARM::fixup_arm_movw_lo16 ||
         Kind == ARM::fixup_arm_movt_hi16 || Kind == ARM::fixup_t2_movw_lo16 ||
         Kind == ARM::fixup_t2_movt_hi16))
      Value |= 1;
  }

  switch (Kind) {
  default:
    Ctx.reportError(Fixup.getLoc(), "bad relocation fixup type");
    return 0;
  case FK_NONE:
  case FK_Data_1:
  case FK_Data_2:
  case FK_Data_4:
    return Value;
  case FK_SecRel_2:
    return Value;
  case FK_SecRel_4:
    return Value;
  case ARM::fixup_arm_movt_hi16:
    assert(STI != nullptr);
    if (IsResolved || !STI->getTargetTriple().isOSBinFormatELF())
      Value >>= 16;
    LLVM_FALLTHROUGH;
  case ARM::fixup_arm_movw_lo16: {
    unsigned Hi4 = (Value & 0xF000) >> 12;
    unsigned Lo12 = Value & 0x0FFF;
    // inst{19-16} = Hi4;
    // inst{11-0} = Lo12;
    Value = (Hi4 << 16) | (Lo12);
    return Value;
  }
  case ARM::fixup_t2_movt_hi16:
    assert(STI != nullptr);
    if (IsResolved || !STI->getTargetTriple().isOSBinFormatELF())
      Value >>= 16;
    LLVM_FALLTHROUGH;
  case ARM::fixup_t2_movw_lo16: {
    unsigned Hi4 = (Value & 0xF000) >> 12;
    unsigned i = (Value & 0x800) >> 11;
    unsigned Mid3 = (Value & 0x700) >> 8;
    unsigned Lo8 = Value & 0x0FF;
    // inst{19-16} = Hi4;
    // inst{26} = i;
    // inst{14-12} = Mid3;
    // inst{7-0} = Lo8;
    Value = (Hi4 << 16) | (i << 26) | (Mid3 << 12) | (Lo8);
    return swapHalfWords(Value, Endian == support::little);
  }
  case ARM::fixup_arm_ldst_pcrel_12:
    // ARM PC-relative values are offset by 8.
    Value -= 4;
    LLVM_FALLTHROUGH;
  case ARM::fixup_t2_ldst_pcrel_12: {
    // Offset by 4, adjusted by two due to the half-word ordering of thumb.
    Value -= 4;
    bool isAdd = true;
    if ((int64_t)Value < 0) {
      Value = -Value;
      isAdd = false;
    }
    if (Value >= 4096) {
      Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
      return 0;
    }
    Value |= isAdd << 23;

    // Same addressing mode as fixup_arm_pcrel_10,
    // but with 16-bit halfwords swapped.
    if (Kind == ARM::fixup_t2_ldst_pcrel_12)
      return swapHalfWords(Value, Endian == support::little);

    return Value;
  }
  case ARM::fixup_arm_adr_pcrel_12: {
    // ARM PC-relative values are offset by 8.
    Value -= 8;
    unsigned opc = 4; // bits {24-21}. Default to add: 0b0100
    if ((int64_t)Value < 0) {
      Value = -Value;
      opc = 2; // 0b0010
    }
    if (ARM_AM::getSOImmVal(Value) == -1) {
      Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
      return 0;
    }
    // Encode the immediate and shift the opcode into place.
    return ARM_AM::getSOImmVal(Value) | (opc << 21);
  }

  case ARM::fixup_t2_adr_pcrel_12: {
    Value -= 4;
    unsigned opc = 0;
    if ((int64_t)Value < 0) {
      Value = -Value;
      opc = 5;
    }

    uint32_t out = (opc << 21);
    out |= (Value & 0x800) << 15;
    out |= (Value & 0x700) << 4;
    out |= (Value & 0x0FF);

    return swapHalfWords(out, Endian == support::little);
  }

  case ARM::fixup_arm_condbranch:
  case ARM::fixup_arm_uncondbranch:
  case ARM::fixup_arm_uncondbl:
  case ARM::fixup_arm_condbl:
  case ARM::fixup_arm_blx:
    // These values don't encode the low two bits since they're always zero.
    // Offset by 8 just as above.
    if (const MCSymbolRefExpr *SRE =
            dyn_cast<MCSymbolRefExpr>(Fixup.getValue()))
      if (SRE->getKind() == MCSymbolRefExpr::VK_TLSCALL)
        return 0;
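    // e.g. a forward branch whose target is 0x1000 bytes past the fixup
    // location encodes (0x1000 - 8) >> 2 = 0x3FE in the 24-bit imm24 field.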
    return 0xffffff & ((Value - 8) >> 2);
  case ARM::fixup_t2_uncondbranch: {
    Value = Value - 4;
    if (!isInt<25>(Value)) {
      Ctx.reportError(Fixup.getLoc(), "Relocation out of range");
      return 0;
    }

    Value >>= 1; // Low bit is not encoded.

    uint32_t out = 0;
    bool I = Value & 0x800000;
    bool J1 = Value & 0x400000;
    bool J2 = Value & 0x200000;
    J1 ^= I;
    J2 ^= I;

    out |= I << 26;                 // S bit
    out |= !J1 << 13;               // J1 bit
    out |= !J2 << 11;               // J2 bit
    out |= (Value & 0x1FF800) << 5; // imm10 field
    out |= (Value & 0x0007FF);      // imm11 field

    return swapHalfWords(out, Endian == support::little);
  }
  case ARM::fixup_t2_condbranch: {
    Value = Value - 4;
    if (!isInt<21>(Value)) {
      Ctx.reportError(Fixup.getLoc(), "Relocation out of range");
      return 0;
    }

    Value >>= 1; // Low bit is not encoded.

    uint64_t out = 0;
    out |= (Value & 0x80000) << 7; // S bit
    out |= (Value & 0x40000) >> 7; // J2 bit
    out |= (Value & 0x20000) >> 4; // J1 bit
    out |= (Value & 0x1F800) << 5; // imm6 field
    out |= (Value & 0x007FF);      // imm11 field

    return swapHalfWords(out, Endian == support::little);
  }
  case ARM::fixup_arm_thumb_bl: {
    if (!isInt<25>(Value - 4) ||
        (!STI->getFeatureBits()[ARM::FeatureThumb2] &&
         !STI->getFeatureBits()[ARM::HasV8MBaselineOps] &&
         !STI->getFeatureBits()[ARM::HasV6MOps] &&
         !isInt<23>(Value - 4))) {
      Ctx.reportError(Fixup.getLoc(), "Relocation out of range");
      return 0;
    }

    // The value doesn't encode the low bit (always zero) and is offset by
    // four. The 32-bit immediate value is encoded as
    //   imm32 = SignExtend(S:I1:I2:imm10:imm11:0)
    // where I1 = NOT(J1 ^ S) and I2 = NOT(J2 ^ S).
    // The value is encoded into disjoint bit positions in the destination
    // opcode. x = unchanged, I = immediate value bit, S = sign extension bit,
    // J = either J1 or J2 bit
    //
    //   BL:  xxxxxSIIIIIIIIII xxJxJIIIIIIIIIII
    //
    // Note that the halfwords are stored high first, low second; so we need
    // to transpose the fixup value here to map properly.
    uint32_t offset = (Value - 4) >> 1;
    uint32_t signBit = (offset & 0x800000) >> 23;
    uint32_t I1Bit = (offset & 0x400000) >> 22;
    uint32_t J1Bit = (I1Bit ^ 0x1) ^ signBit;
    uint32_t I2Bit = (offset & 0x200000) >> 21;
    uint32_t J2Bit = (I2Bit ^ 0x1) ^ signBit;
    uint32_t imm10Bits = (offset & 0x1FF800) >> 11;
    uint32_t imm11Bits = (offset & 0x000007FF);

    uint32_t FirstHalf = (((uint16_t)signBit << 10) | (uint16_t)imm10Bits);
    uint32_t SecondHalf = (((uint16_t)J1Bit << 13) | ((uint16_t)J2Bit << 11) |
                           (uint16_t)imm11Bits);
    return joinHalfWords(FirstHalf, SecondHalf, Endian == support::little);
  }
  case ARM::fixup_arm_thumb_blx: {
    // The value doesn't encode the low two bits (always zero) and is offset by
    // four (see fixup_arm_thumb_cp). The 32-bit immediate value is encoded as
    //   imm32 = SignExtend(S:I1:I2:imm10H:imm10L:00)
    // where I1 = NOT(J1 ^ S) and I2 = NOT(J2 ^ S).
    // The value is encoded into disjoint bit positions in the destination
    // opcode. x = unchanged, I = immediate value bit, S = sign extension bit,
    // J = either J1 or J2 bit, 0 = zero.
    //
    //   BLX: xxxxxSIIIIIIIIII xxJxJIIIIIIIIII0
    //
    // Note that the halfwords are stored high first, low second; so we need
    // to transpose the fixup value here to map properly.
    if (Value % 4 != 0) {
      Ctx.reportError(Fixup.getLoc(), "misaligned ARM call destination");
      return 0;
    }

    uint32_t offset = (Value - 4) >> 2;
    if (const MCSymbolRefExpr *SRE =
            dyn_cast<MCSymbolRefExpr>(Fixup.getValue()))
      if (SRE->getKind() == MCSymbolRefExpr::VK_TLSCALL)
        offset = 0;
    uint32_t signBit = (offset & 0x400000) >> 22;
    uint32_t I1Bit = (offset & 0x200000) >> 21;
    uint32_t J1Bit = (I1Bit ^ 0x1) ^ signBit;
    uint32_t I2Bit = (offset & 0x100000) >> 20;
    uint32_t J2Bit = (I2Bit ^ 0x1) ^ signBit;
    uint32_t imm10HBits = (offset & 0xFFC00) >> 10;
    uint32_t imm10LBits = (offset & 0x3FF);

    uint32_t FirstHalf = (((uint16_t)signBit << 10) | (uint16_t)imm10HBits);
    uint32_t SecondHalf = (((uint16_t)J1Bit << 13) | ((uint16_t)J2Bit << 11) |
                           ((uint16_t)imm10LBits) << 1);
    return joinHalfWords(FirstHalf, SecondHalf, Endian == support::little);
  }
  case ARM::fixup_thumb_adr_pcrel_10:
  case ARM::fixup_arm_thumb_cp:
    // On CPUs supporting Thumb2, this will be relaxed to an ldr.w, otherwise we
    // could have an error on our hands.
    assert(STI != nullptr);
    if (!STI->getFeatureBits()[ARM::FeatureThumb2] && IsResolved) {
      const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
      if (FixupDiagnostic) {
        Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
        return 0;
      }
    }
    // Offset by 4, and don't encode the low two bits.
    return ((Value - 4) >> 2) & 0xff;
  case ARM::fixup_arm_thumb_cb: {
    // CB instructions can only branch to offsets in [4, 126] in multiples of 2
    // so ensure that the raw value LSB is zero and it lies in [2, 130].
    // An offset of 2 will be relaxed to a NOP.
    if ((int64_t)Value < 2 || Value > 0x82 || Value & 1) {
      Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
      return 0;
    }
    // Offset by 4 and don't encode the lower bit, which is always 0.
    // FIXME: diagnose if no Thumb2
    uint32_t Binary = (Value - 4) >> 1;
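    // The resulting 6-bit halfword offset is split into the i bit (bit 9) and
    // the imm5 field (bits 7:3) of the 16-bit CB(N)Z encoding.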
    return ((Binary & 0x20) << 4) | ((Binary & 0x1f) << 3);
  }
  case ARM::fixup_arm_thumb_br:
    // Offset by 4 and don't encode the lower bit, which is always 0.
    assert(STI != nullptr);
    if (!STI->getFeatureBits()[ARM::FeatureThumb2] &&
        !STI->getFeatureBits()[ARM::HasV8MBaselineOps]) {
      const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
      if (FixupDiagnostic) {
        Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
        return 0;
      }
    }
    return ((Value - 4) >> 1) & 0x7ff;
  case ARM::fixup_arm_thumb_bcc:
    // Offset by 4 and don't encode the lower bit, which is always 0.
    assert(STI != nullptr);
    if (!STI->getFeatureBits()[ARM::FeatureThumb2]) {
      const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
      if (FixupDiagnostic) {
        Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
        return 0;
      }
    }
    return ((Value - 4) >> 1) & 0xff;
  case ARM::fixup_arm_pcrel_10_unscaled: {
    Value = Value - 8; // ARM fixups offset by an additional word and don't
                       // need to adjust for the half-word ordering.
    bool isAdd = true;
    if ((int64_t)Value < 0) {
      Value = -Value;
      isAdd = false;
    }
    // The value has the low 4 bits encoded in [3:0] and the high 4 in [11:8].
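    // e.g. an offset of 0x34 splits into low nibble 0x4 and high nibble 0x3,
    // and is encoded below as 0x304.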
    if (Value >= 256) {
      Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
      return 0;
    }
    Value = (Value & 0xf) | ((Value & 0xf0) << 4);
    return Value | (isAdd << 23);
  }
  case ARM::fixup_arm_pcrel_10:
    Value = Value - 4; // ARM fixups offset by an additional word and don't
                       // need to adjust for the half-word ordering.
    LLVM_FALLTHROUGH;
  case ARM::fixup_t2_pcrel_10: {
    // Offset by 4, adjusted by two due to the half-word ordering of thumb.
    Value = Value - 4;
    bool isAdd = true;
    if ((int64_t)Value < 0) {
      Value = -Value;
      isAdd = false;
    }
    // These values don't encode the low two bits since they're always zero.
    Value >>= 2;
    if (Value >= 256) {
      Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
      return 0;
    }
    Value |= isAdd << 23;

    // Same addressing mode as fixup_arm_pcrel_10, but with 16-bit halfwords
    // swapped.
    if (Kind == ARM::fixup_t2_pcrel_10)
      return swapHalfWords(Value, Endian == support::little);

    return Value;
  }
  case ARM::fixup_arm_pcrel_9:
    Value = Value - 4; // ARM fixups offset by an additional word and don't
                       // need to adjust for the half-word ordering.
    LLVM_FALLTHROUGH;
  case ARM::fixup_t2_pcrel_9: {
    // Offset by 4, adjusted by two due to the half-word ordering of thumb.
    Value = Value - 4;
    bool isAdd = true;
    if ((int64_t)Value < 0) {
      Value = -Value;
      isAdd = false;
    }
    // These values don't encode the low bit since it's always zero.
    if (Value & 1) {
      Ctx.reportError(Fixup.getLoc(), "invalid value for this fixup");
      return 0;
    }
    Value >>= 1;
    if (Value >= 256) {
      Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
      return 0;
    }
    Value |= isAdd << 23;

    // Same addressing mode as fixup_arm_pcrel_9, but with 16-bit halfwords
    // swapped.
    if (Kind == ARM::fixup_t2_pcrel_9)
      return swapHalfWords(Value, Endian == support::little);

    return Value;
  }
  case ARM::fixup_arm_mod_imm:
    Value = ARM_AM::getSOImmVal(Value);
    if (Value >> 12) {
      Ctx.reportError(Fixup.getLoc(), "out of range immediate fixup value");
      return 0;
    }
    return Value;
  case ARM::fixup_t2_so_imm: {
    Value = ARM_AM::getT2SOImmVal(Value);
    if ((int64_t)Value < 0) {
      Ctx.reportError(Fixup.getLoc(), "out of range immediate fixup value");
      return 0;
    }
    // Value will contain a 12-bit value broken up into a 4-bit shift in bits
    // 11:8 and the 8-bit immediate in 0:7. The instruction has the immediate
    // in 0:7. The 4-bit shift is split up into i:imm3 where i is placed at bit
    // 10 of the upper half-word and imm3 is placed at 14:12 of the lower
    // half-word.
    uint64_t EncValue = 0;
    EncValue |= (Value & 0x800) << 15;
    EncValue |= (Value & 0x700) << 4;
    EncValue |= (Value & 0xff);
    return swapHalfWords(EncValue, Endian == support::little);
  }
  case ARM::fixup_bf_branch: {
    const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
    if (FixupDiagnostic) {
      Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
      return 0;
    }
    uint32_t out = (((Value - 4) >> 1) & 0xf) << 23;
    return swapHalfWords(out, Endian == support::little);
  }
  case ARM::fixup_bf_target:
  case ARM::fixup_bfl_target:
  case ARM::fixup_bfc_target: {
    const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
    if (FixupDiagnostic) {
      Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
      return 0;
    }
    uint32_t out = 0;
    uint32_t HighBitMask = (Kind == ARM::fixup_bf_target ? 0xf800 :
                            Kind == ARM::fixup_bfl_target ? 0x3f800 : 0x800);
    out |= (((Value - 4) >> 1) & 0x1) << 11;
    out |= (((Value - 4) >> 1) & 0x7fe);
    out |= (((Value - 4) >> 1) & HighBitMask) << 5;
    return swapHalfWords(out, Endian == support::little);
  }
  case ARM::fixup_bfcsel_else_target: {
    // If this is a fixup of a branch future's else target then it should be a
    // constant MCExpr representing the distance between the branch targeted
    // and the instruction after that same branch.
    Value = Target.getConstant();

    const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
    if (FixupDiagnostic) {
      Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
      return 0;
    }
    uint32_t out = ((Value >> 2) & 1) << 17;
    return swapHalfWords(out, Endian == support::little);
  }
  case ARM::fixup_wls:
  case ARM::fixup_le: {
    const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
    if (FixupDiagnostic) {
      Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
      return 0;
    }
    uint64_t real_value = Value - 4;
    uint32_t out = 0;
    if (Kind == ARM::fixup_le)
      real_value = -real_value;
    out |= ((real_value >> 1) & 0x1) << 11;
    out |= ((real_value >> 1) & 0x7fe);
    return swapHalfWords(out, Endian == support::little);
  }
  }
}

bool ARMAsmBackend::shouldForceRelocation(const MCAssembler &Asm,
                                          const MCFixup &Fixup,
                                          const MCValue &Target) {
  const MCSymbolRefExpr *A = Target.getSymA();
  const MCSymbol *Sym = A ? &A->getSymbol() : nullptr;
  const unsigned FixupKind = Fixup.getKind();
  if (FixupKind == FK_NONE)
    return true;
  if (FixupKind == ARM::fixup_arm_thumb_bl) {
    assert(Sym && "How did we resolve this?");

    // If the symbol is external the linker will handle it.
    // FIXME: Should we handle it as an optimization?

    // If the symbol is out of range, produce a relocation and hope the
    // linker can handle it. GNU AS produces an error in this case.
    if (Sym->isExternal())
      return true;
  }
  // Create relocations for unconditional branches to function symbols with
  // a different execution mode in ELF binaries.
  if (Sym && Sym->isELF()) {
    unsigned Type = cast<MCSymbolELF>(Sym)->getType();
    if ((Type == ELF::STT_FUNC || Type == ELF::STT_GNU_IFUNC)) {
      if (Asm.isThumbFunc(Sym) && (FixupKind == ARM::fixup_arm_uncondbranch))
        return true;
      if (!Asm.isThumbFunc(Sym) && (FixupKind == ARM::fixup_arm_thumb_br ||
                                    FixupKind == ARM::fixup_arm_thumb_bl ||
                                    FixupKind == ARM::fixup_t2_condbranch ||
                                    FixupKind == ARM::fixup_t2_uncondbranch))
        return true;
    }
  }
  // We must always generate a relocation for BL/BLX instructions if we have
  // a symbol to reference, as the linker relies on knowing the destination
  // symbol's thumb-ness to get interworking right.
  if (A && (FixupKind == ARM::fixup_arm_thumb_blx ||
            FixupKind == ARM::fixup_arm_blx ||
            FixupKind == ARM::fixup_arm_uncondbl ||
            FixupKind == ARM::fixup_arm_condbl))
    return true;
  return false;
}

/// getFixupKindNumBytes - The number of bytes the fixup may change.
static unsigned getFixupKindNumBytes(unsigned Kind) {
  switch (Kind) {
  default:
    llvm_unreachable("Unknown fixup kind!");

  case FK_NONE:
    return 0;

  case FK_Data_1:
  case ARM::fixup_arm_thumb_bcc:
  case ARM::fixup_arm_thumb_cp:
  case ARM::fixup_thumb_adr_pcrel_10:
    return 1;

  case FK_Data_2:
  case ARM::fixup_arm_thumb_br:
  case ARM::fixup_arm_thumb_cb:
  case ARM::fixup_arm_mod_imm:
    return 2;

  case ARM::fixup_arm_pcrel_10_unscaled:
  case ARM::fixup_arm_ldst_pcrel_12:
  case ARM::fixup_arm_pcrel_10:
  case ARM::fixup_arm_pcrel_9:
  case ARM::fixup_arm_adr_pcrel_12:
  case ARM::fixup_arm_uncondbl:
  case ARM::fixup_arm_condbl:
  case ARM::fixup_arm_blx:
  case ARM::fixup_arm_condbranch:
  case ARM::fixup_arm_uncondbranch:
    return 3;

  case FK_Data_4:
  case ARM::fixup_t2_ldst_pcrel_12:
  case ARM::fixup_t2_condbranch:
  case ARM::fixup_t2_uncondbranch:
  case ARM::fixup_t2_pcrel_10:
  case ARM::fixup_t2_pcrel_9:
  case ARM::fixup_t2_adr_pcrel_12:
  case ARM::fixup_arm_thumb_bl:
  case ARM::fixup_arm_thumb_blx:
  case ARM::fixup_arm_movt_hi16:
  case ARM::fixup_arm_movw_lo16:
  case ARM::fixup_t2_movt_hi16:
  case ARM::fixup_t2_movw_lo16:
  case ARM::fixup_t2_so_imm:
  case ARM::fixup_bf_branch:
  case ARM::fixup_bf_target:
  case ARM::fixup_bfl_target:
  case ARM::fixup_bfc_target:
  case ARM::fixup_bfcsel_else_target:
  case ARM::fixup_wls:
  case ARM::fixup_le:
    return 4;

  case FK_SecRel_2:
    return 2;
  case FK_SecRel_4:
    return 4;
  }
}

/// getFixupKindContainerSizeBytes - The number of bytes of the
/// container involved in big endian.
static unsigned getFixupKindContainerSizeBytes(unsigned Kind) {
  switch (Kind) {
  default:
    llvm_unreachable("Unknown fixup kind!");

  case FK_NONE:
    return 0;

  case FK_Data_1:
    return 1;
  case FK_Data_2:
    return 2;
  case FK_Data_4:
    return 4;

  case ARM::fixup_arm_thumb_bcc:
  case ARM::fixup_arm_thumb_cp:
  case ARM::fixup_thumb_adr_pcrel_10:
  case ARM::fixup_arm_thumb_br:
  case ARM::fixup_arm_thumb_cb:
    // Instruction size is 2 bytes.
    return 2;

  case ARM::fixup_arm_pcrel_10_unscaled:
  case ARM::fixup_arm_ldst_pcrel_12:
  case ARM::fixup_arm_pcrel_10:
  case ARM::fixup_arm_pcrel_9:
  case ARM::fixup_arm_adr_pcrel_12:
  case ARM::fixup_arm_uncondbl:
  case ARM::fixup_arm_condbl:
  case ARM::fixup_arm_blx:
  case ARM::fixup_arm_condbranch:
  case ARM::fixup_arm_uncondbranch:
  case ARM::fixup_t2_ldst_pcrel_12:
  case ARM::fixup_t2_condbranch:
  case ARM::fixup_t2_uncondbranch:
  case ARM::fixup_t2_pcrel_10:
  case ARM::fixup_t2_adr_pcrel_12:
  case ARM::fixup_arm_thumb_bl:
  case ARM::fixup_arm_thumb_blx:
  case ARM::fixup_arm_movt_hi16:
  case ARM::fixup_arm_movw_lo16:
  case ARM::fixup_t2_movt_hi16:
  case ARM::fixup_t2_movw_lo16:
  case ARM::fixup_arm_mod_imm:
  case ARM::fixup_t2_so_imm:
  case ARM::fixup_bf_branch:
  case ARM::fixup_bf_target:
  case ARM::fixup_bfl_target:
  case ARM::fixup_bfc_target:
  case ARM::fixup_bfcsel_else_target:
  case ARM::fixup_wls:
  case ARM::fixup_le:
    // Instruction size is 4 bytes.
    return 4;
  }
}

void ARMAsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                               const MCValue &Target,
                               MutableArrayRef<char> Data, uint64_t Value,
                               bool IsResolved,
                               const MCSubtargetInfo* STI) const {
  unsigned NumBytes = getFixupKindNumBytes(Fixup.getKind());
  MCContext &Ctx = Asm.getContext();
  Value = adjustFixupValue(Asm, Fixup, Target, Value, IsResolved, Ctx, STI);
  if (!Value)
    return; // Doesn't change encoding.

  unsigned Offset = Fixup.getOffset();
  assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");

  // Used to point to big endian bytes.
  unsigned FullSizeBytes;
  if (Endian == support::big) {
    FullSizeBytes = getFixupKindContainerSizeBytes(Fixup.getKind());
    assert((Offset + FullSizeBytes) <= Data.size() && "Invalid fixup size!");
    assert(NumBytes <= FullSizeBytes && "Invalid fixup size!");
  }

  // For each byte of the fragment that the fixup touches, mask in the bits from
  // the fixup value. The Value has been "split up" into the appropriate
  // bitfields above.
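  // On big-endian targets the index runs backwards from the end of the
  // container, e.g. a 3-byte ARM fixup in a 4-byte instruction writes its
  // least significant byte at Offset + 3 and works down to Offset + 1.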
  for (unsigned i = 0; i != NumBytes; ++i) {
    unsigned Idx = Endian == support::little ? i : (FullSizeBytes - 1 - i);
    Data[Offset + Idx] |= uint8_t((Value >> (i * 8)) & 0xff);
  }
}

namespace CU {

/// Compact unwind encoding values.
enum CompactUnwindEncodings {
  UNWIND_ARM_MODE_MASK               = 0x0F000000,
  UNWIND_ARM_MODE_FRAME              = 0x01000000,
  UNWIND_ARM_MODE_FRAME_D            = 0x02000000,
  UNWIND_ARM_MODE_DWARF              = 0x04000000,

  UNWIND_ARM_FRAME_STACK_ADJUST_MASK = 0x00C00000,

  UNWIND_ARM_FRAME_FIRST_PUSH_R4     = 0x00000001,
  UNWIND_ARM_FRAME_FIRST_PUSH_R5     = 0x00000002,
  UNWIND_ARM_FRAME_FIRST_PUSH_R6     = 0x00000004,

  UNWIND_ARM_FRAME_SECOND_PUSH_R8    = 0x00000008,
  UNWIND_ARM_FRAME_SECOND_PUSH_R9    = 0x00000010,
  UNWIND_ARM_FRAME_SECOND_PUSH_R10   = 0x00000020,
  UNWIND_ARM_FRAME_SECOND_PUSH_R11   = 0x00000040,
  UNWIND_ARM_FRAME_SECOND_PUSH_R12   = 0x00000080,

  UNWIND_ARM_FRAME_D_REG_COUNT_MASK  = 0x00000F00,

  UNWIND_ARM_DWARF_SECTION_OFFSET    = 0x00FFFFFF
};

} // end CU namespace

/// Generate compact unwind encoding for the function based on the CFI
/// instructions. If the CFI instructions describe a frame that cannot be
/// encoded in compact unwind, the method returns UNWIND_ARM_MODE_DWARF which
/// tells the runtime to fall back and unwind using DWARF.
uint32_t ARMAsmBackendDarwin::generateCompactUnwindEncoding(
    ArrayRef<MCCFIInstruction> Instrs) const {
  DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs() << "generateCU()\n");
  // Only armv7k uses CFI based unwinding.
  if (Subtype != MachO::CPU_SUBTYPE_ARM_V7K)
    return 0;
  // No .cfi directives means no frame.
  if (Instrs.empty())
    return 0;
  // Start off assuming CFA is at SP+0.
  int CFARegister = ARM::SP;
  int CFARegisterOffset = 0;
  // Mark savable registers as initially unsaved.
  DenseMap<unsigned, int> RegOffsets;
  int FloatRegCount = 0;
  // Process each .cfi directive and build up compact unwind info.
  for (size_t i = 0, e = Instrs.size(); i != e; ++i) {
    int Reg;
    const MCCFIInstruction &Inst = Instrs[i];
    switch (Inst.getOperation()) {
    case MCCFIInstruction::OpDefCfa: // DW_CFA_def_cfa
      CFARegisterOffset = -Inst.getOffset();
      CFARegister = MRI.getLLVMRegNum(Inst.getRegister(), true);
      break;
    case MCCFIInstruction::OpDefCfaOffset: // DW_CFA_def_cfa_offset
      CFARegisterOffset = -Inst.getOffset();
      break;
    case MCCFIInstruction::OpDefCfaRegister: // DW_CFA_def_cfa_register
      CFARegister = MRI.getLLVMRegNum(Inst.getRegister(), true);
      break;
    case MCCFIInstruction::OpOffset: // DW_CFA_offset
      Reg = MRI.getLLVMRegNum(Inst.getRegister(), true);
      if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
        RegOffsets[Reg] = Inst.getOffset();
      else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
        RegOffsets[Reg] = Inst.getOffset();
        ++FloatRegCount;
      } else {
        DEBUG_WITH_TYPE("compact-unwind",
                        llvm::dbgs() << ".cfi_offset on unknown register="
                                     << Inst.getRegister() << "\n");
        return CU::UNWIND_ARM_MODE_DWARF;
      }
      break;
    case MCCFIInstruction::OpRelOffset: // DW_CFA_advance_loc
      // Ignore
      break;
    default:
      // Directive not convertible to compact unwind, bail out.
      DEBUG_WITH_TYPE("compact-unwind",
                      llvm::dbgs()
                          << "CFI directive not compatible with compact "
                             "unwind encoding, opcode=" << Inst.getOperation()
                          << "\n");
      return CU::UNWIND_ARM_MODE_DWARF;
      break;
    }
  }

  // If no frame set up, return no unwind info.
  if ((CFARegister == ARM::SP) && (CFARegisterOffset == 0))
    return 0;

  // Verify standard frame (lr/r7) was used.
  if (CFARegister != ARM::R7) {
    DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs() << "frame register is "
                                                   << CFARegister
                                                   << " instead of r7\n");
    return CU::UNWIND_ARM_MODE_DWARF;
  }
  int StackAdjust = CFARegisterOffset - 8;
  if (RegOffsets.lookup(ARM::LR) != (-4 - StackAdjust)) {
    DEBUG_WITH_TYPE("compact-unwind",
                    llvm::dbgs()
                        << "LR not saved as standard frame, StackAdjust="
                        << StackAdjust
                        << ", CFARegisterOffset=" << CFARegisterOffset
                        << ", lr save at offset=" << RegOffsets[14] << "\n");
    return CU::UNWIND_ARM_MODE_DWARF;
  }
  if (RegOffsets.lookup(ARM::R7) != (-8 - StackAdjust)) {
    DEBUG_WITH_TYPE("compact-unwind",
                    llvm::dbgs() << "r7 not saved as standard frame\n");
    return CU::UNWIND_ARM_MODE_DWARF;
  }
  uint32_t CompactUnwindEncoding = CU::UNWIND_ARM_MODE_FRAME;

  // If var-args are used, there may be a stack adjust required.
  switch (StackAdjust) {
  case 0:
    break;
  case 4:
    CompactUnwindEncoding |= 0x00400000;
    break;
  case 8:
    CompactUnwindEncoding |= 0x00800000;
    break;
  case 12:
    CompactUnwindEncoding |= 0x00C00000;
    break;
  default:
    DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs()
                                          << ".cfi_def_cfa stack adjust ("
                                          << StackAdjust << ") out of range\n");
    return CU::UNWIND_ARM_MODE_DWARF;
  }

  // If r6 is saved, it must be right below r7.
  static struct {
    unsigned Reg;
    unsigned Encoding;
  } GPRCSRegs[] = {{ARM::R6, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R6},
                   {ARM::R5, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R5},
                   {ARM::R4, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R4},
                   {ARM::R12, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R12},
                   {ARM::R11, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R11},
                   {ARM::R10, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R10},
                   {ARM::R9, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R9},
                   {ARM::R8, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R8}};

  int CurOffset = -8 - StackAdjust;
  for (auto CSReg : GPRCSRegs) {
    auto Offset = RegOffsets.find(CSReg.Reg);
    if (Offset == RegOffsets.end())
      continue;

    int RegOffset = Offset->second;
    if (RegOffset != CurOffset - 4) {
      DEBUG_WITH_TYPE("compact-unwind",
                      llvm::dbgs() << MRI.getName(CSReg.Reg) << " saved at "
                                   << RegOffset << " but only supported at "
                                   << CurOffset << "\n");
      return CU::UNWIND_ARM_MODE_DWARF;
    }
    CompactUnwindEncoding |= CSReg.Encoding;
    CurOffset -= 4;
  }

  // If no floats saved, we are done.
  if (FloatRegCount == 0)
    return CompactUnwindEncoding;

  // Switch mode to include D register saving.
  CompactUnwindEncoding &= ~CU::UNWIND_ARM_MODE_MASK;
  CompactUnwindEncoding |= CU::UNWIND_ARM_MODE_FRAME_D;

  // FIXME: supporting more than 4 saved D-registers compactly would be trivial,
  // but needs coordination with the linker and libunwind.
  if (FloatRegCount > 4) {
    DEBUG_WITH_TYPE("compact-unwind",
                    llvm::dbgs() << "unsupported number of D registers saved ("
                                 << FloatRegCount << ")\n");
    return CU::UNWIND_ARM_MODE_DWARF;
  }

  // Floating point registers must either be saved sequentially, or we defer to
  // DWARF. No gaps allowed here so check that each saved d-register is
  // precisely where it should be.
  static unsigned FPRCSRegs[] = { ARM::D8, ARM::D10, ARM::D12, ARM::D14 };
  for (int Idx = FloatRegCount - 1; Idx >= 0; --Idx) {
    auto Offset = RegOffsets.find(FPRCSRegs[Idx]);
    if (Offset == RegOffsets.end()) {
      DEBUG_WITH_TYPE("compact-unwind",
                      llvm::dbgs() << FloatRegCount << " D-regs saved, but "
                                   << MRI.getName(FPRCSRegs[Idx])
                                   << " not saved\n");
      return CU::UNWIND_ARM_MODE_DWARF;
    } else if (Offset->second != CurOffset - 8) {
      DEBUG_WITH_TYPE("compact-unwind",
                      llvm::dbgs() << FloatRegCount << " D-regs saved, but "
                                   << MRI.getName(FPRCSRegs[Idx])
                                   << " saved at " << Offset->second
                                   << ", expected at " << CurOffset - 8
                                   << "\n");
      return CU::UNWIND_ARM_MODE_DWARF;
    }
    CurOffset -= 8;
  }

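  // The saved D-register count is recorded in the
  // UNWIND_ARM_FRAME_D_REG_COUNT_MASK field (bits 8-11) as FloatRegCount - 1.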
  return CompactUnwindEncoding | ((FloatRegCount - 1) << 8);
}

static MachO::CPUSubTypeARM getMachOSubTypeFromArch(StringRef Arch) {
  ARM::ArchKind AK = ARM::parseArch(Arch);
  switch (AK) {
  default:
    return MachO::CPU_SUBTYPE_ARM_V7;
  case ARM::ArchKind::ARMV4T:
    return MachO::CPU_SUBTYPE_ARM_V4T;
  case ARM::ArchKind::ARMV5T:
  case ARM::ArchKind::ARMV5TE:
  case ARM::ArchKind::ARMV5TEJ:
    return MachO::CPU_SUBTYPE_ARM_V5;
  case ARM::ArchKind::ARMV6:
  case ARM::ArchKind::ARMV6K:
    return MachO::CPU_SUBTYPE_ARM_V6;
  case ARM::ArchKind::ARMV7A:
    return MachO::CPU_SUBTYPE_ARM_V7;
  case ARM::ArchKind::ARMV7S:
    return MachO::CPU_SUBTYPE_ARM_V7S;
  case ARM::ArchKind::ARMV7K:
    return MachO::CPU_SUBTYPE_ARM_V7K;
  case ARM::ArchKind::ARMV6M:
    return MachO::CPU_SUBTYPE_ARM_V6M;
  case ARM::ArchKind::ARMV7M:
    return MachO::CPU_SUBTYPE_ARM_V7M;
  case ARM::ArchKind::ARMV7EM:
    return MachO::CPU_SUBTYPE_ARM_V7EM;
  }
}

static MCAsmBackend *createARMAsmBackend(const Target &T,
                                         const MCSubtargetInfo &STI,
                                         const MCRegisterInfo &MRI,
                                         const MCTargetOptions &Options,
                                         support::endianness Endian) {
  const Triple &TheTriple = STI.getTargetTriple();
  switch (TheTriple.getObjectFormat()) {
  default:
    llvm_unreachable("unsupported object format");
  case Triple::MachO: {
    MachO::CPUSubTypeARM CS = getMachOSubTypeFromArch(TheTriple.getArchName());
    return new ARMAsmBackendDarwin(T, STI, MRI, CS);
  }
  case Triple::COFF:
    assert(TheTriple.isOSWindows() && "non-Windows ARM COFF is not supported");
    return new ARMAsmBackendWinCOFF(T, STI);
  case Triple::ELF:
    assert(TheTriple.isOSBinFormatELF() && "using ELF for non-ELF target");
    uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
    return new ARMAsmBackendELF(T, STI, OSABI, Endian);
  }
}

MCAsmBackend *llvm::createARMLEAsmBackend(const Target &T,
                                          const MCSubtargetInfo &STI,
                                          const MCRegisterInfo &MRI,
                                          const MCTargetOptions &Options) {
  return createARMAsmBackend(T, STI, MRI, Options, support::little);
}

MCAsmBackend *llvm::createARMBEAsmBackend(const Target &T,
                                          const MCSubtargetInfo &STI,
                                          const MCRegisterInfo &MRI,
                                          const MCTargetOptions &Options) {
  return createARMAsmBackend(T, STI, MRI, Options, support::big);
}