1 //===-- ARMAsmBackend.cpp - ARM Assembler Backend -------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
9 #include "MCTargetDesc/ARMAsmBackend.h"
10 #include "MCTargetDesc/ARMAddressingModes.h"
11 #include "MCTargetDesc/ARMAsmBackendDarwin.h"
12 #include "MCTargetDesc/ARMAsmBackendELF.h"
13 #include "MCTargetDesc/ARMAsmBackendWinCOFF.h"
14 #include "MCTargetDesc/ARMFixupKinds.h"
15 #include "MCTargetDesc/ARMMCTargetDesc.h"
16 #include "llvm/ADT/StringSwitch.h"
17 #include "llvm/BinaryFormat/ELF.h"
18 #include "llvm/BinaryFormat/MachO.h"
19 #include "llvm/MC/MCAsmBackend.h"
20 #include "llvm/MC/MCAssembler.h"
21 #include "llvm/MC/MCContext.h"
22 #include "llvm/MC/MCDirectives.h"
23 #include "llvm/MC/MCELFObjectWriter.h"
24 #include "llvm/MC/MCExpr.h"
25 #include "llvm/MC/MCFixupKindInfo.h"
26 #include "llvm/MC/MCObjectWriter.h"
27 #include "llvm/MC/MCRegisterInfo.h"
28 #include "llvm/MC/MCSectionELF.h"
29 #include "llvm/MC/MCSectionMachO.h"
30 #include "llvm/MC/MCSubtargetInfo.h"
31 #include "llvm/MC/MCValue.h"
32 #include "llvm/Support/Debug.h"
33 #include "llvm/Support/EndianStream.h"
34 #include "llvm/Support/ErrorHandling.h"
35 #include "llvm/Support/Format.h"
36 #include "llvm/Support/TargetParser.h"
37 #include "llvm/Support/raw_ostream.h"
38 using namespace llvm;
40 namespace {
41 class ARMELFObjectWriter : public MCELFObjectTargetWriter {
42 public:
43 ARMELFObjectWriter(uint8_t OSABI)
44 : MCELFObjectTargetWriter(/*Is64Bit*/ false, OSABI, ELF::EM_ARM,
45 /*HasRelocationAddend*/ false) {}
47 } // end anonymous namespace
49 const MCFixupKindInfo &ARMAsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
50 const static MCFixupKindInfo InfosLE[ARM::NumTargetFixupKinds] = {
51 // This table *must* be in the order that the fixup_* kinds are defined in
52 // ARMFixupKinds.h.
54 // Name Offset (bits) Size (bits) Flags
55 {"fixup_arm_ldst_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
56 {"fixup_t2_ldst_pcrel_12", 0, 32,
57 MCFixupKindInfo::FKF_IsPCRel |
58 MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
59 {"fixup_arm_pcrel_10_unscaled", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
60 {"fixup_arm_pcrel_10", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
61 {"fixup_t2_pcrel_10", 0, 32,
62 MCFixupKindInfo::FKF_IsPCRel |
63 MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
64 {"fixup_arm_pcrel_9", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
65 {"fixup_t2_pcrel_9", 0, 32,
66 MCFixupKindInfo::FKF_IsPCRel |
67 MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
68 {"fixup_thumb_adr_pcrel_10", 0, 8,
69 MCFixupKindInfo::FKF_IsPCRel |
70 MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
71 {"fixup_arm_adr_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
72 {"fixup_t2_adr_pcrel_12", 0, 32,
73 MCFixupKindInfo::FKF_IsPCRel |
74 MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
75 {"fixup_arm_condbranch", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
76 {"fixup_arm_uncondbranch", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
77 {"fixup_t2_condbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
78 {"fixup_t2_uncondbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
79 {"fixup_arm_thumb_br", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
80 {"fixup_arm_uncondbl", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
81 {"fixup_arm_condbl", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
82 {"fixup_arm_blx", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
83 {"fixup_arm_thumb_bl", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
84 {"fixup_arm_thumb_blx", 0, 32,
85 MCFixupKindInfo::FKF_IsPCRel |
86 MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
87 {"fixup_arm_thumb_cb", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
88 {"fixup_arm_thumb_cp", 0, 8,
89 MCFixupKindInfo::FKF_IsPCRel |
90 MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
91 {"fixup_arm_thumb_bcc", 0, 8, MCFixupKindInfo::FKF_IsPCRel},
92 // movw / movt: 16-bit immediate, but scattered into two chunks, bits 0-11
93 // and 16-19.
94 {"fixup_arm_movt_hi16", 0, 20, 0},
95 {"fixup_arm_movw_lo16", 0, 20, 0},
96 {"fixup_t2_movt_hi16", 0, 20, 0},
97 {"fixup_t2_movw_lo16", 0, 20, 0},
98 {"fixup_arm_mod_imm", 0, 12, 0},
99 {"fixup_t2_so_imm", 0, 26, 0},
101 const static MCFixupKindInfo InfosBE[ARM::NumTargetFixupKinds] = {
102 // This table *must* be in the order that the fixup_* kinds are defined in
103 // ARMFixupKinds.h.
105 // Name Offset (bits) Size (bits) Flags
106 {"fixup_arm_ldst_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
107 {"fixup_t2_ldst_pcrel_12", 0, 32,
108 MCFixupKindInfo::FKF_IsPCRel |
109 MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
110 {"fixup_arm_pcrel_10_unscaled", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
111 {"fixup_arm_pcrel_10", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
112 {"fixup_t2_pcrel_10", 0, 32,
113 MCFixupKindInfo::FKF_IsPCRel |
114 MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
115 {"fixup_arm_pcrel_9", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
116 {"fixup_t2_pcrel_9", 0, 32,
117 MCFixupKindInfo::FKF_IsPCRel |
118 MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
119 {"fixup_thumb_adr_pcrel_10", 8, 8,
120 MCFixupKindInfo::FKF_IsPCRel |
121 MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
122 {"fixup_arm_adr_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
123 {"fixup_t2_adr_pcrel_12", 0, 32,
124 MCFixupKindInfo::FKF_IsPCRel |
125 MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
126 {"fixup_arm_condbranch", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
127 {"fixup_arm_uncondbranch", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
128 {"fixup_t2_condbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
129 {"fixup_t2_uncondbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
130 {"fixup_arm_thumb_br", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
131 {"fixup_arm_uncondbl", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
132 {"fixup_arm_condbl", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
133 {"fixup_arm_blx", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
134 {"fixup_arm_thumb_bl", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
135 {"fixup_arm_thumb_blx", 0, 32,
136 MCFixupKindInfo::FKF_IsPCRel |
137 MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
138 {"fixup_arm_thumb_cb", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
139 {"fixup_arm_thumb_cp", 8, 8,
140 MCFixupKindInfo::FKF_IsPCRel |
141 MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
142 {"fixup_arm_thumb_bcc", 8, 8, MCFixupKindInfo::FKF_IsPCRel},
143 // movw / movt: 16-bit immediate, but scattered into two chunks, bits 0-11
144 // and 16-19.
145 {"fixup_arm_movt_hi16", 12, 20, 0},
146 {"fixup_arm_movw_lo16", 12, 20, 0},
147 {"fixup_t2_movt_hi16", 12, 20, 0},
148 {"fixup_t2_movw_lo16", 12, 20, 0},
149 {"fixup_arm_mod_imm", 20, 12, 0},
150 {"fixup_t2_so_imm", 26, 6, 0},
153 if (Kind < FirstTargetFixupKind)
154 return MCAsmBackend::getFixupKindInfo(Kind);
156 assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
157 "Invalid kind!");
158 return (Endian == support::little ? InfosLE
159 : InfosBE)[Kind - FirstTargetFixupKind];
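// Worked example: the LE and BE tables describe the same logical fields and
// differ only in the bit offset within the container. fixup_arm_thumb_bcc, for
// instance, is an 8-bit field at bit offset 0 in the little-endian table but
// at bit offset 8 in the big-endian table, because the byte holding the branch
// offset sits in the opposite half of the 16-bit instruction when it is stored
// big-endian.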
162 void ARMAsmBackend::handleAssemblerFlag(MCAssemblerFlag Flag) {
163 switch (Flag) {
164 default:
165 break;
166 case MCAF_Code16:
167 setIsThumb(true);
168 break;
169 case MCAF_Code32:
170 setIsThumb(false);
171 break;
175 unsigned ARMAsmBackend::getRelaxedOpcode(unsigned Op,
176 const MCSubtargetInfo &STI) const {
177 bool HasThumb2 = STI.getFeatureBits()[ARM::FeatureThumb2];
178 bool HasV8MBaselineOps = STI.getFeatureBits()[ARM::HasV8MBaselineOps];
180 switch (Op) {
181 default:
182 return Op;
183 case ARM::tBcc:
184 return HasThumb2 ? (unsigned)ARM::t2Bcc : Op;
185 case ARM::tLDRpci:
186 return HasThumb2 ? (unsigned)ARM::t2LDRpci : Op;
187 case ARM::tADR:
188 return HasThumb2 ? (unsigned)ARM::t2ADR : Op;
189 case ARM::tB:
190 return HasV8MBaselineOps ? (unsigned)ARM::t2B : Op;
191 case ARM::tCBZ:
192 return ARM::tHINT;
193 case ARM::tCBNZ:
194 return ARM::tHINT;
198 bool ARMAsmBackend::mayNeedRelaxation(const MCInst &Inst,
199 const MCSubtargetInfo &STI) const {
200 if (getRelaxedOpcode(Inst.getOpcode(), STI) != Inst.getOpcode())
201 return true;
202 return false;
205 const char *ARMAsmBackend::reasonForFixupRelaxation(const MCFixup &Fixup,
206 uint64_t Value) const {
207 switch ((unsigned)Fixup.getKind()) {
208 case ARM::fixup_arm_thumb_br: {
209 // Relaxing tB to t2B. tB has a signed 12-bit displacement with the
210 // low bit being an implied zero. There's an implied +4 offset for the
211 // branch, so we adjust the other way here to determine what's
212 // encodable.
214 // Relax if the value does not fit in the signed 12-bit displacement.
215 int64_t Offset = int64_t(Value) - 4;
216 if (Offset > 2046 || Offset < -2048)
217 return "out of range pc-relative fixup value";
218 break;
220 case ARM::fixup_arm_thumb_bcc: {
221 // Relaxing tBcc to t2Bcc. tBcc has a signed 9-bit displacement with the
222 // low bit being an implied zero. There's an implied +4 offset for the
223 // branch, so we adjust the other way here to determine what's
224 // encodable.
226 // Relax if the value does not fit in the signed 9-bit displacement.
227 int64_t Offset = int64_t(Value) - 4;
228 if (Offset > 254 || Offset < -256)
229 return "out of range pc-relative fixup value";
230 break;
232 case ARM::fixup_thumb_adr_pcrel_10:
233 case ARM::fixup_arm_thumb_cp: {
234 // If the immediate is negative, greater than 1020, or not a multiple
235 // of four, the wide version of the instruction must be used.
236 int64_t Offset = int64_t(Value) - 4;
237 if (Offset & 3)
238 return "misaligned pc-relative fixup value";
239 else if (Offset > 1020 || Offset < 0)
240 return "out of range pc-relative fixup value";
241 break;
243 case ARM::fixup_arm_thumb_cb: {
244 // If we have a Thumb CBZ or CBNZ instruction and its target is the next
245 // instruction, it is actually out of range for the instruction.
246 // It will be changed to a NOP.
247 int64_t Offset = (Value & ~1);
248 if (Offset == 2)
249 return "will be converted to nop";
250 break;
252 default:
253 llvm_unreachable("Unexpected fixup kind in reasonForFixupRelaxation()!");
255 return nullptr;
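// Worked example: a tB (fixup_arm_thumb_br) whose resolved Value is 4000 gives
// Offset = 3996, outside [-2048, 2046], so the narrow branch is reported as
// out of range and relaxed to t2B; a Value of 100 gives Offset = 96, which
// fits, so nullptr is returned and the 16-bit encoding is kept.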
258 bool ARMAsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
259 const MCRelaxableFragment *DF,
260 const MCAsmLayout &Layout) const {
261 return reasonForFixupRelaxation(Fixup, Value);
264 void ARMAsmBackend::relaxInstruction(const MCInst &Inst,
265 const MCSubtargetInfo &STI,
266 MCInst &Res) const {
267 unsigned RelaxedOp = getRelaxedOpcode(Inst.getOpcode(), STI);
269 // Sanity check w/ diagnostic if we get here w/ a bogus instruction.
270 if (RelaxedOp == Inst.getOpcode()) {
271 SmallString<256> Tmp;
272 raw_svector_ostream OS(Tmp);
273 Inst.dump_pretty(OS);
274 OS << "\n";
275 report_fatal_error("unexpected instruction to relax: " + OS.str());
278 // If we are changing Thumb CBZ or CBNZ instruction to a NOP, aka tHINT, we
279 // have to change the operands too.
280 if ((Inst.getOpcode() == ARM::tCBZ || Inst.getOpcode() == ARM::tCBNZ) &&
281 RelaxedOp == ARM::tHINT) {
282 Res.setOpcode(RelaxedOp);
283 Res.addOperand(MCOperand::createImm(0));
284 Res.addOperand(MCOperand::createImm(14));
285 Res.addOperand(MCOperand::createReg(0));
286 return;
289 // The rest of instructions we're relaxing have the same operands.
290 // We just need to update to the proper opcode.
291 Res = Inst;
292 Res.setOpcode(RelaxedOp);
295 bool ARMAsmBackend::writeNopData(raw_ostream &OS, uint64_t Count) const {
296 const uint16_t Thumb1_16bitNopEncoding = 0x46c0; // using MOV r8,r8
297 const uint16_t Thumb2_16bitNopEncoding = 0xbf00; // NOP
298 const uint32_t ARMv4_NopEncoding = 0xe1a00000; // using MOV r0,r0
299 const uint32_t ARMv6T2_NopEncoding = 0xe320f000; // NOP
300 if (isThumb()) {
301 const uint16_t nopEncoding =
302 hasNOP() ? Thumb2_16bitNopEncoding : Thumb1_16bitNopEncoding;
303 uint64_t NumNops = Count / 2;
304 for (uint64_t i = 0; i != NumNops; ++i)
305 support::endian::write(OS, nopEncoding, Endian);
306 if (Count & 1)
307 OS << '\0';
308 return true;
310 // ARM mode
311 const uint32_t nopEncoding =
312 hasNOP() ? ARMv6T2_NopEncoding : ARMv4_NopEncoding;
313 uint64_t NumNops = Count / 4;
314 for (uint64_t i = 0; i != NumNops; ++i)
315 support::endian::write(OS, nopEncoding, Endian);
316 // FIXME: should this function return false when unable to write exactly
317 // 'Count' bytes with NOP encodings?
318 switch (Count % 4) {
319 default:
320 break; // No leftover bytes to write
321 case 1:
322 OS << '\0';
323 break;
324 case 2:
325 OS.write("\0\0", 2);
326 break;
327 case 3:
328 OS.write("\0\0\xa0", 3);
329 break;
332 return true;
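// Worked example: padding 10 bytes in Thumb mode emits five 16-bit NOPs
// (0xbf00, or MOV r8, r8 = 0x46c0 on targets without a real NOP). Padding 7
// bytes in ARM mode emits one 32-bit NOP followed by the 3-byte tail
// "\0\0\xa0", i.e. the low three bytes of a little-endian MOV r0, r0
// (0xe1a00000).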
335 static uint32_t swapHalfWords(uint32_t Value, bool IsLittleEndian) {
336 if (IsLittleEndian) {
337 // Note that the halfwords are stored high first and low second in thumb;
338 // so we need to swap the fixup value here to map properly.
339 uint32_t Swapped = (Value & 0xFFFF0000) >> 16;
340 Swapped |= (Value & 0x0000FFFF) << 16;
341 return Swapped;
342 } else
343 return Value;
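// Worked example: swapHalfWords(0xAAAABBBB, /*IsLittleEndian=*/true) returns
// 0xBBBBAAAA, so that when the 32-bit container is later written out
// little-endian, the halfword that comes first in the Thumb2 encoding ends up
// at the lower address.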
346 static uint32_t joinHalfWords(uint32_t FirstHalf, uint32_t SecondHalf,
347 bool IsLittleEndian) {
348 uint32_t Value;
350 if (IsLittleEndian) {
351 Value = (SecondHalf & 0xFFFF) << 16;
352 Value |= (FirstHalf & 0xFFFF);
353 } else {
354 Value = (SecondHalf & 0xFFFF);
355 Value |= (FirstHalf & 0xFFFF) << 16;
358 return Value;
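// Worked example: joinHalfWords(0xF400, 0xD000, /*IsLittleEndian=*/true)
// returns 0xD000F400 (second halfword in the high bits), whereas for a
// big-endian target it returns 0xF400D000, matching the byte order in which
// applyFixup() will index the container.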
361 unsigned ARMAsmBackend::adjustFixupValue(const MCAssembler &Asm,
362 const MCFixup &Fixup,
363 const MCValue &Target, uint64_t Value,
364 bool IsResolved, MCContext &Ctx,
365 const MCSubtargetInfo* STI) const {
366 unsigned Kind = Fixup.getKind();
368 // MachO tries to make .o files that look vaguely pre-linked, so for MOVW/MOVT
369 // and .word relocations they put the Thumb bit into the addend if possible.
370 // Other relocation types don't want this bit though (branches couldn't encode
371 // it if it *was* present, and no other relocations exist) and it can
372 // interfere with checking valid expressions.
373 if (const MCSymbolRefExpr *A = Target.getSymA()) {
374 if (A->hasSubsectionsViaSymbols() && Asm.isThumbFunc(&A->getSymbol()) &&
375 A->getSymbol().isExternal() &&
376 (Kind == FK_Data_4 || Kind == ARM::fixup_arm_movw_lo16 ||
377 Kind == ARM::fixup_arm_movt_hi16 || Kind == ARM::fixup_t2_movw_lo16 ||
378 Kind == ARM::fixup_t2_movt_hi16))
379 Value |= 1;
382 switch (Kind) {
383 default:
384 Ctx.reportError(Fixup.getLoc(), "bad relocation fixup type");
385 return 0;
386 case FK_Data_1:
387 case FK_Data_2:
388 case FK_Data_4:
389 return Value;
390 case FK_SecRel_2:
391 return Value;
392 case FK_SecRel_4:
393 return Value;
394 case ARM::fixup_arm_movt_hi16:
395 assert(STI != nullptr);
396 if (IsResolved || !STI->getTargetTriple().isOSBinFormatELF())
397 Value >>= 16;
398 LLVM_FALLTHROUGH;
399 case ARM::fixup_arm_movw_lo16: {
400 unsigned Hi4 = (Value & 0xF000) >> 12;
401 unsigned Lo12 = Value & 0x0FFF;
402 // inst{19-16} = Hi4;
403 // inst{11-0} = Lo12;
404 Value = (Hi4 << 16) | (Lo12);
405 return Value;
407 case ARM::fixup_t2_movt_hi16:
408 assert(STI != nullptr);
409 if (IsResolved || !STI->getTargetTriple().isOSBinFormatELF())
410 Value >>= 16;
411 LLVM_FALLTHROUGH;
412 case ARM::fixup_t2_movw_lo16: {
413 unsigned Hi4 = (Value & 0xF000) >> 12;
414 unsigned i = (Value & 0x800) >> 11;
415 unsigned Mid3 = (Value & 0x700) >> 8;
416 unsigned Lo8 = Value & 0x0FF;
417 // inst{19-16} = Hi4;
418 // inst{26} = i;
419 // inst{14-12} = Mid3;
420 // inst{7-0} = Lo8;
421 Value = (Hi4 << 16) | (i << 26) | (Mid3 << 12) | (Lo8);
422 return swapHalfWords(Value, Endian == support::little);
424 case ARM::fixup_arm_ldst_pcrel_12:
425 // ARM PC-relative values are offset by 8.
426 Value -= 4;
427 LLVM_FALLTHROUGH;
428 case ARM::fixup_t2_ldst_pcrel_12: {
429 // Offset by 4, adjusted by two due to the half-word ordering of thumb.
430 Value -= 4;
431 bool isAdd = true;
432 if ((int64_t)Value < 0) {
433 Value = -Value;
434 isAdd = false;
436 if (Value >= 4096) {
437 Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
438 return 0;
440 Value |= isAdd << 23;
442 // Same addressing mode as fixup_arm_pcrel_10,
443 // but with 16-bit halfwords swapped.
444 if (Kind == ARM::fixup_t2_ldst_pcrel_12)
445 return swapHalfWords(Value, Endian == support::little);
447 return Value;
449 case ARM::fixup_arm_adr_pcrel_12: {
450 // ARM PC-relative values are offset by 8.
451 Value -= 8;
452 unsigned opc = 4; // bits {24-21}. Default to add: 0b0100
453 if ((int64_t)Value < 0) {
454 Value = -Value;
455 opc = 2; // 0b0010
457 if (ARM_AM::getSOImmVal(Value) == -1) {
458 Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
459 return 0;
461 // Encode the immediate and shift the opcode into place.
462 return ARM_AM::getSOImmVal(Value) | (opc << 21);
465 case ARM::fixup_t2_adr_pcrel_12: {
466 Value -= 4;
467 unsigned opc = 0;
468 if ((int64_t)Value < 0) {
469 Value = -Value;
470 opc = 5;
473 uint32_t out = (opc << 21);
474 out |= (Value & 0x800) << 15;
475 out |= (Value & 0x700) << 4;
476 out |= (Value & 0x0FF);
478 return swapHalfWords(out, Endian == support::little);
481 case ARM::fixup_arm_condbranch:
482 case ARM::fixup_arm_uncondbranch:
483 case ARM::fixup_arm_uncondbl:
484 case ARM::fixup_arm_condbl:
485 case ARM::fixup_arm_blx:
486 // These values don't encode the low two bits since they're always zero.
487 // Offset by 8 just as above.
488 if (const MCSymbolRefExpr *SRE =
489 dyn_cast<MCSymbolRefExpr>(Fixup.getValue()))
490 if (SRE->getKind() == MCSymbolRefExpr::VK_TLSCALL)
491 return 0;
492 return 0xffffff & ((Value - 8) >> 2);
493 case ARM::fixup_t2_uncondbranch: {
494 Value = Value - 4;
495 if (!isInt<25>(Value)) {
496 Ctx.reportError(Fixup.getLoc(), "Relocation out of range");
497 return 0;
500 Value >>= 1; // Low bit is not encoded.
502 uint32_t out = 0;
503 bool I = Value & 0x800000;
504 bool J1 = Value & 0x400000;
505 bool J2 = Value & 0x200000;
506 J1 ^= I;
507 J2 ^= I;
509 out |= I << 26; // S bit
510 out |= !J1 << 13; // J1 bit
511 out |= !J2 << 11; // J2 bit
512 out |= (Value & 0x1FF800) << 5; // imm10 field
513 out |= (Value & 0x0007FF); // imm11 field
515 return swapHalfWords(out, Endian == support::little);
517 case ARM::fixup_t2_condbranch: {
518 Value = Value - 4;
519 if (!isInt<21>(Value)) {
520 Ctx.reportError(Fixup.getLoc(), "Relocation out of range");
521 return 0;
524 Value >>= 1; // Low bit is not encoded.
526 uint64_t out = 0;
527 out |= (Value & 0x80000) << 7; // S bit
528 out |= (Value & 0x40000) >> 7; // J2 bit
529 out |= (Value & 0x20000) >> 4; // J1 bit
530 out |= (Value & 0x1F800) << 5; // imm6 field
531 out |= (Value & 0x007FF); // imm11 field
533 return swapHalfWords(out, Endian == support::little);
535 case ARM::fixup_arm_thumb_bl: {
536 if (!isInt<25>(Value - 4) ||
537 (!STI->getFeatureBits()[ARM::FeatureThumb2] &&
538 !STI->getFeatureBits()[ARM::HasV8MBaselineOps] &&
539 !STI->getFeatureBits()[ARM::HasV6MOps] &&
540 !isInt<23>(Value - 4))) {
541 Ctx.reportError(Fixup.getLoc(), "Relocation out of range");
542 return 0;
545 // The value doesn't encode the low bit (always zero) and is offset by
546 // four. The 32-bit immediate value is encoded as
547 // imm32 = SignExtend(S:I1:I2:imm10:imm11:0)
548 // where I1 = NOT(J1 ^ S) and I2 = NOT(J2 ^ S).
549 // The value is encoded into disjoint bit positions in the destination
550 // opcode. x = unchanged, I = immediate value bit, S = sign extension bit,
551 // J = either J1 or J2 bit
553 // BL: xxxxxSIIIIIIIIII xxJxJIIIIIIIIIII
555 // Note that the halfwords are stored high first, low second; so we need
556 // to transpose the fixup value here to map properly.
557 uint32_t offset = (Value - 4) >> 1;
558 uint32_t signBit = (offset & 0x800000) >> 23;
559 uint32_t I1Bit = (offset & 0x400000) >> 22;
560 uint32_t J1Bit = (I1Bit ^ 0x1) ^ signBit;
561 uint32_t I2Bit = (offset & 0x200000) >> 21;
562 uint32_t J2Bit = (I2Bit ^ 0x1) ^ signBit;
563 uint32_t imm10Bits = (offset & 0x1FF800) >> 11;
564 uint32_t imm11Bits = (offset & 0x000007FF);
566 uint32_t FirstHalf = (((uint16_t)signBit << 10) | (uint16_t)imm10Bits);
567 uint32_t SecondHalf = (((uint16_t)J1Bit << 13) | ((uint16_t)J2Bit << 11) |
568 (uint16_t)imm11Bits);
569 return joinHalfWords(FirstHalf, SecondHalf, Endian == support::little);
571 case ARM::fixup_arm_thumb_blx: {
572 // The value doesn't encode the low two bits (always zero) and is offset by
573 // four (see fixup_arm_thumb_cp). The 32-bit immediate value is encoded as
574 // imm32 = SignExtend(S:I1:I2:imm10H:imm10L:00)
575 // where I1 = NOT(J1 ^ S) and I2 = NOT(J2 ^ S).
576 // The value is encoded into disjoint bit positions in the destination
577 // opcode. x = unchanged, I = immediate value bit, S = sign extension bit,
578 // J = either J1 or J2 bit, 0 = zero.
580 // BLX: xxxxxSIIIIIIIIII xxJxJIIIIIIIIII0
582 // Note that the halfwords are stored high first, low second; so we need
583 // to transpose the fixup value here to map properly.
584 if (Value % 4 != 0) {
585 Ctx.reportError(Fixup.getLoc(), "misaligned ARM call destination");
586 return 0;
589 uint32_t offset = (Value - 4) >> 2;
590 if (const MCSymbolRefExpr *SRE =
591 dyn_cast<MCSymbolRefExpr>(Fixup.getValue()))
592 if (SRE->getKind() == MCSymbolRefExpr::VK_TLSCALL)
593 offset = 0;
594 uint32_t signBit = (offset & 0x400000) >> 22;
595 uint32_t I1Bit = (offset & 0x200000) >> 21;
596 uint32_t J1Bit = (I1Bit ^ 0x1) ^ signBit;
597 uint32_t I2Bit = (offset & 0x100000) >> 20;
598 uint32_t J2Bit = (I2Bit ^ 0x1) ^ signBit;
599 uint32_t imm10HBits = (offset & 0xFFC00) >> 10;
600 uint32_t imm10LBits = (offset & 0x3FF);
602 uint32_t FirstHalf = (((uint16_t)signBit << 10) | (uint16_t)imm10HBits);
603 uint32_t SecondHalf = (((uint16_t)J1Bit << 13) | ((uint16_t)J2Bit << 11) |
604 ((uint16_t)imm10LBits) << 1);
605 return joinHalfWords(FirstHalf, SecondHalf, Endian == support::little);
607 case ARM::fixup_thumb_adr_pcrel_10:
608 case ARM::fixup_arm_thumb_cp:
609 // On CPUs supporting Thumb2, this will be relaxed to an ldr.w, otherwise we
610 // could have an error on our hands.
611 assert(STI != nullptr);
612 if (!STI->getFeatureBits()[ARM::FeatureThumb2] && IsResolved) {
613 const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
614 if (FixupDiagnostic) {
615 Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
616 return 0;
619 // Offset by 4, and don't encode the low two bits.
620 return ((Value - 4) >> 2) & 0xff;
621 case ARM::fixup_arm_thumb_cb: {
622 // CB instructions can only branch to offsets in [4, 126] in multiples of 2
623 // so ensure that the raw value LSB is zero and it lies in [2, 130].
624 // An offset of 2 will be relaxed to a NOP.
625 if ((int64_t)Value < 2 || Value > 0x82 || Value & 1) {
626 Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
627 return 0;
629 // Offset by 4 and don't encode the lower bit, which is always 0.
630 // FIXME: diagnose if no Thumb2
631 uint32_t Binary = (Value - 4) >> 1;
632 return ((Binary & 0x20) << 4) | ((Binary & 0x1f) << 3);
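// Worked example: a CBZ branching to the instruction 8 bytes ahead has
// Value = 8, so Binary = (8 - 4) >> 1 = 2 and the returned bits are
// ((2 & 0x20) << 4) | ((2 & 0x1f) << 3) = 0x10, i.e. i = 0 and imm5 = 2,
// encoding an offset of i:imm5:'0' = 4 from the Thumb PC (instruction + 4).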
634 case ARM::fixup_arm_thumb_br:
635 // Offset by 4 and don't encode the lower bit, which is always 0.
636 assert(STI != nullptr);
637 if (!STI->getFeatureBits()[ARM::FeatureThumb2] &&
638 !STI->getFeatureBits()[ARM::HasV8MBaselineOps]) {
639 const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
640 if (FixupDiagnostic) {
641 Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
642 return 0;
645 return ((Value - 4) >> 1) & 0x7ff;
646 case ARM::fixup_arm_thumb_bcc:
647 // Offset by 4 and don't encode the lower bit, which is always 0.
648 assert(STI != nullptr);
649 if (!STI->getFeatureBits()[ARM::FeatureThumb2]) {
650 const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
651 if (FixupDiagnostic) {
652 Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
653 return 0;
656 return ((Value - 4) >> 1) & 0xff;
657 case ARM::fixup_arm_pcrel_10_unscaled: {
658 Value = Value - 8; // ARM fixups offset by an additional word and don't
659 // need to adjust for the half-word ordering.
660 bool isAdd = true;
661 if ((int64_t)Value < 0) {
662 Value = -Value;
663 isAdd = false;
665 // The value has the low 4 bits encoded in [3:0] and the high 4 in [11:8].
666 if (Value >= 256) {
667 Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
668 return 0;
670 Value = (Value & 0xf) | ((Value & 0xf0) << 4);
671 return Value | (isAdd << 23);
673 case ARM::fixup_arm_pcrel_10:
674 Value = Value - 4; // ARM fixups offset by an additional word and don't
675 // need to adjust for the half-word ordering.
676 LLVM_FALLTHROUGH;
677 case ARM::fixup_t2_pcrel_10: {
678 // Offset by 4, adjusted by two due to the half-word ordering of thumb.
679 Value = Value - 4;
680 bool isAdd = true;
681 if ((int64_t)Value < 0) {
682 Value = -Value;
683 isAdd = false;
685 // These values don't encode the low two bits since they're always zero.
686 Value >>= 2;
687 if (Value >= 256) {
688 Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
689 return 0;
691 Value |= isAdd << 23;
693 // Same addressing mode as fixup_arm_pcrel_10, but with 16-bit halfwords
694 // swapped.
695 if (Kind == ARM::fixup_t2_pcrel_10)
696 return swapHalfWords(Value, Endian == support::little);
698 return Value;
700 case ARM::fixup_arm_pcrel_9:
701 Value = Value - 4; // ARM fixups offset by an additional word and don't
702 // need to adjust for the half-word ordering.
703 LLVM_FALLTHROUGH;
704 case ARM::fixup_t2_pcrel_9: {
705 // Offset by 4, adjusted by two due to the half-word ordering of thumb.
706 Value = Value - 4;
707 bool isAdd = true;
708 if ((int64_t)Value < 0) {
709 Value = -Value;
710 isAdd = false;
712 // These values don't encode the low bit since it's always zero.
713 if (Value & 1) {
714 Ctx.reportError(Fixup.getLoc(), "invalid value for this fixup");
715 return 0;
717 Value >>= 1;
718 if (Value >= 256) {
719 Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
720 return 0;
722 Value |= isAdd << 23;
724 // Same addressing mode as fixup_arm_pcrel_9, but with 16-bit halfwords
725 // swapped.
726 if (Kind == ARM::fixup_t2_pcrel_9)
727 return swapHalfWords(Value, Endian == support::little);
729 return Value;
731 case ARM::fixup_arm_mod_imm:
732 Value = ARM_AM::getSOImmVal(Value);
733 if (Value >> 12) {
734 Ctx.reportError(Fixup.getLoc(), "out of range immediate fixup value");
735 return 0;
737 return Value;
738 case ARM::fixup_t2_so_imm: {
739 Value = ARM_AM::getT2SOImmVal(Value);
740 if ((int64_t)Value < 0) {
741 Ctx.reportError(Fixup.getLoc(), "out of range immediate fixup value");
742 return 0;
744 // Value will contain a 12-bit value broken up into a 4-bit shift in bits
745 // 11:8 and the 8-bit immediate in 0:7. The instruction has the immediate
746 // in 0:7. The 4-bit shift is split up into i:imm3 where i is placed at bit
747 // 10 of the upper half-word and imm3 is placed at 14:12 of the lower
748 // half-word.
749 uint64_t EncValue = 0;
750 EncValue |= (Value & 0x800) << 15;
751 EncValue |= (Value & 0x700) << 4;
752 EncValue |= (Value & 0xff);
753 return swapHalfWords(EncValue, Endian == support::little);
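// Worked example for fixup_t2_so_imm: if getT2SOImmVal() yields the 12-bit
// encoding 0x1FF (which it produces for the repeating pattern 0x00FF00FF),
// the split is i = 0, imm3 = 0b001, imm8 = 0xFF, so EncValue = 0x000010FF and
// swapHalfWords() turns that into 0x10FF0000 for little-endian emission.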
758 bool ARMAsmBackend::shouldForceRelocation(const MCAssembler &Asm,
759 const MCFixup &Fixup,
760 const MCValue &Target) {
761 const MCSymbolRefExpr *A = Target.getSymA();
762 const MCSymbol *Sym = A ? &A->getSymbol() : nullptr;
763 const unsigned FixupKind = Fixup.getKind();
764 if ((unsigned)Fixup.getKind() == ARM::fixup_arm_thumb_bl) {
765 assert(Sym && "How did we resolve this?");
767 // If the symbol is external the linker will handle it.
768 // FIXME: Should we handle it as an optimization?
770 // If the symbol is out of range, produce a relocation and hope the
771 // linker can handle it. GNU AS produces an error in this case.
772 if (Sym->isExternal())
773 return true;
775 // Create relocations for unconditional branches to function symbols with
776 // different execution mode in ELF binaries.
777 if (Sym && Sym->isELF()) {
778 unsigned Type = cast<MCSymbolELF>(Sym)->getType();
779 if ((Type == ELF::STT_FUNC || Type == ELF::STT_GNU_IFUNC)) {
780 if (Asm.isThumbFunc(Sym) && (FixupKind == ARM::fixup_arm_uncondbranch))
781 return true;
782 if (!Asm.isThumbFunc(Sym) && (FixupKind == ARM::fixup_arm_thumb_br ||
783 FixupKind == ARM::fixup_arm_thumb_bl ||
784 FixupKind == ARM::fixup_t2_condbranch ||
785 FixupKind == ARM::fixup_t2_uncondbranch))
786 return true;
789 // We must always generate a relocation for BL/BLX instructions if we have
790 // a symbol to reference, as the linker relies on knowing the destination
791 // symbol's thumb-ness to get interworking right.
792 if (A && (FixupKind == ARM::fixup_arm_thumb_blx ||
793 FixupKind == ARM::fixup_arm_blx ||
794 FixupKind == ARM::fixup_arm_uncondbl ||
795 FixupKind == ARM::fixup_arm_condbl))
796 return true;
797 return false;
800 /// getFixupKindNumBytes - The number of bytes the fixup may change.
801 static unsigned getFixupKindNumBytes(unsigned Kind) {
802 switch (Kind) {
803 default:
804 llvm_unreachable("Unknown fixup kind!");
806 case FK_Data_1:
807 case ARM::fixup_arm_thumb_bcc:
808 case ARM::fixup_arm_thumb_cp:
809 case ARM::fixup_thumb_adr_pcrel_10:
810 return 1;
812 case FK_Data_2:
813 case ARM::fixup_arm_thumb_br:
814 case ARM::fixup_arm_thumb_cb:
815 case ARM::fixup_arm_mod_imm:
816 return 2;
818 case ARM::fixup_arm_pcrel_10_unscaled:
819 case ARM::fixup_arm_ldst_pcrel_12:
820 case ARM::fixup_arm_pcrel_10:
821 case ARM::fixup_arm_pcrel_9:
822 case ARM::fixup_arm_adr_pcrel_12:
823 case ARM::fixup_arm_uncondbl:
824 case ARM::fixup_arm_condbl:
825 case ARM::fixup_arm_blx:
826 case ARM::fixup_arm_condbranch:
827 case ARM::fixup_arm_uncondbranch:
828 return 3;
830 case FK_Data_4:
831 case ARM::fixup_t2_ldst_pcrel_12:
832 case ARM::fixup_t2_condbranch:
833 case ARM::fixup_t2_uncondbranch:
834 case ARM::fixup_t2_pcrel_10:
835 case ARM::fixup_t2_pcrel_9:
836 case ARM::fixup_t2_adr_pcrel_12:
837 case ARM::fixup_arm_thumb_bl:
838 case ARM::fixup_arm_thumb_blx:
839 case ARM::fixup_arm_movt_hi16:
840 case ARM::fixup_arm_movw_lo16:
841 case ARM::fixup_t2_movt_hi16:
842 case ARM::fixup_t2_movw_lo16:
843 case ARM::fixup_t2_so_imm:
844 return 4;
846 case FK_SecRel_2:
847 return 2;
848 case FK_SecRel_4:
849 return 4;
853 /// getFixupKindContainerSizeBytes - The number of bytes of the
854 /// container the fixup is applied to; used for big-endian byte ordering.
855 static unsigned getFixupKindContainerSizeBytes(unsigned Kind) {
856 switch (Kind) {
857 default:
858 llvm_unreachable("Unknown fixup kind!");
860 case FK_Data_1:
861 return 1;
862 case FK_Data_2:
863 return 2;
864 case FK_Data_4:
865 return 4;
867 case ARM::fixup_arm_thumb_bcc:
868 case ARM::fixup_arm_thumb_cp:
869 case ARM::fixup_thumb_adr_pcrel_10:
870 case ARM::fixup_arm_thumb_br:
871 case ARM::fixup_arm_thumb_cb:
872 // Instruction size is 2 bytes.
873 return 2;
875 case ARM::fixup_arm_pcrel_10_unscaled:
876 case ARM::fixup_arm_ldst_pcrel_12:
877 case ARM::fixup_arm_pcrel_10:
878 case ARM::fixup_arm_adr_pcrel_12:
879 case ARM::fixup_arm_uncondbl:
880 case ARM::fixup_arm_condbl:
881 case ARM::fixup_arm_blx:
882 case ARM::fixup_arm_condbranch:
883 case ARM::fixup_arm_uncondbranch:
884 case ARM::fixup_t2_ldst_pcrel_12:
885 case ARM::fixup_t2_condbranch:
886 case ARM::fixup_t2_uncondbranch:
887 case ARM::fixup_t2_pcrel_10:
888 case ARM::fixup_t2_adr_pcrel_12:
889 case ARM::fixup_arm_thumb_bl:
890 case ARM::fixup_arm_thumb_blx:
891 case ARM::fixup_arm_movt_hi16:
892 case ARM::fixup_arm_movw_lo16:
893 case ARM::fixup_t2_movt_hi16:
894 case ARM::fixup_t2_movw_lo16:
895 case ARM::fixup_arm_mod_imm:
896 case ARM::fixup_t2_so_imm:
897 // Instruction size is 4 bytes.
898 return 4;
902 void ARMAsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
903 const MCValue &Target,
904 MutableArrayRef<char> Data, uint64_t Value,
905 bool IsResolved,
906 const MCSubtargetInfo* STI) const {
907 unsigned NumBytes = getFixupKindNumBytes(Fixup.getKind());
908 MCContext &Ctx = Asm.getContext();
909 Value = adjustFixupValue(Asm, Fixup, Target, Value, IsResolved, Ctx, STI);
910 if (!Value)
911 return; // Doesn't change encoding.
913 unsigned Offset = Fixup.getOffset();
914 assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");
916 // Used to point to big endian bytes.
917 unsigned FullSizeBytes;
918 if (Endian == support::big) {
919 FullSizeBytes = getFixupKindContainerSizeBytes(Fixup.getKind());
920 assert((Offset + FullSizeBytes) <= Data.size() && "Invalid fixup size!");
921 assert(NumBytes <= FullSizeBytes && "Invalid fixup size!");
924 // For each byte of the fragment that the fixup touches, mask in the bits from
925 // the fixup value. The Value has been "split up" into the appropriate
926 // bitfields above.
927 for (unsigned i = 0; i != NumBytes; ++i) {
928 unsigned Idx = Endian == support::little ? i : (FullSizeBytes - 1 - i);
929 Data[Offset + Idx] |= uint8_t((Value >> (i * 8)) & 0xff);
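// Worked example: for a 3-byte ARM fixup such as fixup_arm_condbranch the
// container is 4 bytes, so on a big-endian target the loop ORs Value's bytes
// into Data[Offset+3], Data[Offset+2] and Data[Offset+1]; on a little-endian
// target they go into Data[Offset+0..2]. Either way the byte holding the
// condition field is left untouched.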
933 namespace CU {
935 /// Compact unwind encoding values.
936 enum CompactUnwindEncodings {
937 UNWIND_ARM_MODE_MASK = 0x0F000000,
938 UNWIND_ARM_MODE_FRAME = 0x01000000,
939 UNWIND_ARM_MODE_FRAME_D = 0x02000000,
940 UNWIND_ARM_MODE_DWARF = 0x04000000,
942 UNWIND_ARM_FRAME_STACK_ADJUST_MASK = 0x00C00000,
944 UNWIND_ARM_FRAME_FIRST_PUSH_R4 = 0x00000001,
945 UNWIND_ARM_FRAME_FIRST_PUSH_R5 = 0x00000002,
946 UNWIND_ARM_FRAME_FIRST_PUSH_R6 = 0x00000004,
948 UNWIND_ARM_FRAME_SECOND_PUSH_R8 = 0x00000008,
949 UNWIND_ARM_FRAME_SECOND_PUSH_R9 = 0x00000010,
950 UNWIND_ARM_FRAME_SECOND_PUSH_R10 = 0x00000020,
951 UNWIND_ARM_FRAME_SECOND_PUSH_R11 = 0x00000040,
952 UNWIND_ARM_FRAME_SECOND_PUSH_R12 = 0x00000080,
954 UNWIND_ARM_FRAME_D_REG_COUNT_MASK = 0x00000F00,
956 UNWIND_ARM_DWARF_SECTION_OFFSET = 0x00FFFFFF
959 } // end CU namespace
961 /// Generate compact unwind encoding for the function based on the CFI
962 /// instructions. If the CFI instructions describe a frame that cannot be
963 /// encoded in compact unwind, the method returns UNWIND_ARM_MODE_DWARF which
964 /// tells the runtime to fall back and unwind using DWARF.
965 uint32_t ARMAsmBackendDarwin::generateCompactUnwindEncoding(
966 ArrayRef<MCCFIInstruction> Instrs) const {
967 DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs() << "generateCU()\n");
968 // Only armv7k uses CFI based unwinding.
969 if (Subtype != MachO::CPU_SUBTYPE_ARM_V7K)
970 return 0;
971 // No .cfi directives means no frame.
972 if (Instrs.empty())
973 return 0;
974 // Start off assuming CFA is at SP+0.
975 int CFARegister = ARM::SP;
976 int CFARegisterOffset = 0;
977 // Mark savable registers as initially unsaved
978 DenseMap<unsigned, int> RegOffsets;
979 int FloatRegCount = 0;
980 // Process each .cfi directive and build up compact unwind info.
981 for (size_t i = 0, e = Instrs.size(); i != e; ++i) {
982 int Reg;
983 const MCCFIInstruction &Inst = Instrs[i];
984 switch (Inst.getOperation()) {
985 case MCCFIInstruction::OpDefCfa: // DW_CFA_def_cfa
986 CFARegisterOffset = -Inst.getOffset();
987 CFARegister = MRI.getLLVMRegNum(Inst.getRegister(), true);
988 break;
989 case MCCFIInstruction::OpDefCfaOffset: // DW_CFA_def_cfa_offset
990 CFARegisterOffset = -Inst.getOffset();
991 break;
992 case MCCFIInstruction::OpDefCfaRegister: // DW_CFA_def_cfa_register
993 CFARegister = MRI.getLLVMRegNum(Inst.getRegister(), true);
994 break;
995 case MCCFIInstruction::OpOffset: // DW_CFA_offset
996 Reg = MRI.getLLVMRegNum(Inst.getRegister(), true);
997 if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
998 RegOffsets[Reg] = Inst.getOffset();
999 else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
1000 RegOffsets[Reg] = Inst.getOffset();
1001 ++FloatRegCount;
1002 } else {
1003 DEBUG_WITH_TYPE("compact-unwind",
1004 llvm::dbgs() << ".cfi_offset on unknown register="
1005 << Inst.getRegister() << "\n");
1006 return CU::UNWIND_ARM_MODE_DWARF;
1008 break;
1009 case MCCFIInstruction::OpRelOffset: // DW_CFA_advance_loc
1010 // Ignore
1011 break;
1012 default:
1013 // Directive not convertible to compact unwind; bail out.
1014 DEBUG_WITH_TYPE("compact-unwind",
1015 llvm::dbgs()
1016 << "CFI directive not compatiable with comact "
1017 "unwind encoding, opcode=" << Inst.getOperation()
1018 << "\n");
1019 return CU::UNWIND_ARM_MODE_DWARF;
1020 break;
1024 // If no frame set up, return no unwind info.
1025 if ((CFARegister == ARM::SP) && (CFARegisterOffset == 0))
1026 return 0;
1028 // Verify standard frame (lr/r7) was used.
1029 if (CFARegister != ARM::R7) {
1030 DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs() << "frame register is "
1031 << CFARegister
1032 << " instead of r7\n");
1033 return CU::UNWIND_ARM_MODE_DWARF;
1035 int StackAdjust = CFARegisterOffset - 8;
1036 if (RegOffsets.lookup(ARM::LR) != (-4 - StackAdjust)) {
1037 DEBUG_WITH_TYPE("compact-unwind",
1038 llvm::dbgs()
1039 << "LR not saved as standard frame, StackAdjust="
1040 << StackAdjust
1041 << ", CFARegisterOffset=" << CFARegisterOffset
1042 << ", lr save at offset=" << RegOffsets[14] << "\n");
1043 return CU::UNWIND_ARM_MODE_DWARF;
1045 if (RegOffsets.lookup(ARM::R7) != (-8 - StackAdjust)) {
1046 DEBUG_WITH_TYPE("compact-unwind",
1047 llvm::dbgs() << "r7 not saved as standard frame\n");
1048 return CU::UNWIND_ARM_MODE_DWARF;
1050 uint32_t CompactUnwindEncoding = CU::UNWIND_ARM_MODE_FRAME;
1052 // If var-args are used, there may be a stack adjust required.
1053 switch (StackAdjust) {
1054 case 0:
1055 break;
1056 case 4:
1057 CompactUnwindEncoding |= 0x00400000;
1058 break;
1059 case 8:
1060 CompactUnwindEncoding |= 0x00800000;
1061 break;
1062 case 12:
1063 CompactUnwindEncoding |= 0x00C00000;
1064 break;
1065 default:
1066 DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs()
1067 << ".cfi_def_cfa stack adjust ("
1068 << StackAdjust << ") out of range\n");
1069 return CU::UNWIND_ARM_MODE_DWARF;
1072 // If r6 is saved, it must be right below r7.
1073 static struct {
1074 unsigned Reg;
1075 unsigned Encoding;
1076 } GPRCSRegs[] = {{ARM::R6, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R6},
1077 {ARM::R5, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R5},
1078 {ARM::R4, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R4},
1079 {ARM::R12, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R12},
1080 {ARM::R11, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R11},
1081 {ARM::R10, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R10},
1082 {ARM::R9, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R9},
1083 {ARM::R8, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R8}};
1085 int CurOffset = -8 - StackAdjust;
1086 for (auto CSReg : GPRCSRegs) {
1087 auto Offset = RegOffsets.find(CSReg.Reg);
1088 if (Offset == RegOffsets.end())
1089 continue;
1091 int RegOffset = Offset->second;
1092 if (RegOffset != CurOffset - 4) {
1093 DEBUG_WITH_TYPE("compact-unwind",
1094 llvm::dbgs() << MRI.getName(CSReg.Reg) << " saved at "
1095 << RegOffset << " but only supported at "
1096 << CurOffset << "\n");
1097 return CU::UNWIND_ARM_MODE_DWARF;
1099 CompactUnwindEncoding |= CSReg.Encoding;
1100 CurOffset -= 4;
1103 // If no floats saved, we are done.
1104 if (FloatRegCount == 0)
1105 return CompactUnwindEncoding;
1107 // Switch mode to include D register saving.
1108 CompactUnwindEncoding &= ~CU::UNWIND_ARM_MODE_MASK;
1109 CompactUnwindEncoding |= CU::UNWIND_ARM_MODE_FRAME_D;
1111 // FIXME: supporting more than 4 saved D-registers compactly would be trivial,
1112 // but needs coordination with the linker and libunwind.
1113 if (FloatRegCount > 4) {
1114 DEBUG_WITH_TYPE("compact-unwind",
1115 llvm::dbgs() << "unsupported number of D registers saved ("
1116 << FloatRegCount << ")\n");
1117 return CU::UNWIND_ARM_MODE_DWARF;
1120 // Floating point registers must either be saved sequentially, or we defer to
1121 // DWARF. No gaps allowed here so check that each saved d-register is
1122 // precisely where it should be.
1123 static unsigned FPRCSRegs[] = { ARM::D8, ARM::D10, ARM::D12, ARM::D14 };
1124 for (int Idx = FloatRegCount - 1; Idx >= 0; --Idx) {
1125 auto Offset = RegOffsets.find(FPRCSRegs[Idx]);
1126 if (Offset == RegOffsets.end()) {
1127 DEBUG_WITH_TYPE("compact-unwind",
1128 llvm::dbgs() << FloatRegCount << " D-regs saved, but "
1129 << MRI.getName(FPRCSRegs[Idx])
1130 << " not saved\n");
1131 return CU::UNWIND_ARM_MODE_DWARF;
1132 } else if (Offset->second != CurOffset - 8) {
1133 DEBUG_WITH_TYPE("compact-unwind",
1134 llvm::dbgs() << FloatRegCount << " D-regs saved, but "
1135 << MRI.getName(FPRCSRegs[Idx])
1136 << " saved at " << Offset->second
1137 << ", expected at " << CurOffset - 8
1138 << "\n");
1139 return CU::UNWIND_ARM_MODE_DWARF;
1141 CurOffset -= 8;
1144 return CompactUnwindEncoding | ((FloatRegCount - 1) << 8);
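// Worked example: a typical armv7k prologue that saves r4-r6 below the r7/lr
// frame pair, e.g.
//   push {r4, r5, r6, r7, lr}
//   add  r7, sp, #12
// produces CFI giving CFARegister = r7, CFARegisterOffset = 8 and
// StackAdjust = 0, with lr at -4, r7 at -8 and r6/r5/r4 at -12/-16/-20, so the
// function returns UNWIND_ARM_MODE_FRAME | FIRST_PUSH_R6 | FIRST_PUSH_R5 |
// FIRST_PUSH_R4 = 0x01000007. Any layout that deviates from this pattern falls
// back to UNWIND_ARM_MODE_DWARF.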
1147 static MachO::CPUSubTypeARM getMachOSubTypeFromArch(StringRef Arch) {
1148 ARM::ArchKind AK = ARM::parseArch(Arch);
1149 switch (AK) {
1150 default:
1151 return MachO::CPU_SUBTYPE_ARM_V7;
1152 case ARM::ArchKind::ARMV4T:
1153 return MachO::CPU_SUBTYPE_ARM_V4T;
1154 case ARM::ArchKind::ARMV5T:
1155 case ARM::ArchKind::ARMV5TE:
1156 case ARM::ArchKind::ARMV5TEJ:
1157 return MachO::CPU_SUBTYPE_ARM_V5;
1158 case ARM::ArchKind::ARMV6:
1159 case ARM::ArchKind::ARMV6K:
1160 return MachO::CPU_SUBTYPE_ARM_V6;
1161 case ARM::ArchKind::ARMV7A:
1162 return MachO::CPU_SUBTYPE_ARM_V7;
1163 case ARM::ArchKind::ARMV7S:
1164 return MachO::CPU_SUBTYPE_ARM_V7S;
1165 case ARM::ArchKind::ARMV7K:
1166 return MachO::CPU_SUBTYPE_ARM_V7K;
1167 case ARM::ArchKind::ARMV6M:
1168 return MachO::CPU_SUBTYPE_ARM_V6M;
1169 case ARM::ArchKind::ARMV7M:
1170 return MachO::CPU_SUBTYPE_ARM_V7M;
1171 case ARM::ArchKind::ARMV7EM:
1172 return MachO::CPU_SUBTYPE_ARM_V7EM;
1176 static MCAsmBackend *createARMAsmBackend(const Target &T,
1177 const MCSubtargetInfo &STI,
1178 const MCRegisterInfo &MRI,
1179 const MCTargetOptions &Options,
1180 support::endianness Endian) {
1181 const Triple &TheTriple = STI.getTargetTriple();
1182 switch (TheTriple.getObjectFormat()) {
1183 default:
1184 llvm_unreachable("unsupported object format");
1185 case Triple::MachO: {
1186 MachO::CPUSubTypeARM CS = getMachOSubTypeFromArch(TheTriple.getArchName());
1187 return new ARMAsmBackendDarwin(T, STI, MRI, CS);
1189 case Triple::COFF:
1190 assert(TheTriple.isOSWindows() && "non-Windows ARM COFF is not supported");
1191 return new ARMAsmBackendWinCOFF(T, STI);
1192 case Triple::ELF:
1193 assert(TheTriple.isOSBinFormatELF() && "using ELF for non-ELF target");
1194 uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
1195 return new ARMAsmBackendELF(T, STI, OSABI, Endian);
1199 MCAsmBackend *llvm::createARMLEAsmBackend(const Target &T,
1200 const MCSubtargetInfo &STI,
1201 const MCRegisterInfo &MRI,
1202 const MCTargetOptions &Options) {
1203 return createARMAsmBackend(T, STI, MRI, Options, support::little);
1206 MCAsmBackend *llvm::createARMBEAsmBackend(const Target &T,
1207 const MCSubtargetInfo &STI,
1208 const MCRegisterInfo &MRI,
1209 const MCTargetOptions &Options) {
1210 return createARMAsmBackend(T, STI, MRI, Options, support::big);
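// Illustrative sketch (not part of this file): these factory functions are
// reached through the TargetRegistry; the registration normally lives in
// ARMMCTargetDesc.cpp and looks roughly like
//
//   for (Target *T : {&getTheARMLETarget(), &getTheThumbLETarget()})
//     TargetRegistry::RegisterMCAsmBackend(*T, createARMLEAsmBackend);
//   for (Target *T : {&getTheARMBETarget(), &getTheThumbBETarget()})
//     TargetRegistry::RegisterMCAsmBackend(*T, createARMBEAsmBackend);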