//===- ARM64.cpp ----------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "Arch/ARM64Common.h"
#include "InputFiles.h"
#include "Symbols.h"
#include "SyntheticSections.h"
#include "Target.h"

#include "lld/Common/ErrorHandler.h"
#include "mach-o/compact_unwind_encoding.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/MathExtras.h"

using namespace llvm;
using namespace llvm::MachO;
using namespace llvm::support::endian;
using namespace lld;
using namespace lld::macho;

namespace {

struct ARM64 : ARM64Common {
  ARM64();
  void writeStub(uint8_t *buf, const Symbol &) const override;
  void writeStubHelperHeader(uint8_t *buf) const override;
  void writeStubHelperEntry(uint8_t *buf, const Symbol &,
                            uint64_t entryAddr) const override;
  void populateThunk(InputSection *thunk, Symbol *funcSym) override;
  void applyOptimizationHints(uint8_t *, const ConcatInputSection *,
                              ArrayRef<uint64_t>) const override;
};

} // namespace

// Random notes on reloc types:
// ADDEND always pairs with BRANCH26, PAGE21, or PAGEOFF12
// POINTER_TO_GOT: ld64 supports a 4-byte pc-relative form as well as an 8-byte
// absolute version of this relocation. The semantics of the absolute relocation
// are weird -- it results in the value of the GOT slot being written, instead
// of the address. Let's not support it unless we find a real-world use case.
static constexpr std::array<RelocAttrs, 11> relocAttrsArray{{
#define B(x) RelocAttrBits::x
    {"UNSIGNED",
     B(UNSIGNED) | B(ABSOLUTE) | B(EXTERN) | B(LOCAL) | B(BYTE4) | B(BYTE8)},
    {"SUBTRACTOR", B(SUBTRAHEND) | B(EXTERN) | B(BYTE4) | B(BYTE8)},
    {"BRANCH26", B(PCREL) | B(EXTERN) | B(BRANCH) | B(BYTE4)},
    {"PAGE21", B(PCREL) | B(EXTERN) | B(BYTE4)},
    {"PAGEOFF12", B(ABSOLUTE) | B(EXTERN) | B(BYTE4)},
    {"GOT_LOAD_PAGE21", B(PCREL) | B(EXTERN) | B(GOT) | B(BYTE4)},
    {"GOT_LOAD_PAGEOFF12",
     B(ABSOLUTE) | B(EXTERN) | B(GOT) | B(LOAD) | B(BYTE4)},
    {"POINTER_TO_GOT", B(PCREL) | B(EXTERN) | B(GOT) | B(POINTER) | B(BYTE4)},
    {"TLVP_LOAD_PAGE21", B(PCREL) | B(EXTERN) | B(TLV) | B(BYTE4)},
    {"TLVP_LOAD_PAGEOFF12",
     B(ABSOLUTE) | B(EXTERN) | B(TLV) | B(LOAD) | B(BYTE4)},
    {"ADDEND", B(ADDEND)},
#undef B
}};
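
// A stub branches through its lazy symbol pointer (__la_symbol_ptr), which
// initially points at the corresponding stub helper entry and is rewritten by
// dyld once the symbol has been bound.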
static constexpr uint32_t stubCode[] = {
    0x90000010, // 00: adrp  x16, __la_symbol_ptr@page
    0xf9400210, // 04: ldr   x16, [x16, __la_symbol_ptr@pageoff]
    0xd61f0200, // 08: br    x16
};

void ARM64::writeStub(uint8_t *buf8, const Symbol &sym) const {
  ::writeStub<LP64>(buf8, stubCode, sym);
}
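
// The stub helper header saves x16 (the lazy binding info offset loaded by the
// stub helper entry) and x17 (the _dyld_private pointer) on the stack, then
// branches to dyld_stub_binder, which performs the actual binding.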
static constexpr uint32_t stubHelperHeaderCode[] = {
    0x90000011, // 00: adrp  x17, _dyld_private@page
    0x91000231, // 04: add   x17, x17, _dyld_private@pageoff
    0xa9bf47f0, // 08: stp   x16/x17, [sp, #-16]!
    0x90000010, // 0c: adrp  x16, dyld_stub_binder@page
    0xf9400210, // 10: ldr   x16, [x16, dyld_stub_binder@pageoff]
    0xd61f0200, // 14: br    x16
};

void ARM64::writeStubHelperHeader(uint8_t *buf8) const {
  ::writeStubHelperHeader<LP64>(buf8, stubHelperHeaderCode);
}
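
// Each stub helper entry loads the offset of its symbol's lazy binding info
// (patched into l0 below at link time) into w16, then branches to the common
// stub helper header.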
static constexpr uint32_t stubHelperEntryCode[] = {
    0x18000050, // 00: ldr  w16, l0
    0x14000000, // 04: b    stubHelperHeader
    0x00000000, // 08: l0: .long 0
};

void ARM64::writeStubHelperEntry(uint8_t *buf8, const Symbol &sym,
                                 uint64_t entryVA) const {
  ::writeStubHelperEntry(buf8, stubHelperEntryCode, sym, entryVA);
}

// A thunk is the relaxed variation of stubCode. We don't need the
// extra indirection through a lazy pointer because the target address
// is known at link time.
static constexpr uint32_t thunkCode[] = {
    0x90000010, // 00: adrp  x16, <thunk.ptr>@page
    0x91000210, // 04: add   x16, x16, <thunk.ptr>@pageoff
    0xd61f0200, // 08: br    x16
};

void ARM64::populateThunk(InputSection *thunk, Symbol *funcSym) {
  thunk->align = 4;
  thunk->data = {reinterpret_cast<const uint8_t *>(thunkCode),
                 sizeof(thunkCode)};
  thunk->relocs.push_back({/*type=*/ARM64_RELOC_PAGEOFF12,
                           /*pcrel=*/false, /*length=*/2,
                           /*offset=*/4, /*addend=*/0,
                           /*referent=*/funcSym});
  thunk->relocs.push_back({/*type=*/ARM64_RELOC_PAGE21,
                           /*pcrel=*/true, /*length=*/2,
                           /*offset=*/0, /*addend=*/0,
                           /*referent=*/funcSym});
}

ARM64::ARM64() : ARM64Common(LP64()) {
  cpuType = CPU_TYPE_ARM64;
  cpuSubtype = CPU_SUBTYPE_ARM64_ALL;

  stubSize = sizeof(stubCode);
  thunkSize = sizeof(thunkCode);

  // Branch immediate is two's complement 26 bits, which is implicitly
  // multiplied by 4 (since all functions are 4-aligned): the branch range
  // is -4*(2**(26-1))..4*(2**(26-1) - 1).
  backwardBranchRange = 128 * 1024 * 1024;
  forwardBranchRange = backwardBranchRange - 4;

  modeDwarfEncoding = UNWIND_ARM64_MODE_DWARF;
  subtractorRelocType = ARM64_RELOC_SUBTRACTOR;
  unsignedRelocType = ARM64_RELOC_UNSIGNED;

  stubHelperHeaderSize = sizeof(stubHelperHeaderCode);
  stubHelperEntrySize = sizeof(stubHelperEntryCode);

  relocAttrs = {relocAttrsArray.data(), relocAttrsArray.size()};
}
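
// Decoded representations of the adrp, add, and ldr instructions that the
// Linker Optimization Hint (LOH) passes below pattern-match and rewrite.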
struct Adrp {
  uint32_t destRegister;
};

struct Add {
  uint8_t destRegister;
  uint8_t srcRegister;
  uint32_t addend;
};

enum ExtendType { ZeroExtend = 1, Sign64 = 2, Sign32 = 3 };

struct Ldr {
  uint8_t destRegister;
  uint8_t baseRegister;
  uint8_t p2Size;
  bool isFloat;
  ExtendType extendType;
  int64_t offset;
};
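
// A relocation that has already been found, along with the virtual address of
// its referent.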
struct PerformedReloc {
  const Reloc &rel;
  uint64_t referentVA;
};
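
// Applies the linker optimization hints recorded for one ConcatInputSection.
// relocIt caches the position of the last relocation found, so lookups for
// monotonically increasing hint addresses usually avoid a full binary search.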
class OptimizationHintContext {
public:
  OptimizationHintContext(uint8_t *buf, const ConcatInputSection *isec,
                          ArrayRef<uint64_t> relocTargets)
      : buf(buf), isec(isec), relocTargets(relocTargets),
        relocIt(isec->relocs.rbegin()) {}

  void applyAdrpAdd(const OptimizationHint &);
  void applyAdrpAdrp(const OptimizationHint &);
  void applyAdrpLdr(const OptimizationHint &);
  void applyAdrpLdrGot(const OptimizationHint &);
  void applyAdrpAddLdr(const OptimizationHint &);
  void applyAdrpLdrGotLdr(const OptimizationHint &);

private:
  uint8_t *buf;
  const ConcatInputSection *isec;
  ArrayRef<uint64_t> relocTargets;
  std::vector<Reloc>::const_reverse_iterator relocIt;

  uint64_t getRelocTarget(const Reloc &);

  Optional<PerformedReloc> findPrimaryReloc(uint64_t offset);
  Optional<PerformedReloc> findReloc(uint64_t offset);
};
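
// Decodes an adrp instruction: bit 31 plus bits 28-24 identify the opcode (the
// immediate bits are left unconstrained); the destination register is in the
// low five bits.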
static bool parseAdrp(uint32_t insn, Adrp &adrp) {
  if ((insn & 0x9f000000) != 0x90000000)
    return false;
  adrp.destRegister = insn & 0x1f;
  return true;
}
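
// Decodes a 64-bit add-immediate with an unshifted immediate: Rd is in bits
// 4:0, Rn in bits 9:5, and the 12-bit immediate in bits 21:10.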
static bool parseAdd(uint32_t insn, Add &add) {
  if ((insn & 0xffc00000) != 0x91000000)
    return false;
  add.destRegister = insn & 0x1f;
  add.srcRegister = (insn >> 5) & 0x1f;
  add.addend = (insn >> 10) & 0xfff;
  return true;
}
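
// Decodes the unsigned-offset (immediate) forms of ldr. The encoded 12-bit
// immediate is scaled by the access size, so the byte offset computed below is
// imm12 << p2Size.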
static bool parseLdr(uint32_t insn, Ldr &ldr) {
  ldr.destRegister = insn & 0x1f;
  ldr.baseRegister = (insn >> 5) & 0x1f;
  uint8_t size = insn >> 30;
  uint8_t opc = (insn >> 22) & 3;

  if ((insn & 0x3fc00000) == 0x39400000) {
    // LDR (immediate), LDRB (immediate), LDRH (immediate)
    ldr.p2Size = size;
    ldr.extendType = ZeroExtend;
    ldr.isFloat = false;
  } else if ((insn & 0x3f800000) == 0x39800000) {
    // LDRSB (immediate), LDRSH (immediate), LDRSW (immediate)
    ldr.p2Size = size;
    ldr.extendType = static_cast<ExtendType>(opc);
    ldr.isFloat = false;
  } else if ((insn & 0x3f400000) == 0x3d400000) {
    // LDR (immediate, SIMD&FP)
    ldr.extendType = ZeroExtend;
    ldr.isFloat = true;
    if (opc == 1)
      ldr.p2Size = size;
    else if (size == 0 && opc == 3)
      ldr.p2Size = 4;
    else
      return false;
  } else {
    return false;
  }
  ldr.offset = ((insn >> 10) & 0xfff) << ldr.p2Size;
  return true;
}

static bool isValidAdrOffset(int32_t delta) { return isInt<21>(delta); }
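
// Encodes an adr instruction: the 21-bit signed offset is split across immlo
// (bits 30:29) and immhi (bits 23:5).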
static void writeAdr(void *loc, uint32_t dest, int32_t delta) {
  assert(isValidAdrOffset(delta));
  uint32_t opcode = 0x10000000;
  uint32_t immHi = (delta & 0x001ffffc) << 3;
  uint32_t immLo = (delta & 0x00000003) << 29;
  write32le(loc, opcode | immHi | immLo | dest);
}

static void writeNop(void *loc) { write32le(loc, 0xd503201f); }
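
// Literal (PC-relative) loads exist only for 32-bit and larger accesses, and
// their 19-bit immediate is scaled by 4, giving a 4-byte aligned +/- 1 MiB
// range.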
static bool isLiteralLdrEligible(const Ldr &ldr) {
  return ldr.p2Size > 1 && isShiftedInt<19, 2>(ldr.offset);
}

static void writeLiteralLdr(void *loc, const Ldr &ldr) {
  assert(isLiteralLdrEligible(ldr));
  uint32_t imm19 = (ldr.offset / 4 & maskTrailingOnes<uint32_t>(19)) << 5;
  uint32_t opcode;
  switch (ldr.p2Size) {
  case 2:
    if (ldr.isFloat)
      opcode = 0x1c000000;
    else
      opcode = ldr.extendType == Sign64 ? 0x98000000 : 0x18000000;
    break;
  case 3:
    opcode = ldr.isFloat ? 0x5c000000 : 0x58000000;
    break;
  case 4:
    opcode = 0x9c000000;
    break;
  default:
    llvm_unreachable("Invalid literal ldr size");
  }
  write32le(loc, opcode | imm19 | ldr.destRegister);
}

static bool isImmediateLdrEligible(const Ldr &ldr) {
  // Note: We deviate from ld64's behavior, which converts to immediate loads
  // only if ldr.offset < 4096, even though the offset is divided by the load's
  // size in the 12-bit immediate operand. Only the unsigned offset variant is
  // supported.

  uint32_t size = 1 << ldr.p2Size;
  return ldr.offset >= 0 && (ldr.offset % size) == 0 &&
         isUInt<12>(ldr.offset >> ldr.p2Size);
}
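
// Encodes an unsigned-offset ldr. 128-bit (q-register) loads use the size = 0,
// opc = 3 encoding; for everything else, size/opc follow the decoded p2Size
// and extend type.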
static void writeImmediateLdr(void *loc, const Ldr &ldr) {
  assert(isImmediateLdrEligible(ldr));
  uint32_t opcode = 0x39000000;
  if (ldr.isFloat) {
    opcode |= 0x04000000;
    assert(ldr.extendType == ZeroExtend);
  }
  opcode |= ldr.destRegister;
  opcode |= ldr.baseRegister << 5;
  uint8_t size, opc;
  if (ldr.p2Size == 4) {
    size = 0;
    opc = 3;
  } else {
    opc = ldr.extendType;
    size = ldr.p2Size;
  }
  uint32_t immBits = ldr.offset >> ldr.p2Size;
  write32le(loc, opcode | (immBits << 10) | (opc << 22) | (size << 30));
}

uint64_t OptimizationHintContext::getRelocTarget(const Reloc &reloc) {
  size_t relocIdx = &reloc - isec->relocs.data();
  return relocTargets[relocIdx];
}

// Optimization hints are sorted in a monotonically increasing order by their
// first address as are relocations (albeit in decreasing order), so if we keep
// a pointer around to the last found relocation, we don't have to do a full
// binary search every time.
Optional<PerformedReloc>
OptimizationHintContext::findPrimaryReloc(uint64_t offset) {
  const auto end = isec->relocs.rend();
  while (relocIt != end && relocIt->offset < offset)
    ++relocIt;
  if (relocIt == end || relocIt->offset != offset)
    return None;
  return PerformedReloc{*relocIt, getRelocTarget(*relocIt)};
}

// The second and third addresses of optimization hints have no such
// monotonicity as the first, so we search the entire range of relocations.
Optional<PerformedReloc> OptimizationHintContext::findReloc(uint64_t offset) {
  // Optimization hints often apply to successive relocations, so we check for
  // that first before doing a full binary search.
  auto end = isec->relocs.rend();
  if (relocIt < end - 1 && (relocIt + 1)->offset == offset)
    return PerformedReloc{*(relocIt + 1), getRelocTarget(*(relocIt + 1))};

  auto reloc = lower_bound(isec->relocs, offset,
                           [](const Reloc &reloc, uint64_t offset) {
                             return offset < reloc.offset;
                           });

  if (reloc == isec->relocs.end() || reloc->offset != offset)
    return None;
  return PerformedReloc{*reloc, getRelocTarget(*reloc)};
}

// Transforms a pair of adrp+add instructions into an adr instruction if the
// target is within the +/- 1 MiB range allowed by the adr's 21-bit signed
// immediate offset.
//
//   adrp xN, _foo@PAGE
//   add  xM, xN, _foo@PAGEOFF
// ->
//   adr  xM, _foo
//   nop
void OptimizationHintContext::applyAdrpAdd(const OptimizationHint &hint) {
  uint32_t ins1 = read32le(buf + hint.offset0);
  uint32_t ins2 = read32le(buf + hint.offset0 + hint.delta[0]);
  Adrp adrp;
  if (!parseAdrp(ins1, adrp))
    return;
  Add add;
  if (!parseAdd(ins2, add))
    return;
  if (adrp.destRegister != add.srcRegister)
    return;

  Optional<PerformedReloc> rel1 = findPrimaryReloc(hint.offset0);
  Optional<PerformedReloc> rel2 = findReloc(hint.offset0 + hint.delta[0]);
  if (!rel1 || !rel2)
    return;
  if (rel1->referentVA != rel2->referentVA)
    return;
  int64_t delta = rel1->referentVA - rel1->rel.offset - isec->getVA();
  if (!isValidAdrOffset(delta))
    return;

  writeAdr(buf + hint.offset0, add.destRegister, delta);
  writeNop(buf + hint.offset0 + hint.delta[0]);
}

// Transforms two adrp instructions into a single adrp if their referent
// addresses are located on the same 4096 byte page.
//
//   adrp xN, _foo@PAGE
//   adrp xN, _bar@PAGE
// ->
//   adrp xN, _foo@PAGE
//   nop
void OptimizationHintContext::applyAdrpAdrp(const OptimizationHint &hint) {
  uint32_t ins1 = read32le(buf + hint.offset0);
  uint32_t ins2 = read32le(buf + hint.offset0 + hint.delta[0]);
  Adrp adrp1, adrp2;
  if (!parseAdrp(ins1, adrp1) || !parseAdrp(ins2, adrp2))
    return;
  if (adrp1.destRegister != adrp2.destRegister)
    return;

  Optional<PerformedReloc> rel1 = findPrimaryReloc(hint.offset0);
  Optional<PerformedReloc> rel2 = findReloc(hint.offset0 + hint.delta[0]);
  if (!rel1 || !rel2)
    return;
  if ((rel1->referentVA & ~0xfffULL) != (rel2->referentVA & ~0xfffULL))
    return;

  writeNop(buf + hint.offset0 + hint.delta[0]);
}

// Transforms a pair of adrp+ldr (immediate) instructions into an ldr (literal)
// load from a PC-relative address if it is 4-byte aligned and within +/- 1 MiB,
// as ldr can encode a signed 19-bit offset that gets multiplied by 4.
//
//   adrp xN, _foo@PAGE
//   ldr  xM, [xN, _foo@PAGEOFF]
// ->
//   nop
//   ldr  xM, _foo
void OptimizationHintContext::applyAdrpLdr(const OptimizationHint &hint) {
  uint32_t ins1 = read32le(buf + hint.offset0);
  uint32_t ins2 = read32le(buf + hint.offset0 + hint.delta[0]);
  Adrp adrp;
  if (!parseAdrp(ins1, adrp))
    return;
  Ldr ldr;
  if (!parseLdr(ins2, ldr))
    return;
  if (adrp.destRegister != ldr.baseRegister)
    return;

  Optional<PerformedReloc> rel1 = findPrimaryReloc(hint.offset0);
  Optional<PerformedReloc> rel2 = findReloc(hint.offset0 + hint.delta[0]);
  if (!rel1 || !rel2)
    return;
  if (ldr.offset != static_cast<int64_t>(rel1->referentVA & 0xfff))
    return;
  ldr.offset = rel1->referentVA - rel2->rel.offset - isec->getVA();
  if (!isLiteralLdrEligible(ldr))
    return;

  writeNop(buf + hint.offset0);
  writeLiteralLdr(buf + hint.offset0 + hint.delta[0], ldr);
}

// GOT loads are emitted by the compiler as a pair of adrp and ldr instructions,
// but they may be changed to adrp+add by relaxGotLoad(). This hint performs
// the AdrpLdr or AdrpAdd transformation depending on whether it was relaxed.
void OptimizationHintContext::applyAdrpLdrGot(const OptimizationHint &hint) {
  uint32_t ins2 = read32le(buf + hint.offset0 + hint.delta[0]);
  Add add;
  Ldr ldr;
  if (parseAdd(ins2, add))
    applyAdrpAdd(hint);
  else if (parseLdr(ins2, ldr))
    applyAdrpLdr(hint);
}

// Optimizes an adrp+add+ldr sequence used for loading from a local symbol's
// address by loading directly if it's close enough, or to an adr(p)+ldr
// sequence if it's not.
//
//   adrp x0, _foo@PAGE
//   add  x1, x0, _foo@PAGEOFF
//   ldr  x2, [x1, #off]
void OptimizationHintContext::applyAdrpAddLdr(const OptimizationHint &hint) {
  uint32_t ins1 = read32le(buf + hint.offset0);
  Adrp adrp;
  if (!parseAdrp(ins1, adrp))
    return;
  uint32_t ins2 = read32le(buf + hint.offset0 + hint.delta[0]);
  Add add;
  if (!parseAdd(ins2, add))
    return;
  uint32_t ins3 = read32le(buf + hint.offset0 + hint.delta[1]);
  Ldr ldr;
  if (!parseLdr(ins3, ldr))
    return;

  Optional<PerformedReloc> rel1 = findPrimaryReloc(hint.offset0);
  Optional<PerformedReloc> rel2 = findReloc(hint.offset0 + hint.delta[0]);
  if (!rel1 || !rel2)
    return;
  if (adrp.destRegister != add.srcRegister)
    return;
  if (add.destRegister != ldr.baseRegister)
    return;

  // Load from the target address directly.
  //   nop
  //   nop
  //   ldr x2, [_foo + #off]
  uint64_t rel3VA = hint.offset0 + hint.delta[1] + isec->getVA();
  Ldr literalLdr = ldr;
  literalLdr.offset += rel1->referentVA - rel3VA;
  if (isLiteralLdrEligible(literalLdr)) {
    writeNop(buf + hint.offset0);
    writeNop(buf + hint.offset0 + hint.delta[0]);
    writeLiteralLdr(buf + hint.offset0 + hint.delta[1], literalLdr);
    return;
  }

  // Load the target address into a register and load from there indirectly.
  //   adr x1, _foo
  //   nop
  //   ldr x2, [x1, #off]
  int64_t adrOffset = rel1->referentVA - rel1->rel.offset - isec->getVA();
  if (isValidAdrOffset(adrOffset)) {
    writeAdr(buf + hint.offset0, ldr.baseRegister, adrOffset);
    // Note: ld64 moves the offset into the adr instruction for AdrpAddLdr, but
    // not for AdrpLdrGotLdr. Its effect is the same either way.
    writeNop(buf + hint.offset0 + hint.delta[0]);
    return;
  }

  // Move the target's page offset into the ldr's immediate offset.
  //   adrp x0, _foo@PAGE
  //   nop
  //   ldr x2, [x0, _foo@PAGEOFF + #off]
  Ldr immediateLdr = ldr;
  immediateLdr.baseRegister = adrp.destRegister;
  immediateLdr.offset += add.addend;
  if (isImmediateLdrEligible(immediateLdr)) {
    writeNop(buf + hint.offset0 + hint.delta[0]);
    writeImmediateLdr(buf + hint.offset0 + hint.delta[1], immediateLdr);
  }
}

// Relaxes a GOT-indirect load.
// If the referenced symbol is external and its GOT entry is within +/- 1 MiB,
// the GOT entry can be loaded with a single literal ldr instruction.
// If the referenced symbol is local and thus has been relaxed to adrp+add+ldr,
// we perform the AdrpAddLdr transformation.
void OptimizationHintContext::applyAdrpLdrGotLdr(const OptimizationHint &hint) {
  uint32_t ins2 = read32le(buf + hint.offset0 + hint.delta[0]);
  Add add;
  Ldr ldr2, ldr3;

  if (parseAdd(ins2, add)) {
    applyAdrpAddLdr(hint);
  } else if (parseLdr(ins2, ldr2)) {
    // adrp x1, _foo@GOTPAGE
    // ldr  x2, [x1, _foo@GOTPAGEOFF]
    // ldr  x3, [x2, #off]

    uint32_t ins1 = read32le(buf + hint.offset0);
    Adrp adrp;
    if (!parseAdrp(ins1, adrp))
      return;
    uint32_t ins3 = read32le(buf + hint.offset0 + hint.delta[1]);
    if (!parseLdr(ins3, ldr3))
      return;

    Optional<PerformedReloc> rel1 = findPrimaryReloc(hint.offset0);
    Optional<PerformedReloc> rel2 = findReloc(hint.offset0 + hint.delta[0]);
    if (!rel1 || !rel2)
      return;

    if (ldr2.baseRegister != adrp.destRegister)
      return;
    if (ldr3.baseRegister != ldr2.destRegister)
      return;
    // Loads from the GOT must be pointer sized.
    if (ldr2.p2Size != 3 || ldr2.isFloat)
      return;

    // Load the GOT entry's address directly.
    //   nop
    //   ldr x2, _foo@GOTPAGE + _foo@GOTPAGEOFF
    //   ldr x3, [x2, #off]
    Ldr literalLdr = ldr2;
    literalLdr.offset = rel1->referentVA - rel2->rel.offset - isec->getVA();
    if (isLiteralLdrEligible(literalLdr)) {
      writeNop(buf + hint.offset0);
      writeLiteralLdr(buf + hint.offset0 + hint.delta[0], literalLdr);
    }
  }
}

void ARM64::applyOptimizationHints(uint8_t *buf, const ConcatInputSection *isec,
                                   ArrayRef<uint64_t> relocTargets) const {
  assert(isec);
  assert(relocTargets.size() == isec->relocs.size());

  // Note: Some of these optimizations might not be valid when shared regions
  // are in use. Will need to revisit this if splitSegInfo is added.

  OptimizationHintContext ctx1(buf, isec, relocTargets);
  for (const OptimizationHint &hint : isec->optimizationHints) {
    switch (hint.type) {
    case LOH_ARM64_ADRP_ADRP:
      // This is done in another pass because the other optimization hints
      // might cause its targets to be turned into NOPs.
      break;
    case LOH_ARM64_ADRP_LDR:
      ctx1.applyAdrpLdr(hint);
      break;
    case LOH_ARM64_ADRP_ADD_LDR:
      ctx1.applyAdrpAddLdr(hint);
      break;
    case LOH_ARM64_ADRP_LDR_GOT_LDR:
      ctx1.applyAdrpLdrGotLdr(hint);
      break;
    case LOH_ARM64_ADRP_ADD_STR:
    case LOH_ARM64_ADRP_LDR_GOT_STR:
      // TODO: Implement these
      break;
    case LOH_ARM64_ADRP_ADD:
      ctx1.applyAdrpAdd(hint);
      break;
    case LOH_ARM64_ADRP_LDR_GOT:
      ctx1.applyAdrpLdrGot(hint);
      break;
    }
  }

  OptimizationHintContext ctx2(buf, isec, relocTargets);
  for (const OptimizationHint &hint : isec->optimizationHints)
    if (hint.type == LOH_ARM64_ADRP_ADRP)
      ctx2.applyAdrpAdrp(hint);
}

TargetInfo *macho::createARM64TargetInfo() {
  static ARM64 t;
  return &t;
}