//===- UnwindInfoSection.cpp ----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "UnwindInfoSection.h"
#include "ConcatOutputSection.h"
#include "Config.h"
#include "InputSection.h"
#include "OutputSection.h"
#include "OutputSegment.h"
#include "SymbolTable.h"
#include "Symbols.h"
#include "SyntheticSections.h"
#include "Target.h"

#include "lld/Common/ErrorHandler.h"
#include "lld/Common/Memory.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/BinaryFormat/MachO.h"

using namespace llvm;
using namespace llvm::MachO;
using namespace lld;
using namespace lld::macho;
#define COMMON_ENCODINGS_MAX 127
#define COMPACT_ENCODINGS_MAX 256

#define SECOND_LEVEL_PAGE_BYTES 4096
#define SECOND_LEVEL_PAGE_WORDS (SECOND_LEVEL_PAGE_BYTES / sizeof(uint32_t))
#define REGULAR_SECOND_LEVEL_ENTRIES_MAX                                      \
  ((SECOND_LEVEL_PAGE_BYTES -                                                 \
    sizeof(unwind_info_regular_second_level_page_header)) /                   \
   sizeof(unwind_info_regular_second_level_entry))
#define COMPRESSED_SECOND_LEVEL_ENTRIES_MAX                                   \
  ((SECOND_LEVEL_PAGE_BYTES -                                                 \
    sizeof(unwind_info_compressed_second_level_page_header)) /                \
   sizeof(uint32_t))

#define COMPRESSED_ENTRY_FUNC_OFFSET_BITS 24
#define COMPRESSED_ENTRY_FUNC_OFFSET_MASK                                     \
  UNWIND_INFO_COMPRESSED_ENTRY_FUNC_OFFSET(~0)
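
// For a sense of scale (a sketch, assuming the struct layouts in
// compact_unwind_encoding.h: an 8-byte regular-page header with 8-byte
// {offset, encoding} entries, and a 12-byte compressed-page header with
// 4-byte entries): a 4 KiB page holds at most (4096 - 8) / 8 = 511 regular
// entries, while a compressed page has (4096 - 12) / 4 = 1021 words to
// split between entries and its local-encodings table.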

// Compact Unwind format is a Mach-O evolution of DWARF Unwind that
// optimizes space and exception-time lookup. Most DWARF unwind
// entries can be replaced with Compact Unwind entries, but the ones
// that cannot are retained in DWARF form.
//
// This comment will address macro-level organization of the pre-link
// and post-link compact unwind tables. For micro-level organization
// pertaining to the bitfield layout of the 32-bit compact unwind
// entries, see libunwind/include/mach-o/compact_unwind_encoding.h
//
// Important clarifying factoids:
//
// * __LD,__compact_unwind is the compact unwind format for compiler
//   output and linker input. It is never a final output. It could be
//   an intermediate output with the `-r` option which retains relocs.
//
// * __TEXT,__unwind_info is the compact unwind format for final
//   linker output. It is never an input.
//
// * __TEXT,__eh_frame is the DWARF format for both linker input and output.
//
// * __TEXT,__unwind_info entries are divided into 4 KiB pages (2nd
//   level) by ascending address, and the pages are referenced by an
//   index (1st level) in the section header.
//
// * Following the headers in __TEXT,__unwind_info, the bulk of the
//   section contains a vector of compact unwind entries
//   `{functionOffset, encoding}` sorted by ascending `functionOffset`.
//   Adjacent entries with the same encoding can be folded to great
//   advantage, achieving a 3-order-of-magnitude reduction in the
//   number of entries.
//
// * The __TEXT,__unwind_info format can accommodate up to 127 unique
//   encodings for the space-efficient compressed format. In practice,
//   fewer than a dozen unique encodings are used by C++ programs of
//   all sizes. Therefore, we don't even bother implementing the regular
//   non-compressed format. Time will tell if anyone in the field ever
//   overflows the 127-encodings limit.
//
// Refer to the definition of unwind_info_section_header in
// compact_unwind_encoding.h for an overview of the format we are encoding
// here.
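//
// To illustrate the folding win (hypothetical offsets and encodings):
// the pre-link entries
//   {0x1000, enc=A}, {0x1040, enc=A}, {0x1080, enc=A}, {0x2000, enc=B}
// share personalities and LSDAs, so the first three fold into a single
// post-link entry and the table becomes
//   {0x1000, enc=A}, {0x2000, enc=B}
// Since entries describe address ranges, every function from 0x1000 up to
// (but not including) 0x2000 then unwinds with encoding A.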

// TODO(gkm): prune __eh_frame entries superseded by __unwind_info, PR50410
// TODO(gkm): how do we align the 2nd-level pages?

using EncodingMap = DenseMap<compact_unwind_encoding_t, size_t>;

struct SecondLevelPage {
  uint32_t kind;
  size_t entryIndex;
  size_t entryCount;
  size_t byteCount;
  std::vector<compact_unwind_encoding_t> localEncodings;
  EncodingMap localEncodingIndexes;
};

template <class Ptr>
class UnwindInfoSectionImpl final : public UnwindInfoSection {
public:
  void prepareRelocations(ConcatInputSection *) override;
  void addInput(ConcatInputSection *) override;
  void finalize() override;
  void writeTo(uint8_t *buf) const override;

private:
  std::vector<std::pair<compact_unwind_encoding_t, size_t>> commonEncodings;
  EncodingMap commonEncodingIndexes;
  // Indices of personality functions within the GOT.
  std::vector<Ptr> personalities;
  SmallDenseMap<std::pair<InputSection *, uint64_t /* addend */>, Symbol *>
      personalityTable;
  std::vector<unwind_info_section_header_lsda_index_entry> lsdaEntries;
  // Map of function offset (from the image base) to an index within the LSDA
  // array.
  DenseMap<uint32_t, uint32_t> functionToLsdaIndex;
  std::vector<CompactUnwindEntry<Ptr>> cuVector;
  std::vector<CompactUnwindEntry<Ptr> *> cuPtrVector;
  std::vector<SecondLevelPage> secondLevelPages;
  uint64_t level2PagesOffset = 0;
};

UnwindInfoSection::UnwindInfoSection()
    : SyntheticSection(segment_names::text, section_names::unwindInfo) {
  align = 4;
  compactUnwindSection =
      make<ConcatOutputSection>(section_names::compactUnwind);
}

void UnwindInfoSection::prepareRelocations() {
  for (ConcatInputSection *isec : compactUnwindSection->inputs)
    prepareRelocations(isec);
}

template <class Ptr>
void UnwindInfoSectionImpl<Ptr>::addInput(ConcatInputSection *isec) {
  assert(isec->getSegName() == segment_names::ld &&
         isec->getName() == section_names::compactUnwind);
  isec->parent = compactUnwindSection;
  compactUnwindSection->addInput(isec);
}

// Compact unwind relocations have different semantics, so we handle them in a
// separate code path from regular relocations. First, we do not wish to add
// rebase opcodes for __LD,__compact_unwind, because that section doesn't
// actually end up in the final binary. Second, personality pointers always
// reside in the GOT and must be treated specially.
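//
// For orientation (a sketch; the authoritative layout is the
// CompactUnwindEntry template in UnwindInfoSection.h): with Ptr = uint64_t,
// an entry is 32 bytes, roughly
//   { functionAddress, functionLength, encoding, personality, lsda }
// so `r.offset % sizeof(CompactUnwindEntry<Ptr>) == 0` below identifies the
// functionAddress reloc, and the offsetof(..., personality) check identifies
// the personality reloc.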
template <class Ptr>
void UnwindInfoSectionImpl<Ptr>::prepareRelocations(ConcatInputSection *isec) {
  assert(!isec->shouldOmitFromOutput() &&
         "__compact_unwind section should not be omitted");

  // FIXME: Make this skip relocations for CompactUnwindEntries that
  // point to dead-stripped functions. That might save some amount of
  // work. But since there are usually just few personality functions
  // that are referenced from many places, at least some of them likely
  // live, it wouldn't reduce number of got entries.
  for (size_t i = 0; i < isec->relocs.size(); ++i) {
    Reloc &r = isec->relocs[i];
    assert(target->hasAttr(r.type, RelocAttrBits::UNSIGNED));

    if (r.offset % sizeof(CompactUnwindEntry<Ptr>) == 0) {
      InputSection *referentIsec;
      if (auto *isec = r.referent.dyn_cast<InputSection *>())
        referentIsec = isec;
      else
        referentIsec = cast<Defined>(r.referent.dyn_cast<Symbol *>())->isec;
      if (!cast<ConcatInputSection>(referentIsec)->shouldOmitFromOutput())
        allEntriesAreOmitted = false;
      continue;
    }

    if (r.offset % sizeof(CompactUnwindEntry<Ptr>) !=
        offsetof(CompactUnwindEntry<Ptr>, personality))
      continue;

    if (auto *s = r.referent.dyn_cast<Symbol *>()) {
      if (auto *undefined = dyn_cast<Undefined>(s)) {
        treatUndefinedSymbol(*undefined);
        // treatUndefinedSymbol() can replace s with a DylibSymbol; re-check.
        if (isa<Undefined>(s))
          continue;
      }
      if (auto *defined = dyn_cast<Defined>(s)) {
        // Check if we have created a synthetic symbol at the same address.
        Symbol *&personality =
            personalityTable[{defined->isec, defined->value}];
        if (personality == nullptr) {
          personality = defined;
          in.got->addEntry(defined);
        } else if (personality != defined) {
          r.referent = personality;
        }
        continue;
      }
      assert(isa<DylibSymbol>(s));
      in.got->addEntry(s);
      continue;
    }

    if (auto *referentIsec = r.referent.dyn_cast<InputSection *>()) {
      assert(!isCoalescedWeak(referentIsec));
      // Personality functions can be referenced via section relocations
      // if they live in the same object file. Create placeholder synthetic
      // symbols for them in the GOT.
      Symbol *&s = personalityTable[{referentIsec, r.addend}];
      if (s == nullptr) {
        // This runs after dead stripping, so the noDeadStrip argument does not
        // matter.
        s = make<Defined>("<internal>", /*file=*/nullptr, referentIsec,
                          r.addend, /*size=*/0, /*isWeakDef=*/false,
                          /*isExternal=*/false, /*isPrivateExtern=*/false,
                          /*isThumb=*/false, /*isReferencedDynamically=*/false,
                          /*noDeadStrip=*/false);
        in.got->addEntry(s);
      }
      r.referent = s;
      r.addend = 0;
    }
  }
}

// Unwind info lives in __DATA, and finalization of __TEXT will occur before
// finalization of __DATA. Moreover, the finalization of unwind info depends on
// the exact addresses that it references. So it is safe for compact unwind to
// reference addresses in __TEXT, but not addresses in any other segment.
static ConcatInputSection *checkTextSegment(InputSection *isec) {
  if (isec->getSegName() != segment_names::text)
    error("compact unwind references address in " + toString(isec) +
          " which is not in segment __TEXT");
  // __text should always be a ConcatInputSection.
  return cast<ConcatInputSection>(isec);
}

template <class Ptr>
constexpr Ptr TombstoneValue = std::numeric_limits<Ptr>::max();

// We need to apply the relocations to the pre-link compact unwind section
// before converting it to post-link form. There should only be absolute
// relocations here: since we are not emitting the pre-link CU section, there
// is no source address to make a relative location meaningful.
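//
// Concretely (a sketch, assuming x86_64 inputs): each pointer-sized field of
// a CompactUnwindEntry arrives with an UNSIGNED (absolute) relocation, e.g.
// an X86_64_RELOC_UNSIGNED at the functionAddress field, and we resolve it
// here by writing the referent's final virtual address into the buffer.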
template <class Ptr>
static void
relocateCompactUnwind(ConcatOutputSection *compactUnwindSection,
                      std::vector<CompactUnwindEntry<Ptr>> &cuVector) {
  for (const ConcatInputSection *isec : compactUnwindSection->inputs) {
    assert(isec->parent == compactUnwindSection);

    uint8_t *buf =
        reinterpret_cast<uint8_t *>(cuVector.data()) + isec->outSecOff;
    memcpy(buf, isec->data.data(), isec->data.size());

    for (const Reloc &r : isec->relocs) {
      uint64_t referentVA = TombstoneValue<Ptr>;
      if (auto *referentSym = r.referent.dyn_cast<Symbol *>()) {
        if (!isa<Undefined>(referentSym)) {
          if (auto *defined = dyn_cast<Defined>(referentSym))
            checkTextSegment(defined->isec);
          // At this point in the link, we may not yet know the final address
          // of the GOT, so we just encode the index. We make it a 1-based
          // index so that we can distinguish the null pointer case.
          referentVA = referentSym->gotIndex + 1;
        }
      } else {
        auto *referentIsec = r.referent.get<InputSection *>();
        ConcatInputSection *concatIsec = checkTextSegment(referentIsec);
        if (!concatIsec->shouldOmitFromOutput())
          referentVA = referentIsec->getVA(r.addend);
      }
      writeAddress(buf + r.offset, referentVA, r.length);
    }
  }
}

// There should only be a handful of unique personality pointers, so we can
// encode them as 2-bit indices into a small array.
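//
// For example (a sketch; UNWIND_PERSONALITY_MASK is 0x30000000 in
// compact_unwind_encoding.h, i.e. bits 28-29): the second personality in the
// array gets personalityIndex = 2, and countTrailingZeros(0x30000000) = 28,
// so the encoding gains `2 << 28`. Index 0 is reserved to mean "no
// personality", which is why at most 3 personalities fit.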
template <class Ptr>
static void
encodePersonalities(const std::vector<CompactUnwindEntry<Ptr> *> &cuPtrVector,
                    std::vector<Ptr> &personalities) {
  for (CompactUnwindEntry<Ptr> *cu : cuPtrVector) {
    if (cu->personality == 0)
      continue;
    // Linear search is fast enough for a small array.
    auto it = find(personalities, cu->personality);
    uint32_t personalityIndex; // 1-based index
    if (it != personalities.end()) {
      personalityIndex = std::distance(personalities.begin(), it) + 1;
    } else {
      personalities.push_back(cu->personality);
      personalityIndex = personalities.size();
    }
    cu->encoding |=
        personalityIndex << countTrailingZeros(
            static_cast<compact_unwind_encoding_t>(UNWIND_PERSONALITY_MASK));
  }
  if (personalities.size() > 3)
    error("too many personalities (" + std::to_string(personalities.size()) +
          ") for compact unwind to encode");
}

// __unwind_info stores unwind data for address ranges. If several
// adjacent functions have the same unwind encoding, LSDA, and personality
// function, they share one unwind entry. For this to work, functions without
// unwind info need explicit "no unwind info" unwind entries -- else the
// unwinder would think they have the unwind info of the closest function
// with unwind info right before in the image.
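//
// E.g. (hypothetical addresses): if `f` at 0x1000 has unwind info and `g` at
// 0x2000 has none, omitting `g` entirely would make the unwinder apply `f`'s
// entry to any PC inside `g`. Instead we emit {0x2000, encoding = 0} so that
// lookups inside `g` find "no unwind info".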
template <class Ptr>
static void addEntriesForFunctionsWithoutUnwindInfo(
    std::vector<CompactUnwindEntry<Ptr>> &cuVector) {
  DenseSet<Ptr> hasUnwindInfo;
  for (CompactUnwindEntry<Ptr> &cuEntry : cuVector)
    if (cuEntry.functionAddress != TombstoneValue<Ptr>)
      hasUnwindInfo.insert(cuEntry.functionAddress);

  // Add explicit "has no unwind info" entries for all global and local symbols
  // without unwind info.
  auto markNoUnwindInfo = [&cuVector, &hasUnwindInfo](const Defined *d) {
    if (d->isLive() && d->isec && isCodeSection(d->isec)) {
      Ptr ptr = d->getVA();
      if (!hasUnwindInfo.count(ptr))
        cuVector.push_back({ptr, 0, 0, 0, 0});
    }
  };
  for (Symbol *sym : symtab->getSymbols())
    if (auto *d = dyn_cast<Defined>(sym))
      markNoUnwindInfo(d);
  for (const InputFile *file : inputFiles)
    if (auto *objFile = dyn_cast<ObjFile>(file))
      for (Symbol *sym : objFile->symbols)
        if (auto *d = dyn_cast_or_null<Defined>(sym))
          if (!d->isExternal())
            markNoUnwindInfo(d);
}

static bool canFoldEncoding(compact_unwind_encoding_t encoding) {
  // From compact_unwind_encoding.h:
  //  UNWIND_X86_64_MODE_STACK_IND:
  //  A "frameless" (RBP not used as frame pointer) function large constant
  //  stack size. This case is like the previous, except the stack size is too
  //  large to encode in the compact unwind encoding. Instead it requires that
  //  the function contains "subq $nnnnnnnn,RSP" in its prolog. The compact
  //  encoding contains the offset to the nnnnnnnn value in the function in
  //  UNWIND_X86_64_FRAMELESS_STACK_SIZE.
  // Since this means the unwinder has to look at the `subq` in the function
  // of the unwind info's unwind address, two functions that have identical
  // unwind info can't be folded if it's using this encoding since both
  // entries need unique addresses.
  static_assert(UNWIND_X86_64_MODE_MASK == UNWIND_X86_MODE_MASK, "");
  static_assert(UNWIND_X86_64_MODE_STACK_IND == UNWIND_X86_MODE_STACK_IND, "");
  if ((target->cpuType == CPU_TYPE_X86_64 ||
       target->cpuType == CPU_TYPE_X86) &&
      (encoding & UNWIND_X86_64_MODE_MASK) == UNWIND_X86_64_MODE_STACK_IND) {
    // FIXME: Consider passing in the two function addresses and getting
    // their two stack sizes off the `subq` and only returning false if they're
    // actually different.
    return false;
  }
  return true;
}

// Scan the __LD,__compact_unwind entries and compute the space needs of
// __TEXT,__unwind_info and __TEXT,__eh_frame.
template <class Ptr> void UnwindInfoSectionImpl<Ptr>::finalize() {
  if (compactUnwindSection == nullptr)
    return;

  // At this point, the address space for __TEXT,__text has been
  // assigned, so we can relocate the __LD,__compact_unwind entries
  // into a temporary buffer. Relocation is necessary in order to sort
  // the CU entries by function address. Sorting is necessary so that
  // we can fold adjacent CU entries with identical
  // encoding+personality+lsda. Folding is necessary because it reduces
  // the number of CU entries by as much as 3 orders of magnitude!
  compactUnwindSection->finalize();
  assert(compactUnwindSection->getSize() % sizeof(CompactUnwindEntry<Ptr>) ==
         0);
  size_t cuCount =
      compactUnwindSection->getSize() / sizeof(CompactUnwindEntry<Ptr>);
  cuVector.resize(cuCount);
  relocateCompactUnwind(compactUnwindSection, cuVector);

  addEntriesForFunctionsWithoutUnwindInfo(cuVector);

  // Rather than sort & fold the 32-byte entries directly, we create a
  // vector of pointers to entries and sort & fold that instead.
  cuPtrVector.reserve(cuVector.size());
  for (CompactUnwindEntry<Ptr> &cuEntry : cuVector)
    cuPtrVector.emplace_back(&cuEntry);
  llvm::sort(cuPtrVector, [](const CompactUnwindEntry<Ptr> *a,
                             const CompactUnwindEntry<Ptr> *b) {
    return a->functionAddress < b->functionAddress;
  });

  // Dead-stripped functions get a functionAddress of TombstoneValue in
  // relocateCompactUnwind(). Filter them out here.
  // FIXME: This doesn't yet collect associated data like LSDAs kept
  // alive only by a now-removed CompactUnwindEntry or other comdat-like
  // data (`kindNoneGroupSubordinate*` in ld64).
  CompactUnwindEntry<Ptr> tombstone;
  tombstone.functionAddress = TombstoneValue<Ptr>;
  cuPtrVector.erase(
      std::lower_bound(cuPtrVector.begin(), cuPtrVector.end(), &tombstone,
                       [](const CompactUnwindEntry<Ptr> *a,
                          const CompactUnwindEntry<Ptr> *b) {
                         return a->functionAddress < b->functionAddress;
                       }),
      cuPtrVector.end());

  // If there are no entries left after adding explicit "no unwind info"
  // entries and removing entries for dead-stripped functions, don't write
  // an __unwind_info section at all.
  assert(allEntriesAreOmitted == cuPtrVector.empty());
  if (cuPtrVector.empty())
    return;

  // Fold adjacent entries with matching encoding+personality+lsda
  // We use three iterators on the same cuPtrVector to fold in-situ:
  // (1) `foldBegin` is the first of a potential sequence of matching entries
  // (2) `foldEnd` is the first non-matching entry after `foldBegin`.
  //     The semi-open interval [ foldBegin .. foldEnd ) contains a range of
  //     entries that can be folded into a single entry and written to ...
  // (3) `foldWrite`
  auto foldWrite = cuPtrVector.begin();
  for (auto foldBegin = cuPtrVector.begin(); foldBegin < cuPtrVector.end();) {
    auto foldEnd = foldBegin;
    while (++foldEnd < cuPtrVector.end() &&
           (*foldBegin)->encoding == (*foldEnd)->encoding &&
           (*foldBegin)->personality == (*foldEnd)->personality &&
           (*foldBegin)->lsda == (*foldEnd)->lsda &&
           canFoldEncoding((*foldEnd)->encoding))
      ;
    *foldWrite++ = *foldBegin;
    foldBegin = foldEnd;
  }
  cuPtrVector.erase(foldWrite, cuPtrVector.end());

  encodePersonalities(cuPtrVector, personalities);

  // Count frequencies of the folded encodings
  EncodingMap encodingFrequencies;
  for (const CompactUnwindEntry<Ptr> *cuPtrEntry : cuPtrVector)
    encodingFrequencies[cuPtrEntry->encoding]++;

  // Make a vector of encodings, sorted by descending frequency
  for (const auto &frequency : encodingFrequencies)
    commonEncodings.emplace_back(frequency);
  llvm::sort(commonEncodings,
             [](const std::pair<compact_unwind_encoding_t, size_t> &a,
                const std::pair<compact_unwind_encoding_t, size_t> &b) {
               if (a.second == b.second)
                 // When frequencies match, secondarily sort on encoding
                 // to maintain parity with validate-unwind-info.py
                 return a.first > b.first;
               return a.second > b.second;
             });

  // Truncate the vector to 127 elements.
  // Common encoding indexes are limited to 0..126, while encoding
  // indexes 127..255 are local to each second-level page
  if (commonEncodings.size() > COMMON_ENCODINGS_MAX)
    commonEncodings.resize(COMMON_ENCODINGS_MAX);

  // Create a map from encoding to common-encoding-table index
  for (size_t i = 0; i < commonEncodings.size(); i++)
    commonEncodingIndexes[commonEncodings[i].first] = i;

  // Split folded encodings into pages, where each page is limited by ...
  // (a) 4 KiB capacity
  // (b) 24-bit difference between first & final function address
  // (c) 8-bit compact-encoding-table index,
  //     for which 0..126 references the global common-encodings table,
  //     and 127..255 references a local per-second-level-page table.
  // First we try the compact format and determine how many entries fit.
  // If more entries fit in the regular format, we use that.
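  //
  // A compressed second-level entry is one uint32_t (a sketch of the layout
  // defined in compact_unwind_encoding.h):
  //   entry = (encodingIndex << COMPRESSED_ENTRY_FUNC_OFFSET_BITS) |
  //           (functionAddress - firstFunctionAddressOnPage)
  // i.e. an 8-bit encoding index plus a 24-bit function offset relative to
  // the page's first entry, which is what gives rise to limits (b) and (c)
  // above.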
  for (size_t i = 0; i < cuPtrVector.size();) {
    secondLevelPages.emplace_back();
    SecondLevelPage &page = secondLevelPages.back();
    page.entryIndex = i;
    uintptr_t functionAddressMax =
        cuPtrVector[i]->functionAddress + COMPRESSED_ENTRY_FUNC_OFFSET_MASK;
    size_t n = commonEncodings.size();
    size_t wordsRemaining =
        SECOND_LEVEL_PAGE_WORDS -
        sizeof(unwind_info_compressed_second_level_page_header) /
            sizeof(uint32_t);
    while (wordsRemaining >= 1 && i < cuPtrVector.size()) {
      const CompactUnwindEntry<Ptr> *cuPtr = cuPtrVector[i];
      if (cuPtr->functionAddress >= functionAddressMax) {
        break;
      } else if (commonEncodingIndexes.count(cuPtr->encoding) ||
                 page.localEncodingIndexes.count(cuPtr->encoding)) {
        i++;
        wordsRemaining--;
      } else if (wordsRemaining >= 2 && n < COMPACT_ENCODINGS_MAX) {
        page.localEncodings.emplace_back(cuPtr->encoding);
        page.localEncodingIndexes[cuPtr->encoding] = n++;
        i++;
        wordsRemaining -= 2;
      } else {
        break;
      }
    }
    page.entryCount = i - page.entryIndex;

    // If this is not the final page, see if it's possible to fit more
    // entries by using the regular format. This can happen when there
    // are many unique encodings, and we saturated the local
    // encoding table early.
    if (i < cuPtrVector.size() &&
        page.entryCount < REGULAR_SECOND_LEVEL_ENTRIES_MAX) {
      page.kind = UNWIND_SECOND_LEVEL_REGULAR;
      page.entryCount = std::min(REGULAR_SECOND_LEVEL_ENTRIES_MAX,
                                 cuPtrVector.size() - page.entryIndex);
      i = page.entryIndex + page.entryCount;
    } else {
      page.kind = UNWIND_SECOND_LEVEL_COMPRESSED;
    }
  }

  for (const CompactUnwindEntry<Ptr> *cu : cuPtrVector) {
    uint32_t functionOffset = cu->functionAddress - in.header->addr;
    functionToLsdaIndex[functionOffset] = lsdaEntries.size();
    if (cu->lsda != 0)
      lsdaEntries.push_back(
          {functionOffset, static_cast<uint32_t>(cu->lsda - in.header->addr)});
  }

  // compute size of __TEXT,__unwind_info section
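  //
  // The layout being sized here matches what writeTo() emits below:
  //   unwind_info_section_header
  //   common encodings array (uint32_t each)
  //   personality array (uint32_t each; image-relative offsets of GOT slots)
  //   first-level index entries, plus one sentinel entry
  //   LSDA index array
  //   second-level pages (SECOND_LEVEL_PAGE_BYTES each)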
  level2PagesOffset =
      sizeof(unwind_info_section_header) +
      commonEncodings.size() * sizeof(uint32_t) +
      personalities.size() * sizeof(uint32_t) +
      // The extra second-level-page entry is for the sentinel
      (secondLevelPages.size() + 1) *
          sizeof(unwind_info_section_header_index_entry) +
      lsdaEntries.size() * sizeof(unwind_info_section_header_lsda_index_entry);
  unwindInfoSize =
      level2PagesOffset + secondLevelPages.size() * SECOND_LEVEL_PAGE_BYTES;
}

// All inputs are relocated and output addresses are known, so write!

template <class Ptr>
void UnwindInfoSectionImpl<Ptr>::writeTo(uint8_t *buf) const {
  assert(!cuPtrVector.empty() && "call only if there is unwind info");

  // section header
  auto *uip = reinterpret_cast<unwind_info_section_header *>(buf);
  uip->version = 1;
  uip->commonEncodingsArraySectionOffset = sizeof(unwind_info_section_header);
  uip->commonEncodingsArrayCount = commonEncodings.size();
  uip->personalityArraySectionOffset =
      uip->commonEncodingsArraySectionOffset +
      (uip->commonEncodingsArrayCount * sizeof(uint32_t));
  uip->personalityArrayCount = personalities.size();
  uip->indexSectionOffset = uip->personalityArraySectionOffset +
                            (uip->personalityArrayCount * sizeof(uint32_t));
  uip->indexCount = secondLevelPages.size() + 1;

  // Common encodings
  auto *i32p = reinterpret_cast<uint32_t *>(&uip[1]);
  for (const auto &encoding : commonEncodings)
    *i32p++ = encoding.first;

  // Personalities
  for (Ptr personality : personalities)
    *i32p++ =
        in.got->addr + (personality - 1) * target->wordSize - in.header->addr;

  // Level-1 index
  uint32_t lsdaOffset =
      uip->indexSectionOffset +
      uip->indexCount * sizeof(unwind_info_section_header_index_entry);
  uint64_t l2PagesOffset = level2PagesOffset;
  auto *iep = reinterpret_cast<unwind_info_section_header_index_entry *>(i32p);
  for (const SecondLevelPage &page : secondLevelPages) {
    iep->functionOffset =
        cuPtrVector[page.entryIndex]->functionAddress - in.header->addr;
    iep->secondLevelPagesSectionOffset = l2PagesOffset;
    iep->lsdaIndexArraySectionOffset =
        lsdaOffset + functionToLsdaIndex.lookup(iep->functionOffset) *
                         sizeof(unwind_info_section_header_lsda_index_entry);
    iep++;
    l2PagesOffset += SECOND_LEVEL_PAGE_BYTES;
  }
  // Level-1 sentinel
  const CompactUnwindEntry<Ptr> &cuEnd = *cuPtrVector.back();
  assert(cuEnd.functionAddress != TombstoneValue<Ptr>);
  iep->functionOffset =
      cuEnd.functionAddress - in.header->addr + cuEnd.functionLength;
  iep->secondLevelPagesSectionOffset = 0;
  iep->lsdaIndexArraySectionOffset =
      lsdaOffset +
      lsdaEntries.size() * sizeof(unwind_info_section_header_lsda_index_entry);
  iep++;

  // LSDAs
  size_t lsdaBytes =
      lsdaEntries.size() * sizeof(unwind_info_section_header_lsda_index_entry);
  if (lsdaBytes > 0)
    memcpy(iep, lsdaEntries.data(), lsdaBytes);

  // Level-2 pages
  auto *pp = reinterpret_cast<uint32_t *>(reinterpret_cast<uint8_t *>(iep) +
                                          lsdaBytes);
  for (const SecondLevelPage &page : secondLevelPages) {
    if (page.kind == UNWIND_SECOND_LEVEL_COMPRESSED) {
      uintptr_t functionAddressBase =
          cuPtrVector[page.entryIndex]->functionAddress;
      auto *p2p =
          reinterpret_cast<unwind_info_compressed_second_level_page_header *>(
              pp);
      p2p->kind = page.kind;
      p2p->entryPageOffset =
          sizeof(unwind_info_compressed_second_level_page_header);
      p2p->entryCount = page.entryCount;
      p2p->encodingsPageOffset =
          p2p->entryPageOffset + p2p->entryCount * sizeof(uint32_t);
      p2p->encodingsCount = page.localEncodings.size();
      auto *ep = reinterpret_cast<uint32_t *>(&p2p[1]);
      for (size_t i = 0; i < page.entryCount; i++) {
        const CompactUnwindEntry<Ptr> *cuep = cuPtrVector[page.entryIndex + i];
        auto it = commonEncodingIndexes.find(cuep->encoding);
        if (it == commonEncodingIndexes.end())
          it = page.localEncodingIndexes.find(cuep->encoding);
        *ep++ = (it->second << COMPRESSED_ENTRY_FUNC_OFFSET_BITS) |
                (cuep->functionAddress - functionAddressBase);
      }
      if (page.localEncodings.size() != 0)
        memcpy(ep, page.localEncodings.data(),
               page.localEncodings.size() * sizeof(uint32_t));
    } else {
      auto *p2p =
          reinterpret_cast<unwind_info_regular_second_level_page_header *>(pp);
      p2p->kind = page.kind;
      p2p->entryPageOffset =
          sizeof(unwind_info_regular_second_level_page_header);
      p2p->entryCount = page.entryCount;
      auto *ep = reinterpret_cast<uint32_t *>(&p2p[1]);
      for (size_t i = 0; i < page.entryCount; i++) {
        const CompactUnwindEntry<Ptr> *cuep = cuPtrVector[page.entryIndex + i];
        *ep++ = cuep->functionAddress;
        *ep++ = cuep->encoding;
      }
    }
    pp += SECOND_LEVEL_PAGE_WORDS;
  }
}

UnwindInfoSection *macho::makeUnwindInfoSection() {
  if (target->wordSize == 8)
    return make<UnwindInfoSectionImpl<uint64_t>>();
  else
    return make<UnwindInfoSectionImpl<uint32_t>>();
}