// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Implementation notes:
//
// We need to remove a piece from the ELF shared library. However, we also
// want to ensure that code and data loads at the same addresses as before
// packing, so that tools like breakpad can still match up addresses found
// in any crash dumps with data extracted from the pre-packed version of
// the shared library.
//
// Arranging this means that we have to split one of the LOAD segments into
// two. Unfortunately, the program headers are located at the very start
// of the shared library file, so expanding the program header section
// would cause a lot of consequent changes to file offsets that we don't
// really want to have to handle.
//
// Luckily, though, there is a segment that is always present and always
// unused on Android; the GNU_STACK segment. What we do is to steal that
// and repurpose it to be one of the split LOAD segments. We then have to
// sort LOAD segments by offset to keep the crazy linker happy.
//
// All of this takes place in SplitProgramHeadersForHole(), used on packing,
// and is unraveled on unpacking in CoalesceProgramHeadersForHole(). See
// commentary on those functions for an example of this segment stealing
// in action.
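//
// A minimal usage sketch of the entry points below, assuming a driver that
// owns an open file descriptor for the shared library (the driver shape is
// hypothetical; only ElfFile, PackRelocations(), UnpackRelocations(), and
// Flush() come from this file):
//
//   relocation_packer::ElfFile elf_file(fd);
//   if (elf_file.PackRelocations())   // or elf_file.UnpackRelocations()
//     elf_file.Flush();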
#include "elf_file.h"

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>

#include <string>
#include <vector>

#include "debug.h"
#include "elf_traits.h"
#include "libelf.h"
#include "packer.h"
namespace relocation_packer {
// Stub identifier written to 'null out' packed data, "NULL".
static const uint32_t kStubIdentifier = 0x4c4c554eu;
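// (Stored little-endian, the bytes of 0x4c4c554e are 0x4e 0x55 0x4c 0x4c,
// which read back as the characters 'N' 'U' 'L' 'L'.)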
// Out-of-band dynamic tags used to indicate the offset and size of the
// android packed relocations section.
static const ELF::Sword DT_ANDROID_REL_OFFSET = DT_LOOS;
static const ELF::Sword DT_ANDROID_REL_SIZE = DT_LOOS + 1;
// Alignment to preserve, in bytes. This must be at least as large as the
// largest d_align and sh_addralign values found in the loaded file.
// Out of caution for RELRO page alignment, we preserve to a complete target
// page. See http://www.airs.com/blog/archives/189.
static const size_t kPreserveAlignment = 4096;
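// For example, with 8-byte ELF32 Elf32_Rel entries this means holes are
// opened or closed in whole pages: 512 entries * 8 bytes == 4096 bytes.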
// Alignment values used by ld and gold for the GNU_STACK segment. Different
// linkers write different values; the actual value is immaterial on Android
// because it ignores GNU_STACK segments. However, it is useful for binary
// comparison and unit test purposes if packing and unpacking can preserve
// them through a round-trip.
static const size_t kLdGnuStackSegmentAlignment = 16;
static const size_t kGoldGnuStackSegmentAlignment = 0;
namespace {

// Get section data. Checks that the section has exactly one data entry,
// so that the section size and the data size are the same. True in
// practice for all sections we resize when packing or unpacking. Done
// by ensuring that a call to elf_getdata(section, data) returns NULL as
// the next data entry.
Elf_Data* GetSectionData(Elf_Scn* section) {
  Elf_Data* data = elf_getdata(section, NULL);
  CHECK(data && elf_getdata(section, data) == NULL);
  return data;
}
// Rewrite section data. Allocates new data and makes it the data element's
// buffer. Relies on program exit to free allocated data.
void RewriteSectionData(Elf_Scn* section,
                        const void* section_data,
                        size_t size) {
  Elf_Data* data = GetSectionData(section);
  CHECK(size == data->d_size);
  uint8_t* area = new uint8_t[size];
  memcpy(area, section_data, size);
  data->d_buf = area;
}
// Verbose ELF header logging.
void VerboseLogElfHeader(const ELF::Ehdr* elf_header) {
  VLOG(1) << "e_phoff = " << elf_header->e_phoff;
  VLOG(1) << "e_shoff = " << elf_header->e_shoff;
  VLOG(1) << "e_ehsize = " << elf_header->e_ehsize;
  VLOG(1) << "e_phentsize = " << elf_header->e_phentsize;
  VLOG(1) << "e_phnum = " << elf_header->e_phnum;
  VLOG(1) << "e_shnum = " << elf_header->e_shnum;
  VLOG(1) << "e_shstrndx = " << elf_header->e_shstrndx;
}
// Verbose ELF program header logging.
void VerboseLogProgramHeader(size_t program_header_index,
                             const ELF::Phdr* program_header) {
  std::string type;
  switch (program_header->p_type) {
    case PT_NULL: type = "NULL"; break;
    case PT_LOAD: type = "LOAD"; break;
    case PT_DYNAMIC: type = "DYNAMIC"; break;
    case PT_INTERP: type = "INTERP"; break;
    case PT_PHDR: type = "PHDR"; break;
    case PT_GNU_RELRO: type = "GNU_RELRO"; break;
    case PT_GNU_STACK: type = "GNU_STACK"; break;
    case PT_ARM_EXIDX: type = "EXIDX"; break;
    default: type = "(OTHER)"; break;
  }
  VLOG(1) << "phdr[" << program_header_index << "] : " << type;
  VLOG(1) << "  p_offset = " << program_header->p_offset;
  VLOG(1) << "  p_vaddr = " << program_header->p_vaddr;
  VLOG(1) << "  p_paddr = " << program_header->p_paddr;
  VLOG(1) << "  p_filesz = " << program_header->p_filesz;
  VLOG(1) << "  p_memsz = " << program_header->p_memsz;
  VLOG(1) << "  p_flags = " << program_header->p_flags;
  VLOG(1) << "  p_align = " << program_header->p_align;
}
// Verbose ELF section header logging.
void VerboseLogSectionHeader(const std::string& section_name,
                             const ELF::Shdr* section_header) {
  VLOG(1) << "section " << section_name;
  VLOG(1) << "  sh_addr = " << section_header->sh_addr;
  VLOG(1) << "  sh_offset = " << section_header->sh_offset;
  VLOG(1) << "  sh_size = " << section_header->sh_size;
  VLOG(1) << "  sh_addralign = " << section_header->sh_addralign;
}
// Verbose ELF section data logging.
void VerboseLogSectionData(const Elf_Data* data) {
  VLOG(1) << "  d_buf = " << data->d_buf;
  VLOG(1) << "  d_off = " << data->d_off;
  VLOG(1) << "  d_size = " << data->d_size;
  VLOG(1) << "  d_align = " << data->d_align;
}

}  // namespace
// Load the complete ELF file into a memory image in libelf, and identify
// the .rel.dyn or .rela.dyn, .dynamic, and .android.rel.dyn or
// .android.rela.dyn sections. No-op if the ELF file has already been loaded.
bool ElfFile::Load() {
  if (elf_)
    return true;

  Elf* elf = elf_begin(fd_, ELF_C_RDWR, NULL);
  CHECK(elf);

  if (elf_kind(elf) != ELF_K_ELF) {
    LOG(ERROR) << "File not in ELF format";
    return false;
  }

  ELF::Ehdr* elf_header = ELF::getehdr(elf);
  if (!elf_header) {
    LOG(ERROR) << "Failed to load ELF header: " << elf_errmsg(elf_errno());
    return false;
  }
  if (elf_header->e_machine != ELF::kMachine) {
    LOG(ERROR) << "ELF file architecture is not " << ELF::Machine();
    return false;
  }
  if (elf_header->e_type != ET_DYN) {
    LOG(ERROR) << "ELF file is not a shared object";
    return false;
  }

  // Require that our endianness matches that of the target, and that both
  // are little-endian. Safe for all current build/target combinations.
  const int endian = elf_header->e_ident[EI_DATA];
  CHECK(endian == ELFDATA2LSB);
  CHECK(__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__);

  // Also require that the file class is as expected.
  const int file_class = elf_header->e_ident[EI_CLASS];
  CHECK(file_class == ELF::kFileClass);

  VLOG(1) << "endian = " << endian << ", file class = " << file_class;
  VerboseLogElfHeader(elf_header);

  const ELF::Phdr* elf_program_header = ELF::getphdr(elf);
  CHECK(elf_program_header);

  const ELF::Phdr* dynamic_program_header = NULL;
  for (size_t i = 0; i < elf_header->e_phnum; ++i) {
    const ELF::Phdr* program_header = &elf_program_header[i];
    VerboseLogProgramHeader(i, program_header);

    if (program_header->p_type == PT_DYNAMIC) {
      CHECK(dynamic_program_header == NULL);
      dynamic_program_header = program_header;
    }
  }
  CHECK(dynamic_program_header != NULL);

  size_t string_index;
  elf_getshdrstrndx(elf, &string_index);

  // Notes of the dynamic relocations, packed relocations, and .dynamic
  // sections. Found while iterating sections, and later stored in class
  // members.
  Elf_Scn* found_relocations_section = NULL;
  Elf_Scn* found_android_relocations_section = NULL;
  Elf_Scn* found_dynamic_section = NULL;

  // Notes of relocation section types seen. We require one or the other of
  // these; both is unsupported.
  bool has_rel_relocations = false;
  bool has_rela_relocations = false;

  Elf_Scn* section = NULL;
  while ((section = elf_nextscn(elf, section)) != NULL) {
    const ELF::Shdr* section_header = ELF::getshdr(section);
    std::string name = elf_strptr(elf, string_index, section_header->sh_name);
    VerboseLogSectionHeader(name, section_header);

    // Note relocation section types.
    if (section_header->sh_type == SHT_REL) {
      has_rel_relocations = true;
    }
    if (section_header->sh_type == SHT_RELA) {
      has_rela_relocations = true;
    }

    // Note special sections as we encounter them.
    if ((name == ".rel.dyn" || name == ".rela.dyn") &&
        section_header->sh_size > 0) {
      found_relocations_section = section;
    }
    if ((name == ".android.rel.dyn" || name == ".android.rela.dyn") &&
        section_header->sh_size > 0) {
      found_android_relocations_section = section;
    }
    if (section_header->sh_offset == dynamic_program_header->p_offset) {
      found_dynamic_section = section;
    }

    // Ensure we preserve alignment, repeated later for the data block(s).
    CHECK(section_header->sh_addralign <= kPreserveAlignment);

    Elf_Data* data = NULL;
    while ((data = elf_getdata(section, data)) != NULL) {
      CHECK(data->d_align <= kPreserveAlignment);
      VerboseLogSectionData(data);
    }
  }

  // Loading failed if we did not find the required special sections.
  if (!found_relocations_section) {
    LOG(ERROR) << "Missing or empty .rel.dyn or .rela.dyn section";
    return false;
  }
  if (!found_android_relocations_section) {
    LOG(ERROR) << "Missing or empty .android.rel.dyn or .android.rela.dyn "
               << "section (to fix, run with --help and follow the "
               << "pre-packing instructions)";
    return false;
  }
  if (!found_dynamic_section) {
    LOG(ERROR) << "Missing .dynamic section";
    return false;
  }

  // Loading failed if we could not identify the relocations type.
  if (!has_rel_relocations && !has_rela_relocations) {
    LOG(ERROR) << "No relocations sections found";
    return false;
  }
  if (has_rel_relocations && has_rela_relocations) {
    LOG(ERROR) << "Multiple relocations sections with different types found, "
               << "not currently supported";
    return false;
  }

  elf_ = elf;
  relocations_section_ = found_relocations_section;
  dynamic_section_ = found_dynamic_section;
  android_relocations_section_ = found_android_relocations_section;
  relocations_type_ = has_rel_relocations ? REL : RELA;
  return true;
}
namespace {

// Helper for ResizeSection(). Adjust the main ELF header for the hole.
void AdjustElfHeaderForHole(ELF::Ehdr* elf_header,
                            ELF::Off hole_start,
                            ssize_t hole_size) {
  if (elf_header->e_phoff > hole_start) {
    elf_header->e_phoff += hole_size;
    VLOG(1) << "e_phoff adjusted to " << elf_header->e_phoff;
  }
  if (elf_header->e_shoff > hole_start) {
    elf_header->e_shoff += hole_size;
    VLOG(1) << "e_shoff adjusted to " << elf_header->e_shoff;
  }
}
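// A worked example of the adjustment above: with hole_start 0x1000 and
// hole_size -0x800 (a shrink), an e_shoff of 0x3000 becomes 0x2800, while
// offsets at or before 0x1000 are left untouched.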
// Helper for ResizeSection(). Adjust all section headers for the hole.
void AdjustSectionHeadersForHole(Elf* elf,
                                 ELF::Off hole_start,
                                 ssize_t hole_size) {
  size_t string_index;
  elf_getshdrstrndx(elf, &string_index);

  Elf_Scn* section = NULL;
  while ((section = elf_nextscn(elf, section)) != NULL) {
    ELF::Shdr* section_header = ELF::getshdr(section);
    std::string name = elf_strptr(elf, string_index, section_header->sh_name);

    if (section_header->sh_offset > hole_start) {
      section_header->sh_offset += hole_size;
      VLOG(1) << "section " << name
              << " sh_offset adjusted to " << section_header->sh_offset;
    }
  }
}
// Helper for ResizeSection(). Adjust the offsets of any program headers
// that have offsets currently beyond the hole start.
void AdjustProgramHeaderOffsets(ELF::Phdr* program_headers,
                                size_t count,
                                ELF::Phdr* ignored_1,
                                ELF::Phdr* ignored_2,
                                ELF::Off hole_start,
                                ssize_t hole_size) {
  for (size_t i = 0; i < count; ++i) {
    ELF::Phdr* program_header = &program_headers[i];

    if (program_header == ignored_1 || program_header == ignored_2)
      continue;

    if (program_header->p_offset > hole_start) {
      // This segment starts past the hole start, so adjust its offset.
      program_header->p_offset += hole_size;
      VLOG(1) << "phdr[" << i
              << "] p_offset adjusted to " << program_header->p_offset;
    }
  }
}
// Helper for ResizeSection(). Find the first loadable segment in the
// file. We expect it to map from file offset zero.
ELF::Phdr* FindFirstLoadSegment(ELF::Phdr* program_headers,
                                size_t count) {
  ELF::Phdr* first_loadable_segment = NULL;

  for (size_t i = 0; i < count; ++i) {
    ELF::Phdr* program_header = &program_headers[i];

    if (program_header->p_type == PT_LOAD &&
        program_header->p_offset == 0 &&
        program_header->p_vaddr == 0 &&
        program_header->p_paddr == 0) {
      first_loadable_segment = program_header;
    }
  }
  LOG_IF(FATAL, !first_loadable_segment)
      << "Cannot locate a LOAD segment with address and offset zero";

  return first_loadable_segment;
}
// Helper for ResizeSection(). Deduce the alignment that the PT_GNU_STACK
// segment will use. Determined by sensing the linker that was used to
// create the shared library.
size_t DeduceGnuStackSegmentAlignment(Elf* elf) {
  size_t string_index;
  elf_getshdrstrndx(elf, &string_index);

  Elf_Scn* section = NULL;
  size_t gnu_stack_segment_alignment = kLdGnuStackSegmentAlignment;

  while ((section = elf_nextscn(elf, section)) != NULL) {
    const ELF::Shdr* section_header = ELF::getshdr(section);
    std::string name = elf_strptr(elf, string_index, section_header->sh_name);

    if (name == ".note.gnu.gold-version") {
      gnu_stack_segment_alignment = kGoldGnuStackSegmentAlignment;
      break;
    }
  }

  return gnu_stack_segment_alignment;
}
// Helper for ResizeSection(). Find the PT_GNU_STACK segment, and check
// that it contains what we expect so we can restore it on unpack if needed.
ELF::Phdr* FindUnusedGnuStackSegment(Elf* elf,
                                     ELF::Phdr* program_headers,
                                     size_t count) {
  ELF::Phdr* unused_segment = NULL;
  const size_t stack_alignment = DeduceGnuStackSegmentAlignment(elf);

  for (size_t i = 0; i < count; ++i) {
    ELF::Phdr* program_header = &program_headers[i];

    if (program_header->p_type == PT_GNU_STACK &&
        program_header->p_offset == 0 &&
        program_header->p_vaddr == 0 &&
        program_header->p_paddr == 0 &&
        program_header->p_filesz == 0 &&
        program_header->p_memsz == 0 &&
        program_header->p_flags == (PF_R | PF_W) &&
        program_header->p_align == stack_alignment) {
      unused_segment = program_header;
    }
  }
  LOG_IF(FATAL, !unused_segment)
      << "Cannot locate the expected GNU_STACK segment";

  return unused_segment;
}
// Helper for ResizeSection(). Find the segment that was the first loadable
// one before we split it into two. This is the one into which we coalesce
// the split segments on unpacking.
ELF::Phdr* FindOriginalFirstLoadSegment(ELF::Phdr* program_headers,
                                        size_t count) {
  const ELF::Phdr* first_loadable_segment =
      FindFirstLoadSegment(program_headers, count);

  ELF::Phdr* original_first_loadable_segment = NULL;

  for (size_t i = 0; i < count; ++i) {
    ELF::Phdr* program_header = &program_headers[i];

    // The original first loadable segment is the one that follows on from
    // the one we wrote on split to be the current first loadable segment.
    if (program_header->p_type == PT_LOAD &&
        program_header->p_offset == first_loadable_segment->p_filesz) {
      original_first_loadable_segment = program_header;
    }
  }
  LOG_IF(FATAL, !original_first_loadable_segment)
      << "Cannot locate the LOAD segment that follows a LOAD at offset zero";

  return original_first_loadable_segment;
}
// Helper for ResizeSection(). Find the section that contains the hole.
Elf_Scn* FindSectionContainingHole(Elf* elf,
                                   ELF::Off hole_start,
                                   ssize_t hole_size) {
  Elf_Scn* section = NULL;
  Elf_Scn* last_unholed_section = NULL;

  while ((section = elf_nextscn(elf, section)) != NULL) {
    const ELF::Shdr* section_header = ELF::getshdr(section);

    // Because we get here after section headers have been adjusted for the
    // hole, we need to 'undo' that adjustment to give a view of the original
    // sections layout.
    ELF::Off offset = section_header->sh_offset;
    if (section_header->sh_offset >= hole_start) {
      offset -= hole_size;
    }

    if (offset <= hole_start) {
      last_unholed_section = section;
    }
  }
  LOG_IF(FATAL, !last_unholed_section)
      << "Cannot identify the section before the one containing the hole";

  // The section containing the hole is the one after the last one found
  // by the loop above.
  Elf_Scn* holed_section = elf_nextscn(elf, last_unholed_section);
  LOG_IF(FATAL, !holed_section)
      << "Cannot identify the section containing the hole";

  return holed_section;
}
// Helper for ResizeSection(). Find the last section contained in a segment.
Elf_Scn* FindLastSectionInSegment(Elf* elf,
                                  ELF::Phdr* program_header,
                                  ELF::Off hole_start,
                                  ssize_t hole_size) {
  const ELF::Off segment_end =
      program_header->p_offset + program_header->p_filesz;

  Elf_Scn* section = NULL;
  Elf_Scn* last_section = NULL;

  while ((section = elf_nextscn(elf, section)) != NULL) {
    const ELF::Shdr* section_header = ELF::getshdr(section);

    // As above, 'undo' any section offset adjustment to give a view of the
    // original sections layout.
    ELF::Off offset = section_header->sh_offset;
    if (section_header->sh_offset >= hole_start) {
      offset -= hole_size;
    }

    if (offset < segment_end) {
      last_section = section;
    }
  }
  LOG_IF(FATAL, !last_section)
      << "Cannot identify the last section in the given segment";

  return last_section;
}
// Helper for ResizeSection(). Order loadable segments by their offsets.
// The crazy linker contains assumptions about loadable segment ordering,
// and it is better if we do not break them.
void SortOrderSensitiveProgramHeaders(ELF::Phdr* program_headers,
                                      size_t count) {
  std::vector<ELF::Phdr*> orderable;

  // Collect together orderable program headers. These are all the LOAD
  // segments, and any GNU_STACK that may be present (removed on packing,
  // but replaced on unpacking).
  for (size_t i = 0; i < count; ++i) {
    ELF::Phdr* program_header = &program_headers[i];

    if (program_header->p_type == PT_LOAD ||
        program_header->p_type == PT_GNU_STACK) {
      orderable.push_back(program_header);
    }
  }

  // Order these program headers so that any PT_GNU_STACK is last, and
  // the LOAD segments that precede it appear in offset order. Uses
  // insertion sort.
  for (size_t i = 1; i < orderable.size(); ++i) {
    for (size_t j = i; j > 0; --j) {
      ELF::Phdr* first = orderable[j - 1];
      ELF::Phdr* second = orderable[j];

      if (!(first->p_type == PT_GNU_STACK ||
            first->p_offset > second->p_offset)) {
        break;
      }
      std::swap(*first, *second);
    }
  }
}
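// A worked example of the sort above (offsets illustrative): on unpacking,
// the orderable headers start out as [GNU_STACK, LOAD@0x0000, LOAD@0x1cff008],
// because the reconstructed GNU_STACK occupies the slot that held the first
// LOAD. The first clause of the swap condition sinks it rightward, giving
// [LOAD@0x0000, LOAD@0x1cff008, GNU_STACK].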
// Helper for ResizeSection(). The GNU_STACK program header is unused in
// Android, so we can repurpose it here. Before packing, the program header
// table contains something like:
//
//   Type       Offset    VirtAddr   PhysAddr   FileSiz   MemSiz    Flg Align
//   LOAD       0x000000  0x00000000 0x00000000 0x1efc818 0x1efc818 R E 0x1000
//   LOAD       0x1efd008 0x01efe008 0x01efe008 0x17ec3c  0x1a0324  RW  0x1000
//   DYNAMIC    0x205ec50 0x0205fc50 0x0205fc50 0x00108   0x00108   RW  0x4
//   GNU_STACK  0x000000  0x00000000 0x00000000 0x00000   0x00000   RW  0
//
// The hole in the file is in the first of these. In order to preserve all
// load addresses, what we do is to turn the GNU_STACK into a new LOAD entry
// that maps segments up to where we created the hole, adjust the first LOAD
// entry so that it maps segments after that, adjust any other program
// headers whose offset is after the hole start, and finally order the LOAD
// segments by offset, to give:
//
//   Type       Offset    VirtAddr   PhysAddr   FileSiz   MemSiz    Flg Align
//   LOAD       0x000000  0x00000000 0x00000000 0x14ea4   0x14ea4   R E 0x1000
//   LOAD       0x014ea4  0x00212ea4 0x00212ea4 0x1cea164 0x1cea164 R E 0x1000
//   DYNAMIC    0x1e60c50 0x0205fc50 0x0205fc50 0x00108   0x00108   RW  0x4
//   LOAD       0x1cff008 0x01efe008 0x01efe008 0x17ec3c  0x1a0324  RW  0x1000
//
// We work out the split points by finding the .rel.dyn or .rela.dyn section
// that contains the hole, and by finding the last section in a given segment.
//
// To unpack, we reverse the above to leave the file as it was originally.
void SplitProgramHeadersForHole(Elf* elf,
                                ELF::Off hole_start,
                                ssize_t hole_size) {
  CHECK(hole_size < 0);
  const ELF::Ehdr* elf_header = ELF::getehdr(elf);
  CHECK(elf_header);

  ELF::Phdr* elf_program_header = ELF::getphdr(elf);
  CHECK(elf_program_header);

  const size_t program_header_count = elf_header->e_phnum;

  // Locate the segment that we can overwrite to form the new LOAD entry,
  // and the segment that we are going to split into two parts.
  ELF::Phdr* spliced_header =
      FindUnusedGnuStackSegment(elf, elf_program_header, program_header_count);
  ELF::Phdr* split_header =
      FindFirstLoadSegment(elf_program_header, program_header_count);

  VLOG(1) << "phdr[" << split_header - elf_program_header << "] split";
  VLOG(1) << "phdr[" << spliced_header - elf_program_header << "] new LOAD";

  // Find the section that contains the hole. We split on the section that
  // follows it.
  Elf_Scn* holed_section =
      FindSectionContainingHole(elf, hole_start, hole_size);

  size_t string_index;
  elf_getshdrstrndx(elf, &string_index);

  ELF::Shdr* section_header = ELF::getshdr(holed_section);
  std::string name = elf_strptr(elf, string_index, section_header->sh_name);
  VLOG(1) << "section " << name << " split after";

  // Find the last section in the segment we are splitting.
  Elf_Scn* last_section =
      FindLastSectionInSegment(elf, split_header, hole_start, hole_size);

  section_header = ELF::getshdr(last_section);
  name = elf_strptr(elf, string_index, section_header->sh_name);
  VLOG(1) << "section " << name << " split end";

  // Split on the section following the holed one, and up to (but not
  // including) the section following the last one in the split segment.
  Elf_Scn* split_section = elf_nextscn(elf, holed_section);
  LOG_IF(FATAL, !split_section)
      << "No section follows the section that contains the hole";
  Elf_Scn* end_section = elf_nextscn(elf, last_section);
  LOG_IF(FATAL, !end_section)
      << "No section follows the last section in the segment being split";

  // Split the first portion of split_header into spliced_header.
  const ELF::Shdr* split_section_header = ELF::getshdr(split_section);
  spliced_header->p_type = split_header->p_type;
  spliced_header->p_offset = split_header->p_offset;
  spliced_header->p_vaddr = split_header->p_vaddr;
  spliced_header->p_paddr = split_header->p_paddr;
  CHECK(split_header->p_filesz == split_header->p_memsz);
  spliced_header->p_filesz = split_section_header->sh_offset;
  spliced_header->p_memsz = split_section_header->sh_offset;
  spliced_header->p_flags = split_header->p_flags;
  spliced_header->p_align = split_header->p_align;

  // Now rewrite split_header to remove the part we spliced from it.
  const ELF::Shdr* end_section_header = ELF::getshdr(end_section);
  split_header->p_offset = spliced_header->p_filesz;
  CHECK(split_header->p_vaddr == split_header->p_paddr);
  split_header->p_vaddr = split_section_header->sh_addr;
  split_header->p_paddr = split_section_header->sh_addr;
  CHECK(split_header->p_filesz == split_header->p_memsz);
  split_header->p_filesz =
      end_section_header->sh_offset - spliced_header->p_filesz;
  split_header->p_memsz =
      end_section_header->sh_offset - spliced_header->p_filesz;

  // Adjust the offsets of all program headers that are not one of the pair
  // we just created by splitting.
  AdjustProgramHeaderOffsets(elf_program_header,
                             program_header_count,
                             spliced_header,
                             split_header,
                             hole_start,
                             hole_size);

  // Finally, order loadable segments by offset/address. The crazy linker
  // contains assumptions about loadable segment ordering.
  SortOrderSensitiveProgramHeaders(elf_program_header,
                                   program_header_count);
}
// Helper for ResizeSection(). Undo the work of SplitProgramHeadersForHole().
void CoalesceProgramHeadersForHole(Elf* elf,
                                   ELF::Off hole_start,
                                   ssize_t hole_size) {
  CHECK(hole_size > 0);
  const ELF::Ehdr* elf_header = ELF::getehdr(elf);
  CHECK(elf_header);

  ELF::Phdr* elf_program_header = ELF::getphdr(elf);
  CHECK(elf_program_header);

  const size_t program_header_count = elf_header->e_phnum;

  // Locate the segment that we overwrote to form the new LOAD entry, and
  // the segment that we split into two parts on packing.
  ELF::Phdr* spliced_header =
      FindFirstLoadSegment(elf_program_header, program_header_count);
  ELF::Phdr* split_header =
      FindOriginalFirstLoadSegment(elf_program_header, program_header_count);

  VLOG(1) << "phdr[" << spliced_header - elf_program_header << "] stack";
  VLOG(1) << "phdr[" << split_header - elf_program_header << "] coalesce";

  // Find the last section in the second segment we are coalescing.
  Elf_Scn* last_section =
      FindLastSectionInSegment(elf, split_header, hole_start, hole_size);

  size_t string_index;
  elf_getshdrstrndx(elf, &string_index);

  const ELF::Shdr* section_header = ELF::getshdr(last_section);
  std::string name = elf_strptr(elf, string_index, section_header->sh_name);
  VLOG(1) << "section " << name << " coalesced";

  // Rewrite the coalesced segment into split_header.
  const ELF::Shdr* last_section_header = ELF::getshdr(last_section);
  split_header->p_offset = spliced_header->p_offset;
  CHECK(split_header->p_vaddr == split_header->p_paddr);
  split_header->p_vaddr = spliced_header->p_vaddr;
  split_header->p_paddr = spliced_header->p_vaddr;
  CHECK(split_header->p_filesz == split_header->p_memsz);
  split_header->p_filesz =
      last_section_header->sh_offset + last_section_header->sh_size;
  split_header->p_memsz =
      last_section_header->sh_offset + last_section_header->sh_size;

  // Reconstruct the original GNU_STACK segment into spliced_header.
  const size_t stack_alignment = DeduceGnuStackSegmentAlignment(elf);
  spliced_header->p_type = PT_GNU_STACK;
  spliced_header->p_offset = 0;
  spliced_header->p_vaddr = 0;
  spliced_header->p_paddr = 0;
  spliced_header->p_filesz = 0;
  spliced_header->p_memsz = 0;
  spliced_header->p_flags = PF_R | PF_W;
  spliced_header->p_align = stack_alignment;

  // Adjust the offsets of all program headers that are not one of the pair
  // we just coalesced.
  AdjustProgramHeaderOffsets(elf_program_header,
                             program_header_count,
                             spliced_header,
                             split_header,
                             hole_start,
                             hole_size);

  // Finally, order loadable segments by offset/address. The crazy linker
  // contains assumptions about loadable segment ordering.
  SortOrderSensitiveProgramHeaders(elf_program_header,
                                   program_header_count);
}
// Helper for ResizeSection(). Rewrite program headers.
void RewriteProgramHeadersForHole(Elf* elf,
                                  ELF::Off hole_start,
                                  ssize_t hole_size) {
  // If hole_size is negative then we are removing a piece of the file, and
  // we want to split program headers so that we keep the same addresses
  // for text and data. If positive, then we are putting that piece of the
  // file back in, so we coalesce the previously split program headers.
  if (hole_size < 0)
    SplitProgramHeadersForHole(elf, hole_start, hole_size);
  else if (hole_size > 0)
    CoalesceProgramHeadersForHole(elf, hole_start, hole_size);
}
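// For example (sizes illustrative): packing that shrinks .rel.dyn by 0x3000
// bytes reaches here with hole_size == -0x3000 and dispatches to
// SplitProgramHeadersForHole(); the matching unpack passes +0x3000 and
// dispatches to CoalesceProgramHeadersForHole().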
// Helper for ResizeSection(). Locate and return the dynamic section.
Elf_Scn* GetDynamicSection(Elf* elf) {
  const ELF::Ehdr* elf_header = ELF::getehdr(elf);
  CHECK(elf_header);

  const ELF::Phdr* elf_program_header = ELF::getphdr(elf);
  CHECK(elf_program_header);

  // Find the program header that describes the dynamic section.
  const ELF::Phdr* dynamic_program_header = NULL;
  for (size_t i = 0; i < elf_header->e_phnum; ++i) {
    const ELF::Phdr* program_header = &elf_program_header[i];

    if (program_header->p_type == PT_DYNAMIC) {
      dynamic_program_header = program_header;
    }
  }
  CHECK(dynamic_program_header);

  // Now find the section with the same offset as this program header.
  Elf_Scn* dynamic_section = NULL;
  Elf_Scn* section = NULL;
  while ((section = elf_nextscn(elf, section)) != NULL) {
    ELF::Shdr* section_header = ELF::getshdr(section);

    if (section_header->sh_offset == dynamic_program_header->p_offset) {
      dynamic_section = section;
    }
  }
  CHECK(dynamic_section != NULL);

  return dynamic_section;
}
// Helper for ResizeSection(). Adjust the .dynamic section for the hole.
template <typename Rel>
void AdjustDynamicSectionForHole(Elf_Scn* dynamic_section,
                                 ELF::Off hole_start,
                                 ssize_t hole_size) {
  Elf_Data* data = GetSectionData(dynamic_section);

  const ELF::Dyn* dynamic_base = reinterpret_cast<ELF::Dyn*>(data->d_buf);
  std::vector<ELF::Dyn> dynamics(
      dynamic_base,
      dynamic_base + data->d_size / sizeof(dynamics[0]));

  for (size_t i = 0; i < dynamics.size(); ++i) {
    ELF::Dyn* dynamic = &dynamics[i];
    const ELF::Sword tag = dynamic->d_tag;

    // DT_RELSZ or DT_RELASZ indicate the overall size of relocations.
    // Only one will be present. Adjust by hole size.
    if (tag == DT_RELSZ || tag == DT_RELASZ) {
      dynamic->d_un.d_val += hole_size;
      VLOG(1) << "dynamic[" << i << "] " << dynamic->d_tag
              << " d_val adjusted to " << dynamic->d_un.d_val;
    }

    // DT_RELCOUNT or DT_RELACOUNT hold the count of relative relocations.
    // Only one will be present. Packing reduces it to the alignment
    // padding, if any; unpacking restores it to its former value. The
    // crazy linker does not use it, but we update it anyway.
    if (tag == DT_RELCOUNT || tag == DT_RELACOUNT) {
      // Cast sizeof to a signed type to avoid the division result being
      // promoted into an unsigned size_t.
      const ssize_t sizeof_rel = static_cast<ssize_t>(sizeof(Rel));
      dynamic->d_un.d_val += hole_size / sizeof_rel;
      VLOG(1) << "dynamic[" << i << "] " << dynamic->d_tag
              << " d_val adjusted to " << dynamic->d_un.d_val;
    }

    // DT_RELENT and DT_RELAENT do not change, but make sure they are what
    // we expect. Only one will be present.
    if (tag == DT_RELENT || tag == DT_RELAENT) {
      CHECK(dynamic->d_un.d_val == sizeof(Rel));
    }
  }

  void* section_data = &dynamics[0];
  size_t bytes = dynamics.size() * sizeof(dynamics[0]);
  RewriteSectionData(dynamic_section, section_data, bytes);
}
// Resize a section. If the new size is larger than the current size, open
// up a hole by increasing file offsets that come after the hole. If smaller
// than the current size, remove the hole by decreasing those offsets.
template <typename Rel>
void ResizeSection(Elf* elf, Elf_Scn* section, size_t new_size) {
  ELF::Shdr* section_header = ELF::getshdr(section);
  if (section_header->sh_size == new_size)
    return;

  // Note if we are resizing the real dyn relocations.
  size_t string_index;
  elf_getshdrstrndx(elf, &string_index);
  const std::string section_name =
      elf_strptr(elf, string_index, section_header->sh_name);
  const bool is_relocations_resize =
      (section_name == ".rel.dyn" || section_name == ".rela.dyn");

  // Require that the section size and the data size are the same. True
  // in practice for all sections we resize when packing or unpacking.
  Elf_Data* data = GetSectionData(section);
  CHECK(data->d_off == 0 && data->d_size == section_header->sh_size);

  // Require that the section is not zero-length (that is, has allocated
  // data that we can validly expand).
  CHECK(data->d_size && data->d_buf);

  const ELF::Off hole_start = section_header->sh_offset;
  const ssize_t hole_size = new_size - data->d_size;

  VLOG_IF(1, (hole_size > 0)) << "expand section size = " << data->d_size;
  VLOG_IF(1, (hole_size < 0)) << "shrink section size = " << data->d_size;

  // Resize the data and the section header.
  data->d_size += hole_size;
  section_header->sh_size += hole_size;

  // Add the hole size to all offsets in the ELF file that are after the
  // start of the hole. If the hole size is positive we are expanding the
  // section to create a new hole; if negative, we are closing up a hole.

  // Start with the main ELF header.
  ELF::Ehdr* elf_header = ELF::getehdr(elf);
  AdjustElfHeaderForHole(elf_header, hole_start, hole_size);

  // Adjust all section headers.
  AdjustSectionHeadersForHole(elf, hole_start, hole_size);

  // If resizing the dynamic relocations, rewrite the program headers to
  // either split or coalesce segments, and adjust dynamic entries to match.
  if (is_relocations_resize) {
    RewriteProgramHeadersForHole(elf, hole_start, hole_size);

    Elf_Scn* dynamic_section = GetDynamicSection(elf);
    AdjustDynamicSectionForHole<Rel>(dynamic_section, hole_start, hole_size);
  }
}
// Find the first slot in a dynamics array with the given tag. The array
// always ends with a free (unused) element, which we exclude from the
// search. Returns dynamics->size() if not found.
size_t FindDynamicEntry(ELF::Sword tag,
                        std::vector<ELF::Dyn>* dynamics) {
  // Loop until the penultimate entry. We exclude the end sentinel.
  for (size_t i = 0; i < dynamics->size() - 1; ++i) {
    if (dynamics->at(i).d_tag == tag)
      return i;
  }

  // The tag was not found.
  return dynamics->size();
}
// Replace the first free (unused) slot in a dynamics vector with the given
// value. The vector always ends with a free (unused) element, so the slot
// found cannot be the last one in the vector.
void AddDynamicEntry(const ELF::Dyn& dyn,
                     std::vector<ELF::Dyn>* dynamics) {
  const size_t slot = FindDynamicEntry(DT_NULL, dynamics);
  if (slot == dynamics->size()) {
    LOG(FATAL) << "No spare dynamic array slots found "
               << "(to fix, increase gold's --spare-dynamic-tags value)";
  }

  // Replace this entry with the one supplied.
  dynamics->at(slot) = dyn;
  VLOG(1) << "dynamic[" << slot << "] overwritten with " << dyn.d_tag;
}
// Remove the element in the dynamics vector that matches the given tag with
// unused slot data. Shuffle the following elements up, and ensure that the
// last is the null sentinel.
void RemoveDynamicEntry(ELF::Sword tag,
                        std::vector<ELF::Dyn>* dynamics) {
  const size_t slot = FindDynamicEntry(tag, dynamics);
  CHECK(slot != dynamics->size());

  // Remove this entry by shuffling up everything that follows.
  for (size_t i = slot; i < dynamics->size() - 1; ++i) {
    dynamics->at(i) = dynamics->at(i + 1);
    VLOG(1) << "dynamic[" << i
            << "] overwritten with dynamic[" << i + 1 << "]";
  }

  // Ensure that the end sentinel is still present.
  CHECK(dynamics->at(dynamics->size() - 1).d_tag == DT_NULL);
}
// Construct a null relocation without addend.
void NullRelocation(ELF::Rel* relocation) {
  relocation->r_offset = 0;
  relocation->r_info = ELF_R_INFO(0, ELF::kNoRelocationCode);
}

// Construct a null relocation with addend.
void NullRelocation(ELF::Rela* relocation) {
  relocation->r_offset = 0;
  relocation->r_info = ELF_R_INFO(0, ELF::kNoRelocationCode);
  relocation->r_addend = 0;
}

// Pad relocations with the given number of null entries. Generates its
// null entry with the appropriate NullRelocation() invocation.
template <typename Rel>
void PadRelocations(size_t count, std::vector<Rel>* relocations) {
  Rel null_relocation;
  NullRelocation(&null_relocation);
  std::vector<Rel> padding(count, null_relocation);
  relocations->insert(relocations->end(), padding.begin(), padding.end());
}

}  // namespace
// Remove relative entries from dynamic relocations and write as packed
// data into android packed relocations.
bool ElfFile::PackRelocations() {
  // Load the ELF file into libelf.
  if (!Load()) {
    LOG(ERROR) << "Failed to load as ELF";
    return false;
  }

  // Retrieve the current dynamic relocations section data.
  Elf_Data* data = GetSectionData(relocations_section_);

  if (relocations_type_ == REL) {
    // Convert data to a vector of relocations.
    const ELF::Rel* relocations_base = reinterpret_cast<ELF::Rel*>(data->d_buf);
    std::vector<ELF::Rel> relocations(
        relocations_base,
        relocations_base + data->d_size / sizeof(relocations[0]));

    LOG(INFO) << "Relocations   : REL";
    return PackTypedRelocations<ELF::Rel>(relocations);
  }

  if (relocations_type_ == RELA) {
    // Convert data to a vector of relocations with addends.
    const ELF::Rela* relocations_base =
        reinterpret_cast<ELF::Rela*>(data->d_buf);
    std::vector<ELF::Rela> relocations(
        relocations_base,
        relocations_base + data->d_size / sizeof(relocations[0]));

    LOG(INFO) << "Relocations   : RELA";
    return PackTypedRelocations<ELF::Rela>(relocations);
  }

  // Not reached if Load() succeeded, since relocations_type_ is REL or RELA.
  return false;
}
// Helper for PackRelocations(). Rel type is one of ELF::Rel or ELF::Rela.
template <typename Rel>
bool ElfFile::PackTypedRelocations(const std::vector<Rel>& relocations) {
  // Filter relocations into those that are relative and others.
  std::vector<Rel> relative_relocations;
  std::vector<Rel> other_relocations;

  for (size_t i = 0; i < relocations.size(); ++i) {
    const Rel& relocation = relocations[i];
    if (ELF_R_TYPE(relocation.r_info) == ELF::kRelativeRelocationCode) {
      CHECK(ELF_R_SYM(relocation.r_info) == 0);
      relative_relocations.push_back(relocation);
    } else {
      other_relocations.push_back(relocation);
    }
  }
  LOG(INFO) << "Relative      : " << relative_relocations.size() << " entries";
  LOG(INFO) << "Other         : " << other_relocations.size() << " entries";
  LOG(INFO) << "Total         : " << relocations.size() << " entries";

  // If no relative relocations then we have nothing packable. Perhaps
  // the shared object has already been packed?
  if (relative_relocations.empty()) {
    LOG(ERROR) << "No relative relocations found (already packed?)";
    return false;
  }

  // If not padding fully, apply only enough padding to preserve alignment.
  // Otherwise, pad so that we do not shrink the relocations section at all.
  if (!is_padding_relocations_) {
    // Calculate the size of the hole we will close up when we rewrite
    // dynamic relocations.
    ssize_t hole_size =
        relative_relocations.size() * sizeof(relative_relocations[0]);
    const ssize_t unaligned_hole_size = hole_size;

    // Adjust the actual hole size to preserve alignment. We always adjust
    // by a whole number of NONE-type relocations.
    while (hole_size % kPreserveAlignment)
      hole_size -= sizeof(relative_relocations[0]);
    LOG(INFO) << "Compaction    : " << hole_size << " bytes";

    // Adjusting for alignment may have removed any packing benefit.
    if (hole_size == 0) {
      LOG(INFO) << "Too few relative relocations to pack after alignment";
      return false;
    }

    // Find the padding needed in other_relocations to preserve alignment.
    // Ensure that we never completely empty the real relocations section.
    size_t padding_bytes = unaligned_hole_size - hole_size;
    if (padding_bytes == 0 && other_relocations.size() == 0) {
      do {
        padding_bytes += sizeof(relative_relocations[0]);
      } while (padding_bytes % kPreserveAlignment);
    }
    CHECK(padding_bytes % sizeof(other_relocations[0]) == 0);
    const size_t padding = padding_bytes / sizeof(other_relocations[0]);

    // Padding may have removed any packing benefit.
    if (padding >= relative_relocations.size()) {
      LOG(INFO) << "Too few relative relocations to pack after padding";
      return false;
    }

    // Add null relocations to other_relocations to preserve alignment.
    PadRelocations<Rel>(padding, &other_relocations);
    LOG(INFO) << "Alignment pad : " << padding << " relocations";
  } else {
    // If padding, add NONE-type relocations to other_relocations to make it
    // the same size as the original relocations we read in. This makes
    // the ResizeSection() below a no-op.
    const size_t padding = relocations.size() - other_relocations.size();
    PadRelocations<Rel>(padding, &other_relocations);
  }

  // Pack relative relocations.
  const size_t initial_bytes =
      relative_relocations.size() * sizeof(relative_relocations[0]);
  LOG(INFO) << "Unpacked relative: " << initial_bytes << " bytes";
  std::vector<uint8_t> packed;
  RelocationPacker packer;
  packer.PackRelativeRelocations(relative_relocations, &packed);
  const void* packed_data = &packed[0];
  const size_t packed_bytes = packed.size() * sizeof(packed[0]);
  LOG(INFO) << "Packed   relative: " << packed_bytes << " bytes";

  // If we have insufficient relative relocations to form a run then
  // packing fails.
  if (packed.empty()) {
    LOG(INFO) << "Too few relative relocations to pack";
    return false;
  }

  // Run a loopback self-test as a check that packing is lossless.
  std::vector<Rel> unpacked;
  packer.UnpackRelativeRelocations(packed, &unpacked);
  CHECK(unpacked.size() == relative_relocations.size());
  CHECK(!memcmp(&unpacked[0],
                &relative_relocations[0],
                unpacked.size() * sizeof(unpacked[0])));

  // Make sure packing saved some space.
  if (packed_bytes >= initial_bytes) {
    LOG(INFO) << "Packing relative relocations saves no space";
    return false;
  }

  // Rewrite the current dynamic relocations section to be only the ARM
  // non-relative relocations, then shrink it to size.
  const void* section_data = &other_relocations[0];
  const size_t bytes = other_relocations.size() * sizeof(other_relocations[0]);
  ResizeSection<Rel>(elf_, relocations_section_, bytes);
  RewriteSectionData(relocations_section_, section_data, bytes);

  // Rewrite the current packed android relocations section to hold the packed
  // relative relocations.
  ResizeSection<Rel>(elf_, android_relocations_section_, packed_bytes);
  RewriteSectionData(android_relocations_section_, packed_data, packed_bytes);

  // Rewrite .dynamic to include two new tags describing the packed android
  // relocations.
  Elf_Data* data = GetSectionData(dynamic_section_);
  const ELF::Dyn* dynamic_base = reinterpret_cast<ELF::Dyn*>(data->d_buf);
  std::vector<ELF::Dyn> dynamics(
      dynamic_base,
      dynamic_base + data->d_size / sizeof(dynamics[0]));
  // Use two of the spare slots to describe the packed section.
  ELF::Shdr* section_header = ELF::getshdr(android_relocations_section_);

  ELF::Dyn dyn;
  dyn.d_tag = DT_ANDROID_REL_OFFSET;
  dyn.d_un.d_ptr = section_header->sh_offset;
  AddDynamicEntry(dyn, &dynamics);

  dyn.d_tag = DT_ANDROID_REL_SIZE;
  dyn.d_un.d_val = section_header->sh_size;
  AddDynamicEntry(dyn, &dynamics);

  const void* dynamics_data = &dynamics[0];
  const size_t dynamics_bytes = dynamics.size() * sizeof(dynamics[0]);
  RewriteSectionData(dynamic_section_, dynamics_data, dynamics_bytes);

  return true;
}
// Find packed relative relocations in the packed android relocations
// section, unpack them, and rewrite the dynamic relocations section to
// contain unpacked data.
bool ElfFile::UnpackRelocations() {
  // Load the ELF file into libelf.
  if (!Load()) {
    LOG(ERROR) << "Failed to load as ELF";
    return false;
  }

  // Retrieve the current packed android relocations section data.
  Elf_Data* data = GetSectionData(android_relocations_section_);

  // Convert data to a vector of bytes.
  const uint8_t* packed_base = reinterpret_cast<uint8_t*>(data->d_buf);
  std::vector<uint8_t> packed(
      packed_base,
      packed_base + data->d_size / sizeof(packed[0]));

  if (packed.size() > 3 &&
      packed[0] == 'A' &&
      packed[1] == 'P' &&
      packed[2] == 'R' &&
      packed[3] == '1') {
    // Signature is APR1, unpack relocations.
    CHECK(relocations_type_ == REL);
    LOG(INFO) << "Relocations   : REL";
    return UnpackTypedRelocations<ELF::Rel>(packed);
  }

  if (packed.size() > 3 &&
      packed[0] == 'A' &&
      packed[1] == 'P' &&
      packed[2] == 'A' &&
      packed[3] == '1') {
    // Signature is APA1, unpack relocations with addends.
    CHECK(relocations_type_ == RELA);
    LOG(INFO) << "Relocations   : RELA";
    return UnpackTypedRelocations<ELF::Rela>(packed);
  }

  LOG(ERROR) << "Packed relative relocations not found (not packed?)";
  return false;
}
// Helper for UnpackRelocations(). Rel type is one of ELF::Rel or ELF::Rela.
template <typename Rel>
bool ElfFile::UnpackTypedRelocations(const std::vector<uint8_t>& packed) {
  // Unpack the data to re-materialize the relative relocations.
  const size_t packed_bytes = packed.size() * sizeof(packed[0]);
  LOG(INFO) << "Packed   relative: " << packed_bytes << " bytes";
  std::vector<Rel> relative_relocations;
  RelocationPacker packer;
  packer.UnpackRelativeRelocations(packed, &relative_relocations);
  const size_t unpacked_bytes =
      relative_relocations.size() * sizeof(relative_relocations[0]);
  LOG(INFO) << "Unpacked relative: " << unpacked_bytes << " bytes";

  // Retrieve the current dynamic relocations section data.
  Elf_Data* data = GetSectionData(relocations_section_);

  // Interpret data as relocations.
  const Rel* relocations_base = reinterpret_cast<Rel*>(data->d_buf);
  std::vector<Rel> relocations(
      relocations_base,
      relocations_base + data->d_size / sizeof(relocations[0]));

  std::vector<Rel> other_relocations;
  size_t padding = 0;

  // Filter relocations to locate any that are NONE-type. These will occur
  // if padding was turned on for packing.
  for (size_t i = 0; i < relocations.size(); ++i) {
    const Rel& relocation = relocations[i];
    if (ELF_R_TYPE(relocation.r_info) != ELF::kNoRelocationCode) {
      other_relocations.push_back(relocation);
    } else {
      ++padding;
    }
  }
  LOG(INFO) << "Relative      : " << relative_relocations.size() << " entries";
  LOG(INFO) << "Other         : " << other_relocations.size() << " entries";

  // If we found the same number of null relocation entries in the dynamic
  // relocations section as we hold as unpacked relative relocations, then
  // this is a padded file.
  const bool is_padded = padding == relative_relocations.size();

  // Unless padded, report by how much we expand the file.
  if (!is_padded) {
    // Calculate the size of the hole we will open up when we rewrite
    // dynamic relocations.
    ssize_t hole_size =
        relative_relocations.size() * sizeof(relative_relocations[0]);

    // Adjust the hole size for the padding added to preserve alignment.
    hole_size -= padding * sizeof(other_relocations[0]);
    LOG(INFO) << "Expansion     : " << hole_size << " bytes";
  }

  // Rewrite the current dynamic relocations section to be the relative
  // relocations followed by other relocations. This is the usual order in
  // which we find them after linking, so this action will normally put the
  // entire dynamic relocations section back to its pre-split-and-packed state.
  relocations.assign(relative_relocations.begin(), relative_relocations.end());
  relocations.insert(relocations.end(),
                     other_relocations.begin(), other_relocations.end());
  const void* section_data = &relocations[0];
  const size_t bytes = relocations.size() * sizeof(relocations[0]);
  LOG(INFO) << "Total         : " << relocations.size() << " entries";
  ResizeSection<Rel>(elf_, relocations_section_, bytes);
  RewriteSectionData(relocations_section_, section_data, bytes);

  // Nearly empty the current packed android relocations section. Leaves a
  // four-byte stub so that some data remains allocated to the section.
  // This is a convenience which allows us to re-pack this file again without
  // having to remove the section and then add a new small one with objcopy.
  // The way we resize sections relies on there being some data in a section.
  ResizeSection<Rel>(
      elf_, android_relocations_section_, sizeof(kStubIdentifier));
  RewriteSectionData(
      android_relocations_section_, &kStubIdentifier, sizeof(kStubIdentifier));

  // Rewrite .dynamic to remove two tags describing packed android relocations.
  data = GetSectionData(dynamic_section_);
  const ELF::Dyn* dynamic_base = reinterpret_cast<ELF::Dyn*>(data->d_buf);
  std::vector<ELF::Dyn> dynamics(
      dynamic_base,
      dynamic_base + data->d_size / sizeof(dynamics[0]));
  RemoveDynamicEntry(DT_ANDROID_REL_OFFSET, &dynamics);
  RemoveDynamicEntry(DT_ANDROID_REL_SIZE, &dynamics);
  const void* dynamics_data = &dynamics[0];
  const size_t dynamics_bytes = dynamics.size() * sizeof(dynamics[0]);
  RewriteSectionData(dynamic_section_, dynamics_data, dynamics_bytes);

  return true;
}
// Flush rewritten shared object file data.
void ElfFile::Flush() {
  // Flag all ELF data held in memory as needing to be written back to the
  // file, and tell libelf that we have controlled the file layout.
  elf_flagelf(elf_, ELF_C_SET, ELF_F_DIRTY);
  elf_flagelf(elf_, ELF_C_SET, ELF_F_LAYOUT);

  // Write ELF data back to disk.
  const off_t file_bytes = elf_update(elf_, ELF_C_WRITE);
  CHECK(file_bytes > 0);
  VLOG(1) << "elf_update returned: " << file_bytes;

  // Clean up libelf, and truncate the output file to the number of bytes
  // written by elf_update().
  elf_end(elf_);
  elf_ = NULL;
  const int truncate = ftruncate(fd_, file_bytes);
  CHECK(truncate == 0);
}
1317 } // namespace relocation_packer