/* -*- mode: C++; c-basic-offset: 4; tab-width: 4 -*-
 *
 * Copyright (c) 2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#ifndef __MACHO_REBASER__
#define __MACHO_REBASER__

#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <errno.h>
#include <string.h>
#include <stdio.h>
#include <mach/mach.h>
#include <libkern/OSByteOrder.h>
#include <mach-o/loader.h>
#include <mach-o/fat.h>
#include <mach-o/reloc.h>
#include <mach-o/x86_64/reloc.h>
#include <mach-o/arm/reloc.h>
#include <vector>

#include "MachOFileAbstraction.hpp"
#include "Architectures.hpp"
#include "MachOLayout.hpp"
#include "MachOTrie.hpp"

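//
// A Rebaser slides a dylib or bundle to the new segment addresses recorded in its
// MachOLayoutAbstraction (as is done when building the dyld shared cache).  It updates pointers
// in writable segments (compressed LC_DYLD_INFO rebase info, or classic local relocations),
// fixes split-seg code references, load commands, the symbol table, and the export trie.
// A MultiArchRebaser maps a (possibly fat) file and creates one Rebaser per architecture slice.
//
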
class AbstractRebaser
{
public:
    virtual cpu_type_t      getArchitecture() const = 0;
    virtual uint64_t        getBaseAddress() const = 0;
    virtual uint64_t        getVMSize() const = 0;
    virtual void            rebase(std::vector<void*>&) = 0;
};

template <typename A>
class Rebaser : public AbstractRebaser
{
public:
                            Rebaser(const MachOLayoutAbstraction&);

    virtual cpu_type_t      getArchitecture() const;
    virtual uint64_t        getBaseAddress() const;
    virtual uint64_t        getVMSize() const;
    virtual void            rebase(std::vector<void*>&);

protected:
    typedef typename A::P           P;
    typedef typename A::P::E        E;
    typedef typename A::P::uint_t   pint_t;

    pint_t*                 mappedAddressForNewAddress(pint_t vmaddress);
    pint_t                  getSlideForNewAddress(pint_t newAddress);

private:
    void                    calculateRelocBase();
    void                    adjustLoadCommands();
    void                    adjustSymbolTable();
    void                    adjustDATA();
    void                    makeNoPicStub(uint8_t* stub, pint_t logicalAddress);
    void                    optimzeStubs();
    void                    adjustCode();
    void                    applyRebaseInfo(std::vector<void*>& pointersInData);
    void                    adjustExportInfo();
    void                    doRebase(int segIndex, uint64_t segOffset, uint8_t type, std::vector<void*>& pointersInData);
    void                    adjustSegmentLoadCommand(macho_segment_command<P>* seg);
    pint_t                  getSlideForVMAddress(pint_t vmaddress);
    pint_t                  maskedVMAddress(pint_t vmaddress);
    pint_t*                 mappedAddressForVMAddress(pint_t vmaddress);
    pint_t*                 mappedAddressForRelocAddress(pint_t r_address);
    void                    adjustRelocBaseAddresses();
    const uint8_t*          doCodeUpdateForEachULEB128Address(const uint8_t* p, uint8_t kind, uint64_t orgBaseAddress, int64_t codeToDataDelta, int64_t codeToImportDelta);
    void                    doCodeUpdate(uint8_t kind, uint64_t address, int64_t codeToDataDelta, int64_t codeToImportDelta);
    void                    doLocalRelocation(const macho_relocation_info<P>* reloc);
    bool                    unequalSlides() const;

protected:
    const macho_header<P>*                  fHeader;
    uint8_t*                                fLinkEditBase;                  // add file offset to this to get linkedit content
    const MachOLayoutAbstraction&           fLayout;
private:
    pint_t                                  fOrignalVMRelocBaseAddress;     // add reloc address to this to get original address reloc referred to
    const macho_symtab_command<P>*          fSymbolTable;
    const macho_dysymtab_command<P>*        fDynamicSymbolTable;
    const macho_dyld_info_command<P>*       fDyldInfo;
    bool                                    fSplittingSegments;
    bool                                    fOrignalVMRelocBaseAddressValid;
    pint_t                                  fSkipSplitSegInfoStart;
    pint_t                                  fSkipSplitSegInfoEnd;
};

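// Note: the template parameter A is one of the architecture types from Architectures.hpp
// (x86, x86_64, arm, arm64, as used by the specializations below).  Following the conventions
// of MachOFileAbstraction.hpp, A::P abstracts pointer size and A::P::E abstracts byte order.
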
template <typename A>
Rebaser<A>::Rebaser(const MachOLayoutAbstraction& layout)
    : fLayout(layout), fOrignalVMRelocBaseAddress(0), fLinkEditBase(0),
      fSymbolTable(NULL), fDynamicSymbolTable(NULL), fDyldInfo(NULL), fSplittingSegments(false),
      fOrignalVMRelocBaseAddressValid(false), fSkipSplitSegInfoStart(0), fSkipSplitSegInfoEnd(0)
{
    fHeader = (const macho_header<P>*)fLayout.getSegments()[0].mappedAddress();
    switch ( fHeader->filetype() ) {
        case MH_DYLIB:
        case MH_BUNDLE:
            break;
        default:
            throw "file is not a dylib or bundle";
    }

    // find __LINKEDIT segment
    const std::vector<MachOLayoutAbstraction::Segment>& segments = fLayout.getSegments();
    for(std::vector<MachOLayoutAbstraction::Segment>::const_iterator it = segments.begin(); it != segments.end(); ++it) {
        const MachOLayoutAbstraction::Segment& seg = *it;
        if ( strcmp(seg.name(), "__LINKEDIT") == 0 ) {
            fLinkEditBase = (uint8_t*)seg.mappedAddress() - seg.fileOffset();
            break;
        }
    }
    if ( fLinkEditBase == NULL )
        throw "no __LINKEDIT segment";

    // get symbol table info
    const macho_load_command<P>* const cmds = (macho_load_command<P>*)((uint8_t*)fHeader + sizeof(macho_header<P>));
    const uint32_t cmd_count = fHeader->ncmds();
    const macho_load_command<P>* cmd = cmds;
    for (uint32_t i = 0; i < cmd_count; ++i) {
        switch (cmd->cmd()) {
            case LC_SYMTAB:
                fSymbolTable = (macho_symtab_command<P>*)cmd;
                break;
            case LC_DYSYMTAB:
                fDynamicSymbolTable = (macho_dysymtab_command<P>*)cmd;
                break;
            case LC_DYLD_INFO:
            case LC_DYLD_INFO_ONLY:
                fDyldInfo = (macho_dyld_info_command<P>*)cmd;
                break;
        }
        cmd = (const macho_load_command<P>*)(((uint8_t*)cmd)+cmd->cmdsize());
    }

    calculateRelocBase();

    fSplittingSegments = layout.hasSplitSegInfo() && this->unequalSlides();
}

template <> cpu_type_t Rebaser<x86>::getArchitecture()    const { return CPU_TYPE_I386; }
template <> cpu_type_t Rebaser<x86_64>::getArchitecture() const { return CPU_TYPE_X86_64; }
template <> cpu_type_t Rebaser<arm>::getArchitecture()    const { return CPU_TYPE_ARM; }
template <> cpu_type_t Rebaser<arm64>::getArchitecture()  const { return CPU_TYPE_ARM64; }

template <typename A>
bool Rebaser<A>::unequalSlides() const
{
    const std::vector<MachOLayoutAbstraction::Segment>& segments = fLayout.getSegments();
    uint64_t slide = segments[0].newAddress() - segments[0].address();
    for(std::vector<MachOLayoutAbstraction::Segment>::const_iterator it = segments.begin(); it != segments.end(); ++it) {
        const MachOLayoutAbstraction::Segment& seg = *it;
        if ( (seg.newAddress() - seg.address()) != slide )
            return true;
    }
    return false;
}

template <typename A>
uint64_t Rebaser<A>::getBaseAddress() const
{
    return fLayout.getSegments()[0].address();
}

template <typename A>
uint64_t Rebaser<A>::getVMSize() const
{
    uint64_t highestVMAddress = 0;
    const std::vector<MachOLayoutAbstraction::Segment>& segments = fLayout.getSegments();
    for(std::vector<MachOLayoutAbstraction::Segment>::const_iterator it = segments.begin(); it != segments.end(); ++it) {
        const MachOLayoutAbstraction::Segment& seg = *it;
        if ( seg.address() > highestVMAddress )
            highestVMAddress = seg.address();
    }
    return (((highestVMAddress - getBaseAddress()) + 4095) & (-4096));
}

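// Note on getVMSize(): ((x + 4095) & (-4096)) rounds x up to a 4096-byte page boundary
// (e.g. 0x12345 rounds up to 0x13000), so the returned size covers whole VM pages.
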
template <typename A>
void Rebaser<A>::rebase(std::vector<void*>& pointersInData)
{
    // update writable segments that have internal pointers
    if ( fDyldInfo != NULL )
        this->applyRebaseInfo(pointersInData);
    else
        this->adjustDATA();

    // if splitting segments, update code-to-data references
    this->adjustCode();

    // change address on relocs now that segments are split
    this->adjustRelocBaseAddresses();

    // update load commands
    this->adjustLoadCommands();

    // update symbol table
    this->adjustSymbolTable();

    this->optimzeStubs();

    // update export info
    if ( fDyldInfo != NULL )
        this->adjustExportInfo();
}

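// rebase() drives the whole per-architecture update:
//   1. slide pointers in writable segments (compressed rebase info, or classic relocations via adjustDATA)
//   2. fix code-to-data references when segments slide by different amounts (adjustCode)
//   3. move relocation r_address values onto the new reloc base (adjustRelocBaseAddresses)
//   4. slide addresses recorded in segment, section and routines load commands (adjustLoadCommands)
//   5. slide the n_value of exported and local symbols (adjustSymbolTable)
//   6. rewrite ARM PIC stubs and rebuild the export trie (optimzeStubs, adjustExportInfo)
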
template <>
void Rebaser<x86>::adjustSegmentLoadCommand(macho_segment_command<P>* seg)
{
    // __IMPORT segments are not writable in the shared cache
    if ( strcmp(seg->segname(), "__IMPORT") == 0 )
        seg->set_initprot(VM_PROT_READ|VM_PROT_EXECUTE);
}

template <typename A>
void Rebaser<A>::adjustSegmentLoadCommand(macho_segment_command<P>* seg)
{
}

template <typename A>
void Rebaser<A>::adjustLoadCommands()
{
    const macho_load_command<P>* const cmds = (macho_load_command<P>*)((uint8_t*)fHeader + sizeof(macho_header<P>));
    const uint32_t cmd_count = fHeader->ncmds();
    const macho_load_command<P>* cmd = cmds;
    for (uint32_t i = 0; i < cmd_count; ++i) {
        switch ( cmd->cmd() ) {
            case LC_ID_DYLIB:
                if ( (fHeader->flags() & MH_PREBOUND) != 0 ) {
                    // clear timestamp so that any prebound clients are invalidated
                    macho_dylib_command<P>* dylib = (macho_dylib_command<P>*)cmd;
                    dylib->set_timestamp(1);
                }
                break;
            case LC_LOAD_DYLIB:
            case LC_LOAD_WEAK_DYLIB:
            case LC_REEXPORT_DYLIB:
            case LC_LOAD_UPWARD_DYLIB:
                if ( (fHeader->flags() & MH_PREBOUND) != 0 ) {
                    // clear expected timestamps so that this image will load with invalid prebinding
                    macho_dylib_command<P>* dylib = (macho_dylib_command<P>*)cmd;
                    dylib->set_timestamp(2);
                }
                break;
            case macho_routines_command<P>::CMD:
                // update -init command
                {
                    struct macho_routines_command<P>* routines = (struct macho_routines_command<P>*)cmd;
                    routines->set_init_address(routines->init_address() + this->getSlideForVMAddress(routines->init_address()));
                }
                break;
            case macho_segment_command<P>::CMD:
                // update segment commands
                {
                    macho_segment_command<P>* seg = (macho_segment_command<P>*)cmd;
                    this->adjustSegmentLoadCommand(seg);
                    pint_t slide = this->getSlideForVMAddress(seg->vmaddr());
                    seg->set_vmaddr(seg->vmaddr() + slide);
                    macho_section<P>* const sectionsStart = (macho_section<P>*)((char*)seg + sizeof(macho_segment_command<P>));
                    macho_section<P>* const sectionsEnd = &sectionsStart[seg->nsects()];
                    for(macho_section<P>* sect = sectionsStart; sect < sectionsEnd; ++sect) {
                        sect->set_addr(sect->addr() + slide);
                    }
                }
                break;
        }
        cmd = (const macho_load_command<P>*)(((uint8_t*)cmd)+cmd->cmdsize());
    }
}

template <>
uint64_t Rebaser<arm64>::maskedVMAddress(pint_t vmaddress)
{
    return (vmaddress & 0x0FFFFFFFFFFFFFFF);
}

template <typename A>
typename A::P::uint_t Rebaser<A>::maskedVMAddress(pint_t vmaddress)
{
    return vmaddress;
}

template <typename A>
typename A::P::uint_t Rebaser<A>::getSlideForVMAddress(pint_t vmaddress)
{
    pint_t vmaddr = this->maskedVMAddress(vmaddress);
    const std::vector<MachOLayoutAbstraction::Segment>& segments = fLayout.getSegments();
    for(std::vector<MachOLayoutAbstraction::Segment>::const_iterator it = segments.begin(); it != segments.end(); ++it) {
        const MachOLayoutAbstraction::Segment& seg = *it;
        if ( (seg.address() <= vmaddr) && (seg.size() != 0) && ((vmaddr < (seg.address()+seg.size())) || (seg.address() == vmaddr)) ) {
            return seg.newAddress() - seg.address();
        }
    }
    throwf("vm address 0x%08llX not found", (uint64_t)vmaddr);
}

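// Note on getSlideForVMAddress(): the slide of an address is simply newAddress() - address() of
// the segment containing it.  For example (addresses are hypothetical), a __DATA segment moved
// from 0x2000 to 0x5000 gives every address inside it a slide of 0x3000.
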
template <typename A>
typename A::P::uint_t* Rebaser<A>::mappedAddressForVMAddress(pint_t vmaddress)
{
    pint_t vmaddr = this->maskedVMAddress(vmaddress);
    const std::vector<MachOLayoutAbstraction::Segment>& segments = fLayout.getSegments();
    for(std::vector<MachOLayoutAbstraction::Segment>::const_iterator it = segments.begin(); it != segments.end(); ++it) {
        const MachOLayoutAbstraction::Segment& seg = *it;
        if ( (seg.address() <= vmaddr) && (vmaddr < (seg.address()+seg.size())) ) {
            return (pint_t*)((vmaddr - seg.address()) + (uint8_t*)seg.mappedAddress());
        }
    }
    throwf("mappedAddressForVMAddress(0x%08llX) not found", (uint64_t)vmaddr);
}

template <typename A>
typename A::P::uint_t* Rebaser<A>::mappedAddressForNewAddress(pint_t vmaddress)
{
    const std::vector<MachOLayoutAbstraction::Segment>& segments = fLayout.getSegments();
    for(std::vector<MachOLayoutAbstraction::Segment>::const_iterator it = segments.begin(); it != segments.end(); ++it) {
        const MachOLayoutAbstraction::Segment& seg = *it;
        if ( (seg.newAddress() <= vmaddress) && (vmaddress < (seg.newAddress()+seg.size())) ) {
            return (pint_t*)((vmaddress - seg.newAddress()) + (uint8_t*)seg.mappedAddress());
        }
    }
    throwf("mappedAddressForNewAddress(0x%08llX) not found", (uint64_t)vmaddress);
}

template <typename A>
typename A::P::uint_t Rebaser<A>::getSlideForNewAddress(pint_t newAddress)
{
    const std::vector<MachOLayoutAbstraction::Segment>& segments = fLayout.getSegments();
    for(std::vector<MachOLayoutAbstraction::Segment>::const_iterator it = segments.begin(); it != segments.end(); ++it) {
        const MachOLayoutAbstraction::Segment& seg = *it;
        if ( (seg.newAddress() <= newAddress) && (newAddress < (seg.newAddress()+seg.size())) ) {
            return seg.newAddress() - seg.address();
        }
    }
    throwf("new address 0x%08llX not found", (uint64_t)newAddress);
}

template <typename A>
typename A::P::uint_t* Rebaser<A>::mappedAddressForRelocAddress(pint_t r_address)
{
    if ( fOrignalVMRelocBaseAddressValid )
        return this->mappedAddressForVMAddress(r_address + fOrignalVMRelocBaseAddress);
    else
        throw "can't apply relocation. Relocation base not known";
}

template <>
void Rebaser<arm>::makeNoPicStub(uint8_t* stub, pint_t logicalAddress)
{
    uint32_t* instructions = (uint32_t*)stub;
    if ( (LittleEndian::get32(instructions[0]) == 0xE59FC004) &&
         (LittleEndian::get32(instructions[1]) == 0xE08FC00C) &&
         (LittleEndian::get32(instructions[2]) == 0xE59CF000) ) {
        uint32_t lazyPtrAddress = instructions[3] + logicalAddress + 12;
        LittleEndian::set32(instructions[0], 0xE59FC000);        // ldr ip, [pc, #0]
        LittleEndian::set32(instructions[1], 0xE59CF000);        // ldr pc, [ip]
        LittleEndian::set32(instructions[2], lazyPtrAddress);    // .long L_foo$lazy_ptr
        LittleEndian::set32(instructions[3], 0xE1A00000);        // nop
    }
    else
        fprintf(stderr, "unoptimized stub in %s at 0x%08X\n", fLayout.getFilePath(), logicalAddress);
}

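// Note on makeNoPicStub(): an ARM PIC stub is the four-instruction sequence
//     ldr  ip, [pc, #4]      (0xE59FC004)
//     add  ip, pc, ip        (0xE08FC00C)
//     ldr  pc, [ip]          (0xE59CF000)
//     .long <offset to lazy pointer>
// Once placed in the shared cache the lazy pointer's absolute address is known, so the stub is
// rewritten to load it directly:  ldr ip, [pc, #0];  ldr pc, [ip];  .long L_foo$lazy_ptr;  nop.
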
// disable this optimization to allow cache to slide
template <>
void Rebaser<arm>::optimzeStubs()
{
    // convert pic stubs to no-pic stubs in dyld shared cache
    const macho_load_command<P>* const cmds = (macho_load_command<P>*)((uint8_t*)fHeader + sizeof(macho_header<P>));
    const uint32_t cmd_count = fHeader->ncmds();
    const macho_load_command<P>* cmd = cmds;
    for (uint32_t i = 0; i < cmd_count; ++i) {
        if ( cmd->cmd() == macho_segment_command<P>::CMD ) {
            macho_segment_command<P>* seg = (macho_segment_command<P>*)cmd;
            macho_section<P>* const sectionsStart = (macho_section<P>*)((char*)seg + sizeof(macho_segment_command<P>));
            macho_section<P>* const sectionsEnd = &sectionsStart[seg->nsects()];
            for(macho_section<P>* sect = sectionsStart; sect < sectionsEnd; ++sect) {
                if ( (sect->flags() & SECTION_TYPE) == S_SYMBOL_STUBS ) {
                    const uint32_t stubSize = sect->reserved2();
                    // ARM PIC stubs are 4 32-bit instructions long
                    if ( stubSize == 16 ) {
                        uint32_t stubCount = sect->size() / 16;
                        pint_t stubLogicalAddress = sect->addr();
                        uint8_t* stubMappedAddress = (uint8_t*)mappedAddressForNewAddress(stubLogicalAddress);
                        for(uint32_t s=0; s < stubCount; ++s) {
                            makeNoPicStub(stubMappedAddress, stubLogicalAddress);
                            stubLogicalAddress += 16;
                            stubMappedAddress += 16;
                        }
                    }
                }
            }
        }
        cmd = (const macho_load_command<P>*)(((uint8_t*)cmd)+cmd->cmdsize());
    }
}

template <typename A>
void Rebaser<A>::optimzeStubs()
{
    // other architectures don't need stubs changed in shared cache
}

template <typename A>
void Rebaser<A>::adjustSymbolTable()
{
    macho_nlist<P>* symbolTable = (macho_nlist<P>*)(&fLinkEditBase[fSymbolTable->symoff()]);

    // walk all exports and slide their n_value
    macho_nlist<P>* lastExport = &symbolTable[fDynamicSymbolTable->iextdefsym()+fDynamicSymbolTable->nextdefsym()];
    for (macho_nlist<P>* entry = &symbolTable[fDynamicSymbolTable->iextdefsym()]; entry < lastExport; ++entry) {
        if ( (entry->n_type() & N_TYPE) == N_SECT )
            entry->set_n_value(entry->n_value() + this->getSlideForVMAddress(entry->n_value()));
    }

    // walk all local symbols and slide their n_value (don't adjust any stabs)
    macho_nlist<P>* lastLocal = &symbolTable[fDynamicSymbolTable->ilocalsym()+fDynamicSymbolTable->nlocalsym()];
    for (macho_nlist<P>* entry = &symbolTable[fDynamicSymbolTable->ilocalsym()]; entry < lastLocal; ++entry) {
        if ( (entry->n_sect() != NO_SECT) && ((entry->n_type() & N_STAB) == 0) )
            entry->set_n_value(entry->n_value() + this->getSlideForVMAddress(entry->n_value()));
    }
}

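// adjustExportInfo() below rewrites the export trie.  Exported addresses in the trie are offsets
// from the mach_header, so an entry only changes when its segment slides by a different amount
// than the segment containing the mach_header:
//     newOffset = oldOffset + (slideOf(baseAddress + oldOffset) - slideOf(baseAddress))
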
template <typename A>
void Rebaser<A>::adjustExportInfo()
{
    // if no export info, nothing to adjust
    if ( fDyldInfo->export_size() == 0 )
        return;

    // since export info addresses are offsets from mach_header, everything in __TEXT is fine
    // only __DATA addresses need to be updated
    const uint8_t* start = fLayout.getDyldInfoExports();
    const uint8_t* end = &start[fDyldInfo->export_size()];
    std::vector<mach_o::trie::Entry> originalExports;
    try {
        parseTrie(start, end, originalExports);
    }
    catch (const char* msg) {
        throwf("%s in %s", msg, fLayout.getFilePath());
    }

    std::vector<mach_o::trie::Entry> newExports;
    newExports.reserve(originalExports.size());
    pint_t baseAddress = this->getBaseAddress();
    pint_t baseAddressSlide = this->getSlideForVMAddress(baseAddress);
    for (std::vector<mach_o::trie::Entry>::iterator it=originalExports.begin(); it != originalExports.end(); ++it) {
        // remove symbols used by the static linker only
        if (   (strncmp(it->name, "$ld$", 4) == 0)
            || (strncmp(it->name, ".objc_class_name",16) == 0)
            || (strncmp(it->name, ".objc_category_name",19) == 0) ) {
            //fprintf(stderr, "ignoring symbol %s\n", it->name);
            continue;
        }
        // adjust symbols in slid segments
        //uint32_t oldOffset = it->address;
        it->address += (this->getSlideForVMAddress(it->address + baseAddress) - baseAddressSlide);
        //fprintf(stderr, "orig=0x%08X, new=0x%08llX, sym=%s\n", oldOffset, it->address, it->name);
        newExports.push_back(*it);
    }

    // rebuild export trie
    std::vector<uint8_t> newExportTrieBytes;
    newExportTrieBytes.reserve(fDyldInfo->export_size());
    mach_o::trie::makeTrie(newExports, newExportTrieBytes);
    // pad to pointer size
    while ( (newExportTrieBytes.size() % sizeof(pint_t)) != 0 )
        newExportTrieBytes.push_back(0);

    // allocate new buffer and set export_off to use new buffer instead
    uint32_t newExportsSize = newExportTrieBytes.size();
    uint8_t* sideTrie = new uint8_t[newExportsSize];
    memcpy(sideTrie, &newExportTrieBytes[0], newExportsSize);
    fLayout.setDyldInfoExports(sideTrie);
    ((macho_dyld_info_command<P>*)fDyldInfo)->set_export_off(0);        // invalidate old trie
    ((macho_dyld_info_command<P>*)fDyldInfo)->set_export_size(newExportsSize);
}

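// doCodeUpdate() applies one split-seg fixup to code.  The "kind" values handled below are:
//   1  32-bit pointer                        5  thumb2 movw (adjusts the low 16 bits of the target)
//   2  64-bit pointer                        6  ARM movw (adjusts the low 16 bits of the target)
//   3  arm64 ADRP                            0x1X  thumb2 movt (X = high 4 bits of the paired movw)
//   4  i386 reference into __IMPORT          0x2X  ARM movt (X = high 4 bits of the paired movw)
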
template <typename A>
void Rebaser<A>::doCodeUpdate(uint8_t kind, uint64_t address, int64_t codeToDataDelta, int64_t codeToImportDelta)
{
    // begin hack for <rdar://problem/8253549> split seg info wrong for x86_64 stub helpers
    if ( (fSkipSplitSegInfoStart <= address) && (address < fSkipSplitSegInfoEnd) ) {
        uint8_t* p = (uint8_t*)mappedAddressForVMAddress(address);
        // only ignore split seg info for "push" instructions
        if ( p[-1] == 0x68 )
            return;
    }
    // end hack for <rdar://problem/8253549>

    //fprintf(stderr, "doCodeUpdate(kind=%d, address=0x%0llX, dataDelta=0x%08llX, importDelta=0x%08llX, path=%s)\n",
    //                kind, address, codeToDataDelta, codeToImportDelta, fLayout.getFilePath());
    uint32_t* p;
    uint32_t instruction;
    uint32_t value;
    uint64_t value64;
    switch (kind) {
        case 1: // 32-bit pointer
            p = (uint32_t*)mappedAddressForVMAddress(address);
            value = A::P::E::get32(*p);
            value += codeToDataDelta;
            A::P::E::set32(*p, value);
            break;
        case 2: // 64-bit pointer
            p = (uint32_t*)mappedAddressForVMAddress(address);
            value64 = A::P::E::get64(*(uint64_t*)p);
            value64 += codeToDataDelta;
            A::P::E::set64(*(uint64_t*)p, value64);
            break;
        case 4: // only used for i386, a reference to something in the IMPORT segment
            p = (uint32_t*)mappedAddressForVMAddress(address);
            value = A::P::E::get32(*p);
            value += codeToImportDelta;
            A::P::E::set32(*p, value);
            break;
        case 5: // used by thumb2 movw
            p = (uint32_t*)mappedAddressForVMAddress(address);
            instruction = A::P::E::get32(*p);
            // codeToDataDelta is always a multiple of 4096, so only top 4 bits of lo16 will ever need adjusting
            value = (instruction & 0x0000000F) + (codeToDataDelta >> 12);
            instruction = (instruction & 0xFFFFFFF0) | (value & 0x0000000F);
            A::P::E::set32(*p, instruction);
            break;
        case 6: // used by ARM movw
            p = (uint32_t*)mappedAddressForVMAddress(address);
            instruction = A::P::E::get32(*p);
            // codeToDataDelta is always a multiple of 4096, so only top 4 bits of lo16 will ever need adjusting
            value = ((instruction & 0x000F0000) >> 16) + (codeToDataDelta >> 12);
            instruction = (instruction & 0xFFF0FFFF) | ((value << 16) & 0x000F0000);
            A::P::E::set32(*p, instruction);
            break;
        case 0x10: case 0x11: case 0x12: case 0x13: case 0x14: case 0x15: case 0x16: case 0x17:
        case 0x18: case 0x19: case 0x1A: case 0x1B: case 0x1C: case 0x1D: case 0x1E: case 0x1F:
            // used by thumb2 movt (low nibble of kind is high 4-bits of paired movw)
            {
                p = (uint32_t*)mappedAddressForVMAddress(address);
                instruction = A::P::E::get32(*p);
                // extract 16-bit value from instruction
                uint32_t i    = ((instruction & 0x00000400) >> 10);
                uint32_t imm4 =  (instruction & 0x0000000F);
                uint32_t imm3 = ((instruction & 0x70000000) >> 28);
                uint32_t imm8 = ((instruction & 0x00FF0000) >> 16);
                uint32_t imm16 = (imm4 << 12) | (i << 11) | (imm3 << 8) | imm8;
                // combine with codeToDataDelta and kind nibble
                uint32_t targetValue = (imm16 << 16) | ((kind & 0xF) << 12);
                uint32_t newTargetValue = targetValue + codeToDataDelta;
                // construct new bits slices
                uint32_t imm4_ = (newTargetValue & 0xF0000000) >> 28;
                uint32_t i_    = (newTargetValue & 0x08000000) >> 27;
                uint32_t imm3_ = (newTargetValue & 0x07000000) >> 24;
                uint32_t imm8_ = (newTargetValue & 0x00FF0000) >> 16;
                // update instruction to match codeToDataDelta
                uint32_t newInstruction = (instruction & 0x8F00FBF0) | imm4_ | (i_ << 10) | (imm3_ << 28) | (imm8_ << 16);
                A::P::E::set32(*p, newInstruction);
            }
            break;
        case 0x20: case 0x21: case 0x22: case 0x23: case 0x24: case 0x25: case 0x26: case 0x27:
        case 0x28: case 0x29: case 0x2A: case 0x2B: case 0x2C: case 0x2D: case 0x2E: case 0x2F:
            // used by arm movt (low nibble of kind is high 4-bits of paired movw)
            {
                p = (uint32_t*)mappedAddressForVMAddress(address);
                instruction = A::P::E::get32(*p);
                // extract 16-bit value from instruction
                uint32_t imm4  = ((instruction & 0x000F0000) >> 16);
                uint32_t imm12 =  (instruction & 0x00000FFF);
                uint32_t imm16 = (imm4 << 12) | imm12;
                // combine with codeToDataDelta and kind nibble
                uint32_t targetValue = (imm16 << 16) | ((kind & 0xF) << 12);
                uint32_t newTargetValue = targetValue + codeToDataDelta;
                // construct new bits slices
                uint32_t imm4_  = (newTargetValue & 0xF0000000) >> 28;
                uint32_t imm12_ = (newTargetValue & 0x0FFF0000) >> 16;
                // update instruction to match codeToDataDelta
                uint32_t newInstruction = (instruction & 0xFFF0F000) | (imm4_ << 16) | imm12_;
                A::P::E::set32(*p, newInstruction);
            }
            break;
        case 3: // used for arm64 ADRP
            p = (uint32_t*)mappedAddressForVMAddress(address);
            instruction = A::P::E::get32(*p);
            if ( (instruction & 0x9F000000) == 0x90000000 ) {
                // codeToDataDelta is always a multiple of 4096, so only top 4 bits of lo16 will ever need adjusting
                value64 = ((instruction & 0x60000000) >> 17) | ((instruction & 0x00FFFFE0) << 9);
                value64 += codeToDataDelta;
                instruction = (instruction & 0x9F00001F) | ((value64 << 17) & 0x60000000) | ((value64 >> 9) & 0x00FFFFE0);
                A::P::E::set32(*p, instruction);
            }
            break;
        default:
            throwf("invalid kind=%d in split seg info", kind);
    }
}

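// The LC_SEGMENT_SPLIT_INFO payload walked below appears to be a sequence of runs: a one-byte
// kind followed by ULEB128-encoded address deltas, with a zero delta ending the run (and a zero
// kind byte ending the whole payload, as checked in adjustCode()).  Illustrative example: fixups
// at base+0x1000, +0x1008 and +0x1010 would be the delta sequence 0x1000, 8, 8, 0.
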
template <typename A>
const uint8_t* Rebaser<A>::doCodeUpdateForEachULEB128Address(const uint8_t* p, uint8_t kind, uint64_t orgBaseAddress, int64_t codeToDataDelta, int64_t codeToImportDelta)
{
    uint64_t address = 0;
    uint64_t delta = 0;
    uint32_t shift = 0;
    bool more = true;
    do {
        // accumulate one ULEB128-encoded delta
        uint8_t byte = *p++;
        delta |= ((byte & 0x7F) << shift);
        shift += 7;
        if ( byte < 0x80 ) {
            // a zero delta terminates this run of addresses
            if ( delta != 0 ) {
                address += delta;
                doCodeUpdate(kind, address+orgBaseAddress, codeToDataDelta, codeToImportDelta);
                delta = 0;
                shift = 0;
            }
            else {
                more = false;
            }
        }
    } while (more);
    return p;
}

template <typename A>
void Rebaser<A>::adjustCode()
{
    if ( fSplittingSegments ) {
        // get uleb128 compressed runs of code addresses to update
        const uint8_t* infoStart = NULL;
        const uint8_t* infoEnd = NULL;
        const macho_segment_command<P>* seg;
        const macho_load_command<P>* const cmds = (macho_load_command<P>*)((uint8_t*)fHeader + sizeof(macho_header<P>));
        const uint32_t cmd_count = fHeader->ncmds();
        const macho_load_command<P>* cmd = cmds;
        for (uint32_t i = 0; i < cmd_count; ++i) {
            switch (cmd->cmd()) {
                case LC_SEGMENT_SPLIT_INFO:
                    {
                        const macho_linkedit_data_command<P>* segInfo = (macho_linkedit_data_command<P>*)cmd;
                        infoStart = &fLinkEditBase[segInfo->dataoff()];
                        infoEnd = &infoStart[segInfo->datasize()];
                    }
                    break;
                // begin hack for <rdar://problem/8253549> split seg info wrong for x86_64 stub helpers
                case macho_segment_command<P>::CMD:
                    seg = (macho_segment_command<P>*)cmd;
                    if ( (getArchitecture() == CPU_TYPE_X86_64) && (strcmp(seg->segname(), "__TEXT") == 0) ) {
                        const macho_section<P>* const sectionsStart = (macho_section<P>*)((char*)seg + sizeof(macho_segment_command<P>));
                        const macho_section<P>* const sectionsEnd = &sectionsStart[seg->nsects()];
                        for(const macho_section<P>* sect = sectionsStart; sect < sectionsEnd; ++sect) {
                            if ( strcmp(sect->sectname(), "__stub_helper") == 0 ) {
                                fSkipSplitSegInfoStart = sect->addr();
                                fSkipSplitSegInfoEnd = sect->addr() + sect->size() - 16;
                            }
                        }
                    }
                    break;
                // end hack for <rdar://problem/8253549> split seg info wrong for x86_64 stub helpers
            }
            cmd = (const macho_load_command<P>*)(((uint8_t*)cmd)+cmd->cmdsize());
        }

        // calculate how much we need to slide writable segments
        const uint64_t orgBaseAddress = this->getBaseAddress();
        int64_t codeToDataDelta = 0;
        int64_t codeToImportDelta = 0;
        const std::vector<MachOLayoutAbstraction::Segment>& segments = fLayout.getSegments();
        const MachOLayoutAbstraction::Segment& codeSeg = segments[0];
        for(std::vector<MachOLayoutAbstraction::Segment>::const_iterator it = segments.begin(); it != segments.end(); ++it) {
            const MachOLayoutAbstraction::Segment& dataSeg = *it;
            if ( strcmp(dataSeg.name(), "__IMPORT") == 0 )
                codeToImportDelta = (dataSeg.newAddress() - codeSeg.newAddress()) - (dataSeg.address() - codeSeg.address());
            else if ( dataSeg.writable() ) {
                if ( (strcmp(dataSeg.name(), "__DATA") != 0) && (strcmp(dataSeg.name(), "__OBJC") != 0) )
                    throwf("only one rw segment named '__DATA' can be used in dylibs placed in the dyld shared cache (%s)", fLayout.getFilePath());
                codeToDataDelta = (dataSeg.newAddress() - codeSeg.newAddress()) - (dataSeg.address() - codeSeg.address());
            }
        }

        // decompress and call doCodeUpdate() on each address
        for(const uint8_t* p = infoStart; (*p != 0) && (p < infoEnd);) {
            uint8_t kind = *p++;
            p = this->doCodeUpdateForEachULEB128Address(p, kind, orgBaseAddress, codeToDataDelta, codeToImportDelta);
        }
    }
}

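// doRebase() slides one location recorded in the compressed rebase info:
//   REBASE_TYPE_POINTER          pointer-sized slot, slid by the slide of the address it holds
//   REBASE_TYPE_TEXT_ABSOLUTE32  32-bit absolute address embedded in __TEXT
//   REBASE_TYPE_TEXT_PCREL32     32-bit pc-relative value, recomputed from the slid target address
// Every touched location is also recorded in pointersInData.
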
template <typename A>
void Rebaser<A>::doRebase(int segIndex, uint64_t segOffset, uint8_t type, std::vector<void*>& pointersInData)
{
    const std::vector<MachOLayoutAbstraction::Segment>& segments = fLayout.getSegments();
    if ( segIndex >= segments.size() )
        throw "bad segment index in rebase info";
    const MachOLayoutAbstraction::Segment& seg = segments[segIndex];
    uint8_t* mappedAddr = (uint8_t*)seg.mappedAddress() + segOffset;
    pint_t* mappedAddrP = (pint_t*)mappedAddr;
    uint32_t* mappedAddr32 = (uint32_t*)mappedAddr;
    pint_t valueP;
    pint_t valuePnew;
    uint32_t value32;
    int32_t svalue32;
    int32_t svalue32new;
    switch ( type ) {
        case REBASE_TYPE_POINTER:
            valueP = P::getP(*mappedAddrP);
            try {
                P::setP(*mappedAddrP, valueP + this->getSlideForVMAddress(valueP));
            }
            catch (const char* msg) {
                throwf("at offset=0x%08llX in seg=%s, pointer cannot be rebased because it does not point to __TEXT or __DATA. %s\n",
                        segOffset, seg.name(), msg);
            }
            break;

        case REBASE_TYPE_TEXT_ABSOLUTE32:
            value32 = E::get32(*mappedAddr32);
            E::set32(*mappedAddr32, value32 + this->getSlideForVMAddress(value32));
            break;

        case REBASE_TYPE_TEXT_PCREL32:
            svalue32 = E::get32(*mappedAddr32);
            valueP = seg.address() + segOffset + 4 + svalue32;
            valuePnew = valueP + this->getSlideForVMAddress(valueP);
            svalue32new = seg.address() + segOffset + 4 - valuePnew;
            E::set32(*mappedAddr32, svalue32new);
            break;

        default:
            throw "bad rebase type";
    }
    pointersInData.push_back(mappedAddr);
}

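// applyRebaseInfo() interprets the LC_DYLD_INFO rebase opcode stream (REBASE_OPCODE_*): the
// stream sets a rebase type and a segment/offset cursor, then DO_REBASE opcodes call doRebase()
// and advance the cursor.  Illustrative example: SET_TYPE_IMM(POINTER),
// SET_SEGMENT_AND_OFFSET_ULEB(segment 2, offset 0x10), DO_REBASE_ULEB_TIMES(3) rebases three
// consecutive pointers starting 0x10 bytes into segment 2.
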
template <typename A>
void Rebaser<A>::applyRebaseInfo(std::vector<void*>& pointersInData)
{
    const uint8_t* p = &fLinkEditBase[fDyldInfo->rebase_off()];
    const uint8_t* end = &p[fDyldInfo->rebase_size()];

    uint8_t type = 0;
    int segIndex = 0;
    uint64_t segOffset = 0;
    uint32_t count;
    uint32_t skip;
    bool done = false;
    while ( !done && (p < end) ) {
        uint8_t immediate = *p & REBASE_IMMEDIATE_MASK;
        uint8_t opcode = *p & REBASE_OPCODE_MASK;
        ++p;
        switch (opcode) {
            case REBASE_OPCODE_DONE:
                done = true;
                break;
            case REBASE_OPCODE_SET_TYPE_IMM:
                type = immediate;
                break;
            case REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB:
                segIndex = immediate;
                segOffset = read_uleb128(p, end);
                break;
            case REBASE_OPCODE_ADD_ADDR_ULEB:
                segOffset += read_uleb128(p, end);
                break;
            case REBASE_OPCODE_ADD_ADDR_IMM_SCALED:
                segOffset += immediate*sizeof(pint_t);
                break;
            case REBASE_OPCODE_DO_REBASE_IMM_TIMES:
                for (int i=0; i < immediate; ++i) {
                    doRebase(segIndex, segOffset, type, pointersInData);
                    segOffset += sizeof(pint_t);
                }
                break;
            case REBASE_OPCODE_DO_REBASE_ULEB_TIMES:
                count = read_uleb128(p, end);
                for (uint32_t i=0; i < count; ++i) {
                    doRebase(segIndex, segOffset, type, pointersInData);
                    segOffset += sizeof(pint_t);
                }
                break;
            case REBASE_OPCODE_DO_REBASE_ADD_ADDR_ULEB:
                doRebase(segIndex, segOffset, type, pointersInData);
                segOffset += read_uleb128(p, end) + sizeof(pint_t);
                break;
            case REBASE_OPCODE_DO_REBASE_ULEB_TIMES_SKIPPING_ULEB:
                count = read_uleb128(p, end);
                skip = read_uleb128(p, end);
                for (uint32_t i=0; i < count; ++i) {
                    doRebase(segIndex, segOffset, type, pointersInData);
                    segOffset += skip + sizeof(pint_t);
                }
                break;
            default:
                throwf("bad rebase opcode %d", *p);
        }
    }
}

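// adjustDATA() is the classic-relocation counterpart of applyRebaseInfo(), used when the image
// has no compressed rebase info: it slides every pointer named by a local relocation, then slides
// the non-lazy pointers whose indirect-table entry is INDIRECT_SYMBOL_LOCAL.
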
template <typename A>
void Rebaser<A>::adjustDATA()
{
    // walk all local relocations and slide every pointer
    const macho_relocation_info<P>* const relocsStart = (macho_relocation_info<P>*)(&fLinkEditBase[fDynamicSymbolTable->locreloff()]);
    const macho_relocation_info<P>* const relocsEnd = &relocsStart[fDynamicSymbolTable->nlocrel()];
    for (const macho_relocation_info<P>* reloc=relocsStart; reloc < relocsEnd; ++reloc) {
        this->doLocalRelocation(reloc);
    }

    // walk non-lazy-pointers and slide the ones that are LOCAL
    const macho_load_command<P>* const cmds = (macho_load_command<P>*)((uint8_t*)fHeader + sizeof(macho_header<P>));
    const uint32_t cmd_count = fHeader->ncmds();
    const macho_load_command<P>* cmd = cmds;
    for (uint32_t i = 0; i < cmd_count; ++i) {
        if ( cmd->cmd() == macho_segment_command<P>::CMD ) {
            const macho_segment_command<P>* seg = (macho_segment_command<P>*)cmd;
            const macho_section<P>* const sectionsStart = (macho_section<P>*)((char*)seg + sizeof(macho_segment_command<P>));
            const macho_section<P>* const sectionsEnd = &sectionsStart[seg->nsects()];
            const uint32_t* const indirectTable = (uint32_t*)(&fLinkEditBase[fDynamicSymbolTable->indirectsymoff()]);
            for(const macho_section<P>* sect = sectionsStart; sect < sectionsEnd; ++sect) {
                if ( (sect->flags() & SECTION_TYPE) == S_NON_LAZY_SYMBOL_POINTERS ) {
                    const uint32_t indirectTableOffset = sect->reserved1();
                    uint32_t pointerCount = sect->size() / sizeof(pint_t);
                    pint_t* nonLazyPointerAddr = this->mappedAddressForVMAddress(sect->addr());
                    for (uint32_t j=0; j < pointerCount; ++j, ++nonLazyPointerAddr) {
                        if ( E::get32(indirectTable[indirectTableOffset + j]) == INDIRECT_SYMBOL_LOCAL ) {
                            pint_t value = A::P::getP(*nonLazyPointerAddr);
                            P::setP(*nonLazyPointerAddr, value + this->getSlideForVMAddress(value));
                        }
                    }
                }
            }
        }
        cmd = (const macho_load_command<P>*)(((uint8_t*)cmd)+cmd->cmdsize());
    }
}

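// adjustRelocBaseAddresses(): when segments are being split apart and the image was not built
// with MH_SPLIT_SEGS, relocation r_address fields (offsets from the reloc base) are rewritten to
// be relative to the first writable segment by subtracting that segment's original distance from
// the mach_header.
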
template <typename A>
void Rebaser<A>::adjustRelocBaseAddresses()
{
    // split seg files need reloc base to be first writable segment
    if ( fSplittingSegments && ((fHeader->flags() & MH_SPLIT_SEGS) == 0) ) {

        // get amount to adjust reloc address
        int32_t relocAddressAdjust = 0;
        const std::vector<MachOLayoutAbstraction::Segment>& segments = fLayout.getSegments();
        for(std::vector<MachOLayoutAbstraction::Segment>::const_iterator it = segments.begin(); it != segments.end(); ++it) {
            const MachOLayoutAbstraction::Segment& seg = *it;
            if ( seg.writable() ) {
                relocAddressAdjust = seg.address() - segments[0].address();
                break;
            }
        }

        // walk all local relocations and adjust every address
        macho_relocation_info<P>* const relocsStart = (macho_relocation_info<P>*)(&fLinkEditBase[fDynamicSymbolTable->locreloff()]);
        macho_relocation_info<P>* const relocsEnd = &relocsStart[fDynamicSymbolTable->nlocrel()];
        for (macho_relocation_info<P>* reloc=relocsStart; reloc < relocsEnd; ++reloc) {
            reloc->set_r_address(reloc->r_address()-relocAddressAdjust);
        }

        // walk all external relocations and adjust every address
        macho_relocation_info<P>* const externRelocsStart = (macho_relocation_info<P>*)(&fLinkEditBase[fDynamicSymbolTable->extreloff()]);
        macho_relocation_info<P>* const externRelocsEnd = &externRelocsStart[fDynamicSymbolTable->nextrel()];
        for (macho_relocation_info<P>* reloc=externRelocsStart; reloc < externRelocsEnd; ++reloc) {
            reloc->set_r_address(reloc->r_address()-relocAddressAdjust);
        }
    }
}

template <>
void Rebaser<x86_64>::adjustRelocBaseAddresses()
{
    // x86_64 already has reloc base of first writable segment
}

template <>
void Rebaser<x86_64>::doLocalRelocation(const macho_relocation_info<x86_64::P>* reloc)
{
    if ( reloc->r_type() == X86_64_RELOC_UNSIGNED ) {
        pint_t* addr = this->mappedAddressForRelocAddress(reloc->r_address());
        pint_t value = P::getP(*addr);
        P::setP(*addr, value + this->getSlideForVMAddress(value));
    }
    else {
        throw "invalid relocation type";
    }
}

template <>
void Rebaser<x86>::doLocalRelocation(const macho_relocation_info<P>* reloc)
{
    if ( (reloc->r_address() & R_SCATTERED) == 0 ) {
        if ( reloc->r_type() == GENERIC_RELOC_VANILLA ) {
            pint_t* addr = this->mappedAddressForRelocAddress(reloc->r_address());
            pint_t value = P::getP(*addr);
            P::setP(*addr, value + this->getSlideForVMAddress(value));
        }
    }
    else {
        macho_scattered_relocation_info<P>* sreloc = (macho_scattered_relocation_info<P>*)reloc;
        if ( sreloc->r_type() == GENERIC_RELOC_PB_LA_PTR ) {
            sreloc->set_r_value( sreloc->r_value() + this->getSlideForVMAddress(sreloc->r_value()) );
        }
        else
            throw "cannot rebase final linked image with scattered relocations";
    }
}

template <typename A>
void Rebaser<A>::doLocalRelocation(const macho_relocation_info<P>* reloc)
{
    if ( (reloc->r_address() & R_SCATTERED) == 0 ) {
        if ( reloc->r_type() == GENERIC_RELOC_VANILLA ) {
            pint_t* addr = this->mappedAddressForRelocAddress(reloc->r_address());
            pint_t value = P::getP(*addr);
            P::setP(*addr, value + this->getSlideForVMAddress(value));
        }
    }
    else {
        throw "cannot rebase final linked image with scattered relocations";
    }
}

template <typename A>
void Rebaser<A>::calculateRelocBase()
{
    const std::vector<MachOLayoutAbstraction::Segment>& segments = fLayout.getSegments();
    if ( fHeader->flags() & MH_SPLIT_SEGS ) {
        // reloc addresses are from the start of the first writable segment
        for(std::vector<MachOLayoutAbstraction::Segment>::const_iterator it = segments.begin(); it != segments.end(); ++it) {
            const MachOLayoutAbstraction::Segment& seg = *it;
            if ( seg.writable() ) {
                // found first writable segment
                fOrignalVMRelocBaseAddress = seg.address();
                fOrignalVMRelocBaseAddressValid = true;
            }
        }
    }
    else {
        // reloc addresses are from the start of the mapped file (base address)
        fOrignalVMRelocBaseAddress = segments[0].address();
        fOrignalVMRelocBaseAddressValid = true;
    }
}

template <>
void Rebaser<x86_64>::calculateRelocBase()
{
    // reloc addresses are always based from the start of the first writable segment
    const std::vector<MachOLayoutAbstraction::Segment>& segments = fLayout.getSegments();
    for(std::vector<MachOLayoutAbstraction::Segment>::const_iterator it = segments.begin(); it != segments.end(); ++it) {
        const MachOLayoutAbstraction::Segment& seg = *it;
        if ( seg.writable() ) {
            // found first writable segment
            fOrignalVMRelocBaseAddress = seg.address();
            fOrignalVMRelocBaseAddressValid = true;
        }
    }
}

class MultiArchRebaser
{
public:
    MultiArchRebaser(const char* path, bool writable=false)
        : fMappingAddress(0), fFileSize(0)
    {
        // map in whole file
        int fd = ::open(path, (writable ? O_RDWR : O_RDONLY), 0);
        if ( fd == -1 )
            throwf("can't open file, errno=%d", errno);
        struct stat stat_buf;
        if ( fstat(fd, &stat_buf) == -1)
            throwf("can't stat open file %s, errno=%d", path, errno);
        if ( stat_buf.st_size < 20 )
            throwf("file too small %s", path);
        const int prot = writable ? (PROT_READ | PROT_WRITE) : PROT_READ;
        const int flags = writable ? (MAP_FILE | MAP_SHARED) : (MAP_FILE | MAP_PRIVATE);
        uint8_t* p = (uint8_t*)::mmap(NULL, stat_buf.st_size, prot, flags, fd, 0);
        if ( p == (uint8_t*)(-1) )
            throwf("can't map file %s, errno=%d", path, errno);

        // if fat file, process each architecture
        const fat_header* fh = (fat_header*)p;
        const mach_header* mh = (mach_header*)p;
        if ( fh->magic == OSSwapBigToHostInt32(FAT_MAGIC) ) {
            // Fat header is always big-endian
            const struct fat_arch* archs = (struct fat_arch*)(p + sizeof(struct fat_header));
            for (unsigned long i=0; i < OSSwapBigToHostInt32(fh->nfat_arch); ++i) {
                uint32_t fileOffset = OSSwapBigToHostInt32(archs[i].offset);
                try {
                    switch ( OSSwapBigToHostInt32(archs[i].cputype) ) {
                        case CPU_TYPE_I386:
                            fRebasers.push_back(new Rebaser<x86>(&p[fileOffset]));
                            break;
                        case CPU_TYPE_X86_64:
                            fRebasers.push_back(new Rebaser<x86_64>(&p[fileOffset]));
                            break;
                        case CPU_TYPE_ARM:
                            fRebasers.push_back(new Rebaser<arm>(&p[fileOffset]));
                            break;
                        default:
                            throw "unknown file format";
                    }
                }
                catch (const char* msg) {
                    fprintf(stderr, "rebase warning: %s for %s\n", msg, path);
                }
            }
        }
        else {
            try {
                if ( (OSSwapLittleToHostInt32(mh->magic) == MH_MAGIC) && (OSSwapLittleToHostInt32(mh->cputype) == CPU_TYPE_I386)) {
                    fRebasers.push_back(new Rebaser<x86>(mh));
                }
                else if ( (OSSwapLittleToHostInt32(mh->magic) == MH_MAGIC_64) && (OSSwapLittleToHostInt32(mh->cputype) == CPU_TYPE_X86_64)) {
                    fRebasers.push_back(new Rebaser<x86_64>(mh));
                }
                else if ( (OSSwapLittleToHostInt32(mh->magic) == MH_MAGIC) && (OSSwapLittleToHostInt32(mh->cputype) == CPU_TYPE_ARM)) {
                    fRebasers.push_back(new Rebaser<arm>(mh));
                }
                else
                    throw "unknown file format";
            }
            catch (const char* msg) {
                fprintf(stderr, "rebase warning: %s for %s\n", msg, path);
            }
        }

        fMappingAddress = p;
        fFileSize = stat_buf.st_size;
    }

    ~MultiArchRebaser() { ::munmap(fMappingAddress, fFileSize); }

    const std::vector<AbstractRebaser*>& getArchs() const { return fRebasers; }
    void commit() { ::msync(fMappingAddress, fFileSize, MS_ASYNC); }

private:
    std::vector<AbstractRebaser*>   fRebasers;
    void*                           fMappingAddress;
    uint64_t                        fFileSize;
};

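// Minimal usage sketch (illustrative only; the path is hypothetical and error handling is omitted):
//
//     MultiArchRebaser mar("/tmp/libfoo.dylib", true);          // map the file read/write
//     std::vector<void*> pointersInData;
//     const std::vector<AbstractRebaser*>& archs = mar.getArchs();
//     for (std::vector<AbstractRebaser*>::const_iterator it = archs.begin(); it != archs.end(); ++it)
//         (*it)->rebase(pointersInData);                        // slide each architecture slice
//     mar.commit();                                             // flush changes to the mapped file
//
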
#endif // __MACHO_REBASER__