/*
 * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2003-2011, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2002, Manuel J. Petit. All rights reserved.
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */

#include <image_defs.h>

#include "runtime_loader_private.h"

#include <util/kernel_cpp.h>

// keep in sync with app ldscript
#ifdef __x86_64__
	// runtime_loader potentially occupies 0x200000 - 0x600000 due to large
	// page segment alignment.
#	define RLD_PROGRAM_BASE	0x600000
#	define MAX_PAGE_SIZE	0x200000
#else
#	define RLD_PROGRAM_BASE	0x200000
#	define MAX_PAGE_SIZE	B_PAGE_SIZE
#endif
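	// RLD_PROGRAM_BASE is the base address used for the first segment of a
	// relocatable image (see get_image_region_load_address()), and
	// MAX_PAGE_SIZE bounds the alignment slack tolerated between segments
	// in map_image().
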
bool gInvalidImageIDs;

static image_queue_t sLoadedImages = {0, 0};
static image_queue_t sDisposableImages = {0, 0};
static uint32 sLoadedImageCount = 0;
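	// sLoadedImages holds all images currently mapped into the team;
	// sDisposableImages holds images whose last reference has been put
	// (see put_image()) but whose structures haven't been deleted yet.
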
//! Remaps the image ID of \a image after fork.
static status_t
update_image_id(image_t* image)
{
	int32 cookie = 0;
	image_info info;
	while (_kern_get_next_image_info(B_CURRENT_TEAM, &cookie, &info,
			sizeof(image_info)) == B_OK) {
		for (uint32 i = 0; i < image->num_regions; i++) {
			if (image->regions[i].vmstart == (addr_t)info.text) {
				// matched the image by its text base -- adopt the new ID
				image->id = info.id;
				return B_OK;
			}
		}
	}

	FATAL("Could not update image ID %" B_PRId32 " after fork()!\n", image->id);
	return B_ENTRY_NOT_FOUND;
}

static void
enqueue_image(image_queue_t* queue, image_t* image)
{
	image->next = NULL;

	image->prev = queue->tail;
	if (queue->tail)
		queue->tail->next = image;

	queue->tail = image;
	if (!queue->head)
		queue->head = image;
}

static void
dequeue_image(image_queue_t* queue, image_t* image)
{
	if (image->next)
		image->next->prev = image->prev;
	else
		queue->tail = image->prev;

	if (image->prev)
		image->prev->next = image->next;
	else
		queue->head = image->next;

	image->prev = NULL;
	image->next = NULL;
}

static image_t*
find_image_in_queue(image_queue_t* queue, const char* name, bool isPath,
	uint32 typeMask)
{
	for (image_t* image = queue->head; image; image = image->next) {
		const char* imageName = isPath ? image->path : image->name;
		int length = isPath ? sizeof(image->path) : sizeof(image->name);

		if (!strncmp(imageName, name, length)
			&& (typeMask & IMAGE_TYPE_TO_MASK(image->type)) != 0) {
			return image;
		}
	}

	return NULL;
}

static void
update_image_flags_recursively(image_t* image, uint32 flagsToSet,
	uint32 flagsToClear)
{
	image_t* queue[sLoadedImageCount];
	uint32 count = 0;
	uint32 index = 0;
	queue[count++] = image;
	image->flags |= RFLAG_VISITED;

	while (index < count) {
		// pop next image
		image = queue[index++];

		// push dependencies
		for (uint32 i = 0; i < image->num_needed; i++) {
			image_t* needed = image->needed[i];
			if ((needed->flags & RFLAG_VISITED) == 0) {
				queue[count++] = needed;
				needed->flags |= RFLAG_VISITED;
			}
		}
	}

	// update flags
	for (uint32 i = 0; i < count; i++) {
		queue[i]->flags = (queue[i]->flags | flagsToSet)
			& ~(flagsToClear | RFLAG_VISITED);
	}
}
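	// The traversal above is a breadth-first walk over the dependency
	// graph: a fixed-size queue (sLoadedImageCount is an upper bound on the
	// number of reachable images) collects every image exactly once,
	// RFLAG_VISITED guards against revisiting shared dependencies, and a
	// final pass applies the flag changes and clears the visit marker again.
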
static uint32
topological_sort(image_t* image, uint32 slot, image_t** initList,
	uint32 sortFlag)
{
	if (image->flags & sortFlag)
		return slot;

	image->flags |= sortFlag; /* make sure we don't visit this one */
	for (uint32 i = 0; i < image->num_needed; i++)
		slot = topological_sort(image->needed[i], slot, initList, sortFlag);

	initList[slot] = image;
	return slot + 1;
}
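	// This is a post-order depth-first walk, so each image lands in
	// initList only after everything it depends on. For example, if A needs
	// B and C, and B also needs C, sorting from A yields {C, B, A}: C is
	// emitted once (the sortFlag guard skips it on the second visit),
	// then B, then A.
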
/*!	Finds the load address and address specifier of the given image region.
*/
static void
get_image_region_load_address(image_t* image, uint32 index, long lastDelta,
	bool fixed, addr_t& loadAddress, uint32& addressSpecifier)
{
	if (!fixed) {
		// relocatable image... we can afford to place wherever
		if (index == 0) {
			// but only the first segment gets a free ride
			loadAddress = RLD_PROGRAM_BASE;
			addressSpecifier = B_RANDOMIZED_BASE_ADDRESS;
		} else {
			loadAddress = image->regions[index].vmstart + lastDelta;
			addressSpecifier = B_EXACT_ADDRESS;
		}
	} else {
		// not relocatable, put it where it asks or die trying
		loadAddress = image->regions[index].vmstart;
		addressSpecifier = B_EXACT_ADDRESS;
	}
}
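	// For a relocatable image only the first region's placement is free
	// (and randomized); every later region is pinned at vmstart + lastDelta,
	// i.e. shifted by the same delta as its predecessor, so the relative
	// layout of the segments from the ELF file is preserved in memory.
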
image_t*
create_image(const char* name, const char* path, int regionCount)
{
	size_t allocSize = sizeof(image_t)
		+ (regionCount - 1) * sizeof(elf_region_t);

	image_t* image = (image_t*)malloc(allocSize);
	if (image == NULL) {
		FATAL("no memory for image %s\n", path);
		return NULL;
	}

	memset(image, 0, allocSize);

	strlcpy(image->path, path, sizeof(image->path));

	// Make the last component of the supplied name the image name.
	// If present, DT_SONAME will replace this name.
	const char* lastSlash = strrchr(name, '/');
	if (lastSlash != NULL)
		strlcpy(image->name, lastSlash + 1, sizeof(image->name));
	else
		strlcpy(image->name, name, sizeof(image->name));

	image->ref_count = 1;
	image->num_regions = regionCount;

	return image;
}
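	// The allocation size above relies on image_t ending in a one-element
	// elf_region_t array: regionCount - 1 further regions are appended to
	// the structure, so image->regions[0 .. regionCount - 1] are all valid.
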
void
delete_image_struct(image_t* image)
{
#ifdef DEBUG
	size_t size = sizeof(image_t)
		+ (image->num_regions - 1) * sizeof(elf_region_t);
	memset(image->needed, 0xa5, sizeof(image->needed[0]) * image->num_needed);
#endif
	free(image->needed);
	free(image->versions);

	while (RuntimeLoaderSymbolPatcher* patcher
			= image->defined_symbol_patchers) {
		image->defined_symbol_patchers = patcher->next;
		delete patcher;
	}

	while (RuntimeLoaderSymbolPatcher* patcher
			= image->undefined_symbol_patchers) {
		image->undefined_symbol_patchers = patcher->next;
		delete patcher;
	}

#ifdef DEBUG
	// overwrite images to make sure they aren't accidentally reused anywhere
	memset(image, 0xa5, size);
#endif

	free(image);
}

void
delete_image(image_t* image)
{
	_kern_unregister_image(image->id);
		// registered in load_container()

	delete_image_struct(image);
}

void
put_image(image_t* image)
{
	// If all references to the image are gone, add it to the disposable list
	// and remove all dependencies
	if (atomic_add(&image->ref_count, -1) == 1) {
		size_t i;

		dequeue_image(&sLoadedImages, image);
		enqueue_image(&sDisposableImages, image);
		sLoadedImageCount--;

		for (i = 0; i < image->num_needed; i++)
			put_image(image->needed[i]);
	}
}
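/*!	Maps all regions of \a image into the current team's address space:
	first a contiguous address range large enough for every region is
	reserved, then the individual regions are carved out of it via
	_kern_create_area() (anonymous regions) or _kern_map_file().
*/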
status_t
map_image(int fd, char const* path, image_t* image, bool fixed)
{
	// cut the file name from the path as base name for the created areas
	const char* baseName = strrchr(path, '/');
	if (baseName != NULL)
		baseName++;
	else
		baseName = path;

	// determine how much space we need for all loaded segments

	addr_t reservedAddress = 0;
	addr_t loadAddress;
	size_t reservedSize = 0;
	size_t length = 0;
	uint32 addressSpecifier = B_RANDOMIZED_ANY_ADDRESS;

	for (uint32 i = 0; i < image->num_regions; i++) {
		// for BeOS compatibility: if we load an old BeOS executable, we
		// have to relocate it, if possible - we recognize it because the
		// vmstart is set to 0 (hopefully always)
		if (fixed && image->regions[i].vmstart == 0)
			fixed = false;

		uint32 regionAddressSpecifier;
		get_image_region_load_address(image, i,
			i > 0 ? loadAddress - image->regions[i - 1].vmstart : 0,
			fixed, loadAddress, regionAddressSpecifier);
		if (i == 0) {
			reservedAddress = loadAddress;
			addressSpecifier = regionAddressSpecifier;
		}

		length += TO_PAGE_SIZE(image->regions[i].vmsize
			+ (loadAddress % B_PAGE_SIZE));

		size_t size = TO_PAGE_SIZE(loadAddress + image->regions[i].vmsize)
			- reservedAddress;
		if (size > reservedSize)
			reservedSize = size;
	}

	// Check whether the segments have an unreasonable amount of unused space
	// in between.
	if (reservedSize > length + MAX_PAGE_SIZE * 2)
		return B_BAD_DATA;

	// reserve that space and allocate the areas from that one
	if (_kern_reserve_address_range(&reservedAddress, addressSpecifier,
			reservedSize) != B_OK)
		return B_NO_MEMORY;

	for (uint32 i = 0; i < image->num_regions; i++) {
		char regionName[B_OS_NAME_LENGTH];

		snprintf(regionName, sizeof(regionName), "%s_seg%" B_PRIu32 "%s",
			baseName, i, (image->regions[i].flags & RFLAG_RW) ? "rw" : "ro");

		get_image_region_load_address(image, i,
			i > 0 ? image->regions[i - 1].delta : 0, fixed, loadAddress,
			addressSpecifier);

		// If the image position is arbitrary, we must let it point to the
		// start of the reserved address range.
		if (addressSpecifier != B_EXACT_ADDRESS)
			loadAddress = reservedAddress;

		if ((image->regions[i].flags & RFLAG_ANON) != 0) {
			image->regions[i].id = _kern_create_area(regionName,
				(void**)&loadAddress, B_EXACT_ADDRESS,
				image->regions[i].vmsize, B_NO_LOCK,
				B_READ_AREA | B_WRITE_AREA);

			if (image->regions[i].id < 0) {
				_kern_unreserve_address_range(reservedAddress, reservedSize);
				return image->regions[i].id;
			}
		} else {
			// Map all segments r/w first -- write access might be needed for
			// relocations. When we're done with those, we change the
			// protection of read-only segments back to read-only. We map
			// those segments over-committing, since quite likely only a
			// relatively small number of pages needs to be touched and we
			// want to avoid a lot of memory being committed for them
			// temporarily, just because we have to map them writable.
			uint32 protection = B_READ_AREA | B_WRITE_AREA
				| ((image->regions[i].flags & RFLAG_RW) != 0
					? 0 : B_OVERCOMMITTING_AREA);
			image->regions[i].id = _kern_map_file(regionName,
				(void**)&loadAddress, B_EXACT_ADDRESS,
				image->regions[i].vmsize, protection, REGION_PRIVATE_MAP,
				false, fd, PAGE_BASE(image->regions[i].fdstart));

			if (image->regions[i].id < 0) {
				_kern_unreserve_address_range(reservedAddress, reservedSize);
				return image->regions[i].id;
			}

			TRACE(("\"%s\" at %p, 0x%lx bytes (%s)\n", path,
				(void*)loadAddress, image->regions[i].vmsize,
				image->regions[i].flags & RFLAG_RW ? "rw" : "read-only"));

			// handle trailer bits in data segment
			if (image->regions[i].flags & RFLAG_RW) {
				addr_t startClearing = loadAddress
					+ PAGE_OFFSET(image->regions[i].start)
					+ image->regions[i].size;
				addr_t toClear = image->regions[i].vmsize
					- PAGE_OFFSET(image->regions[i].start)
					- image->regions[i].size;

				TRACE(("cleared 0x%lx and the following 0x%lx bytes\n",
					startClearing, toClear));
				memset((void*)startClearing, 0, toClear);
			}
		}

		image->regions[i].delta = loadAddress - image->regions[i].vmstart;
		image->regions[i].vmstart = loadAddress;
		if (i == 0) {
			TLSBlockTemplates::Get().SetBaseAddress(image->dso_tls_id,
				loadAddress);
		}
	}

	if (image->dynamic_ptr != 0)
		image->dynamic_ptr += image->regions[0].delta;

	return B_OK;
}

void
unmap_image(image_t* image)
{
	for (uint32 i = 0; i < image->num_regions; i++) {
		_kern_delete_area(image->regions[i].id);

		image->regions[i].id = -1;
	}
}

/*!	This function will change the protection of all read-only segments to
	really be read-only (and executable).
	The areas have to be read/write first, so that they can be relocated.
	If at least one image is in compatibility mode then we allow execution of
	read-only data segments.
*/
void
remap_images()
{
	for (image_t* image = sLoadedImages.head; image != NULL;
			image = image->next) {
		for (uint32 i = 0; i < image->num_regions; i++) {
			// we only need to do this once, so we remember those we've
			// already remapped
			if ((image->regions[i].flags & RFLAG_REMAPPED) != 0)
				continue;

			status_t result = B_OK;
			if ((image->regions[i].flags & RFLAG_RW) == 0) {
				result = _kern_set_area_protection(image->regions[i].id,
					B_READ_AREA | B_EXECUTE_AREA);
			} else if (image->abi < B_HAIKU_ABI_GCC_2_HAIKU) {
				result = _kern_set_area_protection(image->regions[i].id,
					B_READ_AREA | B_WRITE_AREA | B_EXECUTE_AREA);
			}

			if (result == B_OK)
				image->regions[i].flags |= RFLAG_REMAPPED;
		}
	}
}

void
register_image(image_t* image, int fd, const char* path)
{
	struct stat stat;
	extended_image_info info;

	// TODO: set these correctly
	info.basic_info.id = 0;
	info.basic_info.type = image->type;
	info.basic_info.sequence = 0;
	info.basic_info.init_order = 0;
	info.basic_info.init_routine = (void (*)())image->init_routine;
	info.basic_info.term_routine = (void (*)())image->term_routine;

	if (_kern_read_stat(fd, NULL, false, &stat, sizeof(struct stat)) == B_OK) {
		info.basic_info.device = stat.st_dev;
		info.basic_info.node = stat.st_ino;
	} else {
		info.basic_info.device = -1;
		info.basic_info.node = -1;
	}

	// We may have split segments into separate regions. Compute the correct
	// segments for the image info.
	addr_t textBase = 0;
	addr_t textEnd = 0;
	addr_t dataBase = 0;
	addr_t dataEnd = 0;
	for (uint32 i = 0; i < image->num_regions; i++) {
		addr_t base = image->regions[i].vmstart;
		addr_t end = base + image->regions[i].vmsize;
		if (image->regions[i].flags & RFLAG_RW) {
			// data region
			if (dataBase == 0) {
				dataBase = base;
				dataEnd = end;
			} else {
				dataBase = std::min(dataBase, base);
				dataEnd = std::max(dataEnd, end);
			}
		} else {
			// text region
			if (textBase == 0) {
				textBase = base;
				textEnd = end;
			} else {
				textBase = std::min(textBase, base);
				textEnd = std::max(textEnd, end);
			}
		}
	}

	strlcpy(info.basic_info.name, path, sizeof(info.basic_info.name));
	info.basic_info.text = (void*)textBase;
	info.basic_info.text_size = textEnd - textBase;
	info.basic_info.data = (void*)dataBase;
	info.basic_info.data_size = dataEnd - dataBase;
	info.basic_info.api_version = image->api_version;
	info.basic_info.abi = image->abi;
	info.text_delta = image->regions[0].delta;
	info.symbol_table = image->syms;
	info.symbol_hash = image->symhash;
	info.string_table = image->strtab;
	image->id = _kern_register_image(&info, sizeof(info));
}

//! After fork, we lazily rebuild the image IDs of all loaded images.
status_t
update_image_ids()
{
	for (image_t* image = sLoadedImages.head; image; image = image->next) {
		status_t status = update_image_id(image);
		if (status != B_OK)
			return status;
	}
	for (image_t* image = sDisposableImages.head; image; image = image->next) {
		status_t status = update_image_id(image);
		if (status != B_OK)
			return status;
	}

	gInvalidImageIDs = false;
	return B_OK;
}

image_queue_t&
get_loaded_images()
{
	return sLoadedImages;
}

image_queue_t&
get_disposable_images()
{
	return sDisposableImages;
}

uint32
count_loaded_images()
{
	return sLoadedImageCount;
}

void
enqueue_loaded_image(image_t* image)
{
	enqueue_image(&sLoadedImages, image);
	sLoadedImageCount++;
}

void
dequeue_loaded_image(image_t* image)
{
	dequeue_image(&sLoadedImages, image);
	sLoadedImageCount--;
}

void
dequeue_disposable_image(image_t* image)
{
	dequeue_image(&sDisposableImages, image);
}

image_t*
find_loaded_image_by_name(char const* name, uint32 typeMask)
{
	bool isPath = strchr(name, '/') != NULL;
	return find_image_in_queue(&sLoadedImages, name, isPath, typeMask);
}

image_t*
find_loaded_image_by_id(image_id id, bool ignoreDisposable)
{
	if (gInvalidImageIDs) {
		// After fork, we lazily rebuild the image IDs of all loaded images
		update_image_ids();
	}

	for (image_t* image = sLoadedImages.head; image; image = image->next) {
		if (image->id == id)
			return image;
	}

	if (ignoreDisposable)
		return NULL;

	for (image_t* image = sDisposableImages.head; image; image = image->next) {
		if (image->id == id)
			return image;
	}

	return NULL;
}

image_t*
find_loaded_image_by_address(addr_t address)
{
	for (image_t* image = sLoadedImages.head; image; image = image->next) {
		for (uint32 i = 0; i < image->num_regions; i++) {
			elf_region_t& region = image->regions[i];
			if (region.vmstart <= address
				&& region.vmstart - 1 + region.vmsize >= address)
				return image;
		}
	}

	return NULL;
}
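	// Note the region bounds check: the region end is computed as
	// vmstart - 1 + vmsize rather than vmstart + vmsize, so a region
	// reaching the very top of the address space doesn't wrap around to 0
	// and get skipped.
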
void
set_image_flags_recursively(image_t* image, uint32 flags)
{
	update_image_flags_recursively(image, flags, 0);
}

void
clear_image_flags_recursively(image_t* image, uint32 flags)
{
	update_image_flags_recursively(image, 0, flags);
}

/*!	Returns a topologically sorted image list.

	If \a image is non-NULL, an array containing the image and all its
	transitive dependencies is returned. If \a image is NULL, all loaded images
	are returned. In either case dependencies are listed before images
	depending on them.

	\param image The image specifying the tree of images that shall be sorted.
		If NULL, all loaded images are sorted.
	\param _list On success it will be set to an array of the sorted images.
		The caller is responsible for free()ing it.
	\param sortFlag The image flag that shall be used for sorting. Images that
		already have this flag set are ignored (and their dependencies, unless
		they are also reachable via another path). The flag will be set on all
		returned images.
	\return The number of images in the returned array or an error code on
		failure.
*/
ssize_t
get_sorted_image_list(image_t* image, image_t*** _list, uint32 sortFlag)
{
	image_t** list;

	list = (image_t**)malloc(sLoadedImageCount * sizeof(image_t*));
	if (list == NULL) {
		FATAL("memory shortage in get_sorted_image_list()");
		*_list = NULL;
		return B_NO_MEMORY;
	}

	memset(list, 0, sLoadedImageCount * sizeof(image_t*));

	*_list = list;

	if (image != NULL)
		return topological_sort(image, 0, list, sortFlag);

	// no image given -- sort all loaded images
	uint32 count = 0;
	image = sLoadedImages.head;
	while (image != NULL) {
		count = topological_sort(image, count, list, sortFlag);
		image = image->next;
	}

	return count;
}