/*
 * Copyright 2011, Michael Lotz <mmlr@mlotz.ch>.
 * Distributed under the terms of the MIT License.
 */


#include "malloc_debug_api.h"

#include <signal.h>
#include <stdarg.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#include <locks.h>

#include <libroot_private.h>
#include <runtime_loader.h>

#include <OS.h>
#include <TLS.h>

// #pragma mark - Debug Helpers


static const size_t kMaxStackTraceDepth = 50;

static bool sDebuggerCalls = true;
static bool sDumpAllocationsOnExit = false;
static size_t sStackTraceDepth = 0;
static int32 sStackBaseTLSIndex = -1;
static int32 sStackEndTLSIndex = -1;

#if __cplusplus >= 201103L
static size_t sDefaultAlignment = alignof(max_align_t);
#else
static size_t sDefaultAlignment = 8;
#endif

static void
panic(const char* format, ...)
{
	char buffer[1024];

	va_list args;
	va_start(args, format);
	vsnprintf(buffer, sizeof(buffer), format, args);
	va_end(args);

	if (sDebuggerCalls)
		debugger(buffer);
	else
		write(STDERR_FILENO, buffer, strlen(buffer));
}


static void
print_stdout(const char* format, ...)
{
	// To avoid any allocations due to the dynamic memory needed by printf(),
	// we use a stack buffer and vsnprintf(). Otherwise this does the same as
	// printf().
	char buffer[1024];

	va_list args;
	va_start(args, format);
	vsnprintf(buffer, sizeof(buffer), format, args);
	va_end(args);

	write(STDOUT_FILENO, buffer, strlen(buffer));
}

// #pragma mark - Linked List


#define GET_ITEM(list, item) ((void *)((uint8 *)item - list->offset))
#define GET_LINK(list, item) ((list_link *)((uint8 *)item + list->offset))

struct list_link {
	list_link*	next;
	list_link*	prev;
};

struct list {
	list_link	link;
	int32		offset;
};


static inline void
list_remove_item(struct list* list, void* item)
{
	list_link* link = GET_LINK(list, item);

	link->next->prev = link->prev;
	link->prev->next = link->next;
}


static inline void
list_add_item(struct list* list, void* item)
{
	list_link* link = GET_LINK(list, item);

	link->next = &list->link;
	link->prev = list->link.prev;

	list->link.prev->next = link;
	list->link.prev = link;
}


static inline void*
list_get_next_item(struct list* list, void* item)
{
	if (item == NULL) {
		if (list->link.next == (list_link*)list)
			return NULL;

		return GET_ITEM(list, list->link.next);
	}

	list_link* link = GET_LINK(list, item);
	if (link->next == &list->link)
		return NULL;

	return GET_ITEM(list, link->next);
}


static inline void
list_init_etc(struct list* list, int32 offset)
{
	list->link.next = list->link.prev = &list->link;
	list->offset = offset;
}
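
/*
 * Illustrative sketch (not part of the original file): the list above is
 * intrusive, i.e. the linked element embeds a list_link and the list stores
 * the byte offset of that link. A hypothetical element type would be hooked
 * up like this:
 *
 *	struct my_element {
 *		int			value;
 *		list_link	link;
 *	};
 *
 *	struct list myList;
 *	list_init_etc(&myList, offsetof(my_element, link));
 *
 *	my_element element;
 *	list_add_item(&myList, &element);
 *		// GET_LINK() adds the offset to reach element.link,
 *		// GET_ITEM() subtracts it to get back to the element.
 *
 * The guarded heap uses exactly this pattern for its per-area free list of
 * guarded_heap_page objects (see guarded_heap_area_init() below).
 */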

// #pragma mark - Guarded Heap


#define GUARDED_HEAP_PAGE_FLAG_USED		0x01
#define GUARDED_HEAP_PAGE_FLAG_FIRST	0x02
#define GUARDED_HEAP_PAGE_FLAG_GUARD	0x04
#define GUARDED_HEAP_PAGE_FLAG_DEAD		0x08
#define GUARDED_HEAP_PAGE_FLAG_AREA		0x10

#define GUARDED_HEAP_INITIAL_SIZE			1 * 1024 * 1024
#define GUARDED_HEAP_GROW_SIZE				2 * 1024 * 1024
#define GUARDED_HEAP_AREA_USE_THRESHOLD		1 * 1024 * 1024

struct guarded_heap;


struct guarded_heap_page {
	uint32				flags;
	size_t				allocation_size;
	void*				allocation_base;
	size_t				alignment;
	thread_id			thread;
	list_link			free_list_link;
	size_t				alloc_stack_trace_depth;
	size_t				free_stack_trace_depth;
	addr_t				stack_trace[kMaxStackTraceDepth];
};

struct guarded_heap_area {
	guarded_heap*		heap;
	guarded_heap_area*	next;
	area_id				area;
	addr_t				base;
	size_t				size;
	size_t				page_count;
	size_t				used_pages;
	mutex				lock;
	struct list			free_list;
	guarded_heap_page	pages[0];
};

struct guarded_heap {
	rw_lock				lock;
	size_t				page_count;
	size_t				used_pages;
	uint32				area_creation_counter;
	bool				reuse_memory;
	guarded_heap_area*	areas;
};


static guarded_heap sGuardedHeap = {
	RW_LOCK_INITIALIZER("guarded heap lock"),
	0, 0, 0, true, NULL
};


static void dump_guarded_heap_page(void* address, bool doPanic = false);

static void
guarded_heap_segfault_handler(int signal, siginfo_t* signalInfo, void* vregs)
{
	if (signal != SIGSEGV)
		return;

	if (signalInfo->si_code != SEGV_ACCERR) {
		// Not caused by a protection violation on one of our guarded pages.
		panic("generic segfault");
		return;
	}

	dump_guarded_heap_page(signalInfo->si_addr, true);

	exit(-1);
}


static void
guarded_heap_page_protect(guarded_heap_area& area, size_t pageIndex,
	uint32 protection)
{
	addr_t address = area.base + pageIndex * B_PAGE_SIZE;
	mprotect((void*)address, B_PAGE_SIZE, protection);
}
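
/*
 * Illustrative note (not from the original file): guard pages and freed pages
 * are protected with no access rights at all, so the very first touch of such
 * a page triggers SIGSEGV with si_code == SEGV_ACCERR, which the handler
 * above turns into a descriptive report. A minimal sketch of the idea:
 *
 *	mprotect(pageAddress, B_PAGE_SIZE, 0);
 *		// any read or write of this page now faults
 *	mprotect(pageAddress, B_PAGE_SIZE, B_READ_AREA | B_WRITE_AREA);
 *		// page usable again
 */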

static void
guarded_heap_print_stack_trace(addr_t stackTrace[], size_t depth)
{
	char* imageName;
	char* symbolName;
	void* location;
	bool exactMatch;

	for (size_t i = 0; i < depth; i++) {
		addr_t address = stackTrace[i];

		status_t status = __gRuntimeLoader->get_nearest_symbol_at_address(
			(void*)address, NULL, NULL, &imageName, &symbolName, NULL,
			&location, &exactMatch);
		if (status != B_OK) {
			print_stdout("\t%#" B_PRIxADDR " (lookup failed: %s)\n", address,
				strerror(status));
			continue;
		}

		print_stdout("\t<%s> %s + %#" B_PRIxADDR "%s\n", imageName, symbolName,
			address - (addr_t)location, exactMatch ? "" : " (nearest)");
	}
}

static void
guarded_heap_print_stack_traces(guarded_heap_page& page)
{
	if (page.alloc_stack_trace_depth > 0) {
		printf("alloc stack trace (%" B_PRIuSIZE "):\n",
			page.alloc_stack_trace_depth);
		guarded_heap_print_stack_trace(page.stack_trace,
			page.alloc_stack_trace_depth);
	}

	if (page.free_stack_trace_depth > 0) {
		printf("free stack trace (%" B_PRIuSIZE "):\n",
			page.free_stack_trace_depth);
		guarded_heap_print_stack_trace(
			&page.stack_trace[page.alloc_stack_trace_depth],
			page.free_stack_trace_depth);
	}
}

static size_t
guarded_heap_fill_stack_trace(addr_t stackTrace[], size_t maxDepth,
	size_t skipFrames)
{
	if (maxDepth == 0)
		return 0;

	void** stackBase = tls_address(sStackBaseTLSIndex);
	void** stackEnd = tls_address(sStackEndTLSIndex);
	if (*stackBase == NULL || *stackEnd == NULL) {
		thread_info threadInfo;
		status_t result = get_thread_info(find_thread(NULL), &threadInfo);
		if (result != B_OK)
			return 0;

		*stackBase = (void*)threadInfo.stack_base;
		*stackEnd = (void*)threadInfo.stack_end;
	}

	int32 traceDepth = __arch_get_stack_trace(stackTrace, maxDepth, skipFrames,
		(addr_t)*stackBase, (addr_t)*stackEnd);

	return traceDepth < 0 ? 0 : traceDepth;
}
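
/*
 * Illustrative note (not from the original file): the stack bounds are cached
 * in two TLS slots because this code runs inside malloc()/free() and must not
 * allocate memory itself. Per thread, only the first captured trace pays for a
 * get_thread_info() call; every later trace just reads the two slots:
 *
 *	void** stackBase = tls_address(sStackBaseTLSIndex);	// cheap TLS access
 *		// *stackBase is filled once and then reused for all later traces.
 */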

static void
guarded_heap_page_allocate(guarded_heap_area& area, size_t startPageIndex,
	size_t pagesNeeded, size_t allocationSize, size_t alignment,
	void* allocationBase)
{
	if (pagesNeeded < 2) {
		panic("need to allocate at least 2 pages, one for guard\n");
		return;
	}

	guarded_heap_page* firstPage = NULL;
	for (size_t i = 0; i < pagesNeeded; i++) {
		guarded_heap_page& page = area.pages[startPageIndex + i];
		page.flags = GUARDED_HEAP_PAGE_FLAG_USED;
		if (i == 0) {
			page.thread = find_thread(NULL);
			page.allocation_size = allocationSize;
			page.allocation_base = allocationBase;
			page.alignment = alignment;
			page.flags |= GUARDED_HEAP_PAGE_FLAG_FIRST;
			page.alloc_stack_trace_depth = guarded_heap_fill_stack_trace(
				page.stack_trace, sStackTraceDepth, 2);
			page.free_stack_trace_depth = 0;
			firstPage = &page;
		} else {
			page.thread = firstPage->thread;
			page.allocation_size = allocationSize;
			page.allocation_base = allocationBase;
			page.alignment = alignment;
			page.alloc_stack_trace_depth = 0;
			page.free_stack_trace_depth = 0;
		}

		list_remove_item(&area.free_list, &page);

		if (i == pagesNeeded - 1) {
			page.flags |= GUARDED_HEAP_PAGE_FLAG_GUARD;
			guarded_heap_page_protect(area, startPageIndex + i, 0);
		} else {
			guarded_heap_page_protect(area, startPageIndex + i,
				B_READ_AREA | B_WRITE_AREA);
		}
	}
}
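
/*
 * Illustrative example (not from the original file): every allocation gets at
 * least one payload page plus one guard page that is mapped with no access.
 * For a 3000 byte allocation with 8 byte alignment on 4096 byte pages:
 *
 *	pagesNeeded = 2						// 1 payload page + 1 guard page
 *	page 0: USED | FIRST, read/write	// holds the 3000 payload bytes
 *	page 1: USED | GUARD, no access		// the first byte past the buffer
 *										// already faults here
 *
 * The payload is pushed towards the end of its last page (see
 * guarded_heap_area_allocate() below), so small overruns hit the guard page
 * immediately instead of scribbling over padding.
 */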

static void
guarded_heap_free_page(guarded_heap_area& area, size_t pageIndex,
	bool force = false)
{
	guarded_heap_page& page = area.pages[pageIndex];

	if (area.heap->reuse_memory || force)
		page.flags = 0;
	else
		page.flags |= GUARDED_HEAP_PAGE_FLAG_DEAD;

	page.thread = find_thread(NULL);

	list_add_item(&area.free_list, &page);

	guarded_heap_page_protect(area, pageIndex, 0);
}

static void
guarded_heap_pages_allocated(guarded_heap& heap, size_t pagesAllocated)
{
	atomic_add((int32*)&heap.used_pages, pagesAllocated);
}

static void*
guarded_heap_area_allocate(guarded_heap_area& area, size_t pagesNeeded,
	size_t size, size_t alignment)
{
	if (pagesNeeded > area.page_count - area.used_pages)
		return NULL;

	// We use the free list this way so that the page that has been free for
	// the longest time is allocated. This keeps immediate re-use (that may
	// hide bugs) to a minimum.
	guarded_heap_page* page
		= (guarded_heap_page*)list_get_next_item(&area.free_list, NULL);

	for (; page != NULL;
		page = (guarded_heap_page*)list_get_next_item(&area.free_list, page)) {

		if ((page->flags & GUARDED_HEAP_PAGE_FLAG_USED) != 0)
			continue;

		size_t pageIndex = page - area.pages;
		if (pageIndex > area.page_count - pagesNeeded)
			continue;

		// Candidate, check if we have enough pages going forward
		// (including the guard page).
		bool candidate = true;
		for (size_t j = 1; j < pagesNeeded; j++) {
			if ((area.pages[pageIndex + j].flags & GUARDED_HEAP_PAGE_FLAG_USED)
					!= 0) {
				candidate = false;
				break;
			}
		}

		if (!candidate)
			continue;

		size_t offset = size & (B_PAGE_SIZE - 1);
		void* result = (void*)((area.base + pageIndex * B_PAGE_SIZE
			+ (offset > 0 ? B_PAGE_SIZE - offset : 0)) & ~(alignment - 1));

		guarded_heap_page_allocate(area, pageIndex, pagesNeeded, size,
			alignment, result);

		area.used_pages += pagesNeeded;
		guarded_heap_pages_allocated(*area.heap, pagesNeeded);
		return result;
	}

	return NULL;
}
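
/*
 * Illustrative example (not from the original file) of the placement math
 * above, assuming B_PAGE_SIZE == 4096:
 *
 *	size = 5000, alignment = 8
 *	pagesNeeded = 3						// 2 payload pages + 1 guard page
 *	offset = 5000 % 4096 = 904
 *	result = pageStart + (4096 - 904) = pageStart + 3192	// 8 byte aligned
 *
 * result + 5000 then ends exactly on the page boundary in front of the guard
 * page, so the first out-of-bounds byte faults. Iterating the free list from
 * its head also means the least recently freed pages are picked first, which
 * keeps freed memory protected for as long as possible.
 */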

static bool
guarded_heap_area_init(guarded_heap& heap, area_id id, void* baseAddress,
	size_t size)
{
	guarded_heap_area* area = (guarded_heap_area*)baseAddress;
	area->heap = &heap;
	area->area = id;
	area->size = size;
	area->page_count = area->size / B_PAGE_SIZE;
	area->used_pages = 0;

	size_t pagesNeeded = (sizeof(guarded_heap_area)
		+ area->page_count * sizeof(guarded_heap_page)
		+ B_PAGE_SIZE - 1) / B_PAGE_SIZE;

	area->page_count -= pagesNeeded;
	area->size = area->page_count * B_PAGE_SIZE;
	area->base = (addr_t)baseAddress + pagesNeeded * B_PAGE_SIZE;

	mutex_init(&area->lock, "guarded_heap_area_lock");

	list_init_etc(&area->free_list,
		offsetof(guarded_heap_page, free_list_link));

	for (size_t i = 0; i < area->page_count; i++)
		guarded_heap_free_page(*area, i, true);

	area->next = heap.areas;
	heap.areas = area;
	heap.page_count += area->page_count;
	heap.area_creation_counter++;

	return true;
}

static bool
guarded_heap_area_create(guarded_heap& heap, size_t size)
{
	for (size_t trySize = size; trySize >= 1 * 1024 * 1024;
		trySize /= 2) {

		void* baseAddress = NULL;
		area_id id = create_area("guarded_heap_area", &baseAddress,
			B_ANY_ADDRESS, trySize, B_NO_LOCK, B_READ_AREA | B_WRITE_AREA);
		if (id < 0)
			continue;

		if (guarded_heap_area_init(heap, id, baseAddress, trySize))
			return true;

		delete_area(id);
	}

	panic("failed to allocate a new heap area");
	return false;
}

static bool
guarded_heap_add_area(guarded_heap& heap, uint32 counter)
{
	WriteLocker areaListWriteLocker(heap.lock);
	if (heap.area_creation_counter != counter)
		return true;

	return guarded_heap_area_create(heap, GUARDED_HEAP_GROW_SIZE);
}
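
/*
 * Illustrative note (not from the original file): the creation counter guards
 * against a thundering herd of area creations. Several threads may fail their
 * allocation concurrently and all call guarded_heap_add_area() with the
 * counter value they saw while holding the read lock. Only the first one to
 * get the write lock still sees a matching counter and actually grows the
 * heap; the others return early and simply retry their allocation against the
 * freshly added area.
 */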

static void*
guarded_heap_allocate(guarded_heap& heap, size_t size, size_t alignment)
{
	if (alignment == 0)
		alignment = 1;

	if (alignment > B_PAGE_SIZE) {
		panic("alignment of %" B_PRIuSIZE " not supported", alignment);
		return NULL;
	}

	size_t pagesNeeded = (size + B_PAGE_SIZE - 1) / B_PAGE_SIZE + 1;
	if (pagesNeeded * B_PAGE_SIZE >= GUARDED_HEAP_AREA_USE_THRESHOLD) {
		// Don't bother, use an area directly. Since it will also fault once
		// it is deleted, that fits our model quite nicely.

		pagesNeeded = (size + sizeof(guarded_heap_page) + B_PAGE_SIZE - 1)
			/ B_PAGE_SIZE;

		void* address = NULL;
		area_id area = create_area("guarded_heap_huge_allocation", &address,
			B_ANY_ADDRESS, (pagesNeeded + 1) * B_PAGE_SIZE, B_NO_LOCK,
			B_READ_AREA | B_WRITE_AREA);
		if (area < 0) {
			panic("failed to create area for allocation of %" B_PRIuSIZE
				" pages", pagesNeeded);
			return NULL;
		}

		// We just use a page object
		guarded_heap_page* page = (guarded_heap_page*)address;
		page->flags = GUARDED_HEAP_PAGE_FLAG_USED
			| GUARDED_HEAP_PAGE_FLAG_FIRST | GUARDED_HEAP_PAGE_FLAG_AREA;
		page->allocation_size = size;
		page->allocation_base = (void*)(((addr_t)address
			+ pagesNeeded * B_PAGE_SIZE - size) & ~(alignment - 1));
		page->alignment = alignment;
		page->thread = find_thread(NULL);
		page->alloc_stack_trace_depth = guarded_heap_fill_stack_trace(
			page->stack_trace, sStackTraceDepth, 2);
		page->free_stack_trace_depth = 0;

		mprotect((void*)((addr_t)address + pagesNeeded * B_PAGE_SIZE),
			B_PAGE_SIZE, 0);

		return page->allocation_base;
	}

	void* result = NULL;

	ReadLocker areaListReadLocker(heap.lock);
	for (guarded_heap_area* area = heap.areas; area != NULL;
		area = area->next) {

		MutexLocker locker(area->lock);
		result = guarded_heap_area_allocate(*area, pagesNeeded, size,
			alignment);
		if (result != NULL)
			break;
	}

	uint32 counter = heap.area_creation_counter;
	areaListReadLocker.Unlock();

	if (result == NULL) {
		guarded_heap_add_area(heap, counter);
		return guarded_heap_allocate(heap, size, alignment);
	}

	if (result == NULL)
		panic("ran out of memory");

	return result;
}
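
/*
 * Illustrative example (not from the original file): with the 1 MB threshold
 * above, an 8 MB malloc() is not carved out of a guarded heap area at all.
 * Instead it gets its own area of pagesNeeded + 1 pages: a guarded_heap_page
 * bookkeeping object at the start of the area, the payload pushed against the
 * end of the last payload page, and a final no-access page as the guard.
 * Freeing it simply deletes the area, so any later access through the stale
 * pointer faults as well.
 */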

static guarded_heap_area*
guarded_heap_get_locked_area_for(guarded_heap& heap, void* address)
{
	ReadLocker areaListReadLocker(heap.lock);
	for (guarded_heap_area* area = heap.areas; area != NULL;
		area = area->next) {

		if ((addr_t)address < area->base)
			continue;

		if ((addr_t)address >= area->base + area->size)
			continue;

		mutex_lock(&area->lock);
		return area;
	}

	return NULL;
}

static size_t
guarded_heap_area_page_index_for(guarded_heap_area& area, void* address)
{
	size_t pageIndex = ((addr_t)address - area.base) / B_PAGE_SIZE;
	guarded_heap_page& page = area.pages[pageIndex];
	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_USED) == 0) {
		panic("tried to free %p which points at page %" B_PRIuSIZE
			" which is not marked in use", address, pageIndex);
		return area.page_count;
	}

	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_GUARD) != 0) {
		panic("tried to free %p which points at page %" B_PRIuSIZE
			" which is a guard page", address, pageIndex);
		return area.page_count;
	}

	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_FIRST) == 0) {
		panic("tried to free %p which points at page %" B_PRIuSIZE
			" which is not an allocation first page", address, pageIndex);
		return area.page_count;
	}

	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_DEAD) != 0) {
		panic("tried to free %p which points at page %" B_PRIuSIZE
			" which is a dead page", address, pageIndex);
		return area.page_count;
	}

	return pageIndex;
}

static bool
guarded_heap_area_free(guarded_heap_area& area, void* address)
{
	size_t pageIndex = guarded_heap_area_page_index_for(area, address);
	if (pageIndex >= area.page_count)
		return false;

	size_t pagesFreed = 0;
	guarded_heap_page* page = &area.pages[pageIndex];
	while ((page->flags & GUARDED_HEAP_PAGE_FLAG_GUARD) == 0) {
		// Mark the allocation page as free.
		guarded_heap_free_page(area, pageIndex);
		if (pagesFreed == 0 && sStackTraceDepth > 0) {
			size_t freeEntries
				= kMaxStackTraceDepth - page->alloc_stack_trace_depth;

			page->free_stack_trace_depth = guarded_heap_fill_stack_trace(
				&page->stack_trace[page->alloc_stack_trace_depth],
				min_c(freeEntries, sStackTraceDepth), 2);
		}

		pagesFreed++;
		pageIndex++;
		page = &area.pages[pageIndex];
	}

	// Mark the guard page as free as well.
	guarded_heap_free_page(area, pageIndex);
	pagesFreed++;

	if (area.heap->reuse_memory) {
		area.used_pages -= pagesFreed;
		atomic_add((int32*)&area.heap->used_pages, -pagesFreed);
	}

	return true;
}

static guarded_heap_page*
guarded_heap_area_allocation_for(void* address, area_id& allocationArea)
{
	allocationArea = area_for(address);
	if (allocationArea < 0)
		return NULL;

	area_info areaInfo;
	if (get_area_info(allocationArea, &areaInfo) != B_OK)
		return NULL;

	guarded_heap_page* page = (guarded_heap_page*)areaInfo.address;
	if (page->flags != (GUARDED_HEAP_PAGE_FLAG_USED
			| GUARDED_HEAP_PAGE_FLAG_FIRST | GUARDED_HEAP_PAGE_FLAG_AREA)) {
		return NULL;
	}

	if (page->allocation_base != address)
		return NULL;
	if (page->allocation_size >= areaInfo.size)
		return NULL;

	return page;
}


static bool
guarded_heap_free_area_allocation(void* address)
{
	area_id allocationArea;
	if (guarded_heap_area_allocation_for(address, allocationArea) == NULL)
		return false;

	delete_area(allocationArea);
	return true;
}

static bool
guarded_heap_free(void* address)
{
	if (address == NULL)
		return true;

	guarded_heap_area* area = guarded_heap_get_locked_area_for(sGuardedHeap,
		address);
	if (area == NULL)
		return guarded_heap_free_area_allocation(address);

	MutexLocker locker(area->lock, true);
	return guarded_heap_area_free(*area, address);
}

static void*
guarded_heap_realloc(void* address, size_t newSize)
{
	guarded_heap_area* area = guarded_heap_get_locked_area_for(sGuardedHeap,
		address);

	size_t oldSize;
	area_id allocationArea = -1;
	if (area != NULL) {
		MutexLocker locker(area->lock, true);
		size_t pageIndex = guarded_heap_area_page_index_for(*area, address);
		if (pageIndex >= area->page_count)
			return NULL;

		guarded_heap_page& page = area->pages[pageIndex];
		oldSize = page.allocation_size;
	} else {
		guarded_heap_page* page = guarded_heap_area_allocation_for(address,
			allocationArea);
		if (page == NULL)
			return NULL;

		oldSize = page->allocation_size;
	}

	if (oldSize == newSize)
		return address;

	void* newBlock = guarded_heap_allocate(sGuardedHeap, newSize,
		sDefaultAlignment);
	if (newBlock == NULL)
		return NULL;

	memcpy(newBlock, address, min_c(oldSize, newSize));

	if (allocationArea >= 0)
		delete_area(allocationArea);
	else {
		MutexLocker locker(area->lock);
		guarded_heap_area_free(*area, address);
	}

	return newBlock;
}

// #pragma mark - Debugger commands


static void
dump_guarded_heap_page(guarded_heap_page& page)
{
	printf("flags:");
	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_USED) != 0)
		printf(" used");
	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_FIRST) != 0)
		printf(" first");
	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_GUARD) != 0)
		printf(" guard");
	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_DEAD) != 0)
		printf(" dead");
	printf("\n");

	printf("allocation size: %" B_PRIuSIZE "\n", page.allocation_size);
	printf("allocation base: %p\n", page.allocation_base);
	printf("alignment: %" B_PRIuSIZE "\n", page.alignment);
	printf("allocating thread: %" B_PRId32 "\n", page.thread);
}

static void
dump_guarded_heap_page(void* address, bool doPanic)
{
	// Find the area that contains this page.
	guarded_heap_area* area = NULL;
	for (guarded_heap_area* candidate = sGuardedHeap.areas; candidate != NULL;
		candidate = candidate->next) {

		if ((addr_t)address < candidate->base)
			continue;
		if ((addr_t)address >= candidate->base + candidate->size)
			continue;

		area = candidate;
		break;
	}

	if (area == NULL) {
		panic("didn't find area for address %p\n", address);
		return;
	}

	size_t pageIndex = ((addr_t)address - area->base) / B_PAGE_SIZE;
	guarded_heap_page& page = area->pages[pageIndex];
	dump_guarded_heap_page(page);

	// Find the first page and dump the stack traces.
	for (ssize_t candidateIndex = (ssize_t)pageIndex;
		sStackTraceDepth > 0 && candidateIndex >= 0; candidateIndex--) {
		guarded_heap_page& candidate = area->pages[candidateIndex];
		if ((candidate.flags & GUARDED_HEAP_PAGE_FLAG_FIRST) == 0)
			continue;

		guarded_heap_print_stack_traces(candidate);
		break;
	}

	if (doPanic) {
		// Note: we do this the complicated way because we absolutely don't
		// want any character conversion to happen that might provoke other
		// segfaults in the locale backend. Therefore we avoid using any string
		// formats, resulting in the mess below.

#define DO_PANIC(state) \
	panic("thread %" B_PRId32 " tried accessing address %p which is " \
		state " (base: 0x%" B_PRIxADDR ", size: %" B_PRIuSIZE \
		", alignment: %" B_PRIuSIZE ", allocated by thread: %" \
		B_PRId32 ")", find_thread(NULL), address, \
		page.allocation_base, page.allocation_size, page.alignment, \
		page.thread)

		if ((page.flags & GUARDED_HEAP_PAGE_FLAG_USED) == 0)
			DO_PANIC("not allocated");
		else if ((page.flags & GUARDED_HEAP_PAGE_FLAG_GUARD) != 0)
			DO_PANIC("a guard page");
		else if ((page.flags & GUARDED_HEAP_PAGE_FLAG_DEAD) != 0)
			DO_PANIC("a dead page");
		else
			DO_PANIC("in an unknown state");

#undef DO_PANIC
	}
}

static void
dump_guarded_heap_area(guarded_heap_area& area)
{
	printf("guarded heap area: %p\n", &area);
	printf("next heap area: %p\n", area.next);
	printf("guarded heap: %p\n", area.heap);
	printf("area id: %" B_PRId32 "\n", area.area);
	printf("base: 0x%" B_PRIxADDR "\n", area.base);
	printf("size: %" B_PRIuSIZE "\n", area.size);
	printf("page count: %" B_PRIuSIZE "\n", area.page_count);
	printf("used pages: %" B_PRIuSIZE "\n", area.used_pages);
	printf("lock: %p\n", &area.lock);

	size_t freeCount = 0;
	void* item = list_get_next_item(&area.free_list, NULL);
	while (item != NULL) {
		freeCount++;

		if ((((guarded_heap_page*)item)->flags & GUARDED_HEAP_PAGE_FLAG_USED)
				!= 0) {
			printf("free list broken, page %p not actually free\n", item);
		}

		item = list_get_next_item(&area.free_list, item);
	}

	printf("free_list: %p (%" B_PRIuSIZE " free)\n", &area.free_list,
		freeCount);

	freeCount = 0;
	size_t runLength = 0;
	size_t longestRun = 0;
	for (size_t i = 0; i <= area.page_count; i++) {
		guarded_heap_page& page = area.pages[i];
		if (i == area.page_count
			|| (page.flags & GUARDED_HEAP_PAGE_FLAG_USED) != 0) {
			freeCount += runLength;
			if (runLength > longestRun)
				longestRun = runLength;
			runLength = 0;
			continue;
		}

		runLength = 1;
		for (size_t j = 1; j < area.page_count - i; j++) {
			if ((area.pages[i + j].flags & GUARDED_HEAP_PAGE_FLAG_USED) != 0)
				break;

			runLength++;
		}

		i += runLength - 1;
	}

	printf("longest free run: %" B_PRIuSIZE " (%" B_PRIuSIZE " free)\n",
		longestRun, freeCount);

	printf("pages: %p\n", area.pages);
}

static void
dump_guarded_heap(guarded_heap& heap)
{
	printf("guarded heap: %p\n", &heap);
	printf("rw lock: %p\n", &heap.lock);
	printf("page count: %" B_PRIuSIZE "\n", heap.page_count);
	printf("used pages: %" B_PRIuSIZE "\n", heap.used_pages);
	printf("area creation counter: %" B_PRIu32 "\n",
		heap.area_creation_counter);

	size_t areaCount = 0;
	guarded_heap_area* area = heap.areas;
	while (area != NULL) {
		areaCount++;
		area = area->next;
	}

	printf("areas: %p (%" B_PRIuSIZE ")\n", heap.areas, areaCount);
}

static void
dump_allocations(guarded_heap& heap, bool statsOnly, thread_id thread)
{
	WriteLocker heapLocker(heap.lock);

	size_t allocationCount = 0;
	size_t allocationSize = 0;
	for (guarded_heap_area* area = heap.areas; area != NULL;
		area = area->next) {

		MutexLocker areaLocker(area->lock);
		for (size_t i = 0; i < area->page_count; i++) {
			guarded_heap_page& page = area->pages[i];
			if ((page.flags & GUARDED_HEAP_PAGE_FLAG_FIRST) == 0
				|| (page.flags & GUARDED_HEAP_PAGE_FLAG_DEAD) != 0) {
				continue;
			}

			if (thread >= 0 && thread != page.thread)
				continue;

			allocationCount++;
			allocationSize += page.allocation_size;

			if (statsOnly)
				continue;

			print_stdout("allocation: base: %p; size: %" B_PRIuSIZE
				"; thread: %" B_PRId32 "; alignment: %" B_PRIuSIZE "\n",
				page.allocation_base, page.allocation_size, page.thread,
				page.alignment);

			guarded_heap_print_stack_trace(page.stack_trace,
				page.alloc_stack_trace_depth);
		}
	}

	print_stdout("total allocations: %" B_PRIuSIZE ", %" B_PRIuSIZE " bytes\n",
		allocationCount, allocationSize);
}


static void
dump_allocations_full()
{
	dump_allocations(sGuardedHeap, false, -1);
}

// #pragma mark - Heap Debug API


static void
guarded_heap_set_memory_reuse(bool enabled)
{
	sGuardedHeap.reuse_memory = enabled;
}


static void
guarded_heap_set_debugger_calls(bool enabled)
{
	sDebuggerCalls = enabled;
}


static void
guarded_heap_set_default_alignment(size_t defaultAlignment)
{
	sDefaultAlignment = defaultAlignment;
}


static void
guarded_heap_dump_allocations(bool statsOnly, thread_id thread)
{
	dump_allocations(sGuardedHeap, statsOnly, thread);
}

static void
guarded_heap_dump_heaps(bool dumpAreas, bool dumpBins)
{
	WriteLocker heapLocker(sGuardedHeap.lock);
	dump_guarded_heap(sGuardedHeap);
	if (!dumpAreas)
		return;

	for (guarded_heap_area* area = sGuardedHeap.areas; area != NULL;
		area = area->next) {
		MutexLocker areaLocker(area->lock);
		dump_guarded_heap_area(*area);

		if (!dumpBins)
			continue;

		for (size_t i = 0; i < area->page_count; i++) {
			dump_guarded_heap_page(area->pages[i]);
			if ((area->pages[i].flags & GUARDED_HEAP_PAGE_FLAG_FIRST) != 0)
				guarded_heap_print_stack_traces(area->pages[i]);
		}
	}
}


static void
guarded_heap_set_dump_allocations_on_exit(bool enabled)
{
	sDumpAllocationsOnExit = enabled;
}

static status_t
guarded_heap_set_stack_trace_depth(size_t stackTraceDepth)
{
	if (stackTraceDepth == 0) {
		sStackTraceDepth = 0;
		return B_OK;
	}

	// This is rather wasteful, but these are going to be filled lazily by each
	// thread on alloc/free. Therefore we cannot use a dynamic allocation and
	// just store a pointer to it. Since we only need to store two addresses,
	// we use two TLS slots and set them to point at the stack base/end.
	if (sStackBaseTLSIndex < 0) {
		sStackBaseTLSIndex = tls_allocate();
		if (sStackBaseTLSIndex < 0)
			return sStackBaseTLSIndex;
	}

	if (sStackEndTLSIndex < 0) {
		sStackEndTLSIndex = tls_allocate();
		if (sStackEndTLSIndex < 0)
			return sStackEndTLSIndex;
	}

	sStackTraceDepth = min_c(stackTraceDepth, kMaxStackTraceDepth);
	return B_OK;
}
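
/*
 * Illustrative usage sketch (not from the original file): a debugging session
 * would typically turn the knobs above before exercising the code under test,
 * for example:
 *
 *	guarded_heap_set_stack_trace_depth(20);
 *		// record up to 20 frames on every allocation and free
 *	guarded_heap_set_dump_allocations_on_exit(true);
 *		// report whatever is still allocated when the team exits
 *	guarded_heap_set_memory_reuse(false);
 *		// keep freed pages "dead" and protected forever, so even
 *		// long-delayed use-after-free bugs still fault
 *
 * In practice these hooks are reached through the malloc_debug wrapper that
 * installs __mallocGuardedHeap (see the heap_implementation table at the end
 * of this file), not by calling the static functions directly.
 */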

// #pragma mark - Init


static void
init_after_fork()
{
	// The memory has actually been copied (or is in a copy-on-write state),
	// but the area ids have changed.
	for (guarded_heap_area* area = sGuardedHeap.areas; area != NULL;
		area = area->next) {
		area->area = area_for(area);
		if (area->area < 0)
			panic("failed to find area for heap area %p after fork", area);
	}
}


static status_t
guarded_heap_init(void)
{
	if (!guarded_heap_area_create(sGuardedHeap, GUARDED_HEAP_INITIAL_SIZE))
		return B_ERROR;

	// Install a segfault handler so we can print some info before going down.
	struct sigaction action;
	action.sa_handler = (__sighandler_t)guarded_heap_segfault_handler;
	action.sa_flags = SA_SIGINFO;
	action.sa_userdata = NULL;
	sigemptyset(&action.sa_mask);
	sigaction(SIGSEGV, &action, NULL);

	atfork(&init_after_fork);
		// Note: Needs malloc(). Hence we need to be fully initialized.
		// TODO: We should actually also install a hook that is called before
		// fork() is being executed. In a multithreaded app it would need to
		// acquire *all* allocator locks, so that we don't fork() an
		// inconsistent state.

	return B_OK;
}


static void
guarded_heap_terminate_after()
{
	if (sDumpAllocationsOnExit)
		dump_allocations_full();
}


// #pragma mark - Public API


static void*
heap_memalign(size_t alignment, size_t size)
{
	if (size == 0)
		size = 1;

	return guarded_heap_allocate(sGuardedHeap, size, alignment);
}


static void*
heap_malloc(size_t size)
{
	return heap_memalign(sDefaultAlignment, size);
}


static void
heap_free(void* address)
{
	if (!guarded_heap_free(address))
		panic("free failed for address %p", address);
}


static void*
heap_realloc(void* address, size_t newSize)
{
	if (newSize == 0) {
		free(address);
		return NULL;
	}

	if (address == NULL)
		return heap_memalign(sDefaultAlignment, newSize);

	return guarded_heap_realloc(address, newSize);
}
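
/*
 * Illustrative example (not from the original file) of what this allocator
 * catches. Because allocations end right in front of a no-access guard page
 * and freed pages are protected again, both classic heap bugs fault
 * immediately instead of silently corrupting memory:
 *
 *	uint8* buffer = (uint8*)malloc(16);
 *	buffer[16] = 0;		// first byte past the allocation lands on the guard
 *						// page -> SIGSEGV, reported with a full page dump
 *	free(buffer);
 *	buffer[0] = 0;		// the freed page was mprotect()ed to no access
 *						// -> SIGSEGV as well (use-after-free)
 */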

heap_implementation __mallocGuardedHeap = {
	guarded_heap_init,
	guarded_heap_terminate_after,

	heap_memalign,
	heap_malloc,
	heap_free,
	heap_realloc,

	NULL,	// calloc
	NULL,	// valloc
	NULL,	// posix_memalign

	NULL,	// start_wall_checking
	NULL,	// stop_wall_checking
	NULL,	// set_paranoid_validation

	guarded_heap_set_memory_reuse,
	guarded_heap_set_debugger_calls,
	guarded_heap_set_default_alignment,

	NULL,	// validate_heaps
	NULL,	// validate_walls

	guarded_heap_dump_allocations,
	guarded_heap_dump_heaps,

	NULL,	// get_allocation_info

	guarded_heap_set_dump_allocations_on_exit,
	guarded_heap_set_stack_trace_depth
};