/*
 * Copyright 2011, Michael Lotz <mmlr@mlotz.ch>.
 * Distributed under the terms of the MIT License.
 */


#include "malloc_debug_api.h"

#include <libroot_private.h>
#include <runtime_loader.h>
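
// This is the guarded heap of the malloc debug tools: every allocation is
// placed on its own set of pages and is followed by a guard page that is kept
// inaccessible via mprotect(). Buffer overruns and use-after-free therefore
// fault immediately instead of silently corrupting other allocations.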
// #pragma mark - Debug Helpers


static const size_t kMaxStackTraceDepth = 50;


static bool sDebuggerCalls = true;
static bool sDumpAllocationsOnExit = false;
static size_t sStackTraceDepth = 0;
static int32 sStackBaseTLSIndex = -1;
static int32 sStackEndTLSIndex = -1;
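	// The two TLS slots cache the current thread's stack base and end so that
	// stack traces can be captured without further lookups; they are allocated
	// lazily in guarded_heap_set_stack_trace_depth() and filled per thread in
	// guarded_heap_fill_stack_trace().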

#if __cplusplus >= 201103L
static size_t sDefaultAlignment = alignof(max_align_t);
#else
static size_t sDefaultAlignment = 8;
#endif

static void
panic(const char* format, ...)
{
	char buffer[1024];

	va_list args;
	va_start(args, format);
	vsnprintf(buffer, sizeof(buffer), format, args);
	va_end(args);

	if (sDebuggerCalls)
		debugger(buffer);
}

static void
print_stdout(const char* format, ...)
{
	// To avoid any allocations due to the dynamic memory needed by printf()
	// we use a stack buffer and vsnprintf(). Otherwise this does the same as
	// printf().
	char buffer[1024];

	va_list args;
	va_start(args, format);
	vsnprintf(buffer, sizeof(buffer), format, args);
	va_end(args);

	write(STDOUT_FILENO, buffer, strlen(buffer));
}

// #pragma mark - Linked List


#define GET_ITEM(list, item) ((void *)((uint8 *)item - list->offset))
#define GET_LINK(list, item) ((list_link *)((uint8 *)item + list->offset))
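	// Minimal intrusive doubly linked list: items embed a list_link at a
	// fixed offset and the macros above translate between an item pointer and
	// its embedded link using that offset. The list head acts as sentinel.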

static void
list_remove_item(struct list* list, void* item)
{
	list_link* link = GET_LINK(list, item);

	link->next->prev = link->prev;
	link->prev->next = link->next;
}

static void
list_add_item(struct list* list, void* item)
{
	list_link* link = GET_LINK(list, item);

	link->next = &list->link;
	link->prev = list->link.prev;

	list->link.prev->next = link;
	list->link.prev = link;
}

static void*
list_get_next_item(struct list* list, void* item)
{
	if (item == NULL) {
		if (list->link.next == (list_link*)list)
			return NULL;

		return GET_ITEM(list, list->link.next);
	}

	list_link* link = GET_LINK(list, item);
	if (link->next == &list->link)
		return NULL;

	return GET_ITEM(list, link->next);
}

static void
list_init_etc(struct list* list, int32 offset)
{
	list->link.next = list->link.prev = &list->link;
	list->offset = offset;
}

// #pragma mark - Guarded Heap


#define GUARDED_HEAP_PAGE_FLAG_USED		0x01
#define GUARDED_HEAP_PAGE_FLAG_FIRST	0x02
#define GUARDED_HEAP_PAGE_FLAG_GUARD	0x04
#define GUARDED_HEAP_PAGE_FLAG_DEAD		0x08
#define GUARDED_HEAP_PAGE_FLAG_AREA		0x10
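	// Page flags: USED marks a page that belongs to an allocation, FIRST
	// marks the page holding the allocation metadata and stack traces, GUARD
	// marks the inaccessible guard page at the end of an allocation, DEAD
	// marks a freed page that is kept protected when memory reuse is
	// disabled, and AREA marks a large allocation served from its own area.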

#define GUARDED_HEAP_INITIAL_SIZE			1 * 1024 * 1024
#define GUARDED_HEAP_GROW_SIZE				2 * 1024 * 1024
#define GUARDED_HEAP_AREA_USE_THRESHOLD		1 * 1024 * 1024
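	// The heap starts out with a single 1 MiB area and grows in 2 MiB steps.
	// Allocations that would occupy 1 MiB or more of pages bypass the heap
	// areas and are served from a dedicated area instead.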


struct guarded_heap_page {
	uint8				flags;
	size_t				allocation_size;
	void*				allocation_base;
	size_t				alignment;
	thread_id			allocating_thread;
	thread_id			freeing_thread;
	list_link			free_list_link;
	size_t				alloc_stack_trace_depth;
	size_t				free_stack_trace_depth;
	addr_t				stack_trace[kMaxStackTraceDepth];
};

struct guarded_heap;

struct guarded_heap_area {
	guarded_heap*		heap;
	guarded_heap_area*	next;
	area_id				area;
	addr_t				base;
	size_t				size;
	size_t				page_count;
	size_t				used_pages;
	mutex				lock;
	struct list			free_list;
	guarded_heap_page	pages[0];
};

struct guarded_heap {
	rw_lock				lock;
	size_t				page_count;
	size_t				used_pages;
	uint32				area_creation_counter;
	bool				reuse_memory;
	guarded_heap_area*	areas;
};
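
// The single global guarded heap instance; the rw_lock protects the area list
// while each area has its own mutex.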
static guarded_heap sGuardedHeap = {
	RW_LOCK_INITIALIZER("guarded heap lock"),
	0, 0, 0, true, NULL
};

static void dump_guarded_heap_page(void* address, bool doPanic = false);
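
// Any access to a protected (guard or dead) page ends up here; dump what is
// known about the faulting page before going down.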

static void
guarded_heap_segfault_handler(int signal, siginfo_t* signalInfo, void* vregs)
{
	if (signal != SIGSEGV)
		return;

	if (signalInfo->si_code != SEGV_ACCERR) {
		panic("generic segfault");
		return;
	}

	dump_guarded_heap_page(signalInfo->si_addr, true);
}

static void
guarded_heap_page_protect(guarded_heap_area& area, size_t pageIndex,
	uint32 protection)
{
	addr_t address = area.base + pageIndex * B_PAGE_SIZE;
	mprotect((void*)address, B_PAGE_SIZE, protection);
}

static void
guarded_heap_print_stack_trace(addr_t stackTrace[], size_t depth)
{
	char* imageName;
	char* symbolName;
	void* location;
	bool exactMatch;

	for (size_t i = 0; i < depth; i++) {
		addr_t address = stackTrace[i];

		status_t status = __gRuntimeLoader->get_nearest_symbol_at_address(
			(void*)address, NULL, NULL, &imageName, &symbolName, NULL,
			&location, &exactMatch);
		if (status != B_OK) {
			print_stdout("\t%#" B_PRIxADDR " (lookup failed: %s)\n", address,
				strerror(status));
			continue;
		}

		print_stdout("\t<%s> %s + %#" B_PRIxADDR "%s\n", imageName, symbolName,
			address - (addr_t)location, exactMatch ? "" : " (nearest)");
	}
}

static void
guarded_heap_print_stack_traces(guarded_heap_page& page)
{
	if (page.alloc_stack_trace_depth > 0) {
		print_stdout("alloc stack trace (%" B_PRIuSIZE "):\n",
			page.alloc_stack_trace_depth);
		guarded_heap_print_stack_trace(page.stack_trace,
			page.alloc_stack_trace_depth);
	}

	if (page.free_stack_trace_depth > 0) {
		print_stdout("free stack trace (%" B_PRIuSIZE "):\n",
			page.free_stack_trace_depth);
		guarded_heap_print_stack_trace(
			&page.stack_trace[page.alloc_stack_trace_depth],
			page.free_stack_trace_depth);
	}
}
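
// Captures a stack trace for the current thread. The thread's stack bounds
// are looked up once via get_thread_info() and cached in the two TLS slots.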

static size_t
guarded_heap_fill_stack_trace(addr_t stackTrace[], size_t maxDepth,
	size_t skipFrames)
{
	void** stackBase = tls_address(sStackBaseTLSIndex);
	void** stackEnd = tls_address(sStackEndTLSIndex);
	if (*stackBase == NULL || *stackEnd == NULL) {
		thread_info threadInfo;
		status_t result = get_thread_info(find_thread(NULL), &threadInfo);
		if (result != B_OK)
			return 0;

		*stackBase = (void*)threadInfo.stack_base;
		*stackEnd = (void*)threadInfo.stack_end;
	}

	int32 traceDepth = __arch_get_stack_trace(stackTrace, maxDepth, skipFrames,
		(addr_t)*stackBase, (addr_t)*stackEnd);

	return traceDepth < 0 ? 0 : traceDepth;
}
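
// Marks the run of pages as used: the first page carries the allocation
// metadata and the allocation stack trace, the last page becomes the
// inaccessible guard page, and all pages are pulled off the free list.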

static void
guarded_heap_page_allocate(guarded_heap_area& area, size_t startPageIndex,
	size_t pagesNeeded, size_t allocationSize, size_t alignment,
	void* allocationBase)
{
	if (pagesNeeded < 2) {
		panic("need to allocate at least 2 pages, one for guard\n");
		return;
	}

	guarded_heap_page* firstPage = NULL;
	for (size_t i = 0; i < pagesNeeded; i++) {
		guarded_heap_page& page = area.pages[startPageIndex + i];
		page.flags = GUARDED_HEAP_PAGE_FLAG_USED;
		if (i == 0) {
			page.allocating_thread = find_thread(NULL);
			page.freeing_thread = -1;
			page.allocation_size = allocationSize;
			page.allocation_base = allocationBase;
			page.alignment = alignment;
			page.flags |= GUARDED_HEAP_PAGE_FLAG_FIRST;
			page.alloc_stack_trace_depth = guarded_heap_fill_stack_trace(
				page.stack_trace, sStackTraceDepth, 2);
			page.free_stack_trace_depth = 0;
			firstPage = &page;
		} else {
			page.allocating_thread = firstPage->allocating_thread;
			page.freeing_thread = -1;
			page.allocation_size = allocationSize;
			page.allocation_base = allocationBase;
			page.alignment = alignment;
			page.alloc_stack_trace_depth = 0;
			page.free_stack_trace_depth = 0;
		}

		list_remove_item(&area.free_list, &page);

		if (i == pagesNeeded - 1) {
			page.flags |= GUARDED_HEAP_PAGE_FLAG_GUARD;
			guarded_heap_page_protect(area, startPageIndex + i, 0);
		} else {
			guarded_heap_page_protect(area, startPageIndex + i,
				B_READ_AREA | B_WRITE_AREA);
		}
	}
}
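
// Returns a page to the free list and protects it so that any further access
// faults. When memory reuse is disabled the page is marked dead instead of
// being cleared for reuse.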

static void
guarded_heap_free_page(guarded_heap_area& area, size_t pageIndex,
	bool force = false)
{
	guarded_heap_page& page = area.pages[pageIndex];

	if (area.heap->reuse_memory || force)
		page.flags = 0;
	else
		page.flags |= GUARDED_HEAP_PAGE_FLAG_DEAD;

	page.freeing_thread = find_thread(NULL);

	list_add_item(&area.free_list, &page);

	guarded_heap_page_protect(area, pageIndex, 0);
}

static void
guarded_heap_pages_allocated(guarded_heap& heap, size_t pagesAllocated)
{
	atomic_add((int32*)&heap.used_pages, pagesAllocated);
}
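
// Tries to satisfy an allocation from this area: walks the free list (oldest
// entries first) looking for a run of pagesNeeded unused pages. The returned
// pointer is placed so that the end of the allocation lines up with the guard
// page, which makes overruns fault right away.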

static void*
guarded_heap_area_allocate(guarded_heap_area& area, size_t pagesNeeded,
	size_t size, size_t alignment)
{
	if (pagesNeeded > area.page_count - area.used_pages)
		return NULL;

	// We use the free list this way so that the page that has been free for
	// the longest time is allocated. This keeps immediate re-use (that may
	// hide bugs) to a minimum.
	guarded_heap_page* page
		= (guarded_heap_page*)list_get_next_item(&area.free_list, NULL);

	for (; page != NULL;
		page = (guarded_heap_page*)list_get_next_item(&area.free_list, page)) {

		if ((page->flags & GUARDED_HEAP_PAGE_FLAG_USED) != 0)
			continue;

		size_t pageIndex = page - area.pages;
		if (pageIndex > area.page_count - pagesNeeded)
			continue;

		// Candidate, check if we have enough pages going forward
		// (including the guard page).
		bool candidate = true;
		for (size_t j = 1; j < pagesNeeded; j++) {
			if ((area.pages[pageIndex + j].flags & GUARDED_HEAP_PAGE_FLAG_USED)
					!= 0) {
				candidate = false;
				break;
			}
		}

		if (!candidate)
			continue;

		size_t offset = size & (B_PAGE_SIZE - 1);
		void* result = (void*)((area.base + pageIndex * B_PAGE_SIZE
			+ (offset > 0 ? B_PAGE_SIZE - offset : 0)) & ~(alignment - 1));

		guarded_heap_page_allocate(area, pageIndex, pagesNeeded, size,
			alignment, result);

		area.used_pages += pagesNeeded;
		guarded_heap_pages_allocated(*area.heap, pagesNeeded);
		return result;
	}

	return NULL;
}
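
// Sets up a freshly created area: the guarded_heap_area header and its page
// table live at the start of the area, the remaining pages become the guarded
// pages and are all put on the free list.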

static bool
guarded_heap_area_init(guarded_heap& heap, area_id id, void* baseAddress,
	size_t size)
{
	guarded_heap_area* area = (guarded_heap_area*)baseAddress;
	area->heap = &heap;
	area->area = id;
	area->size = size;
	area->page_count = area->size / B_PAGE_SIZE;
	area->used_pages = 0;

	size_t pagesNeeded = (sizeof(guarded_heap_area)
		+ area->page_count * sizeof(guarded_heap_page)
		+ B_PAGE_SIZE - 1) / B_PAGE_SIZE;

	area->page_count -= pagesNeeded;
	area->size = area->page_count * B_PAGE_SIZE;
	area->base = (addr_t)baseAddress + pagesNeeded * B_PAGE_SIZE;

	mutex_init(&area->lock, "guarded_heap_area_lock");

	list_init_etc(&area->free_list,
		offsetof(guarded_heap_page, free_list_link));

	for (size_t i = 0; i < area->page_count; i++)
		guarded_heap_free_page(*area, i, true);

	area->next = heap.areas;
	heap.areas = area;
	heap.page_count += area->page_count;

	return true;
}

static bool
guarded_heap_area_create(guarded_heap& heap, size_t size)
{
	for (size_t trySize = size; trySize >= 1 * 1024 * 1024;
		trySize /= 2) {

		void* baseAddress = NULL;
		area_id id = create_area("guarded_heap_area", &baseAddress,
			B_ANY_ADDRESS, trySize, B_NO_LOCK, B_READ_AREA | B_WRITE_AREA);
		if (id < 0)
			continue;

		if (guarded_heap_area_init(heap, id, baseAddress, trySize))
			return true;

		delete_area(id);
	}

	panic("failed to allocate a new heap area");
	return false;
}

static bool
guarded_heap_add_area(guarded_heap& heap, uint32 counter)
{
	WriteLocker areaListWriteLocker(heap.lock);
	if (heap.area_creation_counter != counter)
		return true;

	return guarded_heap_area_create(heap, GUARDED_HEAP_GROW_SIZE);
}
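
// Large or strongly aligned allocations get their own area. The allocation is
// placed at the end of the area so that the page following it (and, for big
// alignments, any unused pages around it) can be protected as guards.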

static void*
guarded_heap_allocate_with_area(size_t size, size_t alignment)
{
	size_t infoSpace = alignment >= B_PAGE_SIZE ? B_PAGE_SIZE
		: (sizeof(guarded_heap_page) + alignment - 1) & ~(alignment - 1);

	size_t pagesNeeded = (size + infoSpace + B_PAGE_SIZE - 1) / B_PAGE_SIZE;

	if (alignment > B_PAGE_SIZE)
		pagesNeeded += alignment / B_PAGE_SIZE - 1;

	void* address = NULL;
	area_id area = create_area("guarded_heap_huge_allocation", &address,
		B_ANY_ADDRESS, (pagesNeeded + 1) * B_PAGE_SIZE, B_NO_LOCK,
		B_READ_AREA | B_WRITE_AREA);
	if (area < 0) {
		panic("failed to create area for allocation of %" B_PRIuSIZE " pages",
			pagesNeeded);
		return NULL;
	}

	// We just use a page object
	guarded_heap_page* page = (guarded_heap_page*)address;
	page->flags = GUARDED_HEAP_PAGE_FLAG_USED | GUARDED_HEAP_PAGE_FLAG_FIRST
		| GUARDED_HEAP_PAGE_FLAG_AREA;
	page->allocation_size = size;
	page->allocation_base = (void*)(((addr_t)address
		+ pagesNeeded * B_PAGE_SIZE - size) & ~(alignment - 1));
	page->alignment = alignment;
	page->allocating_thread = find_thread(NULL);
	page->freeing_thread = -1;
	page->alloc_stack_trace_depth = guarded_heap_fill_stack_trace(
		page->stack_trace, sStackTraceDepth, 2);
	page->free_stack_trace_depth = 0;

	if (alignment <= B_PAGE_SIZE) {
		// Protect just the guard page.
		mprotect((void*)((addr_t)address + pagesNeeded * B_PAGE_SIZE),
			B_PAGE_SIZE, 0);
	} else {
		// Protect empty pages before the allocation start...
		addr_t protectedStart = (addr_t)address + B_PAGE_SIZE;
		size_t protectedSize = (addr_t)page->allocation_base - protectedStart;
		if (protectedSize > 0)
			mprotect((void*)protectedStart, protectedSize, 0);

		// ... and after allocation end.
		size_t allocatedPages = (size + B_PAGE_SIZE - 1) / B_PAGE_SIZE;
		protectedStart = (addr_t)page->allocation_base
			+ allocatedPages * B_PAGE_SIZE;
		protectedSize = (addr_t)address + (pagesNeeded + 1) * B_PAGE_SIZE
			- protectedStart;

		// There is at least the guard page.
		mprotect((void*)protectedStart, protectedSize, 0);
	}

	return page->allocation_base;
}
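
// Main allocation path: small requests are served from the existing heap
// areas; if none of them has room, a new area is added and the allocation is
// retried. Requests over the area use threshold or with an alignment larger
// than a page go straight to a dedicated area.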

static void*
guarded_heap_allocate(guarded_heap& heap, size_t size, size_t alignment)
{
	size_t pagesNeeded = (size + B_PAGE_SIZE - 1) / B_PAGE_SIZE + 1;
	if (alignment > B_PAGE_SIZE
		|| pagesNeeded * B_PAGE_SIZE >= GUARDED_HEAP_AREA_USE_THRESHOLD) {
		// Don't bother, use an area directly. Since it will also fault once
		// it is deleted, that fits our model quite nicely.
		return guarded_heap_allocate_with_area(size, alignment);
	}

	void* result = NULL;

	ReadLocker areaListReadLocker(heap.lock);
	for (guarded_heap_area* area = heap.areas; area != NULL;
		area = area->next) {

		MutexLocker locker(area->lock);
		result = guarded_heap_area_allocate(*area, pagesNeeded, size,
			alignment);
		if (result != NULL)
			break;
	}

	uint32 counter = heap.area_creation_counter;
	areaListReadLocker.Unlock();

	if (result == NULL) {
		guarded_heap_add_area(heap, counter);
		return guarded_heap_allocate(heap, size, alignment);
	}

	if (result == NULL)
		panic("ran out of memory");

	return result;
}

static guarded_heap_area*
guarded_heap_get_locked_area_for(guarded_heap& heap, void* address)
{
	ReadLocker areaListReadLocker(heap.lock);
	for (guarded_heap_area* area = heap.areas; area != NULL;
		area = area->next) {

		if ((addr_t)address < area->base)
			continue;

		if ((addr_t)address >= area->base + area->size)
			continue;

		mutex_lock(&area->lock);
		return area;
	}

	return NULL;
}

static size_t
guarded_heap_area_page_index_for(guarded_heap_area& area, void* address)
{
	size_t pageIndex = ((addr_t)address - area.base) / B_PAGE_SIZE;
	guarded_heap_page& page = area.pages[pageIndex];
	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_USED) == 0) {
		panic("tried to free %p which points at page %" B_PRIuSIZE
			" which is not marked in use", address, pageIndex);
		return area.page_count;
	}

	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_GUARD) != 0) {
		panic("tried to free %p which points at page %" B_PRIuSIZE
			" which is a guard page", address, pageIndex);
		return area.page_count;
	}

	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_FIRST) == 0) {
		panic("tried to free %p which points at page %" B_PRIuSIZE
			" which is not an allocation first page", address, pageIndex);
		return area.page_count;
	}

	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_DEAD) != 0) {
		panic("tried to free %p which points at page %" B_PRIuSIZE
			" which is a dead page", address, pageIndex);
		return area.page_count;
	}

	return pageIndex;
}

static bool
guarded_heap_area_free(guarded_heap_area& area, void* address)
{
	size_t pageIndex = guarded_heap_area_page_index_for(area, address);
	if (pageIndex >= area.page_count)
		return false;

	size_t pagesFreed = 0;
	guarded_heap_page* page = &area.pages[pageIndex];
	while ((page->flags & GUARDED_HEAP_PAGE_FLAG_GUARD) == 0) {
		// Mark the allocation page as free.
		guarded_heap_free_page(area, pageIndex);
		if (pagesFreed == 0 && sStackTraceDepth > 0) {
			size_t freeEntries
				= kMaxStackTraceDepth - page->alloc_stack_trace_depth;

			page->free_stack_trace_depth = guarded_heap_fill_stack_trace(
				&page->stack_trace[page->alloc_stack_trace_depth],
				min_c(freeEntries, sStackTraceDepth), 2);
		}

		pagesFreed++;
		pageIndex++;
		page = &area.pages[pageIndex];
	}

	// Mark the guard page as free as well.
	guarded_heap_free_page(area, pageIndex);
	pagesFreed++;

	if (area.heap->reuse_memory) {
		area.used_pages -= pagesFreed;
		atomic_add((int32*)&area.heap->used_pages, -pagesFreed);
	}

	return true;
}

static guarded_heap_page*
guarded_heap_area_allocation_for(void* address, area_id& allocationArea)
{
	allocationArea = area_for(address);
	if (allocationArea < 0)
		return NULL;

	area_info areaInfo;
	if (get_area_info(allocationArea, &areaInfo) != B_OK)
		return NULL;

	guarded_heap_page* page = (guarded_heap_page*)areaInfo.address;
	if (page->flags != (GUARDED_HEAP_PAGE_FLAG_USED
			| GUARDED_HEAP_PAGE_FLAG_FIRST | GUARDED_HEAP_PAGE_FLAG_AREA)) {
		return NULL;
	}

	if (page->allocation_base != address)
		return NULL;
	if (page->allocation_size >= areaInfo.size)
		return NULL;

	return page;
}

static bool
guarded_heap_free_area_allocation(void* address)
{
	area_id allocationArea;
	if (guarded_heap_area_allocation_for(address, allocationArea) == NULL)
		return false;

	delete_area(allocationArea);
	return true;
}

static bool
guarded_heap_free(void* address)
{
	guarded_heap_area* area = guarded_heap_get_locked_area_for(sGuardedHeap,
		address);
	if (area == NULL)
		return guarded_heap_free_area_allocation(address);

	MutexLocker locker(area->lock, true);
	return guarded_heap_area_free(*area, address);
}

static void*
guarded_heap_realloc(void* address, size_t newSize)
{
	guarded_heap_area* area = guarded_heap_get_locked_area_for(sGuardedHeap,
		address);

	size_t oldSize;
	area_id allocationArea = -1;
	if (area != NULL) {
		MutexLocker locker(area->lock, true);
		size_t pageIndex = guarded_heap_area_page_index_for(*area, address);
		if (pageIndex >= area->page_count)
			return NULL;

		guarded_heap_page& page = area->pages[pageIndex];
		oldSize = page.allocation_size;
	} else {
		guarded_heap_page* page = guarded_heap_area_allocation_for(address,
			allocationArea);
		if (page == NULL)
			return NULL;

		oldSize = page->allocation_size;
	}

	if (oldSize == newSize)
		return address;

	void* newBlock = guarded_heap_allocate(sGuardedHeap, newSize,
		sDefaultAlignment);
	if (newBlock == NULL)
		return NULL;

	memcpy(newBlock, address, min_c(oldSize, newSize));

	if (allocationArea >= 0)
		delete_area(allocationArea);
	else {
		MutexLocker locker(area->lock);
		guarded_heap_area_free(*area, address);
	}

	return newBlock;
}

// #pragma mark - Debugger commands


static void
dump_guarded_heap_page(guarded_heap_page& page)
{
	printf("flags:");
	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_USED) != 0)
		printf(" used");
	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_FIRST) != 0)
		printf(" first");
	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_GUARD) != 0)
		printf(" guard");
	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_DEAD) != 0)
		printf(" dead");
	printf("\n");

	printf("allocation size: %" B_PRIuSIZE "\n", page.allocation_size);
	printf("allocation base: %p\n", page.allocation_base);
	printf("alignment: %" B_PRIuSIZE "\n", page.alignment);
	printf("allocating thread: %" B_PRId32 "\n", page.allocating_thread);
	printf("freeing thread: %" B_PRId32 "\n", page.freeing_thread);
}

static void
dump_guarded_heap_page(void* address, bool doPanic)
{
	// Find the area that contains this page.
	guarded_heap_area* area = NULL;
	for (guarded_heap_area* candidate = sGuardedHeap.areas; candidate != NULL;
		candidate = candidate->next) {

		if ((addr_t)address < candidate->base)
			continue;
		if ((addr_t)address >= candidate->base + candidate->size)
			continue;

		area = candidate;
		break;
	}

	if (area == NULL) {
		panic("didn't find area for address %p\n", address);
		return;
	}

	size_t pageIndex = ((addr_t)address - area->base) / B_PAGE_SIZE;
	guarded_heap_page& page = area->pages[pageIndex];
	dump_guarded_heap_page(page);

	// Find the first page and dump the stack traces.
	for (ssize_t candidateIndex = (ssize_t)pageIndex;
		sStackTraceDepth > 0 && candidateIndex >= 0; candidateIndex--) {
		guarded_heap_page& candidate = area->pages[candidateIndex];
		if ((candidate.flags & GUARDED_HEAP_PAGE_FLAG_FIRST) == 0)
			continue;

		guarded_heap_print_stack_traces(candidate);
		break;
	}

	if (doPanic) {
		// Note: we do this the complicated way because we absolutely don't
		// want any character conversion to happen that might provoke other
		// segfaults in the locale backend. Therefore we avoid using any string
		// formats, resulting in the mess below.

#define DO_PANIC(state) \
	panic("thread %" B_PRId32 " tried accessing address %p which is " \
		state " (base: 0x%" B_PRIxADDR ", size: %" B_PRIuSIZE \
		", alignment: %" B_PRIuSIZE ", allocated by thread: %" \
		B_PRId32 ", freed by thread: %" B_PRId32 ")", \
		find_thread(NULL), address, page.allocation_base, \
		page.allocation_size, page.alignment, page.allocating_thread, \
		page.freeing_thread)

		if ((page.flags & GUARDED_HEAP_PAGE_FLAG_USED) == 0)
			DO_PANIC("not allocated");
		else if ((page.flags & GUARDED_HEAP_PAGE_FLAG_GUARD) != 0)
			DO_PANIC("a guard page");
		else if ((page.flags & GUARDED_HEAP_PAGE_FLAG_DEAD) != 0)
			DO_PANIC("a dead page");
		else
			DO_PANIC("in an unknown state");

#undef DO_PANIC
	}
}

static void
dump_guarded_heap_area(guarded_heap_area& area)
{
	printf("guarded heap area: %p\n", &area);
	printf("next heap area: %p\n", area.next);
	printf("guarded heap: %p\n", area.heap);
	printf("area id: %" B_PRId32 "\n", area.area);
	printf("base: 0x%" B_PRIxADDR "\n", area.base);
	printf("size: %" B_PRIuSIZE "\n", area.size);
	printf("page count: %" B_PRIuSIZE "\n", area.page_count);
	printf("used pages: %" B_PRIuSIZE "\n", area.used_pages);
	printf("lock: %p\n", &area.lock);

	size_t freeCount = 0;
	void* item = list_get_next_item(&area.free_list, NULL);
	while (item != NULL) {
		freeCount++;

		if ((((guarded_heap_page*)item)->flags & GUARDED_HEAP_PAGE_FLAG_USED)
				!= 0) {
			printf("free list broken, page %p not actually free\n", item);
		}

		item = list_get_next_item(&area.free_list, item);
	}

	printf("free_list: %p (%" B_PRIuSIZE " free)\n", &area.free_list,
		freeCount);

	freeCount = 0;
	size_t runLength = 0;
	size_t longestRun = 0;
	for (size_t i = 0; i <= area.page_count; i++) {
		guarded_heap_page& page = area.pages[i];
		if (i == area.page_count
			|| (page.flags & GUARDED_HEAP_PAGE_FLAG_USED) != 0) {
			freeCount += runLength;
			if (runLength > longestRun)
				longestRun = runLength;
			runLength = 0;
			continue;
		}

		runLength = 1;
		for (size_t j = 1; j < area.page_count - i; j++) {
			if ((area.pages[i + j].flags & GUARDED_HEAP_PAGE_FLAG_USED) != 0)
				break;

			runLength++;
		}

		i += runLength - 1;
	}

	printf("longest free run: %" B_PRIuSIZE " (%" B_PRIuSIZE " free)\n",
		longestRun, freeCount);

	printf("pages: %p\n", area.pages);
}

static void
dump_guarded_heap(guarded_heap& heap)
{
	printf("guarded heap: %p\n", &heap);
	printf("rw lock: %p\n", &heap.lock);
	printf("page count: %" B_PRIuSIZE "\n", heap.page_count);
	printf("used pages: %" B_PRIuSIZE "\n", heap.used_pages);
	printf("area creation counter: %" B_PRIu32 "\n",
		heap.area_creation_counter);

	size_t areaCount = 0;
	guarded_heap_area* area = heap.areas;
	while (area != NULL) {
		areaCount++;
		area = area->next;
	}

	printf("areas: %p (%" B_PRIuSIZE ")\n", heap.areas, areaCount);
}

static void
dump_allocations(guarded_heap& heap, bool statsOnly, thread_id thread)
{
	WriteLocker heapLocker(heap.lock);

	size_t allocationCount = 0;
	size_t allocationSize = 0;
	for (guarded_heap_area* area = heap.areas; area != NULL;
		area = area->next) {

		MutexLocker areaLocker(area->lock);
		for (size_t i = 0; i < area->page_count; i++) {
			guarded_heap_page& page = area->pages[i];
			if ((page.flags & GUARDED_HEAP_PAGE_FLAG_FIRST) == 0
				|| (page.flags & GUARDED_HEAP_PAGE_FLAG_DEAD) != 0) {
				continue;
			}

			if (thread >= 0 && thread != page.allocating_thread)
				continue;

			allocationCount++;
			allocationSize += page.allocation_size;

			if (statsOnly)
				continue;

			print_stdout("allocation: base: %p; size: %" B_PRIuSIZE
				"; thread: %" B_PRId32 "; alignment: %" B_PRIuSIZE "\n",
				page.allocation_base, page.allocation_size,
				page.allocating_thread, page.alignment);

			guarded_heap_print_stack_trace(page.stack_trace,
				page.alloc_stack_trace_depth);
		}
	}

	print_stdout("total allocations: %" B_PRIuSIZE ", %" B_PRIuSIZE " bytes\n",
		allocationCount, allocationSize);
}

static void
dump_allocations_full()
{
	dump_allocations(sGuardedHeap, false, -1);
}

// #pragma mark - Heap Debug API


static void
guarded_heap_set_memory_reuse(bool enabled)
{
	sGuardedHeap.reuse_memory = enabled;
}


static void
guarded_heap_set_debugger_calls(bool enabled)
{
	sDebuggerCalls = enabled;
}


static void
guarded_heap_set_default_alignment(size_t defaultAlignment)
{
	sDefaultAlignment = defaultAlignment;
}


static void
guarded_heap_dump_allocations(bool statsOnly, thread_id thread)
{
	dump_allocations(sGuardedHeap, statsOnly, thread);
}

static void
guarded_heap_dump_heaps(bool dumpAreas, bool dumpBins)
{
	WriteLocker heapLocker(sGuardedHeap.lock);
	dump_guarded_heap(sGuardedHeap);
	if (!dumpAreas)
		return;

	for (guarded_heap_area* area = sGuardedHeap.areas; area != NULL;
		area = area->next) {
		MutexLocker areaLocker(area->lock);
		dump_guarded_heap_area(*area);

		if (!dumpBins)
			continue;

		for (size_t i = 0; i < area->page_count; i++) {
			dump_guarded_heap_page(area->pages[i]);
			if ((area->pages[i].flags & GUARDED_HEAP_PAGE_FLAG_FIRST) != 0)
				guarded_heap_print_stack_traces(area->pages[i]);
		}
	}
}

static void
guarded_heap_set_dump_allocations_on_exit(bool enabled)
{
	sDumpAllocationsOnExit = enabled;
}


static status_t
guarded_heap_set_stack_trace_depth(size_t stackTraceDepth)
{
	if (stackTraceDepth == 0) {
		sStackTraceDepth = 0;
		return B_OK;
	}

	// This is rather wasteful, but these are going to be filled lazily by
	// each thread on alloc/free. Therefore we cannot use a dynamic allocation
	// and just store a pointer to it. Since we only need to store two
	// addresses, we use two TLS slots and set them to point at the stack
	// base/end.
	if (sStackBaseTLSIndex < 0) {
		sStackBaseTLSIndex = tls_allocate();
		if (sStackBaseTLSIndex < 0)
			return sStackBaseTLSIndex;
	}

	if (sStackEndTLSIndex < 0) {
		sStackEndTLSIndex = tls_allocate();
		if (sStackEndTLSIndex < 0)
			return sStackEndTLSIndex;
	}

	sStackTraceDepth = min_c(stackTraceDepth, kMaxStackTraceDepth);
	return B_OK;
}

// #pragma mark - Init


static void
init_after_fork()
{
	// The memory has actually been copied (or is in a copy on write state),
	// but the area ids have changed.
	for (guarded_heap_area* area = sGuardedHeap.areas; area != NULL;
		area = area->next) {
		area->area = area_for(area);
		if (area->area < 0)
			panic("failed to find area for heap area %p after fork", area);
	}
}

static status_t
guarded_heap_init(void)
{
	if (!guarded_heap_area_create(sGuardedHeap, GUARDED_HEAP_INITIAL_SIZE))
		return B_ERROR;

	// Install a segfault handler so we can print some info before going down.
	struct sigaction action;
	action.sa_handler = (__sighandler_t)guarded_heap_segfault_handler;
	action.sa_flags = SA_SIGINFO;
	action.sa_userdata = NULL;
	sigemptyset(&action.sa_mask);
	sigaction(SIGSEGV, &action, NULL);

	atfork(&init_after_fork);
		// Note: Needs malloc(). Hence we need to be fully initialized.
		// TODO: We should actually also install a hook that is called before
		// fork() is being executed. In a multithreaded app it would need to
		// acquire *all* allocator locks, so that we don't fork() an
		// inconsistent state.

	return B_OK;
}

static void
guarded_heap_terminate_after()
{
	if (sDumpAllocationsOnExit)
		dump_allocations_full();
}

// #pragma mark - Public API


static void*
heap_memalign(size_t alignment, size_t size)
{
	return guarded_heap_allocate(sGuardedHeap, size, alignment);
}


static void*
heap_malloc(size_t size)
{
	return heap_memalign(sDefaultAlignment, size);
}


static void
heap_free(void* address)
{
	if (!guarded_heap_free(address))
		panic("free failed for address %p", address);
}


static void*
heap_realloc(void* address, size_t newSize)
{
	if (address == NULL)
		return heap_memalign(sDefaultAlignment, newSize);

	return guarded_heap_realloc(address, newSize);
}
1206 heap_implementation __mallocGuardedHeap
= {
1208 guarded_heap_terminate_after
,
1217 NULL
, // posix_memalign
1219 NULL
, // start_wall_checking
1220 NULL
, // stop_wall_checking
1221 NULL
, // set_paranoid_validation
1223 guarded_heap_set_memory_reuse
,
1224 guarded_heap_set_debugger_calls
,
1225 guarded_heap_set_default_alignment
,
1227 NULL
, // validate_heaps
1228 NULL
, // validate_walls
1230 guarded_heap_dump_allocations
,
1231 guarded_heap_dump_heaps
,
1234 NULL
, // get_allocation_info
1236 guarded_heap_set_dump_allocations_on_exit
,
1237 guarded_heap_set_stack_trace_depth