haiku.git: src/system/libroot/posix/malloc_debug/guarded_heap.cpp
/*
 * Copyright 2011, Michael Lotz <mmlr@mlotz.ch>.
 * Distributed under the terms of the MIT License.
 */


#include "malloc_debug_api.h"


#include <malloc.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <signal.h>
#include <sys/mman.h>

#include <locks.h>

#include <libroot_private.h>
#include <runtime_loader.h>

#include <TLS.h>


// #pragma mark - Debug Helpers


static const size_t kMaxStackTraceDepth = 50;


static bool sDebuggerCalls = true;
static bool sDumpAllocationsOnExit = false;
static size_t sStackTraceDepth = 0;
static int32 sStackBaseTLSIndex = -1;
static int32 sStackEndTLSIndex = -1;

#if __cplusplus >= 201103L
#include <cstddef>
using namespace std;
static size_t sDefaultAlignment = alignof(max_align_t);
#else
static size_t sDefaultAlignment = 8;
#endif


static void
panic(const char* format, ...)
{
    char buffer[1024];

    va_list args;
    va_start(args, format);
    vsnprintf(buffer, sizeof(buffer), format, args);
    va_end(args);

    if (sDebuggerCalls)
        debugger(buffer);
    else
        debug_printf(buffer);
}


static void
print_stdout(const char* format, ...)
{
    // To avoid any allocations due to dynamic memory needed by printf() we
    // use a stack buffer and vsnprintf(). Otherwise this does the same as
    // printf().
    char buffer[1024];

    va_list args;
    va_start(args, format);
    vsnprintf(buffer, sizeof(buffer), format, args);
    va_end(args);

    write(STDOUT_FILENO, buffer, strlen(buffer));
}


// #pragma mark - Linked List


#define GET_ITEM(list, item) ((void *)((uint8 *)item - list->offset))
#define GET_LINK(list, item) ((list_link *)((uint8 *)item + list->offset))
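
// The list is intrusive: each element embeds a list_link at a fixed byte
// offset, and GET_ITEM()/GET_LINK() translate between an element pointer and
// its embedded link using that offset. This keeps the free-page bookkeeping
// free of any dynamic allocation.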


struct list_link {
    list_link* next;
    list_link* prev;
};

struct list {
    list_link link;
    int32 offset;
};


static inline void
list_remove_item(struct list* list, void* item)
{
    list_link* link = GET_LINK(list, item);

    link->next->prev = link->prev;
    link->prev->next = link->next;
}


static inline void
list_add_item(struct list* list, void* item)
{
    list_link* link = GET_LINK(list, item);

    link->next = &list->link;
    link->prev = list->link.prev;

    list->link.prev->next = link;
    list->link.prev = link;
}


static inline void*
list_get_next_item(struct list* list, void* item)
{
    if (item == NULL) {
        if (list->link.next == (list_link *)list)
            return NULL;

        return GET_ITEM(list, list->link.next);
    }

    list_link* link = GET_LINK(list, item);
    if (link->next == &list->link)
        return NULL;

    return GET_ITEM(list, link->next);
}


static inline void
list_init_etc(struct list* list, int32 offset)
{
    list->link.next = list->link.prev = &list->link;
    list->offset = offset;
}


// #pragma mark - Guarded Heap


#define GUARDED_HEAP_PAGE_FLAG_USED     0x01
#define GUARDED_HEAP_PAGE_FLAG_FIRST    0x02
#define GUARDED_HEAP_PAGE_FLAG_GUARD    0x04
#define GUARDED_HEAP_PAGE_FLAG_DEAD     0x08
#define GUARDED_HEAP_PAGE_FLAG_AREA     0x10

#define GUARDED_HEAP_INITIAL_SIZE           1 * 1024 * 1024
#define GUARDED_HEAP_GROW_SIZE              2 * 1024 * 1024
#define GUARDED_HEAP_AREA_USE_THRESHOLD     1 * 1024 * 1024
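
// Every allocation occupies a run of whole pages followed by one unmapped
// guard page, so accesses past the end of the allocation fault immediately.
// Per-page metadata lives in the guarded_heap_page array at the start of
// each area.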


struct guarded_heap;

struct guarded_heap_page {
    uint8               flags;
    size_t              allocation_size;
    void*               allocation_base;
    size_t              alignment;
    thread_id           thread;
    list_link           free_list_link;
    size_t              alloc_stack_trace_depth;
    size_t              free_stack_trace_depth;
    addr_t              stack_trace[kMaxStackTraceDepth];
};

struct guarded_heap_area {
    guarded_heap*       heap;
    guarded_heap_area*  next;
    area_id             area;
    addr_t              base;
    size_t              size;
    size_t              page_count;
    size_t              used_pages;
    mutex               lock;
    struct list         free_list;
    guarded_heap_page   pages[0];
};

struct guarded_heap {
    rw_lock             lock;
    size_t              page_count;
    size_t              used_pages;
    uint32              area_creation_counter;
    bool                reuse_memory;
    guarded_heap_area*  areas;
};


static guarded_heap sGuardedHeap = {
    RW_LOCK_INITIALIZER("guarded heap lock"),
    0, 0, 0, true, NULL
};


static void dump_guarded_heap_page(void* address, bool doPanic = false);


static void
guarded_heap_segfault_handler(int signal, siginfo_t* signalInfo, void* vregs)
{
    if (signal != SIGSEGV)
        return;

    if (signalInfo->si_code != SEGV_ACCERR) {
        // Not ours.
        panic("generic segfault");
        return;
    }

    dump_guarded_heap_page(signalInfo->si_addr, true);

    exit(-1);
}


static void
guarded_heap_page_protect(guarded_heap_area& area, size_t pageIndex,
    uint32 protection)
{
    addr_t address = area.base + pageIndex * B_PAGE_SIZE;
    mprotect((void*)address, B_PAGE_SIZE, protection);
}


static void
guarded_heap_print_stack_trace(addr_t stackTrace[], size_t depth)
{
    char* imageName;
    char* symbolName;
    void* location;
    bool exactMatch;

    for (size_t i = 0; i < depth; i++) {
        addr_t address = stackTrace[i];

        status_t status = __gRuntimeLoader->get_nearest_symbol_at_address(
            (void*)address, NULL, NULL, &imageName, &symbolName, NULL,
            &location, &exactMatch);
        if (status != B_OK) {
            print_stdout("\t%#" B_PRIxADDR " (lookup failed: %s)\n", address,
                strerror(status));
            continue;
        }

        print_stdout("\t<%s> %s + %#" B_PRIxADDR "%s\n", imageName, symbolName,
            address - (addr_t)location, exactMatch ? "" : " (nearest)");
    }
}


static void
guarded_heap_print_stack_traces(guarded_heap_page& page)
{
    if (page.alloc_stack_trace_depth > 0) {
        printf("alloc stack trace (%" B_PRIuSIZE "):\n",
            page.alloc_stack_trace_depth);
        guarded_heap_print_stack_trace(page.stack_trace,
            page.alloc_stack_trace_depth);
    }

    if (page.free_stack_trace_depth > 0) {
        printf("free stack trace (%" B_PRIuSIZE "):\n",
            page.free_stack_trace_depth);
        guarded_heap_print_stack_trace(
            &page.stack_trace[page.alloc_stack_trace_depth],
            page.free_stack_trace_depth);
    }
}


static size_t
guarded_heap_fill_stack_trace(addr_t stackTrace[], size_t maxDepth,
    size_t skipFrames)
{
    if (maxDepth == 0)
        return 0;

    void** stackBase = tls_address(sStackBaseTLSIndex);
    void** stackEnd = tls_address(sStackEndTLSIndex);
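    // The stack bounds are cached per thread in the two TLS slots allocated
    // by guarded_heap_set_stack_trace_depth(); resolve them lazily from the
    // thread info on first use.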
    if (*stackBase == NULL || *stackEnd == NULL) {
        thread_info threadInfo;
        status_t result = get_thread_info(find_thread(NULL), &threadInfo);
        if (result != B_OK)
            return 0;

        *stackBase = (void*)threadInfo.stack_base;
        *stackEnd = (void*)threadInfo.stack_end;
    }

    int32 traceDepth = __arch_get_stack_trace(stackTrace, maxDepth, skipFrames,
        (addr_t)*stackBase, (addr_t)*stackEnd);

    return traceDepth < 0 ? 0 : traceDepth;
}


static void
guarded_heap_page_allocate(guarded_heap_area& area, size_t startPageIndex,
    size_t pagesNeeded, size_t allocationSize, size_t alignment,
    void* allocationBase)
{
    if (pagesNeeded < 2) {
        panic("need to allocate at least 2 pages, one for guard\n");
        return;
    }

    guarded_heap_page* firstPage = NULL;
    for (size_t i = 0; i < pagesNeeded; i++) {
        guarded_heap_page& page = area.pages[startPageIndex + i];
        page.flags = GUARDED_HEAP_PAGE_FLAG_USED;
        if (i == 0) {
            page.thread = find_thread(NULL);
            page.allocation_size = allocationSize;
            page.allocation_base = allocationBase;
            page.alignment = alignment;
            page.flags |= GUARDED_HEAP_PAGE_FLAG_FIRST;
            page.alloc_stack_trace_depth = guarded_heap_fill_stack_trace(
                page.stack_trace, sStackTraceDepth, 2);
            page.free_stack_trace_depth = 0;
            firstPage = &page;
        } else {
            page.thread = firstPage->thread;
            page.allocation_size = allocationSize;
            page.allocation_base = allocationBase;
            page.alignment = alignment;
            page.alloc_stack_trace_depth = 0;
            page.free_stack_trace_depth = 0;
        }

        list_remove_item(&area.free_list, &page);
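
        // The last page of the run becomes the guard page: it stays unmapped
        // so that any access past the end of the allocation faults right
        // away, while the data pages are made readable and writable.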
        if (i == pagesNeeded - 1) {
            page.flags |= GUARDED_HEAP_PAGE_FLAG_GUARD;
            guarded_heap_page_protect(area, startPageIndex + i, 0);
        } else {
            guarded_heap_page_protect(area, startPageIndex + i,
                B_READ_AREA | B_WRITE_AREA);
        }
    }
}


static void
guarded_heap_free_page(guarded_heap_area& area, size_t pageIndex,
    bool force = false)
{
    guarded_heap_page& page = area.pages[pageIndex];

    if (area.heap->reuse_memory || force)
        page.flags = 0;
    else
        page.flags |= GUARDED_HEAP_PAGE_FLAG_DEAD;

    page.thread = find_thread(NULL);

    list_add_item(&area.free_list, &page);
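
    // Unmap the page again so that any further access to the freed memory
    // faults immediately.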
    guarded_heap_page_protect(area, pageIndex, 0);
}


static void
guarded_heap_pages_allocated(guarded_heap& heap, size_t pagesAllocated)
{
    atomic_add((int32*)&heap.used_pages, pagesAllocated);
}


static void*
guarded_heap_area_allocate(guarded_heap_area& area, size_t pagesNeeded,
    size_t size, size_t alignment)
{
    if (pagesNeeded > area.page_count - area.used_pages)
        return NULL;

    // We use the free list this way so that the page that has been free for
    // the longest time is allocated. This keeps immediate re-use (that may
    // hide bugs) to a minimum.
    guarded_heap_page* page
        = (guarded_heap_page*)list_get_next_item(&area.free_list, NULL);

    for (; page != NULL;
        page = (guarded_heap_page*)list_get_next_item(&area.free_list, page)) {

        if ((page->flags & GUARDED_HEAP_PAGE_FLAG_USED) != 0)
            continue;

        size_t pageIndex = page - area.pages;
        if (pageIndex > area.page_count - pagesNeeded)
            continue;

        // Candidate, check if we have enough pages going forward
        // (including the guard page).
        bool candidate = true;
        for (size_t j = 1; j < pagesNeeded; j++) {
            if ((area.pages[pageIndex + j].flags & GUARDED_HEAP_PAGE_FLAG_USED)
                    != 0) {
                candidate = false;
                break;
            }
        }

        if (!candidate)
            continue;
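
        // Place the allocation as close to the end of its last data page as
        // the requested alignment allows, so that an overrun runs straight
        // into the guard page that follows.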
        size_t offset = size & (B_PAGE_SIZE - 1);
        void* result = (void*)((area.base + pageIndex * B_PAGE_SIZE
            + (offset > 0 ? B_PAGE_SIZE - offset : 0)) & ~(alignment - 1));

        guarded_heap_page_allocate(area, pageIndex, pagesNeeded, size,
            alignment, result);

        area.used_pages += pagesNeeded;
        guarded_heap_pages_allocated(*area.heap, pagesNeeded);
        return result;
    }

    return NULL;
}


static bool
guarded_heap_area_init(guarded_heap& heap, area_id id, void* baseAddress,
    size_t size)
{
    guarded_heap_area* area = (guarded_heap_area*)baseAddress;
    area->heap = &heap;
    area->area = id;
    area->size = size;
    area->page_count = area->size / B_PAGE_SIZE;
    area->used_pages = 0;
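
    // The first pages of the area hold this header and the guarded_heap_page
    // metadata array; only the remaining pages are handed out to allocations.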
    size_t pagesNeeded = (sizeof(guarded_heap_area)
        + area->page_count * sizeof(guarded_heap_page)
        + B_PAGE_SIZE - 1) / B_PAGE_SIZE;

    area->page_count -= pagesNeeded;
    area->size = area->page_count * B_PAGE_SIZE;
    area->base = (addr_t)baseAddress + pagesNeeded * B_PAGE_SIZE;

    mutex_init(&area->lock, "guarded_heap_area_lock");

    list_init_etc(&area->free_list,
        offsetof(guarded_heap_page, free_list_link));

    for (size_t i = 0; i < area->page_count; i++)
        guarded_heap_free_page(*area, i, true);

    area->next = heap.areas;
    heap.areas = area;
    heap.page_count += area->page_count;

    return true;
}


static bool
guarded_heap_area_create(guarded_heap& heap, size_t size)
{
    for (size_t trySize = size; trySize >= 1 * 1024 * 1024;
            trySize /= 2) {

        void* baseAddress = NULL;
        area_id id = create_area("guarded_heap_area", &baseAddress,
            B_ANY_ADDRESS, trySize, B_NO_LOCK, B_READ_AREA | B_WRITE_AREA);

        if (id < 0)
            continue;

        if (guarded_heap_area_init(heap, id, baseAddress, trySize))
            return true;

        delete_area(id);
    }

    panic("failed to allocate a new heap area");
    return false;
}


static bool
guarded_heap_add_area(guarded_heap& heap, uint32 counter)
{
    WriteLocker areaListWriteLocker(heap.lock);
    if (heap.area_creation_counter != counter)
        return false;

    return guarded_heap_area_create(heap, GUARDED_HEAP_GROW_SIZE);
}


static void*
guarded_heap_allocate(guarded_heap& heap, size_t size, size_t alignment)
{
    if (alignment == 0)
        alignment = 1;

    if (alignment > B_PAGE_SIZE) {
        panic("alignment of %" B_PRIuSIZE " not supported", alignment);
        return NULL;
    }

    size_t pagesNeeded = (size + B_PAGE_SIZE - 1) / B_PAGE_SIZE + 1;
    if (pagesNeeded * B_PAGE_SIZE >= GUARDED_HEAP_AREA_USE_THRESHOLD) {
        // Don't bother, use an area directly. Since it will also fault once
        // it is deleted, that fits our model quite nicely.

        pagesNeeded = (size + sizeof(guarded_heap_page) + B_PAGE_SIZE - 1)
            / B_PAGE_SIZE;

        void* address = NULL;
        area_id area = create_area("guarded_heap_huge_allocation", &address,
            B_ANY_ADDRESS, (pagesNeeded + 1) * B_PAGE_SIZE, B_NO_LOCK,
            B_READ_AREA | B_WRITE_AREA);
        if (area < 0) {
            panic("failed to create area for allocation of %" B_PRIuSIZE
                " pages", pagesNeeded);
            return NULL;
        }

        // We just use a page object
        guarded_heap_page* page = (guarded_heap_page*)address;
        page->flags = GUARDED_HEAP_PAGE_FLAG_USED
            | GUARDED_HEAP_PAGE_FLAG_FIRST | GUARDED_HEAP_PAGE_FLAG_AREA;
        page->allocation_size = size;
        page->allocation_base = (void*)(((addr_t)address
            + pagesNeeded * B_PAGE_SIZE - size) & ~(alignment - 1));
        page->alignment = alignment;
        page->thread = find_thread(NULL);
        page->alloc_stack_trace_depth = guarded_heap_fill_stack_trace(
            page->stack_trace, sStackTraceDepth, 2);
        page->free_stack_trace_depth = 0;
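
        // The extra page at the end of the dedicated area serves as the
        // guard page for this oversized allocation.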
        mprotect((void*)((addr_t)address + pagesNeeded * B_PAGE_SIZE),
            B_PAGE_SIZE, 0);

        return page->allocation_base;
    }

    void* result = NULL;

    ReadLocker areaListReadLocker(heap.lock);
    for (guarded_heap_area* area = heap.areas; area != NULL;
            area = area->next) {

        MutexLocker locker(area->lock);
        result = guarded_heap_area_allocate(*area, pagesNeeded, size,
            alignment);
        if (result != NULL)
            break;
    }
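
    // Remember the current area generation; guarded_heap_add_area() compares
    // it against the heap's counter so that racing threads do not all grow
    // the heap at once.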
    uint32 counter = heap.area_creation_counter;
    areaListReadLocker.Unlock();

    if (result == NULL) {
        guarded_heap_add_area(heap, counter);
        return guarded_heap_allocate(heap, size, alignment);
    }

    if (result == NULL)
        panic("ran out of memory");

    return result;
}


static guarded_heap_area*
guarded_heap_get_locked_area_for(guarded_heap& heap, void* address)
{
    ReadLocker areaListReadLocker(heap.lock);
    for (guarded_heap_area* area = heap.areas; area != NULL;
            area = area->next) {
        if ((addr_t)address < area->base)
            continue;

        if ((addr_t)address >= area->base + area->size)
            continue;

        mutex_lock(&area->lock);
        return area;
    }

    return NULL;
}


static size_t
guarded_heap_area_page_index_for(guarded_heap_area& area, void* address)
{
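    // Returns area.page_count (an out-of-range index) if the address does
    // not point at the first page of a live allocation; callers treat that
    // as an invalid free.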
    size_t pageIndex = ((addr_t)address - area.base) / B_PAGE_SIZE;
    guarded_heap_page& page = area.pages[pageIndex];
    if ((page.flags & GUARDED_HEAP_PAGE_FLAG_USED) == 0) {
        panic("tried to free %p which points at page %" B_PRIuSIZE
            " which is not marked in use", address, pageIndex);
        return area.page_count;
    }

    if ((page.flags & GUARDED_HEAP_PAGE_FLAG_GUARD) != 0) {
        panic("tried to free %p which points at page %" B_PRIuSIZE
            " which is a guard page", address, pageIndex);
        return area.page_count;
    }

    if ((page.flags & GUARDED_HEAP_PAGE_FLAG_FIRST) == 0) {
        panic("tried to free %p which points at page %" B_PRIuSIZE
            " which is not an allocation first page", address, pageIndex);
        return area.page_count;
    }

    if ((page.flags & GUARDED_HEAP_PAGE_FLAG_DEAD) != 0) {
        panic("tried to free %p which points at page %" B_PRIuSIZE
            " which is a dead page", address, pageIndex);
        return area.page_count;
    }

    return pageIndex;
}


static bool
guarded_heap_area_free(guarded_heap_area& area, void* address)
{
    size_t pageIndex = guarded_heap_area_page_index_for(area, address);
    if (pageIndex >= area.page_count)
        return false;

    size_t pagesFreed = 0;
    guarded_heap_page* page = &area.pages[pageIndex];
    while ((page->flags & GUARDED_HEAP_PAGE_FLAG_GUARD) == 0) {
        // Mark the allocation page as free.
        guarded_heap_free_page(area, pageIndex);
        if (pagesFreed == 0 && sStackTraceDepth > 0) {
            size_t freeEntries
                = kMaxStackTraceDepth - page->alloc_stack_trace_depth;

            page->free_stack_trace_depth = guarded_heap_fill_stack_trace(
                &page->stack_trace[page->alloc_stack_trace_depth],
                min_c(freeEntries, sStackTraceDepth), 2);
        }

        pagesFreed++;
        pageIndex++;
        page = &area.pages[pageIndex];
    }

    // Mark the guard page as free as well.
    guarded_heap_free_page(area, pageIndex);
    pagesFreed++;

    if (area.heap->reuse_memory) {
        area.used_pages -= pagesFreed;
        atomic_add((int32*)&area.heap->used_pages, -pagesFreed);
    }

    return true;
}


static guarded_heap_page*
guarded_heap_area_allocation_for(void* address, area_id& allocationArea)
{
    allocationArea = area_for(address);
    if (allocationArea < 0)
        return NULL;

    area_info areaInfo;
    if (get_area_info(allocationArea, &areaInfo) != B_OK)
        return NULL;

    guarded_heap_page* page = (guarded_heap_page*)areaInfo.address;
    if (page->flags != (GUARDED_HEAP_PAGE_FLAG_USED
            | GUARDED_HEAP_PAGE_FLAG_FIRST | GUARDED_HEAP_PAGE_FLAG_AREA)) {
        return NULL;
    }

    if (page->allocation_base != address)
        return NULL;
    if (page->allocation_size >= areaInfo.size)
        return NULL;

    return page;
}


static bool
guarded_heap_free_area_allocation(void* address)
{
    area_id allocationArea;
    if (guarded_heap_area_allocation_for(address, allocationArea) == NULL)
        return false;

    delete_area(allocationArea);
    return true;
}


static bool
guarded_heap_free(void* address)
{
    if (address == NULL)
        return true;

    guarded_heap_area* area = guarded_heap_get_locked_area_for(sGuardedHeap,
        address);
    if (area == NULL)
        return guarded_heap_free_area_allocation(address);

    MutexLocker locker(area->lock, true);
    return guarded_heap_area_free(*area, address);
}


static void*
guarded_heap_realloc(void* address, size_t newSize)
{
    guarded_heap_area* area = guarded_heap_get_locked_area_for(sGuardedHeap,
        address);

    size_t oldSize;
    area_id allocationArea = -1;
    if (area != NULL) {
        MutexLocker locker(area->lock, true);
        size_t pageIndex = guarded_heap_area_page_index_for(*area, address);
        if (pageIndex >= area->page_count)
            return NULL;

        guarded_heap_page& page = area->pages[pageIndex];
        oldSize = page.allocation_size;
        locker.Unlock();
    } else {
        guarded_heap_page* page = guarded_heap_area_allocation_for(address,
            allocationArea);
        if (page == NULL)
            return NULL;

        oldSize = page->allocation_size;
    }

    if (oldSize == newSize)
        return address;

    void* newBlock = guarded_heap_allocate(sGuardedHeap, newSize,
        sDefaultAlignment);
    if (newBlock == NULL)
        return NULL;

    memcpy(newBlock, address, min_c(oldSize, newSize));

    if (allocationArea >= 0)
        delete_area(allocationArea);
    else {
        MutexLocker locker(area->lock);
        guarded_heap_area_free(*area, address);
    }

    return newBlock;
}


// #pragma mark - Debugger commands


static void
dump_guarded_heap_page(guarded_heap_page& page)
{
    printf("flags:");
    if ((page.flags & GUARDED_HEAP_PAGE_FLAG_USED) != 0)
        printf(" used");
    if ((page.flags & GUARDED_HEAP_PAGE_FLAG_FIRST) != 0)
        printf(" first");
    if ((page.flags & GUARDED_HEAP_PAGE_FLAG_GUARD) != 0)
        printf(" guard");
    if ((page.flags & GUARDED_HEAP_PAGE_FLAG_DEAD) != 0)
        printf(" dead");
    printf("\n");

    printf("allocation size: %" B_PRIuSIZE "\n", page.allocation_size);
    printf("allocation base: %p\n", page.allocation_base);
    printf("alignment: %" B_PRIuSIZE "\n", page.alignment);
    printf("allocating thread: %" B_PRId32 "\n", page.thread);
}


static void
dump_guarded_heap_page(void* address, bool doPanic)
{
    // Find the area that contains this page.
    guarded_heap_area* area = NULL;
    for (guarded_heap_area* candidate = sGuardedHeap.areas; candidate != NULL;
            candidate = candidate->next) {

        if ((addr_t)address < candidate->base)
            continue;
        if ((addr_t)address >= candidate->base + candidate->size)
            continue;

        area = candidate;
        break;
    }

    if (area == NULL) {
        panic("didn't find area for address %p\n", address);
        return;
    }

    size_t pageIndex = ((addr_t)address - area->base) / B_PAGE_SIZE;
    guarded_heap_page& page = area->pages[pageIndex];
    dump_guarded_heap_page(page);

    // Find the first page and dump the stack traces.
    for (ssize_t candidateIndex = (ssize_t)pageIndex;
            sStackTraceDepth > 0 && candidateIndex >= 0; candidateIndex--) {
        guarded_heap_page& candidate = area->pages[candidateIndex];
        if ((candidate.flags & GUARDED_HEAP_PAGE_FLAG_FIRST) == 0)
            continue;

        guarded_heap_print_stack_traces(candidate);
        break;
    }

    if (doPanic) {
        // Note: we do this the complicated way because we absolutely don't
        // want any character conversion to happen that might provoke other
        // segfaults in the locale backend. Therefore we avoid using any string
        // formats, resulting in the mess below.

#define DO_PANIC(state) \
    panic("thread %" B_PRId32 " tried accessing address %p which is " \
        state " (base: 0x%" B_PRIxADDR ", size: %" B_PRIuSIZE \
        ", alignment: %" B_PRIuSIZE ", allocated by thread: %" \
        B_PRId32 ")", find_thread(NULL), address, \
        page.allocation_base, page.allocation_size, page.alignment, \
        page.thread)

        if ((page.flags & GUARDED_HEAP_PAGE_FLAG_USED) == 0)
            DO_PANIC("not allocated");
        else if ((page.flags & GUARDED_HEAP_PAGE_FLAG_GUARD) != 0)
            DO_PANIC("a guard page");
        else if ((page.flags & GUARDED_HEAP_PAGE_FLAG_DEAD) != 0)
            DO_PANIC("a dead page");
        else
            DO_PANIC("in an unknown state");

#undef DO_PANIC
    }
}


static void
dump_guarded_heap_area(guarded_heap_area& area)
{
    printf("guarded heap area: %p\n", &area);
    printf("next heap area: %p\n", area.next);
    printf("guarded heap: %p\n", area.heap);
    printf("area id: %" B_PRId32 "\n", area.area);
    printf("base: 0x%" B_PRIxADDR "\n", area.base);
    printf("size: %" B_PRIuSIZE "\n", area.size);
    printf("page count: %" B_PRIuSIZE "\n", area.page_count);
    printf("used pages: %" B_PRIuSIZE "\n", area.used_pages);
    printf("lock: %p\n", &area.lock);

    size_t freeCount = 0;
    void* item = list_get_next_item(&area.free_list, NULL);
    while (item != NULL) {
        freeCount++;

        if ((((guarded_heap_page*)item)->flags & GUARDED_HEAP_PAGE_FLAG_USED)
                != 0) {
            printf("free list broken, page %p not actually free\n", item);
        }

        item = list_get_next_item(&area.free_list, item);
    }

    printf("free_list: %p (%" B_PRIuSIZE " free)\n", &area.free_list,
        freeCount);

    freeCount = 0;
    size_t runLength = 0;
    size_t longestRun = 0;
    for (size_t i = 0; i <= area.page_count; i++) {
        guarded_heap_page& page = area.pages[i];
        if (i == area.page_count
            || (page.flags & GUARDED_HEAP_PAGE_FLAG_USED) != 0) {
            freeCount += runLength;
            if (runLength > longestRun)
                longestRun = runLength;
            runLength = 0;
            continue;
        }

        runLength = 1;
        for (size_t j = 1; j < area.page_count - i; j++) {
            if ((area.pages[i + j].flags & GUARDED_HEAP_PAGE_FLAG_USED) != 0)
                break;

            runLength++;
        }

        i += runLength - 1;
    }

    printf("longest free run: %" B_PRIuSIZE " (%" B_PRIuSIZE " free)\n",
        longestRun, freeCount);

    printf("pages: %p\n", area.pages);
}


static void
dump_guarded_heap(guarded_heap& heap)
{
    printf("guarded heap: %p\n", &heap);
    printf("rw lock: %p\n", &heap.lock);
    printf("page count: %" B_PRIuSIZE "\n", heap.page_count);
    printf("used pages: %" B_PRIuSIZE "\n", heap.used_pages);
    printf("area creation counter: %" B_PRIu32 "\n",
        heap.area_creation_counter);

    size_t areaCount = 0;
    guarded_heap_area* area = heap.areas;
    while (area != NULL) {
        areaCount++;
        area = area->next;
    }

    printf("areas: %p (%" B_PRIuSIZE ")\n", heap.areas, areaCount);
}


static void
dump_allocations(guarded_heap& heap, bool statsOnly, thread_id thread)
{
    WriteLocker heapLocker(heap.lock);

    size_t allocationCount = 0;
    size_t allocationSize = 0;
    for (guarded_heap_area* area = heap.areas; area != NULL;
            area = area->next) {

        MutexLocker areaLocker(area->lock);
        for (size_t i = 0; i < area->page_count; i++) {
            guarded_heap_page& page = area->pages[i];
            if ((page.flags & GUARDED_HEAP_PAGE_FLAG_FIRST) == 0
                || (page.flags & GUARDED_HEAP_PAGE_FLAG_DEAD) != 0) {
                continue;
            }

            if (thread >= 0 && thread != page.thread)
                continue;

            allocationCount++;
            allocationSize += page.allocation_size;

            if (statsOnly)
                continue;

            print_stdout("allocation: base: %p; size: %" B_PRIuSIZE
                "; thread: %" B_PRId32 "; alignment: %" B_PRIuSIZE "\n",
                page.allocation_base, page.allocation_size, page.thread,
                page.alignment);

            guarded_heap_print_stack_trace(page.stack_trace,
                page.alloc_stack_trace_depth);
        }
    }

    print_stdout("total allocations: %" B_PRIuSIZE ", %" B_PRIuSIZE " bytes\n",
        allocationCount, allocationSize);
}


static void
dump_allocations_full()
{
    dump_allocations(sGuardedHeap, false, -1);
}


// #pragma mark - Heap Debug API


static void
guarded_heap_set_memory_reuse(bool enabled)
{
    sGuardedHeap.reuse_memory = enabled;
}


static void
guarded_heap_set_debugger_calls(bool enabled)
{
    sDebuggerCalls = enabled;
}


static void
guarded_heap_set_default_alignment(size_t defaultAlignment)
{
    sDefaultAlignment = defaultAlignment;
}


static void
guarded_heap_dump_allocations(bool statsOnly, thread_id thread)
{
    dump_allocations(sGuardedHeap, statsOnly, thread);
}


static void
guarded_heap_dump_heaps(bool dumpAreas, bool dumpBins)
{
    WriteLocker heapLocker(sGuardedHeap.lock);
    dump_guarded_heap(sGuardedHeap);
    if (!dumpAreas)
        return;

    for (guarded_heap_area* area = sGuardedHeap.areas; area != NULL;
            area = area->next) {
        MutexLocker areaLocker(area->lock);
        dump_guarded_heap_area(*area);

        if (!dumpBins)
            continue;

        for (size_t i = 0; i < area->page_count; i++) {
            dump_guarded_heap_page(area->pages[i]);
            if ((area->pages[i].flags & GUARDED_HEAP_PAGE_FLAG_FIRST) != 0)
                guarded_heap_print_stack_traces(area->pages[i]);
        }
    }
}


static status_t
guarded_heap_set_dump_allocations_on_exit(bool enabled)
{
    sDumpAllocationsOnExit = enabled;
    return B_OK;
}


static status_t
guarded_heap_set_stack_trace_depth(size_t stackTraceDepth)
{
    if (stackTraceDepth == 0) {
        sStackTraceDepth = 0;
        return B_OK;
    }

    // This is rather wasteful, but these are going to be filled lazily by
    // each thread on alloc/free. Therefore we cannot use a dynamic allocation
    // and just store a pointer to it. Since we only need to store two
    // addresses, we use two TLS slots and set them to point at the stack
    // base/end.
    if (sStackBaseTLSIndex < 0) {
        sStackBaseTLSIndex = tls_allocate();
        if (sStackBaseTLSIndex < 0)
            return sStackBaseTLSIndex;
    }

    if (sStackEndTLSIndex < 0) {
        sStackEndTLSIndex = tls_allocate();
        if (sStackEndTLSIndex < 0)
            return sStackEndTLSIndex;
    }

    sStackTraceDepth = min_c(stackTraceDepth, kMaxStackTraceDepth);
    return B_OK;
}


// #pragma mark - Init


static void
init_after_fork()
{
    // The memory has actually been copied (or is in a copy on write state),
    // but the area ids have changed.
    for (guarded_heap_area* area = sGuardedHeap.areas; area != NULL;
            area = area->next) {
        area->area = area_for(area);
        if (area->area < 0)
            panic("failed to find area for heap area %p after fork", area);
    }
}


static status_t
guarded_heap_init(void)
{
    if (!guarded_heap_area_create(sGuardedHeap, GUARDED_HEAP_INITIAL_SIZE))
        return B_ERROR;

    // Install a segfault handler so we can print some info before going down.
    struct sigaction action;
    action.sa_handler = (__sighandler_t)guarded_heap_segfault_handler;
    action.sa_flags = SA_SIGINFO;
    action.sa_userdata = NULL;
    sigemptyset(&action.sa_mask);
    sigaction(SIGSEGV, &action, NULL);

    atfork(&init_after_fork);
        // Note: Needs malloc(). Hence we need to be fully initialized.
        // TODO: We should actually also install a hook that is called before
        // fork() is being executed. In a multithreaded app it would need to
        // acquire *all* allocator locks, so that we don't fork() an
        // inconsistent state.

    return B_OK;
}


static void
guarded_heap_terminate_after()
{
    if (sDumpAllocationsOnExit)
        dump_allocations_full();
}


// #pragma mark - Public API


static void*
heap_memalign(size_t alignment, size_t size)
{
    if (size == 0)
        size = 1;

    return guarded_heap_allocate(sGuardedHeap, size, alignment);
}


static void*
heap_malloc(size_t size)
{
    return heap_memalign(sDefaultAlignment, size);
}


static void
heap_free(void* address)
{
    if (!guarded_heap_free(address))
        panic("free failed for address %p", address);
}


static void*
heap_realloc(void* address, size_t newSize)
{
    if (newSize == 0) {
        free(address);
        return NULL;
    }

    if (address == NULL)
        return heap_memalign(sDefaultAlignment, newSize);

    return guarded_heap_realloc(address, newSize);
}


heap_implementation __mallocGuardedHeap = {
    guarded_heap_init,
    guarded_heap_terminate_after,

    heap_memalign,
    heap_malloc,
    heap_free,
    heap_realloc,

    NULL,   // calloc
    NULL,   // valloc
    NULL,   // posix_memalign

    NULL,   // start_wall_checking
    NULL,   // stop_wall_checking
    NULL,   // set_paranoid_validation

    guarded_heap_set_memory_reuse,
    guarded_heap_set_debugger_calls,
    guarded_heap_set_default_alignment,

    NULL,   // validate_heaps
    NULL,   // validate_walls

    guarded_heap_dump_allocations,
    guarded_heap_dump_heaps,
    heap_malloc,

    NULL,   // get_allocation_info

    guarded_heap_set_dump_allocations_on_exit,
    guarded_heap_set_stack_trace_depth
};