src/system/libroot/posix/malloc_debug/guarded_heap.cpp

/*
 * Copyright 2011, Michael Lotz <mmlr@mlotz.ch>.
 * Distributed under the terms of the MIT License.
 */


#include "malloc_debug_api.h"


#include <malloc.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <signal.h>
#include <sys/mman.h>
#include <unistd.h>
	// for write() and STDOUT_FILENO used by print_stdout() below

#include <locks.h>

#include <libroot_private.h>
#include <runtime_loader.h>

#include <TLS.h>


// #pragma mark - Debug Helpers


static const size_t kMaxStackTraceDepth = 50;


static bool sDebuggerCalls = true;
static bool sDumpAllocationsOnExit = false;
static size_t sStackTraceDepth = 0;
static int32 sStackBaseTLSIndex = -1;
static int32 sStackEndTLSIndex = -1;

#if __cplusplus >= 201103L
#include <cstddef>
using namespace std;
static size_t sDefaultAlignment = alignof(max_align_t);
#else
static size_t sDefaultAlignment = 8;
#endif


static void
panic(const char* format, ...)
{
	char buffer[1024];

	va_list args;
	va_start(args, format);
	vsnprintf(buffer, sizeof(buffer), format, args);
	va_end(args);

	if (sDebuggerCalls)
		debugger(buffer);
	else
		debug_printf(buffer);
}


static void
print_stdout(const char* format, ...)
{
	// To avoid any allocations due to dynamic memory needed by printf() we
	// use a stack buffer and vsnprintf(). Otherwise this does the same as
	// printf().
	char buffer[1024];

	va_list args;
	va_start(args, format);
	vsnprintf(buffer, sizeof(buffer), format, args);
	va_end(args);

	write(STDOUT_FILENO, buffer, strlen(buffer));
}


// #pragma mark - Linked List


#define GET_ITEM(list, item) ((void *)((uint8 *)item - list->offset))
#define GET_LINK(list, item) ((list_link *)((uint8 *)item + list->offset))


struct list_link {
	list_link*	next;
	list_link*	prev;
};

struct list {
	list_link	link;
	int32		offset;
};


static inline void
list_remove_item(struct list* list, void* item)
{
	list_link* link = GET_LINK(list, item);

	link->next->prev = link->prev;
	link->prev->next = link->next;
}


static inline void
list_add_item(struct list* list, void* item)
{
	list_link* link = GET_LINK(list, item);

	link->next = &list->link;
	link->prev = list->link.prev;

	list->link.prev->next = link;
	list->link.prev = link;
}


static inline void*
list_get_next_item(struct list* list, void* item)
{
	if (item == NULL) {
		if (list->link.next == (list_link *)list)
			return NULL;

		return GET_ITEM(list, list->link.next);
	}

	list_link* link = GET_LINK(list, item);
	if (link->next == &list->link)
		return NULL;

	return GET_ITEM(list, link->next);
}


static inline void
list_init_etc(struct list* list, int32 offset)
{
	list->link.next = list->link.prev = &list->link;
	list->offset = offset;
}


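// Note: these helpers form a minimal intrusive doubly linked list in the
// style of the kernel's <util/list.h> API. The heap areas below keep their
// free pages in such a list and always hand out the item that
// list_get_next_item(&free_list, NULL) returns first, i.e. the page that has
// been free the longest, so that freed memory is re-used as late as possible
// (see guarded_heap_area_allocate()).

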
// #pragma mark - Guarded Heap


#define GUARDED_HEAP_PAGE_FLAG_USED		0x01
#define GUARDED_HEAP_PAGE_FLAG_FIRST	0x02
#define GUARDED_HEAP_PAGE_FLAG_GUARD	0x04
#define GUARDED_HEAP_PAGE_FLAG_DEAD		0x08
#define GUARDED_HEAP_PAGE_FLAG_AREA		0x10

#define GUARDED_HEAP_INITIAL_SIZE		1 * 1024 * 1024
#define GUARDED_HEAP_GROW_SIZE			2 * 1024 * 1024
#define GUARDED_HEAP_AREA_USE_THRESHOLD	1 * 1024 * 1024


struct guarded_heap;

struct guarded_heap_page {
	uint8				flags;
	size_t				allocation_size;
	void*				allocation_base;
	size_t				alignment;
	thread_id			allocating_thread;
	thread_id			freeing_thread;
	list_link			free_list_link;
	size_t				alloc_stack_trace_depth;
	size_t				free_stack_trace_depth;
	addr_t				stack_trace[kMaxStackTraceDepth];
};

struct guarded_heap_area {
	guarded_heap*		heap;
	guarded_heap_area*	next;
	area_id				area;
	addr_t				base;
	size_t				size;
	size_t				page_count;
	size_t				used_pages;
	mutex				lock;
	struct list			free_list;
	guarded_heap_page	pages[0];
};

struct guarded_heap {
	rw_lock				lock;
	size_t				page_count;
	size_t				used_pages;
	uint32				area_creation_counter;
	bool				reuse_memory;
	guarded_heap_area*	areas;
};


static guarded_heap sGuardedHeap = {
	RW_LOCK_INITIALIZER("guarded heap lock"),
	0, 0, 0, true, NULL
};


static void dump_guarded_heap_page(void* address, bool doPanic = false);


static void
guarded_heap_segfault_handler(int signal, siginfo_t* signalInfo, void* vregs)
{
	if (signal != SIGSEGV)
		return;

	if (signalInfo->si_code != SEGV_ACCERR) {
		// Not ours.
		panic("generic segfault");
		return;
	}

	dump_guarded_heap_page(signalInfo->si_addr, true);

	exit(-1);
}


static void
guarded_heap_page_protect(guarded_heap_area& area, size_t pageIndex,
	uint32 protection)
{
	addr_t address = area.base + pageIndex * B_PAGE_SIZE;
	mprotect((void*)address, B_PAGE_SIZE, protection);
}


static void
guarded_heap_print_stack_trace(addr_t stackTrace[], size_t depth)
{
	char* imageName;
	char* symbolName;
	void* location;
	bool exactMatch;

	for (size_t i = 0; i < depth; i++) {
		addr_t address = stackTrace[i];

		status_t status = __gRuntimeLoader->get_nearest_symbol_at_address(
			(void*)address, NULL, NULL, &imageName, &symbolName, NULL,
			&location, &exactMatch);
		if (status != B_OK) {
			print_stdout("\t%#" B_PRIxADDR " (lookup failed: %s)\n", address,
				strerror(status));
			continue;
		}

		print_stdout("\t<%s> %s + %#" B_PRIxADDR "%s\n", imageName, symbolName,
			address - (addr_t)location, exactMatch ? "" : " (nearest)");
	}
}


static void
guarded_heap_print_stack_traces(guarded_heap_page& page)
{
	if (page.alloc_stack_trace_depth > 0) {
		print_stdout("alloc stack trace (%" B_PRIuSIZE "):\n",
			page.alloc_stack_trace_depth);
		guarded_heap_print_stack_trace(page.stack_trace,
			page.alloc_stack_trace_depth);
	}

	if (page.free_stack_trace_depth > 0) {
		print_stdout("free stack trace (%" B_PRIuSIZE "):\n",
			page.free_stack_trace_depth);
		guarded_heap_print_stack_trace(
			&page.stack_trace[page.alloc_stack_trace_depth],
			page.free_stack_trace_depth);
	}
}


static size_t
guarded_heap_fill_stack_trace(addr_t stackTrace[], size_t maxDepth,
	size_t skipFrames)
{
	if (maxDepth == 0)
		return 0;

	void** stackBase = tls_address(sStackBaseTLSIndex);
	void** stackEnd = tls_address(sStackEndTLSIndex);
	if (*stackBase == NULL || *stackEnd == NULL) {
		thread_info threadInfo;
		status_t result = get_thread_info(find_thread(NULL), &threadInfo);
		if (result != B_OK)
			return 0;

		*stackBase = (void*)threadInfo.stack_base;
		*stackEnd = (void*)threadInfo.stack_end;
	}

	int32 traceDepth = __arch_get_stack_trace(stackTrace, maxDepth, skipFrames,
		(addr_t)*stackBase, (addr_t)*stackEnd);

	return traceDepth < 0 ? 0 : traceDepth;
}


static void
guarded_heap_page_allocate(guarded_heap_area& area, size_t startPageIndex,
	size_t pagesNeeded, size_t allocationSize, size_t alignment,
	void* allocationBase)
{
	if (pagesNeeded < 2) {
		panic("need to allocate at least 2 pages, one for guard\n");
		return;
	}

	guarded_heap_page* firstPage = NULL;
	for (size_t i = 0; i < pagesNeeded; i++) {
		guarded_heap_page& page = area.pages[startPageIndex + i];
		page.flags = GUARDED_HEAP_PAGE_FLAG_USED;
		if (i == 0) {
			page.allocating_thread = find_thread(NULL);
			page.freeing_thread = -1;
			page.allocation_size = allocationSize;
			page.allocation_base = allocationBase;
			page.alignment = alignment;
			page.flags |= GUARDED_HEAP_PAGE_FLAG_FIRST;
			page.alloc_stack_trace_depth = guarded_heap_fill_stack_trace(
				page.stack_trace, sStackTraceDepth, 2);
			page.free_stack_trace_depth = 0;
			firstPage = &page;
		} else {
			page.allocating_thread = firstPage->allocating_thread;
			page.freeing_thread = -1;
			page.allocation_size = allocationSize;
			page.allocation_base = allocationBase;
			page.alignment = alignment;
			page.alloc_stack_trace_depth = 0;
			page.free_stack_trace_depth = 0;
		}

		list_remove_item(&area.free_list, &page);

		if (i == pagesNeeded - 1) {
			page.flags |= GUARDED_HEAP_PAGE_FLAG_GUARD;
			guarded_heap_page_protect(area, startPageIndex + i, 0);
		} else {
			guarded_heap_page_protect(area, startPageIndex + i,
				B_READ_AREA | B_WRITE_AREA);
		}
	}
}


static void
guarded_heap_free_page(guarded_heap_area& area, size_t pageIndex,
	bool force = false)
{
	guarded_heap_page& page = area.pages[pageIndex];

	if (area.heap->reuse_memory || force)
		page.flags = 0;
	else
		page.flags |= GUARDED_HEAP_PAGE_FLAG_DEAD;

	page.freeing_thread = find_thread(NULL);

	list_add_item(&area.free_list, &page);

	guarded_heap_page_protect(area, pageIndex, 0);
}


static void
guarded_heap_pages_allocated(guarded_heap& heap, size_t pagesAllocated)
{
	atomic_add((int32*)&heap.used_pages, pagesAllocated);
}


static void*
guarded_heap_area_allocate(guarded_heap_area& area, size_t pagesNeeded,
	size_t size, size_t alignment)
{
	if (pagesNeeded > area.page_count - area.used_pages)
		return NULL;

	// We use the free list this way so that the page that has been free for
	// the longest time is allocated. This keeps immediate re-use (that may
	// hide bugs) to a minimum.
	guarded_heap_page* page
		= (guarded_heap_page*)list_get_next_item(&area.free_list, NULL);

	for (; page != NULL;
		page = (guarded_heap_page*)list_get_next_item(&area.free_list, page)) {

		if ((page->flags & GUARDED_HEAP_PAGE_FLAG_USED) != 0)
			continue;

		size_t pageIndex = page - area.pages;
		if (pageIndex > area.page_count - pagesNeeded)
			continue;

		// Candidate, check if we have enough pages going forward
		// (including the guard page).
		bool candidate = true;
		for (size_t j = 1; j < pagesNeeded; j++) {
			if ((area.pages[pageIndex + j].flags & GUARDED_HEAP_PAGE_FLAG_USED)
					!= 0) {
				candidate = false;
				break;
			}
		}

		if (!candidate)
			continue;

		size_t offset = size & (B_PAGE_SIZE - 1);
		void* result = (void*)((area.base + pageIndex * B_PAGE_SIZE
			+ (offset > 0 ? B_PAGE_SIZE - offset : 0)) & ~(alignment - 1));

		guarded_heap_page_allocate(area, pageIndex, pagesNeeded, size,
			alignment, result);

		area.used_pages += pagesNeeded;
		guarded_heap_pages_allocated(*area.heap, pagesNeeded);
		return result;
	}

	return NULL;
}


static bool
guarded_heap_area_init(guarded_heap& heap, area_id id, void* baseAddress,
	size_t size)
{
	guarded_heap_area* area = (guarded_heap_area*)baseAddress;
	area->heap = &heap;
	area->area = id;
	area->size = size;
	area->page_count = area->size / B_PAGE_SIZE;
	area->used_pages = 0;

	size_t pagesNeeded = (sizeof(guarded_heap_area)
		+ area->page_count * sizeof(guarded_heap_page)
		+ B_PAGE_SIZE - 1) / B_PAGE_SIZE;

	area->page_count -= pagesNeeded;
	area->size = area->page_count * B_PAGE_SIZE;
	area->base = (addr_t)baseAddress + pagesNeeded * B_PAGE_SIZE;

	mutex_init(&area->lock, "guarded_heap_area_lock");

	list_init_etc(&area->free_list,
		offsetof(guarded_heap_page, free_list_link));

	for (size_t i = 0; i < area->page_count; i++)
		guarded_heap_free_page(*area, i, true);

	area->next = heap.areas;
	heap.areas = area;
	heap.page_count += area->page_count;

	return true;
}


static bool
guarded_heap_area_create(guarded_heap& heap, size_t size)
{
	for (size_t trySize = size; trySize >= 1 * 1024 * 1024;
			trySize /= 2) {

		void* baseAddress = NULL;
		area_id id = create_area("guarded_heap_area", &baseAddress,
			B_ANY_ADDRESS, trySize, B_NO_LOCK, B_READ_AREA | B_WRITE_AREA);

		if (id < 0)
			continue;

		if (guarded_heap_area_init(heap, id, baseAddress, trySize))
			return true;

		delete_area(id);
	}

	panic("failed to allocate a new heap area");
	return false;
}


static bool
guarded_heap_add_area(guarded_heap& heap, uint32 counter)
{
	WriteLocker areaListWriteLocker(heap.lock);
	if (heap.area_creation_counter != counter)
		return false;

	return guarded_heap_area_create(heap, GUARDED_HEAP_GROW_SIZE);
}


static void*
guarded_heap_allocate_with_area(size_t size, size_t alignment)
{
	size_t infoSpace = alignment >= B_PAGE_SIZE ? B_PAGE_SIZE
		: (sizeof(guarded_heap_page) + alignment - 1) & ~(alignment - 1);

	size_t pagesNeeded = (size + infoSpace + B_PAGE_SIZE - 1) / B_PAGE_SIZE;

	if (alignment > B_PAGE_SIZE)
		pagesNeeded += alignment / B_PAGE_SIZE - 1;

	void* address = NULL;
	area_id area = create_area("guarded_heap_huge_allocation", &address,
		B_ANY_ADDRESS, (pagesNeeded + 1) * B_PAGE_SIZE, B_NO_LOCK,
		B_READ_AREA | B_WRITE_AREA);
	if (area < 0) {
		panic("failed to create area for allocation of %" B_PRIuSIZE " pages",
			pagesNeeded);
		return NULL;
	}

	// We just use a page object
	guarded_heap_page* page = (guarded_heap_page*)address;
	page->flags = GUARDED_HEAP_PAGE_FLAG_USED | GUARDED_HEAP_PAGE_FLAG_FIRST
		| GUARDED_HEAP_PAGE_FLAG_AREA;
	page->allocation_size = size;
	page->allocation_base = (void*)(((addr_t)address
		+ pagesNeeded * B_PAGE_SIZE - size) & ~(alignment - 1));
	page->alignment = alignment;
	page->allocating_thread = find_thread(NULL);
	page->freeing_thread = -1;
	page->alloc_stack_trace_depth = guarded_heap_fill_stack_trace(
		page->stack_trace, sStackTraceDepth, 2);
	page->free_stack_trace_depth = 0;

	if (alignment <= B_PAGE_SIZE) {
		// Protect just the guard page.
		mprotect((void*)((addr_t)address + pagesNeeded * B_PAGE_SIZE),
			B_PAGE_SIZE, 0);
	} else {
		// Protect empty pages before the allocation start...
		addr_t protectedStart = (addr_t)address + B_PAGE_SIZE;
		size_t protectedSize = (addr_t)page->allocation_base - protectedStart;
		if (protectedSize > 0)
			mprotect((void*)protectedStart, protectedSize, 0);

		// ... and after allocation end.
		size_t allocatedPages = (size + B_PAGE_SIZE - 1) / B_PAGE_SIZE;
		protectedStart = (addr_t)page->allocation_base
			+ allocatedPages * B_PAGE_SIZE;
		protectedSize = (addr_t)address + (pagesNeeded + 1) * B_PAGE_SIZE
			- protectedStart;

		// There is at least the guard page.
		mprotect((void*)protectedStart, protectedSize, 0);
	}

	return page->allocation_base;
}


static void*
guarded_heap_allocate(guarded_heap& heap, size_t size, size_t alignment)
{
	if (alignment == 0)
		alignment = 1;

	size_t pagesNeeded = (size + B_PAGE_SIZE - 1) / B_PAGE_SIZE + 1;
	if (alignment > B_PAGE_SIZE
		|| pagesNeeded * B_PAGE_SIZE >= GUARDED_HEAP_AREA_USE_THRESHOLD) {
		// Don't bother, use an area directly. Since it will also fault once
		// it is deleted, that fits our model quite nicely.
		return guarded_heap_allocate_with_area(size, alignment);
	}

	void* result = NULL;

	ReadLocker areaListReadLocker(heap.lock);
	for (guarded_heap_area* area = heap.areas; area != NULL;
			area = area->next) {

		MutexLocker locker(area->lock);
		result = guarded_heap_area_allocate(*area, pagesNeeded, size,
			alignment);
		if (result != NULL)
			break;
	}

	uint32 counter = heap.area_creation_counter;
	areaListReadLocker.Unlock();

	if (result == NULL) {
		guarded_heap_add_area(heap, counter);
		return guarded_heap_allocate(heap, size, alignment);
	}

	if (result == NULL)
		panic("ran out of memory");

	return result;
}


static guarded_heap_area*
guarded_heap_get_locked_area_for(guarded_heap& heap, void* address)
{
	ReadLocker areaListReadLocker(heap.lock);
	for (guarded_heap_area* area = heap.areas; area != NULL;
			area = area->next) {
		if ((addr_t)address < area->base)
			continue;

		if ((addr_t)address >= area->base + area->size)
			continue;

		mutex_lock(&area->lock);
		return area;
	}

	return NULL;
}


static size_t
guarded_heap_area_page_index_for(guarded_heap_area& area, void* address)
{
	size_t pageIndex = ((addr_t)address - area.base) / B_PAGE_SIZE;
	guarded_heap_page& page = area.pages[pageIndex];
	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_USED) == 0) {
		panic("tried to free %p which points at page %" B_PRIuSIZE
			" which is not marked in use", address, pageIndex);
		return area.page_count;
	}

	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_GUARD) != 0) {
		panic("tried to free %p which points at page %" B_PRIuSIZE
			" which is a guard page", address, pageIndex);
		return area.page_count;
	}

	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_FIRST) == 0) {
		panic("tried to free %p which points at page %" B_PRIuSIZE
			" which is not an allocation first page", address, pageIndex);
		return area.page_count;
	}

	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_DEAD) != 0) {
		panic("tried to free %p which points at page %" B_PRIuSIZE
			" which is a dead page", address, pageIndex);
		return area.page_count;
	}

	return pageIndex;
}


static bool
guarded_heap_area_free(guarded_heap_area& area, void* address)
{
	size_t pageIndex = guarded_heap_area_page_index_for(area, address);
	if (pageIndex >= area.page_count)
		return false;

	size_t pagesFreed = 0;
	guarded_heap_page* page = &area.pages[pageIndex];
	while ((page->flags & GUARDED_HEAP_PAGE_FLAG_GUARD) == 0) {
		// Mark the allocation page as free.
		guarded_heap_free_page(area, pageIndex);
		if (pagesFreed == 0 && sStackTraceDepth > 0) {
			size_t freeEntries
				= kMaxStackTraceDepth - page->alloc_stack_trace_depth;

			page->free_stack_trace_depth = guarded_heap_fill_stack_trace(
				&page->stack_trace[page->alloc_stack_trace_depth],
				min_c(freeEntries, sStackTraceDepth), 2);
		}

		pagesFreed++;
		pageIndex++;
		page = &area.pages[pageIndex];
	}

	// Mark the guard page as free as well.
	guarded_heap_free_page(area, pageIndex);
	pagesFreed++;

	if (area.heap->reuse_memory) {
		area.used_pages -= pagesFreed;
		atomic_add((int32*)&area.heap->used_pages, -pagesFreed);
	}

	return true;
}


static guarded_heap_page*
guarded_heap_area_allocation_for(void* address, area_id& allocationArea)
{
	allocationArea = area_for(address);
	if (allocationArea < 0)
		return NULL;

	area_info areaInfo;
	if (get_area_info(allocationArea, &areaInfo) != B_OK)
		return NULL;

	guarded_heap_page* page = (guarded_heap_page*)areaInfo.address;
	if (page->flags != (GUARDED_HEAP_PAGE_FLAG_USED
			| GUARDED_HEAP_PAGE_FLAG_FIRST | GUARDED_HEAP_PAGE_FLAG_AREA)) {
		return NULL;
	}

	if (page->allocation_base != address)
		return NULL;
	if (page->allocation_size >= areaInfo.size)
		return NULL;

	return page;
}


static bool
guarded_heap_free_area_allocation(void* address)
{
	area_id allocationArea;
	if (guarded_heap_area_allocation_for(address, allocationArea) == NULL)
		return false;

	delete_area(allocationArea);
	return true;
}


static bool
guarded_heap_free(void* address)
{
	if (address == NULL)
		return true;

	guarded_heap_area* area = guarded_heap_get_locked_area_for(sGuardedHeap,
		address);
	if (area == NULL)
		return guarded_heap_free_area_allocation(address);

	MutexLocker locker(area->lock, true);
	return guarded_heap_area_free(*area, address);
}


static void*
guarded_heap_realloc(void* address, size_t newSize)
{
	guarded_heap_area* area = guarded_heap_get_locked_area_for(sGuardedHeap,
		address);

	size_t oldSize;
	area_id allocationArea = -1;
	if (area != NULL) {
		MutexLocker locker(area->lock, true);
		size_t pageIndex = guarded_heap_area_page_index_for(*area, address);
		if (pageIndex >= area->page_count)
			return NULL;

		guarded_heap_page& page = area->pages[pageIndex];
		oldSize = page.allocation_size;
		locker.Unlock();
	} else {
		guarded_heap_page* page = guarded_heap_area_allocation_for(address,
			allocationArea);
		if (page == NULL)
			return NULL;

		oldSize = page->allocation_size;
	}

	if (oldSize == newSize)
		return address;

	void* newBlock = guarded_heap_allocate(sGuardedHeap, newSize,
		sDefaultAlignment);
	if (newBlock == NULL)
		return NULL;

	memcpy(newBlock, address, min_c(oldSize, newSize));

	if (allocationArea >= 0)
		delete_area(allocationArea);
	else {
		MutexLocker locker(area->lock);
		guarded_heap_area_free(*area, address);
	}

	return newBlock;
}


// #pragma mark - Debugger commands


static void
dump_guarded_heap_page(guarded_heap_page& page)
{
	printf("flags:");
	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_USED) != 0)
		printf(" used");
	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_FIRST) != 0)
		printf(" first");
	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_GUARD) != 0)
		printf(" guard");
	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_DEAD) != 0)
		printf(" dead");
	printf("\n");

	printf("allocation size: %" B_PRIuSIZE "\n", page.allocation_size);
	printf("allocation base: %p\n", page.allocation_base);
	printf("alignment: %" B_PRIuSIZE "\n", page.alignment);
	printf("allocating thread: %" B_PRId32 "\n", page.allocating_thread);
	printf("freeing thread: %" B_PRId32 "\n", page.freeing_thread);
}


static void
dump_guarded_heap_page(void* address, bool doPanic)
{
	// Find the area that contains this page.
	guarded_heap_area* area = NULL;
	for (guarded_heap_area* candidate = sGuardedHeap.areas; candidate != NULL;
			candidate = candidate->next) {

		if ((addr_t)address < candidate->base)
			continue;
		if ((addr_t)address >= candidate->base + candidate->size)
			continue;

		area = candidate;
		break;
	}

	if (area == NULL) {
		panic("didn't find area for address %p\n", address);
		return;
	}

	size_t pageIndex = ((addr_t)address - area->base) / B_PAGE_SIZE;
	guarded_heap_page& page = area->pages[pageIndex];
	dump_guarded_heap_page(page);

	// Find the first page and dump the stack traces.
	for (ssize_t candidateIndex = (ssize_t)pageIndex;
			sStackTraceDepth > 0 && candidateIndex >= 0; candidateIndex--) {
		guarded_heap_page& candidate = area->pages[candidateIndex];
		if ((candidate.flags & GUARDED_HEAP_PAGE_FLAG_FIRST) == 0)
			continue;

		guarded_heap_print_stack_traces(candidate);
		break;
	}

	if (doPanic) {
		// Note: we do this the complicated way because we absolutely don't
		// want any character conversion to happen that might provoke other
		// segfaults in the locale backend. Therefore we avoid using any string
		// formats, resulting in the mess below.

#define DO_PANIC(state) \
	panic("thread %" B_PRId32 " tried accessing address %p which is " \
		state " (base: 0x%" B_PRIxADDR ", size: %" B_PRIuSIZE \
		", alignment: %" B_PRIuSIZE ", allocated by thread: %" \
		B_PRId32 ", freed by thread: %" B_PRId32 ")", \
		find_thread(NULL), address, page.allocation_base, \
		page.allocation_size, page.alignment, page.allocating_thread, \
		page.freeing_thread)

		if ((page.flags & GUARDED_HEAP_PAGE_FLAG_USED) == 0)
			DO_PANIC("not allocated");
		else if ((page.flags & GUARDED_HEAP_PAGE_FLAG_GUARD) != 0)
			DO_PANIC("a guard page");
		else if ((page.flags & GUARDED_HEAP_PAGE_FLAG_DEAD) != 0)
			DO_PANIC("a dead page");
		else
			DO_PANIC("in an unknown state");

#undef DO_PANIC
	}
}


static void
dump_guarded_heap_area(guarded_heap_area& area)
{
	printf("guarded heap area: %p\n", &area);
	printf("next heap area: %p\n", area.next);
	printf("guarded heap: %p\n", area.heap);
	printf("area id: %" B_PRId32 "\n", area.area);
	printf("base: 0x%" B_PRIxADDR "\n", area.base);
	printf("size: %" B_PRIuSIZE "\n", area.size);
	printf("page count: %" B_PRIuSIZE "\n", area.page_count);
	printf("used pages: %" B_PRIuSIZE "\n", area.used_pages);
	printf("lock: %p\n", &area.lock);

	size_t freeCount = 0;
	void* item = list_get_next_item(&area.free_list, NULL);
	while (item != NULL) {
		freeCount++;

		if ((((guarded_heap_page*)item)->flags & GUARDED_HEAP_PAGE_FLAG_USED)
				!= 0) {
			printf("free list broken, page %p not actually free\n", item);
		}

		item = list_get_next_item(&area.free_list, item);
	}

	printf("free_list: %p (%" B_PRIuSIZE " free)\n", &area.free_list,
		freeCount);

	freeCount = 0;
	size_t runLength = 0;
	size_t longestRun = 0;
	for (size_t i = 0; i <= area.page_count; i++) {
		guarded_heap_page& page = area.pages[i];
		if (i == area.page_count
			|| (page.flags & GUARDED_HEAP_PAGE_FLAG_USED) != 0) {
			freeCount += runLength;
			if (runLength > longestRun)
				longestRun = runLength;
			runLength = 0;
			continue;
		}

		runLength = 1;
		for (size_t j = 1; j < area.page_count - i; j++) {
			if ((area.pages[i + j].flags & GUARDED_HEAP_PAGE_FLAG_USED) != 0)
				break;

			runLength++;
		}

		i += runLength - 1;
	}

	printf("longest free run: %" B_PRIuSIZE " (%" B_PRIuSIZE " free)\n",
		longestRun, freeCount);

	printf("pages: %p\n", area.pages);
}


static void
dump_guarded_heap(guarded_heap& heap)
{
	printf("guarded heap: %p\n", &heap);
	printf("rw lock: %p\n", &heap.lock);
	printf("page count: %" B_PRIuSIZE "\n", heap.page_count);
	printf("used pages: %" B_PRIuSIZE "\n", heap.used_pages);
	printf("area creation counter: %" B_PRIu32 "\n",
		heap.area_creation_counter);

	size_t areaCount = 0;
	guarded_heap_area* area = heap.areas;
	while (area != NULL) {
		areaCount++;
		area = area->next;
	}

	printf("areas: %p (%" B_PRIuSIZE ")\n", heap.areas, areaCount);
}


static void
dump_allocations(guarded_heap& heap, bool statsOnly, thread_id thread)
{
	WriteLocker heapLocker(heap.lock);

	size_t allocationCount = 0;
	size_t allocationSize = 0;
	for (guarded_heap_area* area = heap.areas; area != NULL;
			area = area->next) {

		MutexLocker areaLocker(area->lock);
		for (size_t i = 0; i < area->page_count; i++) {
			guarded_heap_page& page = area->pages[i];
			if ((page.flags & GUARDED_HEAP_PAGE_FLAG_FIRST) == 0
				|| (page.flags & GUARDED_HEAP_PAGE_FLAG_DEAD) != 0) {
				continue;
			}

			if (thread >= 0 && thread != page.allocating_thread)
				continue;

			allocationCount++;
			allocationSize += page.allocation_size;

			if (statsOnly)
				continue;

			print_stdout("allocation: base: %p; size: %" B_PRIuSIZE
				"; thread: %" B_PRId32 "; alignment: %" B_PRIuSIZE "\n",
				page.allocation_base, page.allocation_size,
				page.allocating_thread, page.alignment);

			guarded_heap_print_stack_trace(page.stack_trace,
				page.alloc_stack_trace_depth);
		}
	}

	print_stdout("total allocations: %" B_PRIuSIZE ", %" B_PRIuSIZE " bytes\n",
		allocationCount, allocationSize);
}


static void
dump_allocations_full()
{
	dump_allocations(sGuardedHeap, false, -1);
}


// #pragma mark - Heap Debug API


static void
guarded_heap_set_memory_reuse(bool enabled)
{
	sGuardedHeap.reuse_memory = enabled;
}


static void
guarded_heap_set_debugger_calls(bool enabled)
{
	sDebuggerCalls = enabled;
}


static void
guarded_heap_set_default_alignment(size_t defaultAlignment)
{
	sDefaultAlignment = defaultAlignment;
}


static void
guarded_heap_dump_allocations(bool statsOnly, thread_id thread)
{
	dump_allocations(sGuardedHeap, statsOnly, thread);
}


static void
guarded_heap_dump_heaps(bool dumpAreas, bool dumpBins)
{
	WriteLocker heapLocker(sGuardedHeap.lock);
	dump_guarded_heap(sGuardedHeap);
	if (!dumpAreas)
		return;

	for (guarded_heap_area* area = sGuardedHeap.areas; area != NULL;
			area = area->next) {
		MutexLocker areaLocker(area->lock);
		dump_guarded_heap_area(*area);

		if (!dumpBins)
			continue;

		for (size_t i = 0; i < area->page_count; i++) {
			dump_guarded_heap_page(area->pages[i]);
			if ((area->pages[i].flags & GUARDED_HEAP_PAGE_FLAG_FIRST) != 0)
				guarded_heap_print_stack_traces(area->pages[i]);
		}
	}
}


static status_t
guarded_heap_set_dump_allocations_on_exit(bool enabled)
{
	sDumpAllocationsOnExit = enabled;
	return B_OK;
}


static status_t
guarded_heap_set_stack_trace_depth(size_t stackTraceDepth)
{
	if (stackTraceDepth == 0) {
		sStackTraceDepth = 0;
		return B_OK;
	}

	// This is rather wasteful, but these are going to be filled lazily by each
	// thread on alloc/free. Therefore we cannot use a dynamic allocation and
	// just store a pointer to it. Since we only need to store two addresses,
	// we use two TLS slots and set them to point at the stack base/end.
	if (sStackBaseTLSIndex < 0) {
		sStackBaseTLSIndex = tls_allocate();
		if (sStackBaseTLSIndex < 0)
			return sStackBaseTLSIndex;
	}

	if (sStackEndTLSIndex < 0) {
		sStackEndTLSIndex = tls_allocate();
		if (sStackEndTLSIndex < 0)
			return sStackEndTLSIndex;
	}

	sStackTraceDepth = min_c(stackTraceDepth, kMaxStackTraceDepth);
	return B_OK;
}


// #pragma mark - Init


static void
init_after_fork()
{
	// The memory has actually been copied (or is in a copy on write state),
	// but the area ids have changed.
	for (guarded_heap_area* area = sGuardedHeap.areas; area != NULL;
			area = area->next) {
		area->area = area_for(area);
		if (area->area < 0)
			panic("failed to find area for heap area %p after fork", area);
	}
}


static status_t
guarded_heap_init(void)
{
	if (!guarded_heap_area_create(sGuardedHeap, GUARDED_HEAP_INITIAL_SIZE))
		return B_ERROR;

	// Install a segfault handler so we can print some info before going down.
	struct sigaction action;
	action.sa_handler = (__sighandler_t)guarded_heap_segfault_handler;
	action.sa_flags = SA_SIGINFO;
	action.sa_userdata = NULL;
	sigemptyset(&action.sa_mask);
	sigaction(SIGSEGV, &action, NULL);

	atfork(&init_after_fork);
		// Note: Needs malloc(). Hence we need to be fully initialized.
		// TODO: We should actually also install a hook that is called before
		// fork() is being executed. In a multithreaded app it would need to
		// acquire *all* allocator locks, so that we don't fork() an
		// inconsistent state.

	return B_OK;
}


static void
guarded_heap_terminate_after()
{
	if (sDumpAllocationsOnExit)
		dump_allocations_full();
}


// #pragma mark - Public API


static void*
heap_memalign(size_t alignment, size_t size)
{
	if (size == 0)
		size = 1;

	return guarded_heap_allocate(sGuardedHeap, size, alignment);
}


static void*
heap_malloc(size_t size)
{
	return heap_memalign(sDefaultAlignment, size);
}


static void
heap_free(void* address)
{
	if (!guarded_heap_free(address))
		panic("free failed for address %p", address);
}


static void*
heap_realloc(void* address, size_t newSize)
{
	if (newSize == 0) {
		free(address);
		return NULL;
	}

	if (address == NULL)
		return heap_memalign(sDefaultAlignment, newSize);

	return guarded_heap_realloc(address, newSize);
}


heap_implementation __mallocGuardedHeap = {
	guarded_heap_init,
	guarded_heap_terminate_after,

	heap_memalign,
	heap_malloc,
	heap_free,
	heap_realloc,

	NULL,	// calloc
	NULL,	// valloc
	NULL,	// posix_memalign

	NULL,	// start_wall_checking
	NULL,	// stop_wall_checking
	NULL,	// set_paranoid_validation

	guarded_heap_set_memory_reuse,
	guarded_heap_set_debugger_calls,
	guarded_heap_set_default_alignment,

	NULL,	// validate_heaps
	NULL,	// validate_walls

	guarded_heap_dump_allocations,
	guarded_heap_dump_heaps,
	heap_malloc,

	NULL,	// get_allocation_info

	guarded_heap_set_dump_allocations_on_exit,
	guarded_heap_set_stack_trace_depth
};
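

// Illustration (not part of the build): when this guarded heap is the active
// malloc implementation (typically selected through libroot_debug's
// MALLOC_DEBUG settings; see the malloc_debug_api sources for the exact
// switch), every small allocation is placed so that it ends directly in front
// of an mprotect()ed guard page, and freed pages are protected again. An
// out-of-bounds or use-after-free access therefore faults immediately and the
// SIGSEGV handler above dumps the page info, allocating/freeing thread and
// stack traces:
//
//	uint8* buffer = (uint8*)malloc(32);
//	buffer[32] = 1;
//		// One byte past the allocation lands on the guard page -> fault.
//	free(buffer);
//	buffer[0] = 1;
//		// The freed page was re-protected -> fault as well (the FIFO free
//		// list above delays re-use of these pages to keep this effective).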