src/system/kernel/guarded_heap.cpp
/*
 * Copyright 2011, Michael Lotz <mmlr@mlotz.ch>.
 * Distributed under the terms of the MIT License.
 */
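
// Guarded heap: every allocation occupies one or more dedicated pages
// followed by an unmapped guard page, so overruns and use-after-free fault
// immediately. Freed pages are protected and reused oldest-first.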

#include <stdio.h>
#include <string.h>

#include <arch/debug.h>
#include <elf.h>
#include <debug.h>
#include <heap.h>
#include <malloc.h>
#include <slab/Slab.h>
#include <team.h>
#include <tracing.h>
#include <util/list.h>
#include <util/AutoLock.h>
#include <vm/vm.h>

#if USE_GUARDED_HEAP_FOR_MALLOC


#define GUARDED_HEAP_PAGE_FLAG_USED		0x01
#define GUARDED_HEAP_PAGE_FLAG_FIRST	0x02
#define GUARDED_HEAP_PAGE_FLAG_GUARD	0x04
#define GUARDED_HEAP_PAGE_FLAG_DEAD		0x08

#define GUARDED_HEAP_STACK_TRACE_DEPTH	0
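
// Setting GUARDED_HEAP_STACK_TRACE_DEPTH > 0 records an allocation/free stack
// trace in each page descriptor (see guarded_heap_page below), at the cost of
// correspondingly larger per-page bookkeeping.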

struct guarded_heap;

struct guarded_heap_page {
	uint8				flags;
	size_t				allocation_size;
	void*				allocation_base;
	size_t				alignment;
	team_id				team;
	thread_id			thread;
#if GUARDED_HEAP_STACK_TRACE_DEPTH > 0
	size_t				stack_trace_depth;
	addr_t				stack_trace[GUARDED_HEAP_STACK_TRACE_DEPTH];
#endif
	list_link			free_list_link;
};
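
// A guarded heap area backs one kernel area; the header below and its page
// descriptor array live in the first pages of that area itself (see
// guarded_heap_area_init()).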

struct guarded_heap_area {
	guarded_heap*		heap;
	guarded_heap_area*	next;
	area_id				area;
	addr_t				base;
	size_t				size;
	size_t				page_count;
	size_t				used_pages;
	void*				protection_cookie;
	mutex				lock;
	struct list			free_list;
	guarded_heap_page	pages[0];
};

struct guarded_heap {
	rw_lock				lock;
	size_t				page_count;
	size_t				used_pages;
	int32				area_creation_counter;
	guarded_heap_area*	areas;
};

static guarded_heap sGuardedHeap = {
	RW_LOCK_INITIALIZER("guarded heap lock"),
	0, 0, 0, NULL
};

#if GUARDED_HEAP_TRACING

namespace GuardedHeapTracing {


class GuardedHeapTraceEntry
	: public TRACE_ENTRY_SELECTOR(GUARDED_HEAP_TRACING_STACK_TRACE) {
public:
	GuardedHeapTraceEntry(guarded_heap* heap)
		:
		TraceEntryBase(GUARDED_HEAP_TRACING_STACK_TRACE, 0, true),
		fHeap(heap)
	{
	}

protected:
	guarded_heap*	fHeap;
};


class Allocate : public GuardedHeapTraceEntry {
public:
	Allocate(guarded_heap* heap, void* pageBase, uint32 flags)
		:
		GuardedHeapTraceEntry(heap),
		fPageBase(pageBase),
		fFlags(flags)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("guarded heap allocate: heap: %p; page: %p; "
			"flags:%s%s%s%s", fHeap, fPageBase,
			(fFlags & GUARDED_HEAP_PAGE_FLAG_USED) != 0 ? " used" : "",
			(fFlags & GUARDED_HEAP_PAGE_FLAG_FIRST) != 0 ? " first" : "",
			(fFlags & GUARDED_HEAP_PAGE_FLAG_GUARD) != 0 ? " guard" : "",
			(fFlags & GUARDED_HEAP_PAGE_FLAG_DEAD) != 0 ? " dead" : "");
	}

private:
	void*	fPageBase;
	uint32	fFlags;
};


class Free : public GuardedHeapTraceEntry {
public:
	Free(guarded_heap* heap, void* pageBase)
		:
		GuardedHeapTraceEntry(heap),
		fPageBase(pageBase)
	{
		Initialized();
	}

	virtual void AddDump(TraceOutput& out)
	{
		out.Print("guarded heap free: heap: %p; page: %p", fHeap,
			fPageBase);
	}

private:
	void*	fPageBase;
};


}	// namespace GuardedHeapTracing

#	define T(x)	new(std::nothrow) GuardedHeapTracing::x
#else
#	define T(x)
#endif	// GUARDED_HEAP_TRACING
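
// Changing page protection requires a valid area. The initial heap area is
// set up with an area id of -1 and only gets its protection applied once
// heap_init_post_sem() has looked up the real area.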

static void
guarded_heap_page_protect(guarded_heap_area& area, size_t pageIndex,
	uint32 protection)
{
	if (area.area < 0)
		return;

	addr_t address = area.base + pageIndex * B_PAGE_SIZE;
	vm_set_kernel_area_debug_protection(area.protection_cookie, (void*)address,
		B_PAGE_SIZE, protection);
}

static void
guarded_heap_page_allocate(guarded_heap_area& area, size_t startPageIndex,
	size_t pagesNeeded, size_t allocationSize, size_t alignment,
	void* allocationBase)
{
	if (pagesNeeded < 2) {
		panic("need to allocate at least 2 pages, one for guard\n");
		return;
	}

	guarded_heap_page* firstPage = NULL;
	for (size_t i = 0; i < pagesNeeded; i++) {
		guarded_heap_page& page = area.pages[startPageIndex + i];
		page.flags = GUARDED_HEAP_PAGE_FLAG_USED;
		if (i == 0) {
			page.team = (gKernelStartup ? 0 : team_get_current_team_id());
			page.thread = find_thread(NULL);
#if GUARDED_HEAP_STACK_TRACE_DEPTH > 0
			page.stack_trace_depth = arch_debug_get_stack_trace(
				page.stack_trace, GUARDED_HEAP_STACK_TRACE_DEPTH, 0, 4,
				STACK_TRACE_KERNEL);
#endif
			page.allocation_size = allocationSize;
			page.allocation_base = allocationBase;
			page.alignment = alignment;
			page.flags |= GUARDED_HEAP_PAGE_FLAG_FIRST;
			firstPage = &page;
		} else {
			page.team = firstPage->team;
			page.thread = firstPage->thread;
#if GUARDED_HEAP_STACK_TRACE_DEPTH > 0
			page.stack_trace_depth = 0;
#endif
			page.allocation_size = allocationSize;
			page.allocation_base = allocationBase;
			page.alignment = alignment;
		}

		list_remove_item(&area.free_list, &page);

		if (i == pagesNeeded - 1) {
			page.flags |= GUARDED_HEAP_PAGE_FLAG_GUARD;
			guarded_heap_page_protect(area, startPageIndex + i, 0);
		} else {
			guarded_heap_page_protect(area, startPageIndex + i,
				B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
		}

		T(Allocate(area.heap,
			(void*)(area.base + (startPageIndex + i) * B_PAGE_SIZE),
			page.flags));
	}
}
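
// With DEBUG_GUARDED_HEAP_DISABLE_MEMORY_REUSE set, freed pages are marked
// dead instead of being cleared, so they keep their used flag, are never
// handed out again and later accesses keep faulting.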

static void
guarded_heap_free_page(guarded_heap_area& area, size_t pageIndex,
	bool force = false)
{
	guarded_heap_page& page = area.pages[pageIndex];

#if DEBUG_GUARDED_HEAP_DISABLE_MEMORY_REUSE
	if (force || area.area < 0)
		page.flags = 0;
	else
		page.flags |= GUARDED_HEAP_PAGE_FLAG_DEAD;
#else
	page.flags = 0;
#endif

	page.allocation_size = 0;
	page.team = (gKernelStartup ? 0 : team_get_current_team_id());
	page.thread = find_thread(NULL);

#if GUARDED_HEAP_STACK_TRACE_DEPTH > 0
	page.stack_trace_depth = arch_debug_get_stack_trace(page.stack_trace,
		GUARDED_HEAP_STACK_TRACE_DEPTH, 0, 3, STACK_TRACE_KERNEL);
#endif

	list_add_item(&area.free_list, &page);

	guarded_heap_page_protect(area, pageIndex, 0);

	T(Free(area.heap, (void*)(area.base + pageIndex * B_PAGE_SIZE)));
}

static bool
guarded_heap_pages_allocated(guarded_heap& heap, size_t pagesAllocated)
{
	return (atomic_add((int32*)&heap.used_pages, pagesAllocated)
			+ pagesAllocated)
		>= heap.page_count - HEAP_GROW_SIZE / B_PAGE_SIZE / 2;
}
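
// An allocation needs ceil(size / B_PAGE_SIZE) pages plus one guard page. The
// returned address is placed so that the allocation ends right at the guard
// page (rounded down to the requested alignment), making overruns fault.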

static void*
guarded_heap_area_allocate(guarded_heap_area& area, size_t size,
	size_t alignment, uint32 flags, bool& grow)
{
	if (alignment > B_PAGE_SIZE) {
		panic("alignment of %" B_PRIuSIZE " not supported", alignment);
		return NULL;
	}

	size_t pagesNeeded = (size + B_PAGE_SIZE - 1) / B_PAGE_SIZE + 1;
	if (pagesNeeded > area.page_count - area.used_pages)
		return NULL;

	if (pagesNeeded > area.page_count)
		return NULL;

	// We use the free list this way so that the page that has been free for
	// the longest time is allocated. This keeps immediate re-use (that may
	// hide bugs) to a minimum.
	guarded_heap_page* page
		= (guarded_heap_page*)list_get_first_item(&area.free_list);

	for (; page != NULL;
		page = (guarded_heap_page*)list_get_next_item(&area.free_list, page)) {

		if ((page->flags & GUARDED_HEAP_PAGE_FLAG_USED) != 0)
			continue;

		size_t pageIndex = page - area.pages;
		if (pageIndex > area.page_count - pagesNeeded)
			continue;

		// Candidate, check if we have enough pages going forward
		// (including the guard page).
		bool candidate = true;
		for (size_t j = 1; j < pagesNeeded; j++) {
			if ((area.pages[pageIndex + j].flags & GUARDED_HEAP_PAGE_FLAG_USED)
					!= 0) {
				candidate = false;
				break;
			}
		}

		if (!candidate)
			continue;

		if (alignment == 0)
			alignment = 1;

		size_t offset = size & (B_PAGE_SIZE - 1);
		void* result = (void*)((area.base + pageIndex * B_PAGE_SIZE
			+ (offset > 0 ? B_PAGE_SIZE - offset : 0)) & ~(alignment - 1));

		guarded_heap_page_allocate(area, pageIndex, pagesNeeded, size,
			alignment, result);

		area.used_pages += pagesNeeded;
		grow = guarded_heap_pages_allocated(*area.heap, pagesNeeded);
		return result;
	}

	return NULL;
}
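
// Sets up a guarded_heap_area in the given memory: the header and the page
// descriptor array consume the leading pages, the remainder becomes
// allocatable pages that all start out on the free list.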

static bool
guarded_heap_area_init(guarded_heap& heap, area_id id, void* baseAddress,
	size_t size, uint32 flags)
{
	guarded_heap_area* area = (guarded_heap_area*)baseAddress;
	area->heap = &heap;
	area->area = id;
	area->size = size;
	area->page_count = area->size / B_PAGE_SIZE;
	area->used_pages = 0;

	size_t pagesNeeded = (sizeof(guarded_heap_area)
		+ area->page_count * sizeof(guarded_heap_page)
		+ B_PAGE_SIZE - 1) / B_PAGE_SIZE;

	area->page_count -= pagesNeeded;
	area->size = area->page_count * B_PAGE_SIZE;
	area->base = (addr_t)baseAddress + pagesNeeded * B_PAGE_SIZE;

	if (area->area >= 0 && vm_prepare_kernel_area_debug_protection(area->area,
			&area->protection_cookie) != B_OK) {
		return false;
	}

	mutex_init(&area->lock, "guarded_heap_area_lock");

	list_init_etc(&area->free_list,
		offsetof(guarded_heap_page, free_list_link));

	for (size_t i = 0; i < area->page_count; i++)
		guarded_heap_free_page(*area, i, true);

	WriteLocker areaListWriteLocker(heap.lock);
	area->next = heap.areas;
	heap.areas = area;
	heap.page_count += area->page_count;

	return true;
}

static bool
guarded_heap_area_create(guarded_heap& heap, uint32 flags)
{
	for (size_t trySize = HEAP_GROW_SIZE; trySize >= 1 * 1024 * 1024;
		trySize /= 2) {

		void* baseAddress = NULL;
		area_id id = create_area("guarded_heap_area", &baseAddress,
			B_ANY_KERNEL_ADDRESS, trySize, B_FULL_LOCK,
			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);

		if (id < 0)
			continue;

		if (guarded_heap_area_init(heap, id, baseAddress, trySize, flags))
			return true;

		delete_area(id);
	}

	panic("failed to allocate a new heap area");
	return false;
}

static bool
guarded_heap_add_area(guarded_heap& heap, int32 counter, uint32 flags)
{
	if ((flags & (HEAP_DONT_LOCK_KERNEL_SPACE | HEAP_DONT_WAIT_FOR_MEMORY))
			!= 0) {
		return false;
	}

	if (atomic_test_and_set(&heap.area_creation_counter,
			counter + 1, counter) == counter) {
		return guarded_heap_area_create(heap, flags);
	}

	return false;
}
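
// Tries each existing area first; if none can satisfy the request, or usage
// has crossed the grow threshold, a new area is added. The creation counter
// and atomic_test_and_set() ensure only one thread actually creates it.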

static void*
guarded_heap_allocate(guarded_heap& heap, size_t size, size_t alignment,
	uint32 flags)
{
	bool grow = false;
	void* result = NULL;
	ReadLocker areaListReadLocker(heap.lock);
	for (guarded_heap_area* area = heap.areas; area != NULL;
			area = area->next) {

		MutexLocker locker(area->lock);
		result = guarded_heap_area_allocate(*area, size, alignment, flags,
			grow);
		if (result != NULL)
			break;
	}

	int32 counter = atomic_get(&heap.area_creation_counter);
	areaListReadLocker.Unlock();

	if (result == NULL || grow) {
		bool added = guarded_heap_add_area(heap, counter, flags);
		if (result == NULL && added)
			return guarded_heap_allocate(heap, size, alignment, flags);
	}

	if (result == NULL)
		panic("ran out of memory");

	return result;
}

static guarded_heap_area*
guarded_heap_get_locked_area_for(guarded_heap& heap, void* address)
{
	ReadLocker areaListReadLocker(heap.lock);
	for (guarded_heap_area* area = heap.areas; area != NULL;
			area = area->next) {
		if ((addr_t)address < area->base)
			continue;

		if ((addr_t)address >= area->base + area->size)
			continue;

		mutex_lock(&area->lock);
		return area;
	}

	panic("guarded heap area for address %p not found", address);
	return NULL;
}

static size_t
guarded_heap_area_page_index_for(guarded_heap_area& area, void* address)
{
	size_t pageIndex = ((addr_t)address - area.base) / B_PAGE_SIZE;
	guarded_heap_page& page = area.pages[pageIndex];
	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_USED) == 0) {
		panic("tried to free %p which points at page %" B_PRIuSIZE
			" which is not marked in use", address, pageIndex);
		return area.page_count;
	}

	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_GUARD) != 0) {
		panic("tried to free %p which points at page %" B_PRIuSIZE
			" which is a guard page", address, pageIndex);
		return area.page_count;
	}

	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_FIRST) == 0) {
		panic("tried to free %p which points at page %" B_PRIuSIZE
			" which is not an allocation first page", address, pageIndex);
		return area.page_count;
	}

	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_DEAD) != 0) {
		panic("tried to free %p which points at page %" B_PRIuSIZE
			" which is a dead page", address, pageIndex);
		return area.page_count;
	}

	return pageIndex;
}

static void
guarded_heap_area_free(guarded_heap_area& area, void* address, uint32 flags)
{
	size_t pageIndex = guarded_heap_area_page_index_for(area, address);
	if (pageIndex >= area.page_count)
		return;

	size_t pagesFreed = 0;
	guarded_heap_page* page = &area.pages[pageIndex];
	while ((page->flags & GUARDED_HEAP_PAGE_FLAG_GUARD) == 0) {
		// Mark the allocation page as free.
		guarded_heap_free_page(area, pageIndex);

		pagesFreed++;
		pageIndex++;
		page = &area.pages[pageIndex];
	}

	// Mark the guard page as free as well.
	guarded_heap_free_page(area, pageIndex);
	pagesFreed++;

#if !DEBUG_GUARDED_HEAP_DISABLE_MEMORY_REUSE
	area.used_pages -= pagesFreed;
	atomic_add((int32*)&area.heap->used_pages, -pagesFreed);
#endif
}

static void
guarded_heap_free(void* address, uint32 flags)
{
	if (address == NULL)
		return;

	guarded_heap_area* area = guarded_heap_get_locked_area_for(sGuardedHeap,
		address);
	if (area == NULL)
		return;

	MutexLocker locker(area->lock, true);
	guarded_heap_area_free(*area, address, flags);
}
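
// Since allocations are page granular, realloc never resizes in place (except
// for the trivial equal-size case): it allocates a new block, copies
// min(oldSize, newSize) bytes and frees the old allocation.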

static void*
guarded_heap_realloc(void* address, size_t newSize)
{
	guarded_heap_area* area = guarded_heap_get_locked_area_for(sGuardedHeap,
		address);
	if (area == NULL)
		return NULL;

	MutexLocker locker(area->lock, true);

	size_t pageIndex = guarded_heap_area_page_index_for(*area, address);
	if (pageIndex >= area->page_count)
		return NULL;

	guarded_heap_page& page = area->pages[pageIndex];
	size_t oldSize = page.allocation_size;
	locker.Unlock();

	if (oldSize == newSize)
		return address;

	void* newBlock = memalign(0, newSize);
	if (newBlock == NULL)
		return NULL;

	memcpy(newBlock, address, min_c(oldSize, newSize));

	free(address);

	return newBlock;
}

// #pragma mark - Debugger commands


static int
dump_guarded_heap_page(int argc, char** argv)
{
	if (argc != 2) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	addr_t address = parse_expression(argv[1]);

	// Find the area that contains this page.
	guarded_heap_area* area = NULL;
	for (guarded_heap_area* candidate = sGuardedHeap.areas; candidate != NULL;
			candidate = candidate->next) {

		if (address < candidate->base)
			continue;
		if (address >= candidate->base + candidate->size)
			continue;

		area = candidate;
		break;
	}

	if (area == NULL) {
		kprintf("didn't find area for address\n");
		return 1;
	}

	size_t pageIndex = ((addr_t)address - area->base) / B_PAGE_SIZE;
	guarded_heap_page& page = area->pages[pageIndex];

	kprintf("page index: %" B_PRIuSIZE "\n", pageIndex);
	kprintf("flags:");
	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_USED) != 0)
		kprintf(" used");
	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_FIRST) != 0)
		kprintf(" first");
	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_GUARD) != 0)
		kprintf(" guard");
	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_DEAD) != 0)
		kprintf(" dead");
	kprintf("\n");

	kprintf("allocation size: %" B_PRIuSIZE "\n", page.allocation_size);
	kprintf("allocation base: %p\n", page.allocation_base);
	kprintf("alignment: %" B_PRIuSIZE "\n", page.alignment);
	kprintf("allocating team: %" B_PRId32 "\n", page.team);
	kprintf("allocating thread: %" B_PRId32 "\n", page.thread);

#if GUARDED_HEAP_STACK_TRACE_DEPTH > 0
	kprintf("stack trace:\n");
	for (size_t i = 0; i < page.stack_trace_depth; i++) {
		addr_t address = page.stack_trace[i];

		const char* symbol;
		const char* imageName;
		bool exactMatch;
		addr_t baseAddress;

		if (elf_debug_lookup_symbol_address(address, &baseAddress, &symbol,
				&imageName, &exactMatch) == B_OK) {
			kprintf(" %p %s + 0x%lx (%s)%s\n", (void*)address, symbol,
				address - baseAddress, imageName,
				exactMatch ? "" : " (nearest)");
		} else
			kprintf(" %p\n", (void*)address);
	}
#endif

	return 0;
}

static int
dump_guarded_heap_area(int argc, char** argv)
{
	if (argc != 2) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	addr_t address = parse_expression(argv[1]);

	// Find the area that contains this page.
	guarded_heap_area* area = NULL;
	for (guarded_heap_area* candidate = sGuardedHeap.areas; candidate != NULL;
			candidate = candidate->next) {

		if ((addr_t)candidate != address) {
			if (address < candidate->base)
				continue;
			if (address >= candidate->base + candidate->size)
				continue;
		}

		area = candidate;
		break;
	}

	if (area == NULL) {
		kprintf("didn't find area for address\n");
		return 1;
	}

	kprintf("guarded heap area: %p\n", area);
	kprintf("next heap area: %p\n", area->next);
	kprintf("guarded heap: %p\n", area->heap);
	kprintf("area id: %" B_PRId32 "\n", area->area);
	kprintf("base: 0x%" B_PRIxADDR "\n", area->base);
	kprintf("size: %" B_PRIuSIZE "\n", area->size);
	kprintf("page count: %" B_PRIuSIZE "\n", area->page_count);
	kprintf("used pages: %" B_PRIuSIZE "\n", area->used_pages);
	kprintf("protection cookie: %p\n", area->protection_cookie);
	kprintf("lock: %p\n", &area->lock);

	size_t freeCount = 0;
	void* item = list_get_first_item(&area->free_list);
	while (item != NULL) {
		freeCount++;

		if ((((guarded_heap_page*)item)->flags & GUARDED_HEAP_PAGE_FLAG_USED)
				!= 0) {
			kprintf("free list broken, page %p not actually free\n", item);
		}

		item = list_get_next_item(&area->free_list, item);
	}

	kprintf("free_list: %p (%" B_PRIuSIZE " free)\n", &area->free_list,
		freeCount);

	freeCount = 0;
	size_t runLength = 0;
	size_t longestRun = 0;
	for (size_t i = 0; i <= area->page_count; i++) {
		guarded_heap_page& page = area->pages[i];
		if (i == area->page_count
			|| (page.flags & GUARDED_HEAP_PAGE_FLAG_USED) != 0) {
			freeCount += runLength;
			if (runLength > longestRun)
				longestRun = runLength;
			runLength = 0;
			continue;
		}

		runLength = 1;
		for (size_t j = 1; j < area->page_count - i; j++) {
			if ((area->pages[i + j].flags & GUARDED_HEAP_PAGE_FLAG_USED) != 0)
				break;

			runLength++;
		}

		i += runLength - 1;
	}

	kprintf("longest free run: %" B_PRIuSIZE " (%" B_PRIuSIZE " free)\n",
		longestRun, freeCount);

	kprintf("pages: %p\n", area->pages);

	return 0;
}

static int
dump_guarded_heap(int argc, char** argv)
{
	guarded_heap* heap = &sGuardedHeap;
	if (argc != 1) {
		if (argc == 2)
			heap = (guarded_heap*)parse_expression(argv[1]);
		else {
			print_debugger_command_usage(argv[0]);
			return 0;
		}
	}

	kprintf("guarded heap: %p\n", heap);
	kprintf("rw lock: %p\n", &heap->lock);
	kprintf("page count: %" B_PRIuSIZE "\n", heap->page_count);
	kprintf("used pages: %" B_PRIuSIZE "\n", heap->used_pages);
	kprintf("area creation counter: %" B_PRId32 "\n",
		heap->area_creation_counter);

	size_t areaCount = 0;
	guarded_heap_area* area = heap->areas;
	while (area != NULL) {
		areaCount++;
		area = area->next;
	}

	kprintf("areas: %p (%" B_PRIuSIZE ")\n", heap->areas, areaCount);

	return 0;
}

static int
dump_guarded_heap_allocations(int argc, char** argv)
{
	team_id team = -1;
	thread_id thread = -1;
	addr_t address = 0;
	bool statsOnly = false;

	for (int32 i = 1; i < argc; i++) {
		if (strcmp(argv[i], "team") == 0)
			team = parse_expression(argv[++i]);
		else if (strcmp(argv[i], "thread") == 0)
			thread = parse_expression(argv[++i]);
		else if (strcmp(argv[i], "address") == 0)
			address = parse_expression(argv[++i]);
		else if (strcmp(argv[i], "stats") == 0)
			statsOnly = true;
		else {
			print_debugger_command_usage(argv[0]);
			return 0;
		}
	}

	size_t totalSize = 0;
	uint32 totalCount = 0;

	guarded_heap_area* area = sGuardedHeap.areas;
	while (area != NULL) {
		for (size_t i = 0; i < area->page_count; i++) {
			guarded_heap_page& page = area->pages[i];
			if ((page.flags & GUARDED_HEAP_PAGE_FLAG_FIRST) == 0)
				continue;

			if ((team < 0 || page.team == team)
				&& (thread < 0 || page.thread == thread)
				&& (address == 0 || (addr_t)page.allocation_base == address)) {

				if (!statsOnly) {
					kprintf("team: % 6" B_PRId32 "; thread: % 6" B_PRId32 "; "
						"address: 0x%08" B_PRIxADDR "; size: %" B_PRIuSIZE
						" bytes\n", page.team, page.thread,
						(addr_t)page.allocation_base, page.allocation_size);
				}

				totalSize += page.allocation_size;
				totalCount++;
			}
		}

		area = area->next;
	}

	kprintf("total allocations: %" B_PRIu32 "; total bytes: %" B_PRIuSIZE
		"\n", totalCount, totalSize);
	return 0;
}

// #pragma mark - Malloc API


status_t
heap_init(addr_t address, size_t size)
{
	return guarded_heap_area_init(sGuardedHeap, -1, (void*)address, size, 0)
		? B_OK : B_ERROR;
}

status_t
heap_init_post_area()
{
	return B_OK;
}
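
// The initial heap area set up by heap_init() has no area id yet. Now that
// areas are available, look it up, enable debug protection, (re)protect all
// pages accordingly and register the debugger commands.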

status_t
heap_init_post_sem()
{
	for (guarded_heap_area* area = sGuardedHeap.areas; area != NULL;
			area = area->next) {
		if (area->area >= 0)
			continue;

		area_id id = area_for((void*)area->base);
		if (id < 0 || vm_prepare_kernel_area_debug_protection(id,
				&area->protection_cookie) != B_OK) {
			panic("failed to prepare initial guarded heap for protection");
			continue;
		}

		area->area = id;
		for (size_t i = 0; i < area->page_count; i++) {
			guarded_heap_page& page = area->pages[i];
			if ((page.flags & GUARDED_HEAP_PAGE_FLAG_USED) != 0
				&& (page.flags & GUARDED_HEAP_PAGE_FLAG_GUARD) == 0
				&& (page.flags & GUARDED_HEAP_PAGE_FLAG_DEAD) == 0) {
				guarded_heap_page_protect(*area, i,
					B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
			} else
				guarded_heap_page_protect(*area, i, 0);
		}
	}

	add_debugger_command("guarded_heap", &dump_guarded_heap,
		"Dump info about the guarded heap");
	add_debugger_command_etc("guarded_heap_area", &dump_guarded_heap_area,
		"Dump info about a guarded heap area",
		"<address>\nDump info about guarded heap area containing address.\n",
		0);
	add_debugger_command_etc("guarded_heap_page", &dump_guarded_heap_page,
		"Dump info about a guarded heap page",
		"<address>\nDump info about guarded heap page containing address.\n",
		0);
	add_debugger_command_etc("allocations", &dump_guarded_heap_allocations,
		"Dump current heap allocations",
		"[\"stats\"] [team] [thread] [address]\n"
		"If no parameters are given, all current allocations are dumped.\n"
		"If the optional argument \"stats\" is specified, only the allocation\n"
		"counts and no individual allocations are printed.\n"
		"If a specific allocation address is given, only this allocation is\n"
		"dumped.\n"
		"If a team and/or thread is specified, only allocations of this\n"
		"team/thread are dumped.\n", 0);

	return B_OK;
}

void*
memalign(size_t alignment, size_t size)
{
	return memalign_etc(alignment, size, 0);
}


void*
memalign_etc(size_t alignment, size_t size, uint32 flags)
{
	if (size == 0)
		size = 1;

	return guarded_heap_allocate(sGuardedHeap, size, alignment, flags);
}


void
free_etc(void* address, uint32 flags)
{
	guarded_heap_free(address, flags);
}


void*
malloc(size_t size)
{
	return memalign_etc(0, size, 0);
}


void
free(void* address)
{
	free_etc(address, 0);
}


void*
realloc(void* address, size_t newSize)
{
	if (newSize == 0) {
		free(address);
		return NULL;
	}

	if (address == NULL)
		return memalign(0, newSize);

	return guarded_heap_realloc(address, newSize);
}

#if USE_GUARDED_HEAP_FOR_OBJECT_CACHE


// #pragma mark - Slab API


void
request_memory_manager_maintenance()
{
}
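
// The object cache shim keeps no state: the "cache" handle is simply the
// object size cast to a pointer, which object_cache_alloc() turns back into
// an allocation size.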

object_cache*
create_object_cache(const char*, size_t objectSize, size_t, void*,
	object_cache_constructor, object_cache_destructor)
{
	return (object_cache*)objectSize;
}


object_cache*
create_object_cache_etc(const char*, size_t objectSize, size_t, size_t, size_t,
	size_t, uint32, void*, object_cache_constructor, object_cache_destructor,
	object_cache_reclaimer)
{
	return (object_cache*)objectSize;
}


void
delete_object_cache(object_cache* cache)
{
}


status_t
object_cache_set_minimum_reserve(object_cache* cache, size_t objectCount)
{
	return B_OK;
}


void*
object_cache_alloc(object_cache* cache, uint32 flags)
{
	return memalign_etc(0, (size_t)cache, flags);
}


void
object_cache_free(object_cache* cache, void* object, uint32 flags)
{
	return free_etc(object, flags);
}


status_t
object_cache_reserve(object_cache* cache, size_t objectCount, uint32 flags)
{
	return B_OK;
}


void
object_cache_get_usage(object_cache* cache, size_t* _allocatedMemory)
{
	*_allocatedMemory = 0;
}


void
slab_init(kernel_args* args)
{
}


void
slab_init_post_area()
{
}


void
slab_init_post_sem()
{
}


void
slab_init_post_thread()
{
}


#endif	// USE_GUARDED_HEAP_FOR_OBJECT_CACHE


#endif	// USE_GUARDED_HEAP_FOR_MALLOC