/*
 * Copyright 2008-2009, Michael Lotz, mmlr@mlotz.ch.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2002-2011, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include "malloc_debug_api.h"

#include <errno_private.h>

#define INFO(x) debug_printf x

#define ASSERT(x) if (!(x)) panic("assert failed: %s", #x);
static void *debug_heap_memalign(size_t alignment, size_t size);


static bool sDebuggerCalls = true;
static bool sReuseMemory = true;
static bool sParanoidValidation = false;
static thread_id sWallCheckThread = -1;
static bool sStopWallChecking = false;
static bool sUseGuardPage = false;

#if __cplusplus >= 201103L
static size_t sDefaultAlignment = alignof(max_align_t);
#else
static size_t sDefaultAlignment = 0;
#endif
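// Note: most of these knobs are flipped at runtime through the matching
// functions in the Heap Debug API section further below (debugger calls on
// panic, memory reuse after free, paranoid validation and the wall-check
// thread); sUseGuardPage selects the guard-page allocation mode used by
// debug_heap_malloc().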
static void
panic(const char *format, ...)
{
    char buffer[1024];

    va_list args;
    va_start(args, format);
    vsnprintf(buffer, sizeof(buffer), format, args);
    va_end(args);

    if (sDebuggerCalls)
        debugger(buffer);
    else
        debug_printf("%s", buffer);
}
#define ROUNDUP(x, y) (((x) + (y) - 1) / (y) * (y))
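// Illustrative values only: ROUNDUP(10, 8) == 16, ROUNDUP(16, 8) == 16 and
// ROUNDUP(1, B_PAGE_SIZE) == B_PAGE_SIZE; the macro rounds x up to the next
// multiple of y.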
#define HEAP_INITIAL_SIZE 1 * 1024 * 1024
#define HEAP_GROW_SIZE 2 * 1024 * 1024
#define HEAP_AREA_USE_THRESHOLD 1 * 1024 * 1024
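// Requests whose size (plus bookkeeping) reaches HEAP_AREA_USE_THRESHOLD are
// not served from any heap class at all; debug_heap_memalign() below gives
// them a dedicated area instead.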
typedef struct heap_leak_check_info_s {
    size_t size;
    thread_id thread;
} heap_leak_check_info;

typedef struct heap_page_s heap_page;

typedef struct heap_area_s {
    area_id area;

    addr_t base;
    size_t size;

    uint32 page_count;
    uint32 free_page_count;

    heap_page * free_pages;
    heap_page * page_table;

    heap_area_s * prev;
    heap_area_s * next;
    heap_area_s * all_next;
} heap_area;

typedef struct heap_page_s {
    heap_area_s * area;
    uint16 index;
    uint16 bin_index : 5;
    uint16 free_count : 10;
    uint16 in_use : 1;
    heap_page_s * next;
    heap_page_s * prev;
    union {
        uint16 empty_index;
        uint16 allocation_id; // used for bin == bin_count allocations
    };
    addr_t * free_list;
} heap_page;

typedef struct heap_bin_s {
    mutex lock;
    uint32 element_size;
    uint16 max_free_count;
    heap_page * page_list; // sorted so that the desired page is always first
} heap_bin;

typedef struct heap_allocator_s {
    rw_lock area_lock;
    mutex page_lock;

    const char * name;
    uint32 bin_count;
    uint32 page_size;

    uint32 total_pages;
    uint32 total_free_pages;
    uint32 empty_areas;

    heap_bin * bins;
    heap_area * areas; // sorted so that the desired area is always first
    heap_area * all_areas; // all areas including full ones
} heap_allocator;

static const uint32 kAreaAllocationMagic = 'AAMG';

typedef struct area_allocation_info_s {
    area_id area;
    void * base;
    uint32 magic;
    size_t size;

    thread_id thread;
    size_t allocation_size;
    size_t allocation_alignment;
    void * allocation_base;
} area_allocation_info;

typedef struct heap_class_s {
    const char * name;
    uint32 initial_percentage;
    size_t max_allocation_size;
    size_t page_size;
    size_t min_bin_size;
    size_t bin_alignment;
    uint32 min_count_per_page;
    size_t max_waste_per_page;
} heap_class;
// Heap class configuration
#define HEAP_CLASS_COUNT 3
static const heap_class sHeapClasses[HEAP_CLASS_COUNT] = {
    {
        "small",                  /* name */
        50,                       /* initial percentage */
        B_PAGE_SIZE / 8,          /* max allocation size */
        B_PAGE_SIZE,              /* page size */
        8,                        /* min bin size */
        4,                        /* bin alignment */
        8,                        /* min count per page */
        16                        /* max waste per page */
    },
    {
        "medium",                 /* name */
        30,                       /* initial percentage */
        B_PAGE_SIZE * 2,          /* max allocation size */
        B_PAGE_SIZE * 8,          /* page size */
        B_PAGE_SIZE / 8,          /* min bin size */
        32,                       /* bin alignment */
        4,                        /* min count per page */
        64                        /* max waste per page */
    },
    {
        "large",                  /* name */
        20,                       /* initial percentage */
        HEAP_AREA_USE_THRESHOLD,  /* max allocation size */
        B_PAGE_SIZE * 16,         /* page size */
        B_PAGE_SIZE * 2,          /* min bin size */
        128,                      /* bin alignment */
        1,                        /* min count per page */
        256                       /* max waste per page */
    }
};

static heap_allocator *sHeaps[HEAP_CLASS_COUNT];
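// How a request is routed (sketch, numbers assume a 4 kB B_PAGE_SIZE): the
// class is picked by heap_class_for() below via max_allocation_size, so a
// 100 byte request lands in the first class, a 5 kB request in the second and
// anything larger (up to HEAP_AREA_USE_THRESHOLD) in the third; within a
// class the smallest bin whose element_size fits is used, and oversized
// requests fall back to whole pages via heap_raw_alloc().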
// #pragma mark - Debug functions
static void
dump_page(heap_page *page)
{
    uint32 count = 0;
    for (addr_t *temp = page->free_list; temp != NULL; temp = (addr_t *)*temp)
        count++;

    printf("\t\tpage %p: bin_index: %u; free_count: %u; empty_index: %u; "
        "free_list %p (%" B_PRIu32 " entr%s)\n", page, page->bin_index,
        page->free_count, page->empty_index, page->free_list, count,
        count == 1 ? "y" : "ies");
}
static void
dump_bin(heap_bin *bin)
{
    printf("\telement_size: %" B_PRIu32 "; max_free_count: %u; page_list %p;\n",
        bin->element_size, bin->max_free_count, bin->page_list);

    for (heap_page *temp = bin->page_list; temp != NULL; temp = temp->next)
        dump_page(temp);
}
static void
dump_bin_list(heap_allocator *heap)
{
    for (uint32 i = 0; i < heap->bin_count; i++)
        dump_bin(&heap->bins[i]);
}
static void
dump_allocator_areas(heap_allocator *heap)
{
    heap_area *area = heap->all_areas;
    while (area != NULL) {
        printf("\tarea %p: area: %" B_PRId32 "; base: 0x%08lx; size: %lu; "
            "page_count: %" B_PRIu32 "; free_pages: %p (%" B_PRIu32 " entr%s)\n",
            area, area->area, area->base, area->size, area->page_count,
            area->free_pages, area->free_page_count,
            area->free_page_count == 1 ? "y" : "ies");
        area = area->all_next;
    }
}
static void
dump_allocator(heap_allocator *heap, bool areas, bool bins)
{
    printf("allocator %p: name: %s; page_size: %" B_PRIu32 "; bin_count: %"
        B_PRIu32 "; pages: %" B_PRIu32 "; free_pages: %" B_PRIu32 "; "
        "empty_areas: %" B_PRIu32 "\n", heap, heap->name, heap->page_size,
        heap->bin_count, heap->total_pages, heap->total_free_pages,
        heap->empty_areas);

    if (areas)
        dump_allocator_areas(heap);
    if (bins)
        dump_bin_list(heap);
}
static void
dump_allocations(bool statsOnly, thread_id thread)
{
    size_t totalSize = 0;
    uint32 totalCount = 0;
    for (uint32 classIndex = 0; classIndex < HEAP_CLASS_COUNT; classIndex++) {
        heap_allocator *heap = sHeaps[classIndex];

        // go through all the pages in all the areas
        heap_area *area = heap->all_areas;
        while (area) {
            heap_leak_check_info *info = NULL;
            for (uint32 i = 0; i < area->page_count; i++) {
                heap_page *page = &area->page_table[i];
                if (!page->in_use)
                    continue;

                addr_t base = area->base + i * heap->page_size;
                if (page->bin_index < heap->bin_count) {
                    // page is used by a small allocation bin
                    uint32 elementCount = page->empty_index;
                    size_t elementSize
                        = heap->bins[page->bin_index].element_size;
                    for (uint32 j = 0; j < elementCount;
                            j++, base += elementSize) {
                        // walk the free list to see if this element is in use
                        bool elementInUse = true;
                        for (addr_t *temp = page->free_list; temp != NULL;
                                temp = (addr_t *)*temp) {
                            if ((addr_t)temp == base) {
                                elementInUse = false;
                                break;
                            }
                        }

                        if (!elementInUse)
                            continue;

                        info = (heap_leak_check_info *)(base + elementSize
                            - sizeof(heap_leak_check_info));

                        if (thread == -1 || info->thread == thread) {
                            if (!statsOnly) {
                                printf("thread: % 6" B_PRId32 "; address: "
                                    "0x%08lx; size: %lu bytes\n", info->thread,
                                    base, info->size);
                            }

                            totalSize += info->size;
                            totalCount++;
                        }
                    }
                } else {
                    // page is used by a big allocation, find the page count
                    uint32 pageCount = 1;
                    while (i + pageCount < area->page_count
                        && area->page_table[i + pageCount].in_use
                        && area->page_table[i + pageCount].bin_index
                            == heap->bin_count
                        && area->page_table[i + pageCount].allocation_id
                            == page->allocation_id)
                        pageCount++;

                    info = (heap_leak_check_info *)(base + pageCount
                        * heap->page_size - sizeof(heap_leak_check_info));

                    if (thread == -1 || info->thread == thread) {
                        if (!statsOnly) {
                            printf("thread: % 6" B_PRId32 "; address: 0x%08lx;"
                                " size: %lu bytes\n", info->thread,
                                base, info->size);
                        }

                        totalSize += info->size;
                        totalCount++;
                    }

                    // skip the allocated pages
                    i += pageCount - 1;
                }
            }

            area = area->all_next;
        }
    }

    printf("total allocations: %" B_PRIu32 "; total bytes: %lu\n", totalCount,
        totalSize);
}
static void
heap_validate_walls()
{
    for (uint32 classIndex = 0; classIndex < HEAP_CLASS_COUNT; classIndex++) {
        heap_allocator *heap = sHeaps[classIndex];
        ReadLocker areaReadLocker(heap->area_lock);
        for (uint32 i = 0; i < heap->bin_count; i++)
            mutex_lock(&heap->bins[i].lock);
        MutexLocker pageLocker(heap->page_lock);

        // go through all the pages in all the areas
        heap_area *area = heap->all_areas;
        while (area) {
            heap_leak_check_info *info = NULL;
            for (uint32 i = 0; i < area->page_count; i++) {
                heap_page *page = &area->page_table[i];
                if (!page->in_use)
                    continue;

                addr_t base = area->base + i * heap->page_size;
                if (page->bin_index < heap->bin_count) {
                    // page is used by a small allocation bin
                    uint32 elementCount = page->empty_index;
                    size_t elementSize
                        = heap->bins[page->bin_index].element_size;
                    for (uint32 j = 0; j < elementCount;
                            j++, base += elementSize) {
                        // walk the free list to see if this element is in use
                        bool elementInUse = true;
                        for (addr_t *temp = page->free_list; temp != NULL;
                                temp = (addr_t *)*temp) {
                            if ((addr_t)temp == base) {
                                elementInUse = false;
                                break;
                            }
                        }

                        if (!elementInUse)
                            continue;

                        info = (heap_leak_check_info *)(base + elementSize
                            - sizeof(heap_leak_check_info));
                        if (info->size > elementSize - sizeof(addr_t)
                                - sizeof(heap_leak_check_info)) {
                            panic("leak check info has invalid size %lu for"
                                " element size %lu\n", info->size, elementSize);
                            continue;
                        }

                        addr_t wallValue;
                        addr_t wallAddress = base + info->size;
                        memcpy(&wallValue, (void *)wallAddress, sizeof(addr_t));
                        if (wallValue != wallAddress) {
                            panic("someone wrote beyond small allocation at"
                                " 0x%08lx; size: %lu bytes; allocated by %ld;"
                                " value: 0x%08lx\n", base, info->size,
                                info->thread, wallValue);
                        }
                    }
                } else {
                    // page is used by a big allocation, find the page count
                    uint32 pageCount = 1;
                    while (i + pageCount < area->page_count
                        && area->page_table[i + pageCount].in_use
                        && area->page_table[i + pageCount].bin_index
                            == heap->bin_count
                        && area->page_table[i + pageCount].allocation_id
                            == page->allocation_id)
                        pageCount++;

                    info = (heap_leak_check_info *)(base + pageCount
                        * heap->page_size - sizeof(heap_leak_check_info));

                    if (info->size > pageCount * heap->page_size
                            - sizeof(addr_t) - sizeof(heap_leak_check_info)) {
                        panic("leak check info has invalid size %lu for"
                            " page count %lu (%lu bytes)\n", info->size,
                            pageCount, pageCount * heap->page_size);
                        continue;
                    }

                    addr_t wallValue;
                    addr_t wallAddress = base + info->size;
                    memcpy(&wallValue, (void *)wallAddress, sizeof(addr_t));
                    if (wallValue != wallAddress) {
                        panic("someone wrote beyond big allocation at 0x%08lx;"
                            " size: %lu bytes; allocated by %ld;"
                            " value: 0x%08lx\n", base, info->size,
                            info->thread, wallValue);
                    }

                    // skip the allocated pages
                    i += pageCount - 1;
                }
            }

            area = area->all_next;
        }

        for (uint32 i = 0; i < heap->bin_count; i++)
            mutex_unlock(&heap->bins[i].lock);
    }
}
static void
heap_validate_heap(heap_allocator *heap)
{
    ReadLocker areaReadLocker(heap->area_lock);
    for (uint32 i = 0; i < heap->bin_count; i++)
        mutex_lock(&heap->bins[i].lock);
    MutexLocker pageLocker(heap->page_lock);

    uint32 totalPageCount = 0;
    uint32 totalFreePageCount = 0;
    heap_area *area = heap->all_areas;
    while (area != NULL) {
        // validate the free pages list
        uint32 freePageCount = 0;
        heap_page *lastPage = NULL;
        heap_page *page = area->free_pages;
        while (page) {
            if ((addr_t)page < (addr_t)&area->page_table[0]
                || (addr_t)page >= (addr_t)&area->page_table[area->page_count])
                panic("free page is not part of the page table\n");

            if (page->index >= area->page_count)
                panic("free page has invalid index\n");

            if ((addr_t)&area->page_table[page->index] != (addr_t)page)
                panic("free page index does not lead to target page\n");

            if (page->prev != lastPage)
                panic("free page entry has invalid prev link\n");

            if (page->in_use)
                panic("free page marked as in use\n");

            lastPage = page;
            page = page->next;
            freePageCount++;
        }

        totalPageCount += freePageCount;
        totalFreePageCount += freePageCount;
        if (area->free_page_count != freePageCount) {
            panic("free page count %ld doesn't match free page list %ld\n",
                area->free_page_count, freePageCount);
        }

        // validate the page table
        uint32 usedPageCount = 0;
        for (uint32 i = 0; i < area->page_count; i++) {
            if (area->page_table[i].in_use)
                usedPageCount++;
        }

        totalPageCount += usedPageCount;
        if (freePageCount + usedPageCount != area->page_count) {
            panic("free pages and used pages do not add up (%lu + %lu != %lu)\n",
                freePageCount, usedPageCount, area->page_count);
        }

        area = area->all_next;
    }

    // validate the areas
    area = heap->areas;
    heap_area *lastArea = NULL;
    uint32 lastFreeCount = 0;
    while (area != NULL) {
        if (area->free_page_count < lastFreeCount)
            panic("size ordering of area list broken\n");

        if (area->prev != lastArea)
            panic("area list entry has invalid prev link\n");

        lastArea = area;
        lastFreeCount = area->free_page_count;
        area = area->next;
    }

    lastArea = NULL;
    area = heap->all_areas;
    while (area != NULL) {
        if (lastArea != NULL && lastArea->base < area->base)
            panic("base ordering of all_areas list broken\n");

        lastArea = area;
        area = area->all_next;
    }

    // validate the bins
    for (uint32 i = 0; i < heap->bin_count; i++) {
        heap_bin *bin = &heap->bins[i];
        heap_page *lastPage = NULL;
        heap_page *page = bin->page_list;
        lastFreeCount = 0;
        while (page) {
            area = heap->all_areas;
            while (area) {
                if (area == page->area)
                    break;
                area = area->all_next;
            }

            if (area == NULL) {
                panic("page area not present in area list\n");
                page = page->next;
                continue;
            }

            if ((addr_t)page < (addr_t)&area->page_table[0]
                || (addr_t)page >= (addr_t)&area->page_table[area->page_count])
                panic("used page is not part of the page table\n");

            if (page->index >= area->page_count)
                panic("used page has invalid index %lu\n", page->index);

            if ((addr_t)&area->page_table[page->index] != (addr_t)page) {
                panic("used page index does not lead to target page"
                    " (%lu vs. %lu)\n", (addr_t)&area->page_table[page->index],
                    (addr_t)page);
            }

            if (page->prev != lastPage) {
                panic("used page entry has invalid prev link (%p vs %p bin "
                    "%lu)\n", page->prev, lastPage, i);
            }

            if (!page->in_use)
                panic("used page %p marked as not in use\n", page);

            if (page->bin_index != i) {
                panic("used page with bin index %u in page list of bin %lu\n",
                    page->bin_index, i);
            }

            if (page->free_count < lastFreeCount)
                panic("ordering of bin page list broken\n");

            // validate the free list
            uint32 freeSlotsCount = 0;
            addr_t *element = page->free_list;
            addr_t pageBase = area->base + page->index * heap->page_size;
            while (element) {
                if ((addr_t)element < pageBase
                    || (addr_t)element >= pageBase + heap->page_size)
                    panic("free list entry out of page range %p\n", element);

                if (((addr_t)element - pageBase) % bin->element_size != 0)
                    panic("free list entry not on a element boundary\n");

                element = (addr_t *)*element;
                freeSlotsCount++;
            }

            uint32 slotCount = bin->max_free_count;
            if (page->empty_index > slotCount) {
                panic("empty index beyond slot count (%u with %lu slots)\n",
                    page->empty_index, slotCount);
            }

            freeSlotsCount += (slotCount - page->empty_index);
            if (freeSlotsCount > slotCount)
                panic("more free slots than fit into the page\n");

            lastPage = page;
            lastFreeCount = page->free_count;
            page = page->next;
        }
    }

    pageLocker.Unlock();
    for (uint32 i = 0; i < heap->bin_count; i++)
        mutex_unlock(&heap->bins[i].lock);
}
// #pragma mark - Heap functions
static void
heap_add_area(heap_allocator *heap, area_id areaID, addr_t base, size_t size)
{
    heap_area *area = (heap_area *)base;
    area->area = areaID;

    base += sizeof(heap_area);
    size -= sizeof(heap_area);

    uint32 pageCount = size / heap->page_size;
    size_t pageTableSize = pageCount * sizeof(heap_page);
    area->page_table = (heap_page *)base;
    base += pageTableSize;
    size -= pageTableSize;

    // the rest is now actually usable memory (rounded to the next page)
    area->base = ROUNDUP(base, B_PAGE_SIZE);
    area->size = size & ~(B_PAGE_SIZE - 1);

    // now we know the real page count
    pageCount = area->size / heap->page_size;
    area->page_count = pageCount;

    // zero out the page table and fill in page indexes
    memset((void *)area->page_table, 0, pageTableSize);
    for (uint32 i = 0; i < pageCount; i++) {
        area->page_table[i].area = area;
        area->page_table[i].index = i;
    }

    // add all pages up into the free pages list
    for (uint32 i = 1; i < pageCount; i++) {
        area->page_table[i - 1].next = &area->page_table[i];
        area->page_table[i].prev = &area->page_table[i - 1];
    }
    area->free_pages = &area->page_table[0];
    area->free_page_count = pageCount;
    area->page_table[0].prev = NULL;
    area->next = NULL;

    WriteLocker areaWriteLocker(heap->area_lock);
    MutexLocker pageLocker(heap->page_lock);
    if (heap->areas == NULL) {
        // it's the only (empty) area in that heap
        area->prev = NULL;
        heap->areas = area;
    } else {
        // link in this area as the last one as it is completely empty
        heap_area *lastArea = heap->areas;
        while (lastArea->next != NULL)
            lastArea = lastArea->next;

        lastArea->next = area;
        area->prev = lastArea;
    }

    // insert this area in the all_areas list so it stays ordered by base
    if (heap->all_areas == NULL || heap->all_areas->base < area->base) {
        area->all_next = heap->all_areas;
        heap->all_areas = area;
    } else {
        heap_area *insert = heap->all_areas;
        while (insert->all_next && insert->all_next->base > area->base)
            insert = insert->all_next;

        area->all_next = insert->all_next;
        insert->all_next = area;
    }

    heap->total_pages += area->page_count;
    heap->total_free_pages += area->free_page_count;

    if (areaID >= 0) {
        // this later on deletable area is yet empty - the empty count will be
        // decremented as soon as this area is used for the first time
        heap->empty_areas++;
    }

    areaWriteLocker.Unlock();
}
static status_t
heap_remove_area(heap_allocator *heap, heap_area *area)
{
    if (area->free_page_count != area->page_count) {
        panic("tried removing heap area that has still pages in use");
        return B_ERROR;
    }

    if (area->prev == NULL && area->next == NULL) {
        panic("tried removing the last non-full heap area");
        return B_ERROR;
    }

    if (heap->areas == area)
        heap->areas = area->next;
    if (area->prev != NULL)
        area->prev->next = area->next;
    if (area->next != NULL)
        area->next->prev = area->prev;

    if (heap->all_areas == area)
        heap->all_areas = area->all_next;
    else {
        heap_area *previous = heap->all_areas;
        while (previous) {
            if (previous->all_next == area) {
                previous->all_next = area->all_next;
                break;
            }

            previous = previous->all_next;
        }

        if (previous == NULL)
            panic("removing heap area that is not in all list");
    }

    heap->total_pages -= area->page_count;
    heap->total_free_pages -= area->free_page_count;
    return B_OK;
}
static heap_allocator *
heap_create_allocator(const char *name, addr_t base, size_t size,
    const heap_class *heapClass)
{
    heap_allocator *heap = (heap_allocator *)base;
    base += sizeof(heap_allocator);
    size -= sizeof(heap_allocator);

    heap->name = name;
    heap->page_size = heapClass->page_size;
    heap->total_pages = heap->total_free_pages = heap->empty_areas = 0;
    heap->areas = heap->all_areas = NULL;
    heap->bins = (heap_bin *)base;

    heap->bin_count = 0;
    size_t binSize = 0, lastSize = 0;
    uint32 count = heap->page_size / heapClass->min_bin_size;
    for (; count >= heapClass->min_count_per_page; count--, lastSize = binSize) {
        if (heap->bin_count >= 32)
            panic("heap configuration invalid - max bin count reached\n");

        binSize = (heap->page_size / count) & ~(heapClass->bin_alignment - 1);
        if (binSize == lastSize)
            continue;
        if (heap->page_size - count * binSize > heapClass->max_waste_per_page)
            continue;

        heap_bin *bin = &heap->bins[heap->bin_count];
        mutex_init(&bin->lock, "heap bin lock");
        bin->element_size = binSize;
        bin->max_free_count = heap->page_size / binSize;
        bin->page_list = NULL;
        heap->bin_count++;
    }

    base += heap->bin_count * sizeof(heap_bin);
    size -= heap->bin_count * sizeof(heap_bin);

    rw_lock_init(&heap->area_lock, "heap area lock");
    mutex_init(&heap->page_lock, "heap page lock");

    heap_add_area(heap, -1, base, size);
    return heap;
}
static void
heap_free_pages_added(heap_allocator *heap, heap_area *area, uint32 pageCount)
{
    area->free_page_count += pageCount;
    heap->total_free_pages += pageCount;

    if (area->free_page_count == pageCount) {
        // we need to add ourselfs to the area list of the heap
        area->prev = NULL;
        area->next = heap->areas;
        if (area->next)
            area->next->prev = area;
        heap->areas = area;
    } else {
        // we might need to move back in the area list
        if (area->next && area->next->free_page_count < area->free_page_count) {
            // move ourselfs so the list stays ordered
            heap_area *insert = area->next;
            while (insert->next
                && insert->next->free_page_count < area->free_page_count)
                insert = insert->next;

            if (area->prev)
                area->prev->next = area->next;
            if (area->next)
                area->next->prev = area->prev;
            if (heap->areas == area)
                heap->areas = area->next;

            area->prev = insert;
            area->next = insert->next;
            if (area->next)
                area->next->prev = area;
            insert->next = area;
        }
    }

    if (area->free_page_count == area->page_count && area->area >= 0)
        heap->empty_areas++;
}
static void
heap_free_pages_removed(heap_allocator *heap, heap_area *area, uint32 pageCount)
{
    if (area->free_page_count == area->page_count && area->area >= 0) {
        // this area was completely empty
        heap->empty_areas--;
    }

    area->free_page_count -= pageCount;
    heap->total_free_pages -= pageCount;

    if (area->free_page_count == 0) {
        // the area is now full so we remove it from the area list
        if (area->prev)
            area->prev->next = area->next;
        if (area->next)
            area->next->prev = area->prev;
        if (heap->areas == area)
            heap->areas = area->next;
        area->next = area->prev = NULL;
    } else {
        // we might need to move forward in the area list
        if (area->prev && area->prev->free_page_count > area->free_page_count) {
            // move ourselfs so the list stays ordered
            heap_area *insert = area->prev;
            while (insert->prev
                && insert->prev->free_page_count > area->free_page_count)
                insert = insert->prev;

            if (area->prev)
                area->prev->next = area->next;
            if (area->next)
                area->next->prev = area->prev;

            area->prev = insert->prev;
            area->next = insert;
            if (area->prev)
                area->prev->next = area;
            if (heap->areas == insert)
                heap->areas = area;
            insert->prev = area;
        }
    }
}
static void
heap_link_page(heap_page *page, heap_page **list)
{
    page->prev = NULL;
    page->next = *list;
    if (page->next)
        page->next->prev = page;
    *list = page;
}
static void
heap_unlink_page(heap_page *page, heap_page **list)
{
    if (page->prev)
        page->prev->next = page->next;
    if (page->next)
        page->next->prev = page->prev;
    if (list && *list == page) {
        *list = page->next;
        if (page->next)
            page->next->prev = NULL;
    }
}
static heap_page *
heap_allocate_contiguous_pages(heap_allocator *heap, uint32 pageCount,
    size_t alignment)
{
    heap_area *area = heap->areas;
    while (area) {
        if (area->free_page_count < pageCount) {
            area = area->next;
            continue;
        }

        uint32 step = 1;
        uint32 firstValid = 0;
        const uint32 lastValid = area->page_count - pageCount + 1;

        if (alignment > heap->page_size) {
            firstValid = (ROUNDUP(area->base, alignment) - area->base)
                / heap->page_size;
            step = alignment / heap->page_size;
        }

        int32 first = -1;
        for (uint32 i = firstValid; i < lastValid; i += step) {
            if (area->page_table[i].in_use)
                continue;

            first = i;

            for (uint32 j = 1; j < pageCount; j++) {
                if (area->page_table[i + j].in_use) {
                    first = -1;
                    i += j / step * step;
                    break;
                }
            }

            if (first >= 0)
                break;
        }

        if (first < 0) {
            area = area->next;
            continue;
        }

        for (uint32 i = first; i < first + pageCount; i++) {
            heap_page *page = &area->page_table[i];
            page->in_use = 1;
            page->bin_index = heap->bin_count;

            heap_unlink_page(page, &area->free_pages);

            page->next = page->prev = NULL;
            page->free_list = NULL;
            page->allocation_id = (uint16)first;
        }

        heap_free_pages_removed(heap, area, pageCount);
        return &area->page_table[first];
    }

    return NULL;
}
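// Alignment sketch (hypothetical numbers): with a 4 kB page size and a
// requested alignment of 16 kB, step becomes 4, so only every fourth page
// index (starting at the first 16 kB-aligned address inside the area) is
// considered as a start page; an unaligned area base is compensated by the
// ROUNDUP() above.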
static void
heap_add_leak_check_info(addr_t address, size_t allocated, size_t size)
{
    size -= sizeof(addr_t) + sizeof(heap_leak_check_info);
    heap_leak_check_info *info = (heap_leak_check_info *)(address + allocated
        - sizeof(heap_leak_check_info));
    info->thread = find_thread(NULL);
    info->size = size;

    addr_t wallAddress = address + size;
    memcpy((void *)wallAddress, &wallAddress, sizeof(addr_t));
}
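// Resulting layout of every tracked allocation (sketch):
//
//   address                  address + size           address + allocated
//   |-- user data (size) --|-- wall (addr_t) --| ... |-- heap_leak_check_info --|
//
// The wall stores its own address; heap_validate_walls() and heap_free()
// re-read it and panic if it was overwritten.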
static void *
heap_raw_alloc(heap_allocator *heap, size_t size, size_t alignment)
{
    INFO(("heap %p: allocate %lu bytes from raw pages with alignment %lu\n",
        heap, size, alignment));

    uint32 pageCount = (size + heap->page_size - 1) / heap->page_size;

    MutexLocker pageLocker(heap->page_lock);
    heap_page *firstPage = heap_allocate_contiguous_pages(heap, pageCount,
        alignment);
    if (firstPage == NULL) {
        INFO(("heap %p: found no contiguous pages to allocate %ld bytes\n",
            heap, size));
        return NULL;
    }

    addr_t address = firstPage->area->base + firstPage->index * heap->page_size;
    heap_add_leak_check_info(address, pageCount * heap->page_size, size);
    return (void *)address;
}
static void *
heap_allocate_from_bin(heap_allocator *heap, uint32 binIndex, size_t size)
{
    heap_bin *bin = &heap->bins[binIndex];
    INFO(("heap %p: allocate %lu bytes from bin %lu with element_size %lu\n",
        heap, size, binIndex, bin->element_size));

    MutexLocker binLocker(bin->lock);
    heap_page *page = bin->page_list;
    if (page == NULL) {
        MutexLocker pageLocker(heap->page_lock);
        heap_area *area = heap->areas;
        if (area == NULL) {
            INFO(("heap %p: no free pages to allocate %lu bytes\n", heap,
                size));
            return NULL;
        }

        // by design there are only areas in the list that still have
        // free pages available
        page = area->free_pages;
        area->free_pages = page->next;
        if (page->next)
            page->next->prev = NULL;

        heap_free_pages_removed(heap, area, 1);

        if (page->in_use)
            panic("got an in use page from the free pages list\n");
        page->in_use = 1;

        pageLocker.Unlock();

        page->bin_index = binIndex;
        page->free_count = bin->max_free_count;
        page->empty_index = 0;
        page->free_list = NULL;
        page->next = page->prev = NULL;
        bin->page_list = page;
    }

    // we have a page where we have a free slot
    void *address = NULL;
    if (page->free_list) {
        // there's a previously freed entry we can use
        address = page->free_list;
        page->free_list = (addr_t *)*page->free_list;
    } else {
        // the page hasn't been fully allocated so use the next empty_index
        address = (void *)(page->area->base + page->index * heap->page_size
            + page->empty_index * bin->element_size);
        page->empty_index++;
    }

    page->free_count--;
    if (page->free_count == 0) {
        // the page is now full so we remove it from the page_list
        bin->page_list = page->next;
        if (page->next)
            page->next->prev = NULL;
        page->next = page->prev = NULL;
    }

    heap_add_leak_check_info((addr_t)address, bin->element_size, size);
    return address;
}
static inline bool
is_valid_alignment(size_t number)
{
    // accepts zero and all powers of two
    return ((~number + 1) | ((number << 1) - 1)) == ~(size_t)0;
}


static void *
heap_memalign(heap_allocator *heap, size_t alignment, size_t size)
{
    INFO(("memalign(alignment = %lu, size = %lu)\n", alignment, size));

    if (!is_valid_alignment(alignment)) {
        panic("memalign() with an alignment which is not a power of 2 (%lu)\n",
            alignment);
    }

    size += sizeof(addr_t) + sizeof(heap_leak_check_info);

    void *address = NULL;
    if (alignment < B_PAGE_SIZE) {
        if (alignment != 0) {
            // TODO: The alignment is done by ensuring that the element size
            // of the target bin is aligned with the requested alignment. This
            // has the problem that it wastes space because a better (smaller)
            // bin could possibly be selected. We should pick the best bin and
            // check if there is an aligned block in the free list or if a new
            // (page aligned) page has to be allocated anyway.
            size = ROUNDUP(size, alignment);
            for (uint32 i = 0; i < heap->bin_count; i++) {
                if (size <= heap->bins[i].element_size
                    && is_valid_alignment(heap->bins[i].element_size)) {
                    address = heap_allocate_from_bin(heap, i, size);
                    break;
                }
            }
        } else {
            for (uint32 i = 0; i < heap->bin_count; i++) {
                if (size <= heap->bins[i].element_size) {
                    address = heap_allocate_from_bin(heap, i, size);
                    break;
                }
            }
        }
    }

    if (address == NULL)
        address = heap_raw_alloc(heap, size, alignment);

    size -= sizeof(addr_t) + sizeof(heap_leak_check_info);

    INFO(("memalign(): asked to allocate %lu bytes, returning pointer %p\n",
        size, address));

    if (address == NULL)
        return address;

    memset(address, 0xcc, size);

    // make sure 0xdeadbeef is cleared if we do not overwrite the memory
    // and the user does not clear it
    if (((uint32 *)address)[1] == 0xdeadbeef)
        ((uint32 *)address)[1] = 0xcccccccc;

    return address;
}
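// Fill-pattern convention used throughout this allocator (as implemented
// above and in heap_free() below): freshly returned memory is filled with
// 0xcc, freed memory is filled with 0xdeadbeef, and heap_free() treats a
// block whose second 32-bit word already reads 0xdeadbeef as a possible
// double free and checks the page's free list before panicking.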
static status_t
heap_free(heap_allocator *heap, void *address)
{
    if (address == NULL)
        return B_OK;

    ReadLocker areaReadLocker(heap->area_lock);
    heap_area *area = heap->all_areas;
    while (area) {
        // since the all_areas list is ordered by base with the biggest
        // base at the top, we need only find the first area with a base
        // smaller than our address to become our only candidate for freeing
        if (area->base <= (addr_t)address) {
            if ((addr_t)address >= area->base + area->size) {
                // none of the other areas can contain the address as the list
                // is ordered
                return B_ENTRY_NOT_FOUND;
            }

            // this area contains the allocation, we're done searching
            break;
        }

        area = area->all_next;
    }

    if (area == NULL) {
        // this address does not belong to us
        return B_ENTRY_NOT_FOUND;
    }

    INFO(("free(): asked to free pointer %p\n", address));

    heap_page *page = &area->page_table[((addr_t)address - area->base)
        / heap->page_size];

    INFO(("free(): page %p: bin_index %d, free_count %d\n", page,
        page->bin_index, page->free_count));

    if (page->bin_index > heap->bin_count) {
        panic("free(): page %p: invalid bin_index %d\n", page,
            page->bin_index);
        return B_ERROR;
    }

    addr_t pageBase = area->base + page->index * heap->page_size;
    if (page->bin_index < heap->bin_count) {
        // small allocation
        heap_bin *bin = &heap->bins[page->bin_index];
        if (((addr_t)address - pageBase) % bin->element_size != 0) {
            panic("free(): address %p does not fall on allocation boundary"
                " for page base %p and element size %lu\n", address,
                (void *)pageBase, bin->element_size);
            return B_ERROR;
        }

        MutexLocker binLocker(bin->lock);

        if (((uint32 *)address)[1] == 0xdeadbeef) {
            // This block looks like it was freed already, walk the free list
            // on this page to make sure this address doesn't exist.
            for (addr_t *temp = page->free_list; temp != NULL;
                    temp = (addr_t *)*temp) {
                if (temp == address) {
                    panic("free(): address %p already exists in page free"
                        " list (double free)\n", address);
                    return B_ERROR;
                }
            }
        }

        heap_leak_check_info *info = (heap_leak_check_info *)((addr_t)address
            + bin->element_size - sizeof(heap_leak_check_info));
        if (info->size > bin->element_size - sizeof(addr_t)
                - sizeof(heap_leak_check_info)) {
            panic("leak check info has invalid size %lu for element size %lu,"
                " probably memory has been overwritten past allocation size\n",
                info->size, bin->element_size);
        }

        addr_t wallValue;
        addr_t wallAddress = (addr_t)address + info->size;
        memcpy(&wallValue, (void *)wallAddress, sizeof(addr_t));
        if (wallValue != wallAddress) {
            panic("someone wrote beyond small allocation at %p;"
                " size: %lu bytes; allocated by %ld; value: 0x%08lx\n",
                address, info->size, info->thread, wallValue);
        }

        // the first 4 bytes are overwritten with the next free list pointer
        // later on
        uint32 *dead = (uint32 *)address;
        for (uint32 i = 0; i < bin->element_size / sizeof(uint32); i++)
            dead[i] = 0xdeadbeef;

        if (sReuseMemory) {
            // add the address to the page free list
            *(addr_t *)address = (addr_t)page->free_list;
            page->free_list = (addr_t *)address;
            page->free_count++;

            if (page->free_count == bin->max_free_count) {
                // we are now empty, remove the page from the bin list
                MutexLocker pageLocker(heap->page_lock);
                heap_unlink_page(page, &bin->page_list);
                page->in_use = 0;
                heap_link_page(page, &area->free_pages);
                heap_free_pages_added(heap, area, 1);
            } else if (page->free_count == 1) {
                // we need to add ourselfs to the page list of the bin
                heap_link_page(page, &bin->page_list);
            } else {
                // we might need to move back in the free pages list
                if (page->next && page->next->free_count < page->free_count) {
                    // move ourselfs so the list stays ordered
                    heap_page *insert = page->next;
                    while (insert->next
                        && insert->next->free_count < page->free_count)
                        insert = insert->next;

                    heap_unlink_page(page, &bin->page_list);

                    page->prev = insert;
                    page->next = insert->next;
                    if (page->next)
                        page->next->prev = page;
                    insert->next = page;
                }
            }
        }
    } else {
        if ((addr_t)address != pageBase) {
            panic("free(): large allocation address %p not on page base %p\n",
                address, (void *)pageBase);
            return B_ERROR;
        }

        // large allocation, just return the pages to the page free list
        uint32 allocationID = page->allocation_id;
        uint32 maxPages = area->page_count - page->index;
        uint32 pageCount = 0;

        MutexLocker pageLocker(heap->page_lock);
        for (uint32 i = 0; i < maxPages; i++) {
            // loop until we find the end of this allocation
            if (!page[i].in_use || page[i].bin_index != heap->bin_count
                || page[i].allocation_id != allocationID)
                break;

            // this page still belongs to the same allocation
            if (sReuseMemory) {
                page[i].in_use = 0;
                page[i].allocation_id = 0;

                // return it to the free list
                heap_link_page(&page[i], &area->free_pages);
            }

            pageCount++;
        }

        size_t allocationSize = pageCount * heap->page_size;
        heap_leak_check_info *info = (heap_leak_check_info *)((addr_t)address
            + allocationSize - sizeof(heap_leak_check_info));
        if (info->size > allocationSize - sizeof(addr_t)
                - sizeof(heap_leak_check_info)) {
            panic("leak check info has invalid size %lu for allocation of %lu,"
                " probably memory has been overwritten past allocation size\n",
                info->size, allocationSize);
        }

        addr_t wallValue;
        addr_t wallAddress = (addr_t)address + info->size;
        memcpy(&wallValue, (void *)wallAddress, sizeof(addr_t));
        if (wallValue != wallAddress) {
            panic("someone wrote beyond big allocation at %p;"
                " size: %lu bytes; allocated by %ld; value: 0x%08lx\n",
                address, info->size, info->thread, wallValue);
        }

        uint32 *dead = (uint32 *)address;
        for (uint32 i = 0; i < allocationSize / sizeof(uint32); i++)
            dead[i] = 0xdeadbeef;

        if (sReuseMemory)
            heap_free_pages_added(heap, area, pageCount);
    }

    areaReadLocker.Unlock();

    if (heap->empty_areas > 1) {
        WriteLocker areaWriteLocker(heap->area_lock);
        MutexLocker pageLocker(heap->page_lock);

        area = heap->areas;
        while (area != NULL && heap->empty_areas > 1) {
            heap_area *next = area->next;
            if (area->area >= 0
                && area->free_page_count == area->page_count
                && heap_remove_area(heap, area) == B_OK) {
                delete_area(area->area);
                heap->empty_areas--;
            }

            area = next;
        }
    }

    return B_OK;
}
static status_t
heap_realloc(heap_allocator *heap, void *address, void **newAddress,
    size_t newSize)
{
    ReadLocker areaReadLocker(heap->area_lock);
    heap_area *area = heap->all_areas;
    while (area) {
        // since the all_areas list is ordered by base with the biggest
        // base at the top, we need only find the first area with a base
        // smaller than our address to become our only candidate for
        // reallocating
        if (area->base <= (addr_t)address) {
            if ((addr_t)address >= area->base + area->size) {
                // none of the other areas can contain the address as the list
                // is ordered
                return B_ENTRY_NOT_FOUND;
            }

            // this area contains the allocation, we're done searching
            break;
        }

        area = area->all_next;
    }

    if (area == NULL) {
        // this address does not belong to us
        return B_ENTRY_NOT_FOUND;
    }

    INFO(("realloc(address = %p, newSize = %lu)\n", address, newSize));

    heap_page *page = &area->page_table[((addr_t)address - area->base)
        / heap->page_size];
    if (page->bin_index > heap->bin_count) {
        panic("realloc(): page %p: invalid bin_index %d\n", page,
            page->bin_index);
        return B_ERROR;
    }

    // find out the size of the old allocation first
    size_t minSize = 0;
    size_t maxSize = 0;
    if (page->bin_index < heap->bin_count) {
        // this was a small allocation
        heap_bin *bin = &heap->bins[page->bin_index];
        maxSize = bin->element_size;
        if (page->bin_index > 0)
            minSize = heap->bins[page->bin_index - 1].element_size + 1;
    } else {
        // this was a large allocation
        uint32 allocationID = page->allocation_id;
        uint32 maxPages = area->page_count - page->index;
        maxSize = heap->page_size;

        MutexLocker pageLocker(heap->page_lock);
        for (uint32 i = 1; i < maxPages; i++) {
            if (!page[i].in_use || page[i].bin_index != heap->bin_count
                || page[i].allocation_id != allocationID)
                break;

            minSize += heap->page_size;
            maxSize += heap->page_size;
        }
    }

    newSize += sizeof(addr_t) + sizeof(heap_leak_check_info);

    // does the new allocation simply fit in the old allocation?
    if (newSize > minSize && newSize <= maxSize) {
        // update the size info (the info is at the end so stays where it is)
        heap_leak_check_info *info = (heap_leak_check_info *)((addr_t)address
            + maxSize - sizeof(heap_leak_check_info));
        newSize -= sizeof(addr_t) + sizeof(heap_leak_check_info);
        *newAddress = address;

        MutexLocker pageLocker(heap->page_lock);
        info->size = newSize;
        addr_t wallAddress = (addr_t)address + newSize;
        memcpy((void *)wallAddress, &wallAddress, sizeof(addr_t));
        return B_OK;
    }

    areaReadLocker.Unlock();

    // new leak check info will be created with the malloc below
    newSize -= sizeof(addr_t) + sizeof(heap_leak_check_info);

    // if not, allocate a new chunk of memory
    *newAddress = debug_heap_memalign(sDefaultAlignment, newSize);
    if (*newAddress == NULL) {
        // we tried but it didn't work out, but still the operation is done
        return B_OK;
    }

    // copy the old data and free the old allocation
    memcpy(*newAddress, address, min_c(maxSize, newSize));
    heap_free(heap, address);
    return B_OK;
}
static uint32
heap_class_for(size_t size)
{
    // take the extra info size into account
    size += sizeof(addr_t) + sizeof(heap_leak_check_info);

    uint32 index = 0;
    for (; index < HEAP_CLASS_COUNT - 1; index++) {
        if (size <= sHeapClasses[index].max_allocation_size)
            break;
    }

    return index;
}
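// Example (assuming a 4 kB B_PAGE_SIZE and the classes above): a malloc(100)
// ends up in class 0 because 100 plus the bookkeeping overhead is still below
// B_PAGE_SIZE / 8, while a malloc(3000) skips to class 1 and a malloc(40000)
// to class 2.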
static status_t
heap_get_allocation_info(heap_allocator *heap, void *address, size_t *size,
    thread_id *thread)
{
    ReadLocker areaReadLocker(heap->area_lock);
    heap_area *area = heap->all_areas;
    while (area) {
        // since the all_areas list is ordered by base with the biggest
        // base at the top, we need only find the first area with a base
        // smaller than our address to become our only candidate for freeing
        if (area->base <= (addr_t)address) {
            if ((addr_t)address >= area->base + area->size) {
                // none of the other areas can contain the address as the list
                // is ordered
                return B_ENTRY_NOT_FOUND;
            }

            // this area contains the allocation, we're done searching
            break;
        }

        area = area->all_next;
    }

    if (area == NULL) {
        // this address does not belong to us
        return B_ENTRY_NOT_FOUND;
    }

    heap_page *page = &area->page_table[((addr_t)address - area->base)
        / heap->page_size];

    if (page->bin_index > heap->bin_count) {
        panic("get_allocation_info(): page %p: invalid bin_index %d\n", page,
            page->bin_index);
        return B_ERROR;
    }

    heap_leak_check_info *info = NULL;
    addr_t pageBase = area->base + page->index * heap->page_size;
    if (page->bin_index < heap->bin_count) {
        // small allocation
        heap_bin *bin = &heap->bins[page->bin_index];
        if (((addr_t)address - pageBase) % bin->element_size != 0) {
            panic("get_allocation_info(): address %p does not fall on"
                " allocation boundary for page base %p and element size %lu\n",
                address, (void *)pageBase, bin->element_size);
            return B_ERROR;
        }

        MutexLocker binLocker(bin->lock);

        info = (heap_leak_check_info *)((addr_t)address + bin->element_size
            - sizeof(heap_leak_check_info));
        if (info->size > bin->element_size - sizeof(addr_t)
                - sizeof(heap_leak_check_info)) {
            panic("leak check info has invalid size %lu for element size %lu,"
                " probably memory has been overwritten past allocation size\n",
                info->size, bin->element_size);
            return B_ERROR;
        }
    } else {
        if ((addr_t)address != pageBase) {
            panic("get_allocation_info(): large allocation address %p not on"
                " page base %p\n", address, (void *)pageBase);
            return B_ERROR;
        }

        uint32 allocationID = page->allocation_id;
        uint32 maxPages = area->page_count - page->index;
        uint32 pageCount = 0;

        MutexLocker pageLocker(heap->page_lock);
        for (uint32 i = 0; i < maxPages; i++) {
            // loop until we find the end of this allocation
            if (!page[i].in_use || page[i].bin_index != heap->bin_count
                || page[i].allocation_id != allocationID)
                break;

            pageCount++;
        }

        size_t allocationSize = pageCount * heap->page_size;
        info = (heap_leak_check_info *)((addr_t)address + allocationSize
            - sizeof(heap_leak_check_info));
        if (info->size > allocationSize - sizeof(addr_t)
                - sizeof(heap_leak_check_info)) {
            panic("leak check info has invalid size %lu for allocation of %lu,"
                " probably memory has been overwritten past allocation size\n",
                info->size, allocationSize);
            return B_ERROR;
        }
    }

    if (size != NULL)
        *size = info->size;
    if (thread != NULL)
        *thread = info->thread;
    return B_OK;
}
static status_t
heap_create_new_heap_area(heap_allocator *heap, const char *name, size_t size)
{
    void *address = NULL;
    area_id heapArea = create_area(name, &address, B_ANY_ADDRESS, size,
        B_NO_LOCK, B_READ_AREA | B_WRITE_AREA);
    if (heapArea < B_OK) {
        INFO(("heap: couldn't allocate heap area \"%s\"\n", name));
        return heapArea;
    }

    heap_add_area(heap, heapArea, (addr_t)address, size);
    if (sParanoidValidation)
        heap_validate_heap(heap);

    return B_OK;
}
static int32
heap_wall_checker(void *data)
{
    int msInterval = (addr_t)data;
    while (!sStopWallChecking) {
        heap_validate_walls();
        snooze(msInterval * 1000);
    }

    return 0;
}
// #pragma mark - Heap Debug API
static status_t
debug_heap_start_wall_checking(int msInterval)
{
    if (sWallCheckThread < 0) {
        sWallCheckThread = spawn_thread(heap_wall_checker, "heap wall checker",
            B_LOW_PRIORITY, (void *)(addr_t)msInterval);
    }

    if (sWallCheckThread < 0)
        return sWallCheckThread;

    sStopWallChecking = false;
    return resume_thread(sWallCheckThread);
}
static status_t
debug_heap_stop_wall_checking()
{
    status_t result;
    sStopWallChecking = true;
    return wait_for_thread(sWallCheckThread, &result);
}


static void
debug_heap_set_paranoid_validation(bool enabled)
{
    sParanoidValidation = enabled;
}


static void
debug_heap_set_memory_reuse(bool enabled)
{
    sReuseMemory = enabled;
}


static void
debug_heap_set_debugger_calls(bool enabled)
{
    sDebuggerCalls = enabled;
}


static void
debug_heap_set_default_alignment(size_t defaultAlignment)
{
    sDefaultAlignment = defaultAlignment;
}


static void
debug_heap_validate_heaps()
{
    for (uint32 i = 0; i < HEAP_CLASS_COUNT; i++)
        heap_validate_heap(sHeaps[i]);
}


static void
debug_heap_dump_heaps(bool dumpAreas, bool dumpBins)
{
    for (uint32 i = 0; i < HEAP_CLASS_COUNT; i++)
        dump_allocator(sHeaps[i], dumpAreas, dumpBins);
}
static void *
debug_heap_malloc_with_guard_page(size_t size)
{
    size_t areaSize = ROUNDUP(size + sizeof(area_allocation_info) + B_PAGE_SIZE,
        B_PAGE_SIZE);
    if (areaSize < size) {
        // the size overflowed
        return NULL;
    }

    void *address = NULL;
    area_id allocationArea = create_area("guarded area", &address,
        B_ANY_ADDRESS, areaSize, B_NO_LOCK, B_READ_AREA | B_WRITE_AREA);
    if (allocationArea < B_OK) {
        panic("heap: failed to create area for guarded allocation of %lu"
            " bytes\n", size);
        return NULL;
    }

    if (mprotect((void *)((addr_t)address + areaSize - B_PAGE_SIZE),
            B_PAGE_SIZE, PROT_NONE) != 0) {
        panic("heap: failed to protect guard page: %s\n", strerror(errno));
        delete_area(allocationArea);
        return NULL;
    }

    area_allocation_info *info = (area_allocation_info *)address;
    info->magic = kAreaAllocationMagic;
    info->area = allocationArea;
    info->base = address;
    info->size = areaSize;
    info->thread = find_thread(NULL);
    info->allocation_size = size;
    info->allocation_alignment = 0;

    // the address is calculated so that the end of the allocation
    // is at the end of the usable space of the requested area
    address = (void *)((addr_t)address + areaSize - B_PAGE_SIZE - size);

    INFO(("heap: allocated area %ld for guarded allocation of %lu bytes\n",
        allocationArea, size));

    info->allocation_base = address;

    memset(address, 0xcc, size);
    return address;
}
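// Layout of a guarded allocation (sketch):
//
//   [area_allocation_info][ ...padding... ][ user data (size) ][ guard page ]
//   ^ area base                                                ^ PROT_NONE
//
// The user block is pushed right up against the guard page, so any overrun
// faults immediately instead of silently corrupting memory.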
static status_t
debug_heap_get_allocation_info(void *address, size_t *size,
    thread_id *thread)
{
    for (uint32 i = 0; i < HEAP_CLASS_COUNT; i++) {
        heap_allocator *heap = sHeaps[i];
        if (heap_get_allocation_info(heap, address, size, thread) == B_OK)
            return B_OK;
    }

    // or maybe it was a huge allocation using an area
    area_info areaInfo;
    area_id area = area_for(address);
    if (area >= B_OK && get_area_info(area, &areaInfo) == B_OK) {
        area_allocation_info *info = (area_allocation_info *)areaInfo.address;

        // just make extra sure it was allocated by us
        if (info->magic == kAreaAllocationMagic && info->area == area
            && info->size == areaInfo.size && info->base == areaInfo.address
            && info->allocation_size < areaInfo.size) {
            if (size != NULL)
                *size = info->allocation_size;
            if (thread != NULL)
                *thread = info->thread;
            return B_OK;
        }
    }

    return B_ENTRY_NOT_FOUND;
}
// #pragma mark - Init
static status_t
debug_heap_init(void)
{
    // This will locate the heap base at 384 MB and reserve the next 1152 MB
    // for it. They may get reclaimed by other areas, though, but the maximum
    // size of the heap is guaranteed until the space is really needed.
    void *heapBase = (void *)0x18000000;
    status_t status = _kern_reserve_address_range((addr_t *)&heapBase,
        B_EXACT_ADDRESS, 0x48000000);

    area_id heapArea = create_area("heap", (void **)&heapBase,
        status == B_OK ? B_EXACT_ADDRESS : B_BASE_ADDRESS,
        HEAP_INITIAL_SIZE, B_NO_LOCK, B_READ_AREA | B_WRITE_AREA);
    if (heapArea < B_OK)
        return heapArea;

    addr_t base = (addr_t)heapBase;
    for (uint32 i = 0; i < HEAP_CLASS_COUNT; i++) {
        size_t partSize = HEAP_INITIAL_SIZE
            * sHeapClasses[i].initial_percentage / 100;
        sHeaps[i] = heap_create_allocator(sHeapClasses[i].name, base, partSize,
            &sHeapClasses[i]);
        base += partSize;
    }

    return B_OK;
}
// #pragma mark - Public API
static void *
debug_heap_memalign(size_t alignment, size_t size)
{
    size_t alignedSize = size + sizeof(addr_t) + sizeof(heap_leak_check_info);
    if (alignment != 0 && alignment < B_PAGE_SIZE)
        alignedSize = ROUNDUP(alignedSize, alignment);

    if (alignedSize >= HEAP_AREA_USE_THRESHOLD) {
        // don't even attempt such a huge allocation - use areas instead
        size_t areaSize = ROUNDUP(size + sizeof(area_allocation_info)
            + alignment, B_PAGE_SIZE);
        if (areaSize < size) {
            // the size overflowed
            return NULL;
        }

        void *address = NULL;
        area_id allocationArea = create_area("memalign area", &address,
            B_ANY_ADDRESS, areaSize, B_NO_LOCK, B_READ_AREA | B_WRITE_AREA);
        if (allocationArea < B_OK) {
            panic("heap: failed to create area for huge allocation of %lu"
                " bytes\n", size);
            return NULL;
        }

        area_allocation_info *info = (area_allocation_info *)address;
        info->magic = kAreaAllocationMagic;
        info->area = allocationArea;
        info->base = address;
        info->size = areaSize;
        info->thread = find_thread(NULL);
        info->allocation_size = size;
        info->allocation_alignment = alignment;

        address = (void *)((addr_t)address + sizeof(area_allocation_info));
        if (alignment != 0) {
            address = (void *)ROUNDUP((addr_t)address, alignment);
            ASSERT((addr_t)address % alignment == 0);
            ASSERT((addr_t)address + size - 1 < (addr_t)info + areaSize - 1);
        }

        INFO(("heap: allocated area %ld for huge allocation of %lu bytes\n",
            allocationArea, size));

        info->allocation_base = address;

        memset(address, 0xcc, size);
        return address;
    }

    uint32 heapClass = alignment < B_PAGE_SIZE
        ? heap_class_for(alignedSize) : 0;

    heap_allocator *heap = sHeaps[heapClass];
    void *result = heap_memalign(heap, alignment, size);
    if (result == NULL) {
        // add new area and try again
        heap_create_new_heap_area(heap, "additional heap area", HEAP_GROW_SIZE);
        result = heap_memalign(heap, alignment, size);
    }

    if (sParanoidValidation)
        heap_validate_heap(heap);

    if (result == NULL) {
        panic("heap: heap has run out of memory trying to allocate %lu bytes\n",
            size);
    }

    if (alignment != 0)
        ASSERT((addr_t)result % alignment == 0);

    return result;
}
static void *
debug_heap_malloc(size_t size)
{
    if (sUseGuardPage)
        return debug_heap_malloc_with_guard_page(size);

    return debug_heap_memalign(sDefaultAlignment, size);
}
static void
debug_heap_free(void *address)
{
    for (uint32 i = 0; i < HEAP_CLASS_COUNT; i++) {
        heap_allocator *heap = sHeaps[i];
        if (heap_free(heap, address) == B_OK) {
            if (sParanoidValidation)
                heap_validate_heap(heap);
            return;
        }
    }

    // or maybe it was a huge allocation using an area
    area_info areaInfo;
    area_id area = area_for(address);
    if (area >= B_OK && get_area_info(area, &areaInfo) == B_OK) {
        area_allocation_info *info = (area_allocation_info *)areaInfo.address;

        // just make extra sure it was allocated by us
        if (info->magic == kAreaAllocationMagic && info->area == area
            && info->size == areaInfo.size && info->base == areaInfo.address
            && info->allocation_size < areaInfo.size) {
            delete_area(area);
            INFO(("free(): freed huge allocation by deleting area %ld\n",
                area));
            return;
        }
    }

    panic("free(): free failed for address %p\n", address);
}
static void *
debug_heap_realloc(void *address, size_t newSize)
{
    if (address == NULL)
        return debug_heap_memalign(sDefaultAlignment, newSize);

    if (newSize == 0) {
        debug_heap_free(address);
        return NULL;
    }

    void *newAddress = NULL;
    for (uint32 i = 0; i < HEAP_CLASS_COUNT; i++) {
        heap_allocator *heap = sHeaps[i];
        if (heap_realloc(heap, address, &newAddress, newSize) == B_OK) {
            if (sParanoidValidation)
                heap_validate_heap(heap);
            return newAddress;
        }
    }

    // or maybe it was a huge allocation using an area
    area_info areaInfo;
    area_id area = area_for(address);
    if (area >= B_OK && get_area_info(area, &areaInfo) == B_OK) {
        area_allocation_info *info = (area_allocation_info *)areaInfo.address;

        // just make extra sure it was allocated by us
        if (info->magic == kAreaAllocationMagic && info->area == area
            && info->size == areaInfo.size && info->base == areaInfo.address
            && info->allocation_size < areaInfo.size) {
            if (sUseGuardPage) {
                size_t available = info->size - B_PAGE_SIZE
                    - sizeof(area_allocation_info);

                if (available >= newSize) {
                    // there is enough room available for the newSize
                    newAddress = (void*)((addr_t)info->allocation_base
                        + info->allocation_size - newSize);
                    INFO(("realloc(): new size %ld fits in old area %ld with "
                        "%ld available -> new address: %p\n", newSize, area,
                        available, newAddress));
                    memmove(newAddress, info->allocation_base,
                        min_c(newSize, info->allocation_size));
                    info->allocation_base = newAddress;
                    info->allocation_size = newSize;
                    return newAddress;
                }
            } else {
                size_t available = info->size - ((addr_t)info->allocation_base
                    - (addr_t)info->base);

                if (available >= newSize) {
                    // there is enough room available for the newSize
                    INFO(("realloc(): new size %ld fits in old area %ld with "
                        "%ld available\n", newSize, area, available));
                    info->allocation_size = newSize;
                    return address;
                }
            }

            // have to allocate/copy/free - TODO maybe resize the area instead?
            newAddress = debug_heap_memalign(sDefaultAlignment, newSize);
            if (newAddress == NULL) {
                panic("realloc(): failed to allocate new block of %ld"
                    " bytes\n", newSize);
                return NULL;
            }

            memcpy(newAddress, address, min_c(newSize, info->allocation_size));
            delete_area(area);
            INFO(("realloc(): allocated new block %p for size %ld and deleted "
                "old area %ld\n", newAddress, newSize, area));
            return newAddress;
        }
    }

    panic("realloc(): failed to realloc address %p to size %lu\n", address,
        newSize);
    return NULL;
}
heap_implementation __mallocDebugHeap = {
    debug_heap_init,
    NULL,   // terminate_after

    debug_heap_memalign,
    debug_heap_malloc,
    debug_heap_free,
    debug_heap_realloc,

    NULL,   // calloc
    NULL,   // valloc
    NULL,   // posix_memalign

    debug_heap_start_wall_checking,
    debug_heap_stop_wall_checking,
    debug_heap_set_paranoid_validation,
    debug_heap_set_memory_reuse,
    debug_heap_set_debugger_calls,
    debug_heap_set_default_alignment,
    debug_heap_validate_heaps,
    heap_validate_walls,

    dump_allocations,
    debug_heap_dump_heaps,
    debug_heap_malloc_with_guard_page,
    debug_heap_get_allocation_info,

    NULL,   // set_dump_allocations_on_exit
    NULL    // set_stack_trace_depth
};