/*
 * Copyright 2008-2010, Michael Lotz, mmlr@mlotz.ch.
 * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include <arch/debug.h>
#include <util/AutoLock.h>
#include <vm/vm_page.h>

#	define TRACE(x) dprintf x
#if !USE_DEBUG_HEAP_FOR_MALLOC
#	undef KERNEL_HEAP_LEAK_CHECK
#endif


#if KERNEL_HEAP_LEAK_CHECK
typedef struct heap_leak_check_info_s {
	addr_t		caller;
	size_t		size;
	thread_id	thread;
	team_id		team;
} heap_leak_check_info;

struct caller_info {
	addr_t		caller;
	size_t		count;
	size_t		size;
};

static const int32 kCallerInfoTableSize = 1024;
static caller_info sCallerInfoTable[kCallerInfoTableSize];
static int32 sCallerInfoCount = 0;
#endif	// KERNEL_HEAP_LEAK_CHECK
typedef struct heap_page_s heap_page;


typedef struct heap_area_s {
	area_id			area;

	addr_t			base;
	size_t			size;

	uint32			page_count;
	uint32			free_page_count;

	heap_page *		free_pages;
	heap_page *		page_table;

	heap_area_s *	prev;
	heap_area_s *	next;
	heap_area_s *	all_next;
} heap_area;
#define MAX_BIN_COUNT 31	// depends on the size of the bin_index field

typedef struct heap_page_s {
	heap_area_s *		area;
	uint16				index;
	uint16				bin_index : 5;
	uint16				free_count : 10;
	uint16				in_use : 1;
	heap_page_s *		next;
	heap_page_s *		prev;
	union {
		uint16			empty_index;
		uint16			allocation_id;	// used for bin == bin_count allocations
	};
	addr_t *			free_list;
} heap_page;


typedef struct heap_bin_s {
	mutex		lock;
	uint32		element_size;
	uint16		max_free_count;
	heap_page *	page_list;	// sorted so that the desired page is always first
} heap_bin;
struct heap_allocator_s {
	rw_lock		area_lock;
	mutex		page_lock;

	const char	*name;
	uint32		bin_count;
	uint32		page_size;

	uint32		total_pages;
	uint32		total_free_pages;
	uint32		empty_areas;

#if KERNEL_HEAP_LEAK_CHECK
	addr_t		(*get_caller)();
#endif

	heap_bin *	bins;
	heap_area *	areas;		// sorted so that the desired area is always first
	heap_area *	all_areas;	// all areas including full ones
};

typedef struct heap_allocator_s heap_allocator;
static const uint32 kAreaAllocationMagic = 'AAMG';
typedef struct area_allocation_info_s {
	area_id		area;
	void *		base;
	uint32		magic;
	size_t		size;
	size_t		allocation_size;
	size_t		allocation_alignment;
	void *		allocation_base;
} area_allocation_info;


struct DeferredFreeListEntry : SinglyLinkedListLinkImpl<DeferredFreeListEntry> {
};


typedef SinglyLinkedList<DeferredFreeListEntry> DeferredFreeList;
typedef SinglyLinkedList<DeferredDeletable> DeferredDeletableList;
#if USE_DEBUG_HEAP_FOR_MALLOC

#define VIP_HEAP_SIZE	1024 * 1024

// Heap class configuration
#define HEAP_CLASS_COUNT 3
static const heap_class sHeapClasses[HEAP_CLASS_COUNT] = {
	{
		"small",					/* name */
		50,							/* initial percentage */
		B_PAGE_SIZE / 8,			/* max allocation size */
		B_PAGE_SIZE,				/* page size */
		8,							/* min bin size */
		4,							/* bin alignment */
		8,							/* min count per page */
		16							/* max waste per page */
	},
	{
		"medium",					/* name */
		30,							/* initial percentage */
		B_PAGE_SIZE * 2,			/* max allocation size */
		B_PAGE_SIZE * 8,			/* page size */
		B_PAGE_SIZE / 8,			/* min bin size */
		32,							/* bin alignment */
		4,							/* min count per page */
		64							/* max waste per page */
	},
	{
		"large",					/* name */
		20,							/* initial percentage */
		HEAP_AREA_USE_THRESHOLD,	/* max allocation size */
		B_PAGE_SIZE * 16,			/* page size */
		B_PAGE_SIZE * 2,			/* min bin size */
		128,						/* bin alignment */
		1,							/* min count per page */
		256							/* max waste per page */
	}
};
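// A rough worked example of how a heap class turns into bins (illustrative
// only, not part of the original source; numbers assume B_PAGE_SIZE == 4096):
// for the "small" class, heap_create_allocator() starts at
// count = page_size / min_bin_size = 4096 / 8 = 512 elements per page and
// walks count down to min_count_per_page = 8, computing
// binSize = (page_size / count) & ~(bin_alignment - 1). Sizes that repeat
// (count = 510 also yields 8, for example) or that waste more than
// max_waste_per_page bytes per page are skipped; count = 256 yields a 16 byte
// bin with zero waste. Requests above max_allocation_size are routed to the
// next larger class by heap_index_for().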
static uint32 sHeapCount;
static heap_allocator *sHeaps[HEAP_CLASS_COUNT * SMP_MAX_CPUS];
static uint32 *sLastGrowRequest[HEAP_CLASS_COUNT * SMP_MAX_CPUS];
static uint32 *sLastHandledGrowRequest[HEAP_CLASS_COUNT * SMP_MAX_CPUS];

static heap_allocator *sVIPHeap;
static heap_allocator *sGrowHeap = NULL;
static thread_id sHeapGrowThread = -1;
static sem_id sHeapGrowSem = -1;
static sem_id sHeapGrownNotify = -1;
static bool sAddGrowHeap = false;

#endif	// USE_DEBUG_HEAP_FOR_MALLOC

static DeferredFreeList sDeferredFreeList;
static DeferredDeletableList sDeferredDeletableList;
static spinlock sDeferredFreeListLock;
//	#pragma mark - Tracing

#if KERNEL_HEAP_TRACING
namespace KernelHeapTracing {

class Allocate : public AbstractTraceEntry {
	public:
		Allocate(addr_t address, size_t size)
			:	fAddress(address),
				fSize(size)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput &out)
		{
			out.Print("heap allocate: 0x%08lx (%lu bytes)", fAddress, fSize);
		}

	private:
		addr_t	fAddress;
		size_t	fSize;
};


class Reallocate : public AbstractTraceEntry {
	public:
		Reallocate(addr_t oldAddress, addr_t newAddress, size_t newSize)
			:	fOldAddress(oldAddress),
				fNewAddress(newAddress),
				fNewSize(newSize)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput &out)
		{
			out.Print("heap reallocate: 0x%08lx -> 0x%08lx (%lu bytes)",
				fOldAddress, fNewAddress, fNewSize);
		}

	private:
		addr_t	fOldAddress;
		addr_t	fNewAddress;
		size_t	fNewSize;
};


class Free : public AbstractTraceEntry {
	public:
		Free(addr_t address)
			:	fAddress(address)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput &out)
		{
			out.Print("heap free: 0x%08lx", fAddress);
		}

	private:
		addr_t	fAddress;
};

}	// namespace KernelHeapTracing

#	define T(x)	if (!gKernelStartup) new(std::nothrow) KernelHeapTracing::x;
283 // #pragma mark - Debug functions
286 #if KERNEL_HEAP_LEAK_CHECK
290 // Find the first return address outside of the allocator code. Note, that
291 // this makes certain assumptions about how the code for the functions
292 // ends up in the kernel object.
293 addr_t returnAddresses
[5];
294 int32 depth
= arch_debug_get_stack_trace(returnAddresses
, 5, 0, 1,
296 for (int32 i
= 0; i
< depth
; i
++) {
297 if (returnAddresses
[i
] < (addr_t
)&get_caller
298 || returnAddresses
[i
] > (addr_t
)&malloc_referenced_release
) {
299 return returnAddresses
[i
];
309 dump_page(heap_page
*page
)
312 for (addr_t
*temp
= page
->free_list
; temp
!= NULL
; temp
= (addr_t
*)*temp
)
315 kprintf("\t\tpage %p: bin_index: %u; free_count: %u; empty_index: %u; "
316 "free_list %p (%" B_PRIu32
" entr%s)\n", page
, page
->bin_index
,
317 page
->free_count
, page
->empty_index
, page
->free_list
, count
,
318 count
== 1 ? "y" : "ies");
323 dump_bin(heap_bin
*bin
)
326 for (heap_page
*page
= bin
->page_list
; page
!= NULL
; page
= page
->next
)
329 kprintf("\telement_size: %" B_PRIu32
"; max_free_count: %u; page_list %p "
330 "(%" B_PRIu32
" pages);\n", bin
->element_size
, bin
->max_free_count
,
331 bin
->page_list
, count
);
333 for (heap_page
*page
= bin
->page_list
; page
!= NULL
; page
= page
->next
)
339 dump_bin_list(heap_allocator
*heap
)
341 for (uint32 i
= 0; i
< heap
->bin_count
; i
++)
342 dump_bin(&heap
->bins
[i
]);
348 dump_allocator_areas(heap_allocator
*heap
)
350 heap_area
*area
= heap
->all_areas
;
352 kprintf("\tarea %p: area: %" B_PRId32
"; base: %p; size: %zu; page_count: "
353 "%" B_PRIu32
"; free_pages: %p (%" B_PRIu32
" entr%s)\n", area
,
354 area
->area
, (void *)area
->base
, area
->size
, area
->page_count
,
355 area
->free_pages
, area
->free_page_count
,
356 area
->free_page_count
== 1 ? "y" : "ies");
357 area
= area
->all_next
;
365 dump_allocator(heap_allocator
*heap
, bool areas
, bool bins
)
367 kprintf("allocator %p: name: %s; page_size: %" B_PRIu32
"; bin_count: "
368 "%" B_PRIu32
"; pages: %" B_PRIu32
"; free_pages: %" B_PRIu32
"; "
369 "empty_areas: %" B_PRIu32
"\n", heap
, heap
->name
, heap
->page_size
,
370 heap
->bin_count
, heap
->total_pages
, heap
->total_free_pages
,
374 dump_allocator_areas(heap
);
381 dump_heap_list(int argc
, char **argv
)
383 #if USE_DEBUG_HEAP_FOR_MALLOC
384 if (argc
== 2 && strcmp(argv
[1], "grow") == 0) {
385 // only dump dedicated grow heap info
386 kprintf("dedicated grow heap:\n");
387 dump_allocator(sGrowHeap
, true, true);
395 if (strcmp(argv
[1], "stats") == 0) {
400 uint64 heapAddress
= 0;
401 if (i
< argc
&& !evaluate_debug_expression(argv
[i
], &heapAddress
, true)) {
402 print_debugger_command_usage(argv
[0]);
406 if (heapAddress
== 0) {
407 #if USE_DEBUG_HEAP_FOR_MALLOC
408 // dump default kernel heaps
409 for (uint32 i
= 0; i
< sHeapCount
; i
++)
410 dump_allocator(sHeaps
[i
], !stats
, !stats
);
412 print_debugger_command_usage(argv
[0]);
415 // dump specified heap
416 dump_allocator((heap_allocator
*)(addr_t
)heapAddress
, !stats
, !stats
);
423 #if !KERNEL_HEAP_LEAK_CHECK
426 dump_allocations(int argc
, char **argv
)
428 uint64 heapAddress
= 0;
429 bool statsOnly
= false;
430 for (int32 i
= 1; i
< argc
; i
++) {
431 if (strcmp(argv
[i
], "stats") == 0)
433 else if (!evaluate_debug_expression(argv
[i
], &heapAddress
, true)) {
434 print_debugger_command_usage(argv
[0]);
439 size_t totalSize
= 0;
440 uint32 totalCount
= 0;
441 #if USE_DEBUG_HEAP_FOR_MALLOC
442 for (uint32 heapIndex
= 0; heapIndex
< sHeapCount
; heapIndex
++) {
443 heap_allocator
*heap
= sHeaps
[heapIndex
];
444 if (heapAddress
!= 0)
445 heap
= (heap_allocator
*)(addr_t
)heapAddress
;
448 heap_allocator
*heap
= (heap_allocator
*)(addr_t
)heapAddress
;
450 print_debugger_command_usage(argv
[0]);
458 // go through all the pages in all the areas
459 heap_area
*area
= heap
->all_areas
;
461 for (uint32 i
= 0; i
< area
->page_count
; i
++) {
462 heap_page
*page
= &area
->page_table
[i
];
466 addr_t base
= area
->base
+ i
* heap
->page_size
;
467 if (page
->bin_index
< heap
->bin_count
) {
468 // page is used by a small allocation bin
469 uint32 elementCount
= page
->empty_index
;
471 = heap
->bins
[page
->bin_index
].element_size
;
472 for (uint32 j
= 0; j
< elementCount
;
473 j
++, base
+= elementSize
) {
474 // walk the free list to see if this element is in use
475 bool elementInUse
= true;
476 for (addr_t
*temp
= page
->free_list
; temp
!= NULL
;
477 temp
= (addr_t
*)*temp
) {
478 if ((addr_t
)temp
== base
) {
479 elementInUse
= false;
488 kprintf("address: 0x%p; size: %lu bytes\n",
489 (void *)base
, elementSize
);
492 totalSize
+= elementSize
;
496 // page is used by a big allocation, find the page count
497 uint32 pageCount
= 1;
498 while (i
+ pageCount
< area
->page_count
499 && area
->page_table
[i
+ pageCount
].in_use
500 && area
->page_table
[i
+ pageCount
].bin_index
502 && area
->page_table
[i
+ pageCount
].allocation_id
503 == page
->allocation_id
)
506 size_t size
= pageCount
* heap
->page_size
;
509 kprintf("address: %p; size: %lu bytes\n", (void *)base
,
516 // skip the allocated pages
521 area
= area
->all_next
;
524 if (heapAddress
!= 0)
528 kprintf("total allocations: %" B_PRIu32
"; total bytes: %zu\n", totalCount
, totalSize
);
532 #else // !KERNEL_HEAP_LEAK_CHECK
535 dump_allocations(int argc
, char **argv
)
538 thread_id thread
= -1;
541 bool statsOnly
= false;
543 for (int32 i
= 1; i
< argc
; i
++) {
544 if (strcmp(argv
[i
], "team") == 0)
545 team
= parse_expression(argv
[++i
]);
546 else if (strcmp(argv
[i
], "thread") == 0)
547 thread
= parse_expression(argv
[++i
]);
548 else if (strcmp(argv
[i
], "caller") == 0)
549 caller
= parse_expression(argv
[++i
]);
550 else if (strcmp(argv
[i
], "address") == 0)
551 address
= parse_expression(argv
[++i
]);
552 else if (strcmp(argv
[i
], "stats") == 0)
555 print_debugger_command_usage(argv
[0]);
560 size_t totalSize
= 0;
561 uint32 totalCount
= 0;
562 for (uint32 heapIndex
= 0; heapIndex
< sHeapCount
; heapIndex
++) {
563 heap_allocator
*heap
= sHeaps
[heapIndex
];
565 // go through all the pages in all the areas
566 heap_area
*area
= heap
->all_areas
;
568 heap_leak_check_info
*info
= NULL
;
569 for (uint32 i
= 0; i
< area
->page_count
; i
++) {
570 heap_page
*page
= &area
->page_table
[i
];
574 addr_t base
= area
->base
+ i
* heap
->page_size
;
575 if (page
->bin_index
< heap
->bin_count
) {
576 // page is used by a small allocation bin
577 uint32 elementCount
= page
->empty_index
;
579 = heap
->bins
[page
->bin_index
].element_size
;
580 for (uint32 j
= 0; j
< elementCount
;
581 j
++, base
+= elementSize
) {
582 // walk the free list to see if this element is in use
583 bool elementInUse
= true;
584 for (addr_t
*temp
= page
->free_list
; temp
!= NULL
;
585 temp
= (addr_t
*)*temp
) {
586 if ((addr_t
)temp
== base
) {
587 elementInUse
= false;
595 info
= (heap_leak_check_info
*)(base
+ elementSize
596 - sizeof(heap_leak_check_info
));
598 if ((team
== -1 || info
->team
== team
)
599 && (thread
== -1 || info
->thread
== thread
)
600 && (caller
== 0 || info
->caller
== caller
)
601 && (address
== 0 || base
== address
)) {
604 kprintf("team: % 6ld; thread: % 6ld; "
605 "address: 0x%08lx; size: %lu bytes; "
606 "caller: %#lx\n", info
->team
, info
->thread
,
607 base
, info
->size
, info
->caller
);
610 totalSize
+= info
->size
;
615 // page is used by a big allocation, find the page count
616 uint32 pageCount
= 1;
617 while (i
+ pageCount
< area
->page_count
618 && area
->page_table
[i
+ pageCount
].in_use
619 && area
->page_table
[i
+ pageCount
].bin_index
621 && area
->page_table
[i
+ pageCount
].allocation_id
622 == page
->allocation_id
)
625 info
= (heap_leak_check_info
*)(base
+ pageCount
626 * heap
->page_size
- sizeof(heap_leak_check_info
));
628 if ((team
== -1 || info
->team
== team
)
629 && (thread
== -1 || info
->thread
== thread
)
630 && (caller
== 0 || info
->caller
== caller
)
631 && (address
== 0 || base
== address
)) {
634 kprintf("team: % 6ld; thread: % 6ld;"
635 " address: 0x%08lx; size: %lu bytes;"
636 " caller: %#lx\n", info
->team
, info
->thread
,
637 base
, info
->size
, info
->caller
);
640 totalSize
+= info
->size
;
644 // skip the allocated pages
649 area
= area
->all_next
;
653 kprintf("total allocations: %lu; total bytes: %lu\n", totalCount
,
660 get_caller_info(addr_t caller
)
662 // find the caller info
663 for (int32 i
= 0; i
< sCallerInfoCount
; i
++) {
664 if (caller
== sCallerInfoTable
[i
].caller
)
665 return &sCallerInfoTable
[i
];
668 // not found, add a new entry, if there are free slots
669 if (sCallerInfoCount
>= kCallerInfoTableSize
)
672 caller_info
* info
= &sCallerInfoTable
[sCallerInfoCount
++];
673 info
->caller
= caller
;
682 caller_info_compare_size(const void* _a
, const void* _b
)
684 const caller_info
* a
= (const caller_info
*)_a
;
685 const caller_info
* b
= (const caller_info
*)_b
;
686 return (int)(b
->size
- a
->size
);
691 caller_info_compare_count(const void* _a
, const void* _b
)
693 const caller_info
* a
= (const caller_info
*)_a
;
694 const caller_info
* b
= (const caller_info
*)_b
;
695 return (int)(b
->count
- a
->count
);
700 analyze_allocation_callers(heap_allocator
*heap
)
702 // go through all the pages in all the areas
703 heap_area
*area
= heap
->all_areas
;
705 heap_leak_check_info
*info
= NULL
;
706 for (uint32 i
= 0; i
< area
->page_count
; i
++) {
707 heap_page
*page
= &area
->page_table
[i
];
711 addr_t base
= area
->base
+ i
* heap
->page_size
;
712 if (page
->bin_index
< heap
->bin_count
) {
713 // page is used by a small allocation bin
714 uint32 elementCount
= page
->empty_index
;
715 size_t elementSize
= heap
->bins
[page
->bin_index
].element_size
;
716 for (uint32 j
= 0; j
< elementCount
; j
++, base
+= elementSize
) {
717 // walk the free list to see if this element is in use
718 bool elementInUse
= true;
719 for (addr_t
*temp
= page
->free_list
; temp
!= NULL
;
720 temp
= (addr_t
*)*temp
) {
721 if ((addr_t
)temp
== base
) {
722 elementInUse
= false;
730 info
= (heap_leak_check_info
*)(base
+ elementSize
731 - sizeof(heap_leak_check_info
));
733 caller_info
*callerInfo
= get_caller_info(info
->caller
);
734 if (callerInfo
== NULL
) {
735 kprintf("out of space for caller infos\n");
740 callerInfo
->size
+= info
->size
;
743 // page is used by a big allocation, find the page count
744 uint32 pageCount
= 1;
745 while (i
+ pageCount
< area
->page_count
746 && area
->page_table
[i
+ pageCount
].in_use
747 && area
->page_table
[i
+ pageCount
].bin_index
749 && area
->page_table
[i
+ pageCount
].allocation_id
750 == page
->allocation_id
) {
754 info
= (heap_leak_check_info
*)(base
+ pageCount
755 * heap
->page_size
- sizeof(heap_leak_check_info
));
757 caller_info
*callerInfo
= get_caller_info(info
->caller
);
758 if (callerInfo
== NULL
) {
759 kprintf("out of space for caller infos\n");
764 callerInfo
->size
+= info
->size
;
766 // skip the allocated pages
771 area
= area
->all_next
;
779 dump_allocations_per_caller(int argc
, char **argv
)
781 bool sortBySize
= true;
782 heap_allocator
*heap
= NULL
;
784 for (int32 i
= 1; i
< argc
; i
++) {
785 if (strcmp(argv
[i
], "-c") == 0) {
787 } else if (strcmp(argv
[i
], "-h") == 0) {
790 || !evaluate_debug_expression(argv
[i
], &heapAddress
, true)) {
791 print_debugger_command_usage(argv
[0]);
795 heap
= (heap_allocator
*)(addr_t
)heapAddress
;
797 print_debugger_command_usage(argv
[0]);
802 sCallerInfoCount
= 0;
805 if (!analyze_allocation_callers(heap
))
808 for (uint32 heapIndex
= 0; heapIndex
< sHeapCount
; heapIndex
++) {
809 if (!analyze_allocation_callers(sHeaps
[heapIndex
]))
815 qsort(sCallerInfoTable
, sCallerInfoCount
, sizeof(caller_info
),
816 sortBySize
? &caller_info_compare_size
: &caller_info_compare_count
);
818 kprintf("%ld different callers, sorted by %s...\n\n", sCallerInfoCount
,
819 sortBySize
? "size" : "count");
821 kprintf(" count size caller\n");
822 kprintf("----------------------------------\n");
823 for (int32 i
= 0; i
< sCallerInfoCount
; i
++) {
824 caller_info
& info
= sCallerInfoTable
[i
];
825 kprintf("%10ld %10ld %#08lx", info
.count
, info
.size
, info
.caller
);
828 const char *imageName
;
832 if (elf_debug_lookup_symbol_address(info
.caller
, &baseAddress
, &symbol
,
833 &imageName
, &exactMatch
) == B_OK
) {
834 kprintf(" %s + 0x%lx (%s)%s\n", symbol
,
835 info
.caller
- baseAddress
, imageName
,
836 exactMatch
? "" : " (nearest)");
844 #endif // KERNEL_HEAP_LEAK_CHECK
847 #if PARANOID_HEAP_VALIDATION
849 heap_validate_heap(heap_allocator
*heap
)
851 ReadLocker
areaReadLocker(heap
->area_lock
);
852 for (uint32 i
= 0; i
< heap
->bin_count
; i
++)
853 mutex_lock(&heap
->bins
[i
].lock
);
854 MutexLocker
pageLocker(heap
->page_lock
);
856 uint32 totalPageCount
= 0;
857 uint32 totalFreePageCount
= 0;
858 heap_area
*area
= heap
->all_areas
;
859 while (area
!= NULL
) {
860 // validate the free pages list
861 uint32 freePageCount
= 0;
862 heap_page
*lastPage
= NULL
;
863 heap_page
*page
= area
->free_pages
;
865 if ((addr_t
)page
< (addr_t
)&area
->page_table
[0]
866 || (addr_t
)page
>= (addr_t
)&area
->page_table
[area
->page_count
])
867 panic("free page is not part of the page table\n");
869 if (page
->index
>= area
->page_count
)
870 panic("free page has invalid index\n");
872 if ((addr_t
)&area
->page_table
[page
->index
] != (addr_t
)page
)
873 panic("free page index does not lead to target page\n");
875 if (page
->prev
!= lastPage
)
876 panic("free page entry has invalid prev link\n");
879 panic("free page marked as in use\n");
886 totalPageCount
+= freePageCount
;
887 totalFreePageCount
+= freePageCount
;
888 if (area
->free_page_count
!= freePageCount
)
889 panic("free page count doesn't match free page list\n");
891 // validate the page table
892 uint32 usedPageCount
= 0;
893 for (uint32 i
= 0; i
< area
->page_count
; i
++) {
894 if (area
->page_table
[i
].in_use
)
898 totalPageCount
+= usedPageCount
;
899 if (freePageCount
+ usedPageCount
!= area
->page_count
) {
900 panic("free pages and used pages do not add up (%lu + %lu != %lu)\n",
901 freePageCount
, usedPageCount
, area
->page_count
);
904 area
= area
->all_next
;
907 // validate the areas
909 heap_area
*lastArea
= NULL
;
910 uint32 lastFreeCount
= 0;
911 while (area
!= NULL
) {
912 if (area
->free_page_count
< lastFreeCount
)
913 panic("size ordering of area list broken\n");
915 if (area
->prev
!= lastArea
)
916 panic("area list entry has invalid prev link\n");
919 lastFreeCount
= area
->free_page_count
;
924 area
= heap
->all_areas
;
925 while (area
!= NULL
) {
926 if (lastArea
!= NULL
&& lastArea
->base
< area
->base
)
927 panic("base ordering of all_areas list broken\n");
930 area
= area
->all_next
;
934 for (uint32 i
= 0; i
< heap
->bin_count
; i
++) {
935 heap_bin
*bin
= &heap
->bins
[i
];
936 heap_page
*lastPage
= NULL
;
937 heap_page
*page
= bin
->page_list
;
940 area
= heap
->all_areas
;
942 if (area
== page
->area
)
944 area
= area
->all_next
;
948 panic("page area not present in area list\n");
953 if ((addr_t
)page
< (addr_t
)&area
->page_table
[0]
954 || (addr_t
)page
>= (addr_t
)&area
->page_table
[area
->page_count
])
955 panic("used page is not part of the page table\n");
957 if (page
->index
>= area
->page_count
)
958 panic("used page has invalid index\n");
960 if ((addr_t
)&area
->page_table
[page
->index
] != (addr_t
)page
)
961 panic("used page index does not lead to target page\n");
963 if (page
->prev
!= lastPage
) {
964 panic("used page entry has invalid prev link (%p vs %p bin "
965 "%lu)\n", page
->prev
, lastPage
, i
);
969 panic("used page marked as not in use\n");
971 if (page
->bin_index
!= i
) {
972 panic("used page with bin index %u in page list of bin %lu\n",
976 if (page
->free_count
< lastFreeCount
)
977 panic("ordering of bin page list broken\n");
979 // validate the free list
980 uint32 freeSlotsCount
= 0;
981 addr_t
*element
= page
->free_list
;
982 addr_t pageBase
= area
->base
+ page
->index
* heap
->page_size
;
984 if ((addr_t
)element
< pageBase
985 || (addr_t
)element
>= pageBase
+ heap
->page_size
)
986 panic("free list entry out of page range\n");
988 if (((addr_t
)element
- pageBase
) % bin
->element_size
!= 0)
989 panic("free list entry not on a element boundary\n");
991 element
= (addr_t
*)*element
;
995 uint32 slotCount
= bin
->max_free_count
;
996 if (page
->empty_index
> slotCount
) {
997 panic("empty index beyond slot count (%u with %lu slots)\n",
998 page
->empty_index
, slotCount
);
1001 freeSlotsCount
+= (slotCount
- page
->empty_index
);
1002 if (freeSlotsCount
> slotCount
)
1003 panic("more free slots than fit into the page\n");
1006 lastFreeCount
= page
->free_count
;
1011 pageLocker
.Unlock();
1012 for (uint32 i
= 0; i
< heap
->bin_count
; i
++)
1013 mutex_unlock(&heap
->bins
[i
].lock
);
1014 areaReadLocker
.Unlock();
1016 #endif // PARANOID_HEAP_VALIDATION
//	#pragma mark - Heap functions


static void
heap_add_area(heap_allocator *heap, area_id areaID, addr_t base, size_t size)
{
	heap_area *area = (heap_area *)base;
	area->area = areaID;

	base += sizeof(heap_area);
	size -= sizeof(heap_area);

	uint32 pageCount = size / heap->page_size;
	size_t pageTableSize = pageCount * sizeof(heap_page);
	area->page_table = (heap_page *)base;
	base += pageTableSize;
	size -= pageTableSize;

	// the rest is now actually usable memory (rounded to the next page)
	area->base = ROUNDUP(base, B_PAGE_SIZE);
	area->size = size & ~(B_PAGE_SIZE - 1);

	// now we know the real page count
	pageCount = area->size / heap->page_size;
	area->page_count = pageCount;

	// zero out the page table and fill in page indexes
	memset((void *)area->page_table, 0, pageTableSize);
	for (uint32 i = 0; i < pageCount; i++) {
		area->page_table[i].area = area;
		area->page_table[i].index = i;
	}

	// add all pages up into the free pages list
	for (uint32 i = 1; i < pageCount; i++) {
		area->page_table[i - 1].next = &area->page_table[i];
		area->page_table[i].prev = &area->page_table[i - 1];
	}
	area->free_pages = &area->page_table[0];
	area->free_page_count = pageCount;
	area->page_table[0].prev = NULL;
	area->next = NULL;

	WriteLocker areaWriteLocker(heap->area_lock);
	MutexLocker pageLocker(heap->page_lock);
	if (heap->areas == NULL) {
		// it's the only (empty) area in that heap
		area->prev = NULL;
		heap->areas = area;
	} else {
		// link in this area as the last one as it is completely empty
		heap_area *lastArea = heap->areas;
		while (lastArea->next != NULL)
			lastArea = lastArea->next;

		lastArea->next = area;
		area->prev = lastArea;
	}

	// insert this area in the all_areas list so it stays ordered by base
	if (heap->all_areas == NULL || heap->all_areas->base < area->base) {
		area->all_next = heap->all_areas;
		heap->all_areas = area;
	} else {
		heap_area *insert = heap->all_areas;
		while (insert->all_next && insert->all_next->base > area->base)
			insert = insert->all_next;

		area->all_next = insert->all_next;
		insert->all_next = area;
	}

	heap->total_pages += area->page_count;
	heap->total_free_pages += area->free_page_count;

	if (areaID >= 0) {
		// this later on deletable area is yet empty - the empty count will be
		// decremented as soon as this area is used for the first time
		heap->empty_areas++;
	}

	pageLocker.Unlock();
	areaWriteLocker.Unlock();

	dprintf("heap_add_area: area %" B_PRId32 " added to %s heap %p - usable "
		"range %p - %p\n", area->area, heap->name, heap, (void *)area->base,
		(void *)(area->base + area->size));
}
static status_t
heap_remove_area(heap_allocator *heap, heap_area *area)
{
	if (area->free_page_count != area->page_count) {
		panic("tried removing heap area that has still pages in use");
		return B_BUSY;
	}

	if (area->prev == NULL && area->next == NULL) {
		panic("tried removing the last non-full heap area");
		return B_BUSY;
	}

	if (heap->areas == area)
		heap->areas = area->next;
	if (area->prev != NULL)
		area->prev->next = area->next;
	if (area->next != NULL)
		area->next->prev = area->prev;

	if (heap->all_areas == area)
		heap->all_areas = area->all_next;
	else {
		heap_area *previous = heap->all_areas;
		while (previous) {
			if (previous->all_next == area) {
				previous->all_next = area->all_next;
				break;
			}

			previous = previous->all_next;
		}

		if (previous == NULL)
			panic("removing heap area that is not in all list");
	}

	heap->total_pages -= area->page_count;
	heap->total_free_pages -= area->free_page_count;

	dprintf("heap_remove_area: area %" B_PRId32 " with range %p - %p removed "
		"from %s heap %p\n", area->area, (void *)area->base,
		(void *)(area->base + area->size), heap->name, heap);

	return B_OK;
}
heap_allocator *
heap_create_allocator(const char *name, addr_t base, size_t size,
	const heap_class *heapClass, bool allocateOnHeap)
{
	heap_allocator *heap;
	if (allocateOnHeap) {
		// allocate separately on the heap
		heap = (heap_allocator *)malloc(sizeof(heap_allocator)
			+ sizeof(heap_bin) * MAX_BIN_COUNT);
	} else {
		// use up the first part of the area
		heap = (heap_allocator *)base;
		base += sizeof(heap_allocator);
		size -= sizeof(heap_allocator);
	}

	heap->name = name;
	heap->page_size = heapClass->page_size;
	heap->total_pages = heap->total_free_pages = heap->empty_areas = 0;
	heap->areas = heap->all_areas = NULL;
	heap->bins = (heap_bin *)((addr_t)heap + sizeof(heap_allocator));

#if KERNEL_HEAP_LEAK_CHECK
	heap->get_caller = &get_caller;
#endif

	heap->bin_count = 0;
	size_t binSize = 0, lastSize = 0;
	uint32 count = heap->page_size / heapClass->min_bin_size;
	for (; count >= heapClass->min_count_per_page; count--, lastSize = binSize) {
		if (heap->bin_count >= MAX_BIN_COUNT)
			panic("heap configuration invalid - max bin count reached\n");

		binSize = (heap->page_size / count) & ~(heapClass->bin_alignment - 1);
		if (binSize == lastSize)
			continue;
		if (heap->page_size - count * binSize > heapClass->max_waste_per_page)
			continue;

		heap_bin *bin = &heap->bins[heap->bin_count];
		mutex_init(&bin->lock, "heap bin lock");
		bin->element_size = binSize;
		bin->max_free_count = heap->page_size / binSize;
		bin->page_list = NULL;
		heap->bin_count++;
	}

	if (!allocateOnHeap) {
		base += heap->bin_count * sizeof(heap_bin);
		size -= heap->bin_count * sizeof(heap_bin);
	}

	rw_lock_init(&heap->area_lock, "heap area rw lock");
	mutex_init(&heap->page_lock, "heap page lock");

	heap_add_area(heap, -1, base, size);
	return heap;
}
static void
heap_free_pages_added(heap_allocator *heap, heap_area *area, uint32 pageCount)
{
	area->free_page_count += pageCount;
	heap->total_free_pages += pageCount;

	if (area->free_page_count == pageCount) {
		// we need to add ourselves to the area list of the heap
		area->prev = NULL;
		area->next = heap->areas;
		if (area->next)
			area->next->prev = area;
		heap->areas = area;
	} else {
		// we might need to move back in the area list
		if (area->next && area->next->free_page_count < area->free_page_count) {
			// move ourselves so the list stays ordered
			heap_area *insert = area->next;
			while (insert->next
				&& insert->next->free_page_count < area->free_page_count)
				insert = insert->next;

			if (area->prev)
				area->prev->next = area->next;
			if (area->next)
				area->next->prev = area->prev;
			if (heap->areas == area)
				heap->areas = area->next;

			area->prev = insert;
			area->next = insert->next;
			if (area->next)
				area->next->prev = area;
			insert->next = area;
		}
	}

	if (area->free_page_count == area->page_count && area->area >= 0)
		heap->empty_areas++;
}
static void
heap_free_pages_removed(heap_allocator *heap, heap_area *area, uint32 pageCount)
{
	if (area->free_page_count == area->page_count && area->area >= 0) {
		// this area was completely empty
		heap->empty_areas--;
	}

	area->free_page_count -= pageCount;
	heap->total_free_pages -= pageCount;

	if (area->free_page_count == 0) {
		// the area is now full so we remove it from the area list
		if (area->prev)
			area->prev->next = area->next;
		if (area->next)
			area->next->prev = area->prev;
		if (heap->areas == area)
			heap->areas = area->next;
		area->next = area->prev = NULL;
	} else {
		// we might need to move forward in the area list
		if (area->prev && area->prev->free_page_count > area->free_page_count) {
			// move ourselves so the list stays ordered
			heap_area *insert = area->prev;
			while (insert->prev
				&& insert->prev->free_page_count > area->free_page_count)
				insert = insert->prev;

			if (area->prev)
				area->prev->next = area->next;
			if (area->next)
				area->next->prev = area->prev;

			area->prev = insert->prev;
			area->next = insert;
			if (area->prev)
				area->prev->next = area;
			if (heap->areas == insert)
				heap->areas = area;
			insert->prev = area;
		}
	}
}
static inline void
heap_link_page(heap_page *page, heap_page **list)
{
	page->next = *list;
	page->prev = NULL;
	if (page->next)
		page->next->prev = page;
	*list = page;
}


static inline void
heap_unlink_page(heap_page *page, heap_page **list)
{
	if (page->prev)
		page->prev->next = page->next;
	if (page->next)
		page->next->prev = page->prev;
	if (list && *list == page) {
		*list = page->next;
		if (page->next)
			page->next->prev = NULL;
	}
}
static heap_page *
heap_allocate_contiguous_pages(heap_allocator *heap, uint32 pageCount,
	size_t alignment)
{
	MutexLocker pageLocker(heap->page_lock);
	heap_area *area = heap->areas;
	while (area) {
		if (area->free_page_count < pageCount) {
			area = area->next;
			continue;
		}

		uint32 firstValid = 0;
		const uint32 lastValid = area->page_count - pageCount + 1;
		uint32 step = 1;

		if (alignment > heap->page_size) {
			firstValid = (ROUNDUP(area->base, alignment) - area->base)
				/ heap->page_size;
			step = alignment / heap->page_size;
		}

		int32 first = -1;
		for (uint32 i = firstValid; i < lastValid; i += step) {
			if (area->page_table[i].in_use)
				continue;

			first = i;

			for (uint32 j = 1; j < pageCount; j++) {
				if (area->page_table[i + j].in_use) {
					first = -1;
					i += j / step * step;
					break;
				}
			}

			if (first >= 0)
				break;
		}

		if (first < 0) {
			area = area->next;
			continue;
		}

		for (uint32 i = first; i < first + pageCount; i++) {
			heap_page *page = &area->page_table[i];
			page->in_use = 1;
			page->bin_index = heap->bin_count;

			heap_unlink_page(page, &area->free_pages);

			page->next = page->prev = NULL;
			page->free_list = NULL;
			page->allocation_id = (uint16)first;
		}

		heap_free_pages_removed(heap, area, pageCount);
		return &area->page_table[first];
	}

	return NULL;
}
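// Example of the alignment stepping above (illustrative numbers only): with a
// 4 KiB heap page size and a requested alignment of 16 KiB, step becomes
// 16384 / 4096 = 4 and firstValid is advanced to the first page index whose
// address is 16 KiB aligned. The scan then only probes every fourth page, so
// any run of free pages it finds starts at a correctly aligned address with
// no separate fix-up needed.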
#if KERNEL_HEAP_LEAK_CHECK
static void
heap_add_leak_check_info(heap_allocator *heap, addr_t address, size_t allocated,
	size_t size)
{
	heap_leak_check_info *info = (heap_leak_check_info *)(address + allocated
		- sizeof(heap_leak_check_info));
	info->size = size - sizeof(heap_leak_check_info);
	info->thread = (gKernelStartup ? 0 : thread_get_current_thread_id());
	info->team = (gKernelStartup ? 0 : team_get_current_team_id());
	info->caller = heap->get_caller();
}
#endif


static void *
heap_raw_alloc(heap_allocator *heap, size_t size, size_t alignment)
{
	TRACE(("heap %p: allocate %lu bytes from raw pages with alignment %lu\n",
		heap, size, alignment));

	uint32 pageCount = (size + heap->page_size - 1) / heap->page_size;
	heap_page *firstPage = heap_allocate_contiguous_pages(heap, pageCount,
		alignment);
	if (firstPage == NULL) {
		TRACE(("heap %p: found no contiguous pages to allocate %ld bytes\n",
			heap, size));
		return NULL;
	}

	addr_t address = firstPage->area->base + firstPage->index * heap->page_size;
#if KERNEL_HEAP_LEAK_CHECK
	heap_add_leak_check_info(heap, address, pageCount * heap->page_size, size);
#endif
	return (void *)address;
}
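// For instance (illustrative numbers): a 10000 byte request on a heap with a
// 4096 byte page size rounds up to pageCount = (10000 + 4095) / 4096 = 3
// pages, i.e. 12288 bytes of backing store. With KERNEL_HEAP_LEAK_CHECK
// enabled, the heap_leak_check_info lives in the slack at the very end of
// that last page.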
static void *
heap_allocate_from_bin(heap_allocator *heap, uint32 binIndex, size_t size)
{
	heap_bin *bin = &heap->bins[binIndex];
	TRACE(("heap %p: allocate %lu bytes from bin %lu with element_size %lu\n",
		heap, size, binIndex, bin->element_size));

	MutexLocker binLocker(bin->lock);
	heap_page *page = bin->page_list;
	if (page == NULL) {
		MutexLocker pageLocker(heap->page_lock);
		heap_area *area = heap->areas;
		if (area == NULL) {
			TRACE(("heap %p: no free pages to allocate %lu bytes\n", heap,
				size));
			return NULL;
		}

		// by design there are only areas in the list that still have
		// free pages available
		page = area->free_pages;
		area->free_pages = page->next;
		if (page->next)
			page->next->prev = NULL;

		heap_free_pages_removed(heap, area, 1);

		if (page->in_use)
			panic("got an in use page %p from the free pages list\n", page);
		page->in_use = 1;

		pageLocker.Unlock();

		page->bin_index = binIndex;
		page->free_count = bin->max_free_count;
		page->empty_index = 0;
		page->free_list = NULL;
		page->next = page->prev = NULL;
		bin->page_list = page;
	}

	// we have a page where we have a free slot
	void *address = NULL;
	if (page->free_list) {
		// there's a previously freed entry we can use
		address = page->free_list;
		page->free_list = (addr_t *)*page->free_list;
	} else {
		// the page hasn't been fully allocated so use the next empty_index
		address = (void *)(page->area->base + page->index * heap->page_size
			+ page->empty_index * bin->element_size);
		page->empty_index++;
	}

	page->free_count--;
	if (page->free_count == 0) {
		// the page is now full so we remove it from the page_list
		bin->page_list = page->next;
		if (page->next)
			page->next->prev = NULL;
		page->next = page->prev = NULL;
	}

#if KERNEL_HEAP_LEAK_CHECK
	binLocker.Unlock();
	heap_add_leak_check_info(heap, (addr_t)address, bin->element_size, size);
#endif
	return address;
}
static bool
is_valid_alignment(size_t number)
{
	// this cryptic line accepts zero and all powers of two
	return ((~number + 1) | ((number << 1) - 1)) == ~0UL;
}


static bool
heap_should_grow(heap_allocator *heap)
{
	// suggest growing if there is less than 20% of a grow size available
	return heap->total_free_pages * heap->page_size < HEAP_GROW_SIZE / 5;
}
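// Worked example for the alignment test above (not from the original source):
// for number = 8, ~8 + 1 is ...11111000 and (8 << 1) - 1 is 00001111, so the
// OR is all ones and the test passes. For number = 12, ~12 + 1 is ...11110100
// and (12 << 1) - 1 is 00010111; their OR leaves bit 3 clear, so the test
// fails. Zero passes as well because (0 << 1) - 1 is already ~0UL.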
static void *
heap_memalign(heap_allocator *heap, size_t alignment, size_t size)
{
	TRACE(("memalign(alignment = %lu, size = %lu)\n", alignment, size));

#if DEBUG
	if (!is_valid_alignment(alignment))
		panic("memalign() with an alignment which is not a power of 2\n");
#endif

#if KERNEL_HEAP_LEAK_CHECK
	size += sizeof(heap_leak_check_info);
#endif

	void *address = NULL;
	if (alignment < B_PAGE_SIZE) {
		if (alignment != 0) {
			// TODO: The alignment is done by ensuring that the element size
			// of the target bin is aligned with the requested alignment. This
			// has the problem that it wastes space because a better (smaller)
			// bin could possibly be selected. We should pick the best bin and
			// check if there is an aligned block in the free list or if a new
			// (page aligned) page has to be allocated anyway.
			size = ROUNDUP(size, alignment);
			for (uint32 i = 0; i < heap->bin_count; i++) {
				if (size <= heap->bins[i].element_size
					&& is_valid_alignment(heap->bins[i].element_size)) {
					address = heap_allocate_from_bin(heap, i, size);
					break;
				}
			}
		} else {
			for (uint32 i = 0; i < heap->bin_count; i++) {
				if (size <= heap->bins[i].element_size) {
					address = heap_allocate_from_bin(heap, i, size);
					break;
				}
			}
		}
	}

	if (address == NULL)
		address = heap_raw_alloc(heap, size, alignment);

#if KERNEL_HEAP_LEAK_CHECK
	size -= sizeof(heap_leak_check_info);
#endif

	TRACE(("memalign(): asked to allocate %lu bytes, returning pointer %p\n",
		size, address));

	T(Allocate((addr_t)address, size));
	if (address == NULL)
		return address;

#if PARANOID_KERNEL_MALLOC
	memset(address, 0xcc, size);
#endif

#if PARANOID_KERNEL_FREE
	// make sure 0xdeadbeef is cleared if we do not overwrite the memory
	// and the user does not clear it
	if (((uint32 *)address)[1] == 0xdeadbeef)
		((uint32 *)address)[1] = 0xcccccccc;
#endif

	return address;
}
1593 heap_free(heap_allocator
*heap
, void *address
)
1595 if (address
== NULL
)
1598 ReadLocker
areaReadLocker(heap
->area_lock
);
1599 heap_area
*area
= heap
->all_areas
;
1601 // since the all_areas list is ordered by base with the biggest
1602 // base at the top, we need only find the first area with a base
1603 // smaller than our address to become our only candidate for freeing
1604 if (area
->base
<= (addr_t
)address
) {
1605 if ((addr_t
)address
>= area
->base
+ area
->size
) {
1606 // none of the other areas can contain the address as the list
1608 return B_ENTRY_NOT_FOUND
;
1611 // this area contains the allocation, we're done searching
1615 area
= area
->all_next
;
1619 // this address does not belong to us
1620 return B_ENTRY_NOT_FOUND
;
1623 TRACE(("free(): asked to free pointer %p\n", address
));
1625 heap_page
*page
= &area
->page_table
[((addr_t
)address
- area
->base
)
1628 TRACE(("free(): page %p: bin_index %d, free_count %d\n", page
,
1629 page
->bin_index
, page
->free_count
));
1631 if (page
->bin_index
> heap
->bin_count
) {
1632 panic("free(): page %p: invalid bin_index %d\n", page
, page
->bin_index
);
1636 if (page
->bin_index
< heap
->bin_count
) {
1638 heap_bin
*bin
= &heap
->bins
[page
->bin_index
];
1640 #if PARANOID_KERNEL_FREE
1641 if (((uint32
*)address
)[1] == 0xdeadbeef) {
1642 // This block looks like it was freed already, walk the free list
1643 // on this page to make sure this address doesn't exist.
1644 MutexLocker
binLocker(bin
->lock
);
1645 for (addr_t
*temp
= page
->free_list
; temp
!= NULL
;
1646 temp
= (addr_t
*)*temp
) {
1647 if (temp
== address
) {
1648 panic("free(): address %p already exists in page free "
1655 // the first 4 bytes are overwritten with the next free list pointer
1657 uint32
*dead
= (uint32
*)address
;
1658 for (uint32 i
= 1; i
< bin
->element_size
/ sizeof(uint32
); i
++)
1659 dead
[i
] = 0xdeadbeef;
1662 MutexLocker
binLocker(bin
->lock
);
1663 if (((addr_t
)address
- area
->base
- page
->index
1664 * heap
->page_size
) % bin
->element_size
!= 0) {
1665 panic("free(): passed invalid pointer %p supposed to be in bin for "
1666 "element size %" B_PRIu32
"\n", address
, bin
->element_size
);
1670 // add the address to the page free list
1671 *(addr_t
*)address
= (addr_t
)page
->free_list
;
1672 page
->free_list
= (addr_t
*)address
;
1675 if (page
->free_count
== bin
->max_free_count
) {
1676 // we are now empty, remove the page from the bin list
1677 MutexLocker
pageLocker(heap
->page_lock
);
1678 heap_unlink_page(page
, &bin
->page_list
);
1680 heap_link_page(page
, &area
->free_pages
);
1681 heap_free_pages_added(heap
, area
, 1);
1682 } else if (page
->free_count
== 1) {
1683 // we need to add ourselfs to the page list of the bin
1684 heap_link_page(page
, &bin
->page_list
);
1686 // we might need to move back in the free pages list
1687 if (page
->next
&& page
->next
->free_count
< page
->free_count
) {
1688 // move ourselfs so the list stays ordered
1689 heap_page
*insert
= page
->next
;
1691 && insert
->next
->free_count
< page
->free_count
)
1692 insert
= insert
->next
;
1694 heap_unlink_page(page
, &bin
->page_list
);
1696 page
->prev
= insert
;
1697 page
->next
= insert
->next
;
1699 page
->next
->prev
= page
;
1700 insert
->next
= page
;
1704 // large allocation, just return the pages to the page free list
1705 uint32 allocationID
= page
->allocation_id
;
1706 uint32 maxPages
= area
->page_count
- page
->index
;
1707 uint32 pageCount
= 0;
1709 MutexLocker
pageLocker(heap
->page_lock
);
1710 for (uint32 i
= 0; i
< maxPages
; i
++) {
1711 // loop until we find the end of this allocation
1712 if (!page
[i
].in_use
|| page
[i
].bin_index
!= heap
->bin_count
1713 || page
[i
].allocation_id
!= allocationID
)
1716 // this page still belongs to the same allocation
1718 page
[i
].allocation_id
= 0;
1720 // return it to the free list
1721 heap_link_page(&page
[i
], &area
->free_pages
);
1725 heap_free_pages_added(heap
, area
, pageCount
);
1728 T(Free((addr_t
)address
));
1729 areaReadLocker
.Unlock();
1731 if (heap
->empty_areas
> 1) {
1732 WriteLocker
areaWriteLocker(heap
->area_lock
);
1733 MutexLocker
pageLocker(heap
->page_lock
);
1735 area_id areasToDelete
[heap
->empty_areas
- 1];
1736 int32 areasToDeleteIndex
= 0;
1739 while (area
!= NULL
&& heap
->empty_areas
> 1) {
1740 heap_area
*next
= area
->next
;
1742 && area
->free_page_count
== area
->page_count
1743 && heap_remove_area(heap
, area
) == B_OK
) {
1744 areasToDelete
[areasToDeleteIndex
++] = area
->area
;
1745 heap
->empty_areas
--;
1751 pageLocker
.Unlock();
1752 areaWriteLocker
.Unlock();
1754 for (int32 i
= 0; i
< areasToDeleteIndex
; i
++)
1755 delete_area(areasToDelete
[i
]);
1762 #if KERNEL_HEAP_LEAK_CHECK
1764 heap_set_get_caller(heap_allocator
* heap
, addr_t (*getCaller
)())
1766 heap
->get_caller
= getCaller
;
1771 #if USE_DEBUG_HEAP_FOR_MALLOC
1775 heap_realloc(heap_allocator
*heap
, void *address
, void **newAddress
,
1778 ReadLocker
areaReadLocker(heap
->area_lock
);
1779 heap_area
*area
= heap
->all_areas
;
1781 // since the all_areas list is ordered by base with the biggest
1782 // base at the top, we need only find the first area with a base
1783 // smaller than our address to become our only candidate for
1785 if (area
->base
<= (addr_t
)address
) {
1786 if ((addr_t
)address
>= area
->base
+ area
->size
) {
1787 // none of the other areas can contain the address as the list
1789 return B_ENTRY_NOT_FOUND
;
1792 // this area contains the allocation, we're done searching
1796 area
= area
->all_next
;
1800 // this address does not belong to us
1801 return B_ENTRY_NOT_FOUND
;
1804 TRACE(("realloc(address = %p, newSize = %lu)\n", address
, newSize
));
1806 heap_page
*page
= &area
->page_table
[((addr_t
)address
- area
->base
)
1808 if (page
->bin_index
> heap
->bin_count
) {
1809 panic("realloc(): page %p: invalid bin_index %d\n", page
,
1814 // find out the size of the old allocation first
1817 if (page
->bin_index
< heap
->bin_count
) {
1818 // this was a small allocation
1819 heap_bin
*bin
= &heap
->bins
[page
->bin_index
];
1820 maxSize
= bin
->element_size
;
1821 if (page
->bin_index
> 0)
1822 minSize
= heap
->bins
[page
->bin_index
- 1].element_size
+ 1;
1824 // this was a large allocation
1825 uint32 allocationID
= page
->allocation_id
;
1826 uint32 maxPages
= area
->page_count
- page
->index
;
1827 maxSize
= heap
->page_size
;
1829 MutexLocker
pageLocker(heap
->page_lock
);
1830 for (uint32 i
= 1; i
< maxPages
; i
++) {
1831 if (!page
[i
].in_use
|| page
[i
].bin_index
!= heap
->bin_count
1832 || page
[i
].allocation_id
!= allocationID
)
1835 minSize
+= heap
->page_size
;
1836 maxSize
+= heap
->page_size
;
1840 areaReadLocker
.Unlock();
1842 #if KERNEL_HEAP_LEAK_CHECK
1843 newSize
+= sizeof(heap_leak_check_info
);
1846 // does the new allocation simply fit in the old allocation?
1847 if (newSize
> minSize
&& newSize
<= maxSize
) {
1848 #if KERNEL_HEAP_LEAK_CHECK
1849 // update the size info (the info is at the end so stays where it is)
1850 heap_leak_check_info
*info
= (heap_leak_check_info
*)((addr_t
)address
1851 + maxSize
- sizeof(heap_leak_check_info
));
1852 info
->size
= newSize
- sizeof(heap_leak_check_info
);
1853 newSize
-= sizeof(heap_leak_check_info
);
1856 T(Reallocate((addr_t
)address
, (addr_t
)address
, newSize
));
1857 *newAddress
= address
;
1861 #if KERNEL_HEAP_LEAK_CHECK
1862 // new leak check info will be created with the malloc below
1863 newSize
-= sizeof(heap_leak_check_info
);
1866 // if not, allocate a new chunk of memory
1867 *newAddress
= memalign(0, newSize
);
1868 T(Reallocate((addr_t
)address
, (addr_t
)*newAddress
, newSize
));
1869 if (*newAddress
== NULL
) {
1870 // we tried but it didn't work out, but still the operation is done
1874 // copy the old data and free the old allocation
1875 memcpy(*newAddress
, address
, min_c(maxSize
, newSize
));
1876 heap_free(heap
, address
);
static uint32
heap_index_for(size_t size, int32 cpu)
{
#if KERNEL_HEAP_LEAK_CHECK
	// take the extra info size into account
	size += sizeof(heap_leak_check_info_s);
#endif

	uint32 index = 0;
	for (; index < HEAP_CLASS_COUNT - 1; index++) {
		if (size <= sHeapClasses[index].max_allocation_size)
			break;
	}

	return (index + cpu * HEAP_CLASS_COUNT) % sHeapCount;
}
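// Illustrative mapping (assumes the default HEAP_CLASS_COUNT of 3 and a fully
// populated per-CPU heap array): a 100 byte request selects class index 0, so
// CPU 2 ends up on sHeaps[0 + 2 * 3] = sHeaps[6]; a 10 KiB request exceeds the
// first two classes' max_allocation_size and lands on index 2, giving
// sHeaps[2 + 2 * 3] = sHeaps[8]. The final modulo folds these indexes back
// into range when fewer per-CPU heaps were actually created at boot.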
1900 memalign_nogrow(size_t alignment
, size_t size
)
1902 // use dedicated memory in the grow thread by default
1903 if (thread_get_current_thread_id() == sHeapGrowThread
) {
1904 void *result
= heap_memalign(sGrowHeap
, alignment
, size
);
1905 if (!sAddGrowHeap
&& heap_should_grow(sGrowHeap
)) {
1906 // hopefully the heap grower will manage to create a new heap
1907 // before running out of private memory...
1908 dprintf("heap: requesting new grow heap\n");
1909 sAddGrowHeap
= true;
1910 release_sem_etc(sHeapGrowSem
, 1, B_DO_NOT_RESCHEDULE
);
1917 // try public memory, there might be something available
1918 void *result
= NULL
;
1919 int32 cpuCount
= MIN(smp_get_num_cpus(),
1920 (int32
)sHeapCount
/ HEAP_CLASS_COUNT
);
1921 int32 cpuNumber
= smp_get_current_cpu();
1922 for (int32 i
= 0; i
< cpuCount
; i
++) {
1923 uint32 heapIndex
= heap_index_for(size
, cpuNumber
++ % cpuCount
);
1924 heap_allocator
*heap
= sHeaps
[heapIndex
];
1925 result
= heap_memalign(heap
, alignment
, size
);
1930 // no memory available
1931 if (thread_get_current_thread_id() == sHeapGrowThread
)
1932 panic("heap: all heaps have run out of memory while growing\n");
1934 dprintf("heap: all heaps have run out of memory\n");
1941 heap_create_new_heap_area(heap_allocator
*heap
, const char *name
, size_t size
)
1943 void *address
= NULL
;
1944 area_id heapArea
= create_area(name
, &address
,
1945 B_ANY_KERNEL_BLOCK_ADDRESS
, size
, B_FULL_LOCK
,
1946 B_KERNEL_READ_AREA
| B_KERNEL_WRITE_AREA
);
1947 if (heapArea
< B_OK
) {
1948 TRACE(("heap: couldn't allocate heap area \"%s\"\n", name
));
1952 heap_add_area(heap
, heapArea
, (addr_t
)address
, size
);
1953 #if PARANOID_HEAP_VALIDATION
1954 heap_validate_heap(heap
);
1961 heap_grow_thread(void *)
1964 // wait for a request to grow the heap list
1965 if (acquire_sem(sHeapGrowSem
) < B_OK
)
1969 // the grow heap is going to run full soon, try to allocate a new
1970 // one to make some room.
1971 TRACE(("heap_grower: grow heaps will run out of memory soon\n"));
1972 if (heap_create_new_heap_area(sGrowHeap
, "additional grow heap",
1973 HEAP_DEDICATED_GROW_SIZE
) != B_OK
)
1974 dprintf("heap_grower: failed to create new grow heap area\n");
1977 for (uint32 i
= 0; i
< sHeapCount
; i
++) {
1978 heap_allocator
*heap
= sHeaps
[i
];
1979 if (sLastGrowRequest
[i
] > sLastHandledGrowRequest
[i
]
1980 || heap_should_grow(heap
)) {
1981 // grow this heap if it is nearly full or if a grow was
1982 // explicitly requested for this heap (happens when a large
1983 // allocation cannot be fulfilled due to lack of contiguous
1985 if (heap_create_new_heap_area(heap
, "additional heap",
1986 HEAP_GROW_SIZE
) != B_OK
)
1987 dprintf("heap_grower: failed to create new heap area\n");
1988 sLastHandledGrowRequest
[i
] = sLastGrowRequest
[i
];
1992 // notify anyone waiting for this request
1993 release_sem_etc(sHeapGrownNotify
, -1, B_RELEASE_ALL
);
2000 #endif // USE_DEBUG_HEAP_FOR_MALLOC
static void
deferred_deleter(void *arg, int iteration)
{
	// move entries and deletables to on-stack lists
	InterruptsSpinLocker locker(sDeferredFreeListLock);
	if (sDeferredFreeList.IsEmpty() && sDeferredDeletableList.IsEmpty())
		return;

	DeferredFreeList entries;
	entries.MoveFrom(&sDeferredFreeList);

	DeferredDeletableList deletables;
	deletables.MoveFrom(&sDeferredDeletableList);

	locker.Unlock();

	while (DeferredFreeListEntry* entry = entries.RemoveHead())
		free(entry);

	// delete the deletables
	while (DeferredDeletable* deletable = deletables.RemoveHead())
		delete deletable;
}


#if USE_DEBUG_HEAP_FOR_MALLOC
2036 heap_init(addr_t base
, size_t size
)
2038 for (uint32 i
= 0; i
< HEAP_CLASS_COUNT
; i
++) {
2039 size_t partSize
= size
* sHeapClasses
[i
].initial_percentage
/ 100;
2040 sHeaps
[i
] = heap_create_allocator(sHeapClasses
[i
].name
, base
, partSize
,
2041 &sHeapClasses
[i
], false);
2042 sLastGrowRequest
[i
] = sLastHandledGrowRequest
[i
] = 0;
2047 // set up some debug commands
2048 add_debugger_command_etc("heap", &dump_heap_list
,
2049 "Dump infos about the kernel heap(s)",
2050 "[(\"grow\" | \"stats\" | <heap>)]\n"
2051 "Dump infos about the kernel heap(s). If \"grow\" is specified, only\n"
2052 "infos about the dedicated grow heap are printed. If \"stats\" is\n"
2053 "given as the argument, currently only the heap count is printed.\n"
2054 "If <heap> is given, it is interpreted as the address of the heap to\n"
2055 "print infos about.\n", 0);
2056 #if !KERNEL_HEAP_LEAK_CHECK
2057 add_debugger_command_etc("allocations", &dump_allocations
,
2058 "Dump current heap allocations",
2059 "[\"stats\"] [<heap>]\n"
2060 "If no parameters are given, all current alloactions are dumped.\n"
2061 "If the optional argument \"stats\" is specified, only the allocation\n"
2062 "counts and no individual allocations are printed\n"
2063 "If a specific heap address is given, only allocations of this\n"
2064 "allocator are dumped\n", 0);
2065 #else // !KERNEL_HEAP_LEAK_CHECK
2066 add_debugger_command_etc("allocations", &dump_allocations
,
2067 "Dump current heap allocations",
2068 "[(\"team\" | \"thread\") <id>] [\"caller\" <address>] [\"address\" <address>] [\"stats\"]\n"
2069 "If no parameters are given, all current alloactions are dumped.\n"
2070 "If \"team\", \"thread\", \"caller\", and/or \"address\" is specified as the first\n"
2071 "argument, only allocations matching the team ID, thread ID, caller\n"
2072 "address or allocated address given in the second argument are printed.\n"
2073 "If the optional argument \"stats\" is specified, only the allocation\n"
2074 "counts and no individual allocations are printed.\n", 0);
2075 add_debugger_command_etc("allocations_per_caller",
2076 &dump_allocations_per_caller
,
2077 "Dump current heap allocations summed up per caller",
2078 "[ \"-c\" ] [ -h <heap> ]\n"
2079 "The current allocations will by summed up by caller (their count and\n"
2080 "size) printed in decreasing order by size or, if \"-c\" is\n"
2081 "specified, by allocation count. If given <heap> specifies the\n"
2082 "address of the heap for which to print the allocations.\n", 0);
2083 #endif // KERNEL_HEAP_LEAK_CHECK
2089 heap_init_post_area()
2091 void *address
= NULL
;
2092 area_id growHeapArea
= create_area("dedicated grow heap", &address
,
2093 B_ANY_KERNEL_BLOCK_ADDRESS
, HEAP_DEDICATED_GROW_SIZE
, B_FULL_LOCK
,
2094 B_KERNEL_READ_AREA
| B_KERNEL_WRITE_AREA
);
2095 if (growHeapArea
< 0) {
2096 panic("heap_init_post_area(): couldn't allocate dedicate grow heap "
2098 return growHeapArea
;
2101 sGrowHeap
= heap_create_allocator("grow", (addr_t
)address
,
2102 HEAP_DEDICATED_GROW_SIZE
, &sHeapClasses
[0], false);
2103 if (sGrowHeap
== NULL
) {
2104 panic("heap_init_post_area(): failed to create dedicated grow heap\n");
2108 // create the VIP heap
2109 static const heap_class heapClass
= {
2110 "VIP I/O", /* name */
2111 100, /* initial percentage */
2112 B_PAGE_SIZE
/ 8, /* max allocation size */
2113 B_PAGE_SIZE
, /* page size */
2114 8, /* min bin size */
2115 4, /* bin alignment */
2116 8, /* min count per page */
2117 16 /* max waste per page */
2120 area_id vipHeapArea
= create_area("VIP heap", &address
,
2121 B_ANY_KERNEL_ADDRESS
, VIP_HEAP_SIZE
, B_FULL_LOCK
,
2122 B_KERNEL_READ_AREA
| B_KERNEL_WRITE_AREA
);
2123 if (vipHeapArea
< 0) {
2124 panic("heap_init_post_area(): couldn't allocate VIP heap area");
2128 sVIPHeap
= heap_create_allocator("VIP heap", (addr_t
)address
,
2129 VIP_HEAP_SIZE
, &heapClass
, false);
2130 if (sVIPHeap
== NULL
) {
2131 panic("heap_init_post_area(): failed to create VIP heap\n");
2135 dprintf("heap_init_post_area(): created VIP heap: %p\n", sVIPHeap
);
2142 heap_init_post_sem()
2144 sHeapGrowSem
= create_sem(0, "heap_grow_sem");
2145 if (sHeapGrowSem
< 0) {
2146 panic("heap_init_post_sem(): failed to create heap grow sem\n");
2150 sHeapGrownNotify
= create_sem(0, "heap_grown_notify");
2151 if (sHeapGrownNotify
< 0) {
2152 panic("heap_init_post_sem(): failed to create heap grown notify sem\n");
2160 #endif // USE_DEBUG_HEAP_FOR_MALLOC
2164 heap_init_post_thread()
2166 #if USE_DEBUG_HEAP_FOR_MALLOC
2167 sHeapGrowThread
= spawn_kernel_thread(heap_grow_thread
, "heap grower",
2168 B_URGENT_PRIORITY
, NULL
);
2169 if (sHeapGrowThread
< 0) {
2170 panic("heap_init_post_thread(): cannot create heap grow thread\n");
2171 return sHeapGrowThread
;
2174 // create per-cpu heaps if there's enough memory
2175 int32 heapCount
= MIN(smp_get_num_cpus(),
2176 (int32
)vm_page_num_pages() / 60 / 1024);
2177 for (int32 i
= 1; i
< heapCount
; i
++) {
2179 size_t size
= HEAP_GROW_SIZE
* HEAP_CLASS_COUNT
;
2180 area_id perCPUHeapArea
= create_area("per cpu initial heap",
2181 (void **)&base
, B_ANY_KERNEL_ADDRESS
, size
, B_FULL_LOCK
,
2182 B_KERNEL_READ_AREA
| B_KERNEL_WRITE_AREA
);
2183 if (perCPUHeapArea
< 0)
2186 for (uint32 j
= 0; j
< HEAP_CLASS_COUNT
; j
++) {
2187 int32 heapIndex
= i
* HEAP_CLASS_COUNT
+ j
;
2188 size_t partSize
= size
* sHeapClasses
[j
].initial_percentage
/ 100;
2189 sHeaps
[heapIndex
] = heap_create_allocator(sHeapClasses
[j
].name
,
2190 base
, partSize
, &sHeapClasses
[j
], false);
2191 sLastGrowRequest
[heapIndex
] = 0;
2192 sLastHandledGrowRequest
[heapIndex
] = 0;
2198 resume_thread(sHeapGrowThread
);
2200 #else // USE_DEBUG_HEAP_FOR_MALLOC
2202 // set up some debug commands
2203 add_debugger_command_etc("heap", &dump_heap_list
,
2204 "Dump infos about a specific heap",
2205 "[\"stats\"] <heap>\n"
2206 "Dump infos about the specified kernel heap. If \"stats\" is given\n"
2207 "as the argument, currently only the heap count is printed.\n", 0);
2208 #if !KERNEL_HEAP_LEAK_CHECK
2209 add_debugger_command_etc("heap_allocations", &dump_allocations
,
2210 "Dump current heap allocations",
2211 "[\"stats\"] <heap>\n"
2212 "If the optional argument \"stats\" is specified, only the allocation\n"
2213 "counts and no individual allocations are printed.\n", 0);
2214 #endif // KERNEL_HEAP_LEAK_CHECK
2215 #endif // !USE_DEBUG_HEAP_FOR_MALLOC
2217 // run the deferred deleter roughly once a second
2218 if (register_kernel_daemon(deferred_deleter
, NULL
, 10) != B_OK
)
2219 panic("heap_init_post_thread(): failed to init deferred deleter");
2225 // #pragma mark - Public API
2228 #if USE_DEBUG_HEAP_FOR_MALLOC
2232 memalign(size_t alignment
, size_t size
)
2234 if (!gKernelStartup
&& !are_interrupts_enabled()) {
2235 panic("memalign(): called with interrupts disabled\n");
2239 if (!gKernelStartup
&& size
> HEAP_AREA_USE_THRESHOLD
) {
2240 // don't even attempt such a huge allocation - use areas instead
2241 size_t areaSize
= ROUNDUP(size
+ sizeof(area_allocation_info
)
2242 + alignment
, B_PAGE_SIZE
);
2243 if (areaSize
< size
) {
2244 // the size overflowed
2248 void *address
= NULL
;
2249 area_id allocationArea
= create_area("memalign area", &address
,
2250 B_ANY_KERNEL_BLOCK_ADDRESS
, areaSize
, B_FULL_LOCK
,
2251 B_KERNEL_READ_AREA
| B_KERNEL_WRITE_AREA
);
2252 if (allocationArea
< B_OK
) {
2253 dprintf("heap: failed to create area for huge allocation\n");
2257 area_allocation_info
*info
= (area_allocation_info
*)address
;
2258 info
->magic
= kAreaAllocationMagic
;
2259 info
->area
= allocationArea
;
2260 info
->base
= address
;
2261 info
->size
= areaSize
;
2262 info
->allocation_size
= size
;
2263 info
->allocation_alignment
= alignment
;
2265 address
= (void *)((addr_t
)address
+ sizeof(area_allocation_info
));
2266 if (alignment
!= 0) {
2267 address
= (void *)ROUNDUP((addr_t
)address
, alignment
);
2268 ASSERT((addr_t
)address
% alignment
== 0);
2269 ASSERT((addr_t
)address
+ size
- 1 < (addr_t
)info
+ areaSize
- 1);
2272 TRACE(("heap: allocated area %ld for huge allocation of %lu bytes\n",
2273 allocationArea
, size
));
2275 info
->allocation_base
= address
;
2277 #if PARANOID_KERNEL_MALLOC
2278 memset(address
, 0xcc, size
);
2283 void *result
= NULL
;
2284 bool shouldGrow
= false;
2285 int32 cpuCount
= MIN(smp_get_num_cpus(),
2286 (int32
)sHeapCount
/ HEAP_CLASS_COUNT
);
2287 int32 cpuNumber
= smp_get_current_cpu();
2288 for (int32 i
= 0; i
< cpuCount
; i
++) {
2289 uint32 heapIndex
= heap_index_for(size
, cpuNumber
++ % cpuCount
);
2290 heap_allocator
*heap
= sHeaps
[heapIndex
];
2291 result
= heap_memalign(heap
, alignment
, size
);
2292 if (result
!= NULL
) {
2293 shouldGrow
= heap_should_grow(heap
);
2297 #if PARANOID_HEAP_VALIDATION
2298 heap_validate_heap(heap
);
2302 if (result
== NULL
) {
2303 // request an urgent grow and wait - we don't do it ourselfs here to
2304 // serialize growing through the grow thread, as otherwise multiple
2305 // threads hitting this situation (likely when memory ran out) would
2307 uint32 heapIndex
= heap_index_for(size
, smp_get_current_cpu());
2308 sLastGrowRequest
[heapIndex
]++;
2309 switch_sem(sHeapGrowSem
, sHeapGrownNotify
);
2311 // and then try again
2312 result
= heap_memalign(sHeaps
[heapIndex
], alignment
, size
);
2313 } else if (shouldGrow
) {
2314 // should grow sometime soon, notify the grower
2315 release_sem_etc(sHeapGrowSem
, 1, B_DO_NOT_RESCHEDULE
);
2319 panic("heap: kernel heap has run out of memory\n");
2325 memalign_etc(size_t alignment
, size_t size
, uint32 flags
)
2327 if ((flags
& HEAP_PRIORITY_VIP
) != 0)
2328 return heap_memalign(sVIPHeap
, alignment
, size
);
2330 if ((flags
& (HEAP_DONT_WAIT_FOR_MEMORY
| HEAP_DONT_LOCK_KERNEL_SPACE
))
2332 return memalign_nogrow(alignment
, size
);
2335 return memalign(alignment
, size
);
2340 free_etc(void *address
, uint32 flags
)
2342 if ((flags
& HEAP_PRIORITY_VIP
) != 0)
2343 heap_free(sVIPHeap
, address
);
2352 return memalign(0, size
);
2359 if (!gKernelStartup
&& !are_interrupts_enabled()) {
2360 panic("free(): called with interrupts disabled\n");
2364 int32 offset
= smp_get_current_cpu() * HEAP_CLASS_COUNT
;
2365 for (uint32 i
= 0; i
< sHeapCount
; i
++) {
2366 heap_allocator
*heap
= sHeaps
[(i
+ offset
) % sHeapCount
];
2367 if (heap_free(heap
, address
) == B_OK
) {
2368 #if PARANOID_HEAP_VALIDATION
2369 heap_validate_heap(heap
);
2375 // maybe it was allocated from the dedicated grow heap
2376 if (heap_free(sGrowHeap
, address
) == B_OK
)
2379 // or maybe it was allocated from the VIP heap
2380 if (heap_free(sVIPHeap
, address
) == B_OK
)
2383 // or maybe it was a huge allocation using an area
2385 area_id area
= area_for(address
);
2386 if (area
>= B_OK
&& get_area_info(area
, &areaInfo
) == B_OK
) {
2387 area_allocation_info
*info
= (area_allocation_info
*)areaInfo
.address
;
2389 // just make extra sure it was allocated by us
2390 if (info
->magic
== kAreaAllocationMagic
&& info
->area
== area
2391 && info
->size
== areaInfo
.size
&& info
->base
== areaInfo
.address
2392 && info
->allocation_size
< areaInfo
.size
) {
2394 TRACE(("free(): freed huge allocation by deleting area %ld\n",
2400 panic("free(): free failed for address %p\n", address
);
2405 realloc(void *address
, size_t newSize
)
2407 if (!gKernelStartup
&& !are_interrupts_enabled()) {
2408 panic("realloc(): called with interrupts disabled\n");
2412 if (address
== NULL
)
2413 return memalign(0, newSize
);
2420 void *newAddress
= NULL
;
2421 int32 offset
= smp_get_current_cpu() * HEAP_CLASS_COUNT
;
2422 for (uint32 i
= 0; i
< sHeapCount
; i
++) {
2423 heap_allocator
*heap
= sHeaps
[(i
+ offset
) % sHeapCount
];
2424 if (heap_realloc(heap
, address
, &newAddress
, newSize
) == B_OK
) {
2425 #if PARANOID_HEAP_VALIDATION
2426 heap_validate_heap(heap
);
2432 // maybe it was allocated from the dedicated grow heap
2433 if (heap_realloc(sGrowHeap
, address
, &newAddress
, newSize
) == B_OK
)
2436 // or maybe it was a huge allocation using an area
2438 area_id area
= area_for(address
);
2439 if (area
>= B_OK
&& get_area_info(area
, &areaInfo
) == B_OK
) {
2440 area_allocation_info
*info
= (area_allocation_info
*)areaInfo
.address
;
2442 // just make extra sure it was allocated by us
2443 if (info
->magic
== kAreaAllocationMagic
&& info
->area
== area
2444 && info
->size
== areaInfo
.size
&& info
->base
== areaInfo
.address
2445 && info
->allocation_size
< areaInfo
.size
) {
2446 size_t available
= info
->size
- ((addr_t
)info
->allocation_base
2447 - (addr_t
)info
->base
);
2449 if (available
>= newSize
) {
2450 // there is enough room available for the newSize
2451 TRACE(("realloc(): new size %ld fits in old area %ld with %ld "
2452 "available\n", newSize
, area
, available
));
2453 info
->allocation_size
= newSize
;
2457 // have to allocate/copy/free - TODO maybe resize the area instead?
2458 newAddress
= memalign(0, newSize
);
2459 if (newAddress
== NULL
) {
2460 dprintf("realloc(): failed to allocate new block of %ld bytes\n",
2465 memcpy(newAddress
, address
, min_c(newSize
, info
->allocation_size
));
2467 TRACE(("realloc(): allocated new block %p for size %ld and deleted "
2468 "old area %ld\n", newAddress
, newSize
, area
));
2473 panic("realloc(): failed to realloc address %p to size %lu\n", address
,
2479 #endif // USE_DEBUG_HEAP_FOR_MALLOC
void *
calloc(size_t numElements, size_t size)
{
	void *address = memalign(0, numElements * size);
	if (address != NULL)
		memset(address, 0, numElements * size);

	return address;
}
void
deferred_free(void *block)
{
	if (block == NULL)
		return;

	DeferredFreeListEntry *entry = new(block) DeferredFreeListEntry;

	InterruptsSpinLocker _(sDeferredFreeListLock);
	sDeferredFreeList.Add(entry);
}
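// Minimal usage sketch (hypothetical caller, not part of this file): code
// that must not call free() directly, for example because it runs with
// interrupts disabled or while holding a spinlock, hands the block over and
// the kernel daemon running deferred_deleter() frees it later:
//
//     my_object *object = ...;     // was allocated with malloc()
//     deferred_free(object);       // the actual free() happens later
//
// The block has to be large enough for a DeferredFreeListEntry, since the
// entry is placement-constructed inside the memory that is being freed.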
void *
malloc_referenced(size_t size)
{
	int32 *referencedData = (int32 *)malloc(size + 4);
	if (referencedData == NULL)
		return NULL;

	*referencedData = 1;
	return referencedData + 1;
}


void *
malloc_referenced_acquire(void *data)
{
	if (data != NULL) {
		int32 *referencedData = (int32 *)data - 1;
		atomic_add(referencedData, 1);
	}

	return data;
}


void
malloc_referenced_release(void *data)
{
	if (data == NULL)
		return;

	int32 *referencedData = (int32 *)data - 1;
	if (atomic_add(referencedData, -1) < 1)
		free(referencedData);
}
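// Usage sketch for the reference counted allocations above (hypothetical
// example, not part of the original file): malloc_referenced() keeps an int32
// reference count in the four bytes in front of the pointer it returns,
// malloc_referenced_acquire() increments it, and malloc_referenced_release()
// decrements it and frees the whole block once the count is used up (note
// that atomic_add() returns the previous value, so the check compares against
// the pre-decrement count):
//
//     void *buffer = malloc_referenced(256);
//     malloc_referenced_acquire(buffer);   // an additional owner
//     ...
//     malloc_referenced_release(buffer);   // each owner drops its reference
//     malloc_referenced_release(buffer);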
DeferredDeletable::~DeferredDeletable()
{
}


void
deferred_delete(DeferredDeletable *deletable)
{
	if (deletable == NULL)
		return;

	InterruptsSpinLocker _(sDeferredFreeListLock);
	sDeferredDeletableList.Add(deletable);
}