/*
 * Copyright 2011, Michael Lotz <mmlr@mlotz.ch>.
 * Distributed under the terms of the MIT License.
 */


#include <arch/debug.h>
#include <slab/Slab.h>
#include <util/list.h>
#include <util/AutoLock.h>


#if USE_GUARDED_HEAP_FOR_MALLOC


#define GUARDED_HEAP_PAGE_FLAG_USED		0x01
#define GUARDED_HEAP_PAGE_FLAG_FIRST	0x02
#define GUARDED_HEAP_PAGE_FLAG_GUARD	0x04
#define GUARDED_HEAP_PAGE_FLAG_DEAD		0x08

#define GUARDED_HEAP_STACK_TRACE_DEPTH	0
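
// Each allocation occupies a run of whole pages: the first page carries the
// allocation metadata (size, base, team/thread and optional stack trace) and
// is flagged FIRST, every page of the run is flagged USED, and the run ends
// with an unmapped page flagged GUARD so that overruns fault immediately.
// DEAD marks pages kept out of reuse when
// DEBUG_GUARDED_HEAP_DISABLE_MEMORY_REUSE is enabled.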
struct guarded_heap_page {
	uint8				flags;
	size_t				allocation_size;
	void*				allocation_base;
	size_t				alignment;
	team_id				team;
	thread_id			thread;
#if GUARDED_HEAP_STACK_TRACE_DEPTH > 0
	size_t				stack_trace_depth;
	addr_t				stack_trace[GUARDED_HEAP_STACK_TRACE_DEPTH];
#endif
	list_link			free_list_link;
};
struct guarded_heap_area {
	guarded_heap*		heap;
	guarded_heap_area*	next;
	area_id				area;
	addr_t				base;
	size_t				size;
	size_t				page_count;
	size_t				used_pages;
	void*				protection_cookie;
	mutex				lock;
	struct list			free_list;
	guarded_heap_page	pages[0];
};

struct guarded_heap {
	rw_lock				lock;
	size_t				page_count;
	size_t				used_pages;
	int32				area_creation_counter;
	guarded_heap_area*	areas;
};


static guarded_heap sGuardedHeap = {
	RW_LOCK_INITIALIZER("guarded heap lock"),
	0, 0, 0, NULL
};
#if GUARDED_HEAP_TRACING

namespace GuardedHeapTracing {


class GuardedHeapTraceEntry
	: public TRACE_ENTRY_SELECTOR(GUARDED_HEAP_TRACING_STACK_TRACE) {
	public:
		GuardedHeapTraceEntry(guarded_heap* heap)
			:
			TraceEntryBase(GUARDED_HEAP_TRACING_STACK_TRACE, 0, true),
			fHeap(heap)
		{
		}

	protected:
		guarded_heap*	fHeap;
};


class Allocate : public GuardedHeapTraceEntry {
	public:
		Allocate(guarded_heap* heap, void* pageBase, uint32 flags)
			:
			GuardedHeapTraceEntry(heap),
			fPageBase(pageBase),
			fFlags(flags)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("guarded heap allocate: heap: %p; page: %p; "
				"flags:%s%s%s%s", fHeap, fPageBase,
				(fFlags & GUARDED_HEAP_PAGE_FLAG_USED) != 0 ? " used" : "",
				(fFlags & GUARDED_HEAP_PAGE_FLAG_FIRST) != 0 ? " first" : "",
				(fFlags & GUARDED_HEAP_PAGE_FLAG_GUARD) != 0 ? " guard" : "",
				(fFlags & GUARDED_HEAP_PAGE_FLAG_DEAD) != 0 ? " dead" : "");
		}

	private:
		void*		fPageBase;
		uint32		fFlags;
};


class Free : public GuardedHeapTraceEntry {
	public:
		Free(guarded_heap* heap, void* pageBase)
			:
			GuardedHeapTraceEntry(heap),
			fPageBase(pageBase)
		{
			Initialized();
		}

		virtual void AddDump(TraceOutput& out)
		{
			out.Print("guarded heap free: heap: %p; page: %p", fHeap,
				fPageBase);
		}

	private:
		void*		fPageBase;
};


}	// namespace GuardedHeapTracing

#	define T(x)	new(std::nothrow) GuardedHeapTracing::x
#else
#	define T(x)	;
#endif	// GUARDED_HEAP_TRACING
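

// Set the protection of a single heap page. Free and guard pages are set to
// no access at all, so any stray read or write into them faults straight into
// the debugger; pages of live allocations get normal kernel read/write
// protection.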
static void
guarded_heap_page_protect(guarded_heap_area& area, size_t pageIndex,
	uint32 protection)
{
	if (area.area < 0)
		return;

	addr_t address = area.base + pageIndex * B_PAGE_SIZE;
	vm_set_kernel_area_debug_protection(area.protection_cookie, (void*)address,
		B_PAGE_SIZE, protection);
}
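

// Mark a run of pages as belonging to one allocation: the first page gets the
// allocation metadata and the FIRST flag, the following pages copy the team
// and thread info from it, and the last page becomes the unmapped GUARD page.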
static void
guarded_heap_page_allocate(guarded_heap_area& area, size_t startPageIndex,
	size_t pagesNeeded, size_t allocationSize, size_t alignment,
	void* allocationBase)
{
	if (pagesNeeded < 2) {
		panic("need to allocate at least 2 pages, one for guard\n");
		return;
	}

	guarded_heap_page* firstPage = NULL;
	for (size_t i = 0; i < pagesNeeded; i++) {
		guarded_heap_page& page = area.pages[startPageIndex + i];
		page.flags = GUARDED_HEAP_PAGE_FLAG_USED;
		if (i == 0) {
			page.team = (gKernelStartup ? 0 : team_get_current_team_id());
			page.thread = find_thread(NULL);
#if GUARDED_HEAP_STACK_TRACE_DEPTH > 0
			page.stack_trace_depth = arch_debug_get_stack_trace(
				page.stack_trace, GUARDED_HEAP_STACK_TRACE_DEPTH, 0, 4,
				STACK_TRACE_KERNEL);
#endif
			page.allocation_size = allocationSize;
			page.allocation_base = allocationBase;
			page.alignment = alignment;
			page.flags |= GUARDED_HEAP_PAGE_FLAG_FIRST;
			firstPage = &page;
		} else {
			page.team = firstPage->team;
			page.thread = firstPage->thread;
#if GUARDED_HEAP_STACK_TRACE_DEPTH > 0
			page.stack_trace_depth = 0;
#endif
			page.allocation_size = allocationSize;
			page.allocation_base = allocationBase;
			page.alignment = alignment;
		}

		list_remove_item(&area.free_list, &page);

		if (i == pagesNeeded - 1) {
			page.flags |= GUARDED_HEAP_PAGE_FLAG_GUARD;
			guarded_heap_page_protect(area, startPageIndex + i, 0);
		} else {
			guarded_heap_page_protect(area, startPageIndex + i,
				B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
		}

		T(Allocate(area.heap,
			(void*)(area.base + (startPageIndex + i) * B_PAGE_SIZE),
			page.flags));
	}
}
static void
guarded_heap_free_page(guarded_heap_area& area, size_t pageIndex,
	bool force = false)
{
	guarded_heap_page& page = area.pages[pageIndex];

#if DEBUG_GUARDED_HEAP_DISABLE_MEMORY_REUSE
	if (force || area.area < 0)
		page.flags = 0;
	else
		page.flags |= GUARDED_HEAP_PAGE_FLAG_DEAD;
#else
	page.flags = 0;
#endif

	page.allocation_size = 0;
	page.team = (gKernelStartup ? 0 : team_get_current_team_id());
	page.thread = find_thread(NULL);

#if GUARDED_HEAP_STACK_TRACE_DEPTH > 0
	page.stack_trace_depth = arch_debug_get_stack_trace(page.stack_trace,
		GUARDED_HEAP_STACK_TRACE_DEPTH, 0, 3, STACK_TRACE_KERNEL);
#endif

	list_add_item(&area.free_list, &page);

	guarded_heap_page_protect(area, pageIndex, 0);

	T(Free(area.heap, (void*)(area.base + pageIndex * B_PAGE_SIZE)));
}
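

// Account for newly allocated pages and report whether the heap is getting
// close to full (within half a grow size of its capacity), in which case the
// caller should add another area.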
static bool
guarded_heap_pages_allocated(guarded_heap& heap, size_t pagesAllocated)
{
	return (atomic_add((int32*)&heap.used_pages, pagesAllocated)
			+ pagesAllocated)
		>= heap.page_count - HEAP_GROW_SIZE / B_PAGE_SIZE / 2;
}
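

// Try to satisfy an allocation from this area. The allocation is rounded up
// to whole pages plus one guard page, and the returned address is pushed to
// the end of the last data page (then rounded down for alignment), so that an
// overrun immediately runs into the unmapped guard page.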
static void*
guarded_heap_area_allocate(guarded_heap_area& area, size_t size,
	size_t alignment, uint32 flags, bool& grow)
{
	if (alignment > B_PAGE_SIZE) {
		panic("alignment of %" B_PRIuSIZE " not supported", alignment);
		return NULL;
	}

	size_t pagesNeeded = (size + B_PAGE_SIZE - 1) / B_PAGE_SIZE + 1;
	if (pagesNeeded > area.page_count - area.used_pages)
		return NULL;

	if (pagesNeeded > area.page_count)
		return NULL;

	// We use the free list this way so that the page that has been free for
	// the longest time is allocated. This keeps immediate re-use (that may
	// hide bugs) to a minimum.
	guarded_heap_page* page
		= (guarded_heap_page*)list_get_first_item(&area.free_list);

	for (; page != NULL;
		page = (guarded_heap_page*)list_get_next_item(&area.free_list, page)) {

		if ((page->flags & GUARDED_HEAP_PAGE_FLAG_USED) != 0)
			continue;

		size_t pageIndex = page - area.pages;
		if (pageIndex > area.page_count - pagesNeeded)
			continue;

		// Candidate, check if we have enough pages going forward
		// (including the guard page).
		bool candidate = true;
		for (size_t j = 1; j < pagesNeeded; j++) {
			if ((area.pages[pageIndex + j].flags & GUARDED_HEAP_PAGE_FLAG_USED)
					!= 0) {
				candidate = false;
				break;
			}
		}

		if (!candidate)
			continue;

		if (alignment == 0)
			alignment = 1;

		size_t offset = size & (B_PAGE_SIZE - 1);
		void* result = (void*)((area.base + pageIndex * B_PAGE_SIZE
			+ (offset > 0 ? B_PAGE_SIZE - offset : 0)) & ~(alignment - 1));

		guarded_heap_page_allocate(area, pageIndex, pagesNeeded, size,
			alignment, result);

		area.used_pages += pagesNeeded;
		grow = guarded_heap_pages_allocated(*area.heap, pagesNeeded);
		return result;
	}

	return NULL;
}
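

// Initialize a guarded heap area in the given memory block. The
// guarded_heap_area header and the per page bookkeeping array are placed at
// the start of the block, and the pages they occupy are subtracted from the
// usable page count before the area is linked into the heap.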
static bool
guarded_heap_area_init(guarded_heap& heap, area_id id, void* baseAddress,
	size_t size, uint32 flags)
{
	guarded_heap_area* area = (guarded_heap_area*)baseAddress;
	area->heap = &heap;
	area->area = id;
	area->size = size;
	area->page_count = area->size / B_PAGE_SIZE;
	area->used_pages = 0;

	size_t pagesNeeded = (sizeof(guarded_heap_area)
		+ area->page_count * sizeof(guarded_heap_page)
		+ B_PAGE_SIZE - 1) / B_PAGE_SIZE;

	area->page_count -= pagesNeeded;
	area->size = area->page_count * B_PAGE_SIZE;
	area->base = (addr_t)baseAddress + pagesNeeded * B_PAGE_SIZE;

	if (area->area >= 0 && vm_prepare_kernel_area_debug_protection(area->area,
			&area->protection_cookie) != B_OK) {
		return false;
	}

	mutex_init(&area->lock, "guarded_heap_area_lock");

	list_init_etc(&area->free_list,
		offsetof(guarded_heap_page, free_list_link));

	for (size_t i = 0; i < area->page_count; i++)
		guarded_heap_free_page(*area, i, true);

	WriteLocker areaListWriteLocker(heap.lock);
	area->next = heap.areas;
	heap.areas = area;
	heap.page_count += area->page_count;

	return true;
}
static bool
guarded_heap_area_create(guarded_heap& heap, uint32 flags)
{
	for (size_t trySize = HEAP_GROW_SIZE; trySize >= 1 * 1024 * 1024;
		trySize /= 2) {

		void* baseAddress = NULL;
		area_id id = create_area("guarded_heap_area", &baseAddress,
			B_ANY_KERNEL_ADDRESS, trySize, B_FULL_LOCK,
			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);

		if (id < 0)
			continue;

		if (guarded_heap_area_init(heap, id, baseAddress, trySize, flags))
			return true;

		delete_area(id);
	}

	panic("failed to allocate a new heap area");
	return false;
}
static bool
guarded_heap_add_area(guarded_heap& heap, int32 counter, uint32 flags)
{
	if ((flags & (HEAP_DONT_LOCK_KERNEL_SPACE | HEAP_DONT_WAIT_FOR_MEMORY))
			!= 0) {
		return false;
	}

	if (atomic_test_and_set(&heap.area_creation_counter,
			counter + 1, counter) == counter) {
		return guarded_heap_area_create(heap, flags);
	}

	return false;
}
static void*
guarded_heap_allocate(guarded_heap& heap, size_t size, size_t alignment,
	uint32 flags)
{
	bool grow = false;
	void* result = NULL;
	ReadLocker areaListReadLocker(heap.lock);
	for (guarded_heap_area* area = heap.areas; area != NULL;
			area = area->next) {

		MutexLocker locker(area->lock);
		result = guarded_heap_area_allocate(*area, size, alignment, flags,
			grow);
		if (result != NULL)
			break;
	}

	int32 counter = atomic_get(&heap.area_creation_counter);
	areaListReadLocker.Unlock();

	if (result == NULL || grow) {
		bool added = guarded_heap_add_area(heap, counter, flags);
		if (result == NULL && added)
			return guarded_heap_allocate(heap, size, alignment, flags);
	}

	if (result == NULL)
		panic("ran out of memory");

	return result;
}
static guarded_heap_area*
guarded_heap_get_locked_area_for(guarded_heap& heap, void* address)
{
	ReadLocker areaListReadLocker(heap.lock);
	for (guarded_heap_area* area = heap.areas; area != NULL;
			area = area->next) {
		if ((addr_t)address < area->base)
			continue;

		if ((addr_t)address >= area->base + area->size)
			continue;

		mutex_lock(&area->lock);
		return area;
	}

	panic("guarded heap area for address %p not found", address);
	return NULL;
}
static size_t
guarded_heap_area_page_index_for(guarded_heap_area& area, void* address)
{
	size_t pageIndex = ((addr_t)address - area.base) / B_PAGE_SIZE;
	guarded_heap_page& page = area.pages[pageIndex];
	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_USED) == 0) {
		panic("tried to free %p which points at page %" B_PRIuSIZE
			" which is not marked in use", address, pageIndex);
		return area.page_count;
	}

	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_GUARD) != 0) {
		panic("tried to free %p which points at page %" B_PRIuSIZE
			" which is a guard page", address, pageIndex);
		return area.page_count;
	}

	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_FIRST) == 0) {
		panic("tried to free %p which points at page %" B_PRIuSIZE
			" which is not an allocation first page", address, pageIndex);
		return area.page_count;
	}

	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_DEAD) != 0) {
		panic("tried to free %p which points at page %" B_PRIuSIZE
			" which is a dead page", address, pageIndex);
		return area.page_count;
	}

	return pageIndex;
}
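

// Free an allocation inside this area: walk from the first page of the
// allocation up to and including its guard page, marking each page free
// again.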
static void
guarded_heap_area_free(guarded_heap_area& area, void* address, uint32 flags)
{
	size_t pageIndex = guarded_heap_area_page_index_for(area, address);
	if (pageIndex >= area.page_count)
		return;

	size_t pagesFreed = 0;
	guarded_heap_page* page = &area.pages[pageIndex];
	while ((page->flags & GUARDED_HEAP_PAGE_FLAG_GUARD) == 0) {
		// Mark the allocation page as free.
		guarded_heap_free_page(area, pageIndex);

		pagesFreed++;
		pageIndex++;
		page = &area.pages[pageIndex];
	}

	// Mark the guard page as free as well.
	guarded_heap_free_page(area, pageIndex);
	pagesFreed++;

#if !DEBUG_GUARDED_HEAP_DISABLE_MEMORY_REUSE
	area.used_pages -= pagesFreed;
	atomic_add((int32*)&area.heap->used_pages, -pagesFreed);
#endif
}
static void
guarded_heap_free(void* address, uint32 flags)
{
	if (address == NULL)
		return;

	guarded_heap_area* area = guarded_heap_get_locked_area_for(sGuardedHeap,
		address);
	if (area == NULL)
		return;

	MutexLocker locker(area->lock, true);
	guarded_heap_area_free(*area, address, flags);
}
static void*
guarded_heap_realloc(void* address, size_t newSize)
{
	guarded_heap_area* area = guarded_heap_get_locked_area_for(sGuardedHeap,
		address);
	if (area == NULL)
		return NULL;

	MutexLocker locker(area->lock, true);

	size_t pageIndex = guarded_heap_area_page_index_for(*area, address);
	if (pageIndex >= area->page_count)
		return NULL;

	guarded_heap_page& page = area->pages[pageIndex];
	size_t oldSize = page.allocation_size;
	locker.Unlock();

	if (oldSize == newSize)
		return address;

	void* newBlock = memalign(0, newSize);
	if (newBlock == NULL)
		return NULL;

	memcpy(newBlock, address, min_c(oldSize, newSize));

	free(address);

	return newBlock;
}
// #pragma mark - Debugger commands
static int
dump_guarded_heap_page(int argc, char** argv)
{
	if (argc != 2) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	addr_t address = parse_expression(argv[1]);

	// Find the area that contains this page.
	guarded_heap_area* area = NULL;
	for (guarded_heap_area* candidate = sGuardedHeap.areas; candidate != NULL;
			candidate = candidate->next) {

		if (address < candidate->base)
			continue;
		if (address >= candidate->base + candidate->size)
			continue;

		area = candidate;
		break;
	}

	if (area == NULL) {
		kprintf("didn't find area for address\n");
		return 0;
	}

	size_t pageIndex = ((addr_t)address - area->base) / B_PAGE_SIZE;
	guarded_heap_page& page = area->pages[pageIndex];

	kprintf("page index: %" B_PRIuSIZE "\n", pageIndex);
	kprintf("flags:");
	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_USED) != 0)
		kprintf(" used");
	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_FIRST) != 0)
		kprintf(" first");
	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_GUARD) != 0)
		kprintf(" guard");
	if ((page.flags & GUARDED_HEAP_PAGE_FLAG_DEAD) != 0)
		kprintf(" dead");
	kprintf("\n");

	kprintf("allocation size: %" B_PRIuSIZE "\n", page.allocation_size);
	kprintf("allocation base: %p\n", page.allocation_base);
	kprintf("alignment: %" B_PRIuSIZE "\n", page.alignment);
	kprintf("allocating team: %" B_PRId32 "\n", page.team);
	kprintf("allocating thread: %" B_PRId32 "\n", page.thread);

#if GUARDED_HEAP_STACK_TRACE_DEPTH > 0
	kprintf("stack trace:\n");
	for (size_t i = 0; i < page.stack_trace_depth; i++) {
		addr_t address = page.stack_trace[i];

		const char* symbol;
		const char* imageName;
		bool exactMatch;
		addr_t baseAddress;

		if (elf_debug_lookup_symbol_address(address, &baseAddress, &symbol,
				&imageName, &exactMatch) == B_OK) {
			kprintf("  %p  %s + 0x%lx (%s)%s\n", (void*)address, symbol,
				address - baseAddress, imageName,
				exactMatch ? "" : " (nearest)");
		} else
			kprintf("  %p\n", (void*)address);
	}
#endif

	return 0;
}
static int
dump_guarded_heap_area(int argc, char** argv)
{
	if (argc != 2) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	addr_t address = parse_expression(argv[1]);

	// Find the area that contains this page.
	guarded_heap_area* area = NULL;
	for (guarded_heap_area* candidate = sGuardedHeap.areas; candidate != NULL;
			candidate = candidate->next) {

		if ((addr_t)candidate != address) {
			if (address < candidate->base)
				continue;
			if (address >= candidate->base + candidate->size)
				continue;
		}

		area = candidate;
		break;
	}

	if (area == NULL) {
		kprintf("didn't find area for address\n");
		return 0;
	}

	kprintf("guarded heap area: %p\n", area);
	kprintf("next heap area: %p\n", area->next);
	kprintf("guarded heap: %p\n", area->heap);
	kprintf("area id: %" B_PRId32 "\n", area->area);
	kprintf("base: 0x%" B_PRIxADDR "\n", area->base);
	kprintf("size: %" B_PRIuSIZE "\n", area->size);
	kprintf("page count: %" B_PRIuSIZE "\n", area->page_count);
	kprintf("used pages: %" B_PRIuSIZE "\n", area->used_pages);
	kprintf("protection cookie: %p\n", area->protection_cookie);
	kprintf("lock: %p\n", &area->lock);

	size_t freeCount = 0;
	void* item = list_get_first_item(&area->free_list);
	while (item != NULL) {
		freeCount++;

		if ((((guarded_heap_page*)item)->flags & GUARDED_HEAP_PAGE_FLAG_USED)
				!= 0) {
			kprintf("free list broken, page %p not actually free\n", item);
		}

		item = list_get_next_item(&area->free_list, item);
	}

	kprintf("free_list: %p (%" B_PRIuSIZE " free)\n", &area->free_list,
		freeCount);

	freeCount = 0;
	size_t runLength = 0;
	size_t longestRun = 0;
	for (size_t i = 0; i <= area->page_count; i++) {
		guarded_heap_page& page = area->pages[i];
		if (i == area->page_count
			|| (page.flags & GUARDED_HEAP_PAGE_FLAG_USED) != 0) {
			freeCount += runLength;
			if (runLength > longestRun)
				longestRun = runLength;
			runLength = 0;
			continue;
		}

		runLength = 1;
		for (size_t j = 1; j < area->page_count - i; j++) {
			if ((area->pages[i + j].flags & GUARDED_HEAP_PAGE_FLAG_USED) != 0)
				break;

			runLength++;
		}

		i += runLength - 1;
	}

	kprintf("longest free run: %" B_PRIuSIZE " (%" B_PRIuSIZE " free)\n",
		longestRun, freeCount);

	kprintf("pages: %p\n", area->pages);

	return 0;
}
static int
dump_guarded_heap(int argc, char** argv)
{
	guarded_heap* heap = &sGuardedHeap;
	if (argc != 1) {
		if (argc == 2)
			heap = (guarded_heap*)parse_expression(argv[1]);
		else {
			print_debugger_command_usage(argv[0]);
			return 0;
		}
	}

	kprintf("guarded heap: %p\n", heap);
	kprintf("rw lock: %p\n", &heap->lock);
	kprintf("page count: %" B_PRIuSIZE "\n", heap->page_count);
	kprintf("used pages: %" B_PRIuSIZE "\n", heap->used_pages);
	kprintf("area creation counter: %" B_PRId32 "\n",
		heap->area_creation_counter);

	size_t areaCount = 0;
	guarded_heap_area* area = heap->areas;
	while (area != NULL) {
		areaCount++;
		area = area->next;
	}

	kprintf("areas: %p (%" B_PRIuSIZE ")\n", heap->areas, areaCount);

	return 0;
}
static int
dump_guarded_heap_allocations(int argc, char** argv)
{
	team_id team = -1;
	thread_id thread = -1;
	addr_t address = 0;
	bool statsOnly = false;

	for (int32 i = 1; i < argc; i++) {
		if (strcmp(argv[i], "team") == 0)
			team = parse_expression(argv[++i]);
		else if (strcmp(argv[i], "thread") == 0)
			thread = parse_expression(argv[++i]);
		else if (strcmp(argv[i], "address") == 0)
			address = parse_expression(argv[++i]);
		else if (strcmp(argv[i], "stats") == 0)
			statsOnly = true;
		else {
			print_debugger_command_usage(argv[0]);
			return 0;
		}
	}

	size_t totalSize = 0;
	uint32 totalCount = 0;

	guarded_heap_area* area = sGuardedHeap.areas;
	while (area != NULL) {
		for (size_t i = 0; i < area->page_count; i++) {
			guarded_heap_page& page = area->pages[i];
			if ((page.flags & GUARDED_HEAP_PAGE_FLAG_FIRST) == 0)
				continue;

			if ((team < 0 || page.team == team)
				&& (thread < 0 || page.thread == thread)
				&& (address == 0 || (addr_t)page.allocation_base == address)) {

				if (!statsOnly) {
					kprintf("team: % 6" B_PRId32 "; thread: % 6" B_PRId32 "; "
						"address: 0x%08" B_PRIxADDR "; size: %" B_PRIuSIZE
						" bytes\n", page.team, page.thread,
						(addr_t)page.allocation_base, page.allocation_size);
				}

				totalSize += page.allocation_size;
				totalCount++;
			}
		}

		area = area->next;
	}

	kprintf("total allocations: %" B_PRIu32 "; total bytes: %" B_PRIuSIZE
		"\n", totalCount, totalSize);
	return 0;
}
// #pragma mark - Malloc API
status_t
heap_init(addr_t address, size_t size)
{
	return guarded_heap_area_init(sGuardedHeap, -1, (void*)address, size, 0)
		? B_OK : B_ERROR;
}


status_t
heap_init_post_area()
{
	// The initial heap area was set up before areas existed, so its page
	// protection could not be prepared yet. Do that now and apply the
	// protection the bookkeeping already describes.
	for (guarded_heap_area* area = sGuardedHeap.areas; area != NULL;
			area = area->next) {

		if (area->area >= 0)
			continue;

		area_id id = area_for((void*)area->base);
		if (id < 0 || vm_prepare_kernel_area_debug_protection(id,
				&area->protection_cookie) != B_OK) {
			panic("failed to prepare initial guarded heap for protection");
			continue;
		}

		area->area = id;
		for (size_t i = 0; i < area->page_count; i++) {
			guarded_heap_page& page = area->pages[i];
			if ((page.flags & GUARDED_HEAP_PAGE_FLAG_USED) != 0
				&& (page.flags & GUARDED_HEAP_PAGE_FLAG_GUARD) == 0
				&& (page.flags & GUARDED_HEAP_PAGE_FLAG_DEAD) == 0) {
				guarded_heap_page_protect(*area, i,
					B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
			} else
				guarded_heap_page_protect(*area, i, 0);
		}
	}

	add_debugger_command("guarded_heap", &dump_guarded_heap,
		"Dump info about the guarded heap");
	add_debugger_command_etc("guarded_heap_area", &dump_guarded_heap_area,
		"Dump info about a guarded heap area",
		"<address>\nDump info about guarded heap area containing address.\n",
		0);
	add_debugger_command_etc("guarded_heap_page", &dump_guarded_heap_page,
		"Dump info about a guarded heap page",
		"<address>\nDump info about guarded heap page containing address.\n",
		0);
	add_debugger_command_etc("allocations", &dump_guarded_heap_allocations,
		"Dump current heap allocations",
		"[\"stats\"] [team] [thread] [address]\n"
		"If no parameters are given, all current allocations are dumped.\n"
		"If the optional argument \"stats\" is specified, only the allocation\n"
		"counts and no individual allocations are printed.\n"
		"If a specific allocation address is given, only this allocation is\n"
		"dumped.\n"
		"If a team and/or thread is specified, only allocations of this\n"
		"team/thread are dumped.\n", 0);

	return B_OK;
}
void*
memalign(size_t alignment, size_t size)
{
	return memalign_etc(alignment, size, 0);
}


void*
memalign_etc(size_t alignment, size_t size, uint32 flags)
{
	if (size == 0)
		size = 1;

	return guarded_heap_allocate(sGuardedHeap, size, alignment, flags);
}


void
free_etc(void *address, uint32 flags)
{
	guarded_heap_free(address, flags);
}


void*
malloc(size_t size)
{
	return memalign_etc(0, size, 0);
}


void
free(void* address)
{
	free_etc(address, 0);
}


void*
realloc(void* address, size_t newSize)
{
	if (newSize == 0) {
		free(address);
		return NULL;
	}

	if (address == NULL)
		return memalign(0, newSize);

	return guarded_heap_realloc(address, newSize);
}
#if USE_GUARDED_HEAP_FOR_OBJECT_CACHE


// #pragma mark - Slab API


void
request_memory_manager_maintenance()
{
}


object_cache*
create_object_cache(const char*, size_t objectSize, size_t, void*,
	object_cache_constructor, object_cache_destructor)
{
	return (object_cache*)objectSize;
}


object_cache*
create_object_cache_etc(const char*, size_t objectSize, size_t, size_t, size_t,
	size_t, uint32, void*, object_cache_constructor, object_cache_destructor,
	object_cache_reclaimer)
{
	return (object_cache*)objectSize;
}


void
delete_object_cache(object_cache* cache)
{
}


status_t
object_cache_set_minimum_reserve(object_cache* cache, size_t objectCount)
{
	return B_OK;
}


void*
object_cache_alloc(object_cache* cache, uint32 flags)
{
	return memalign_etc(0, (size_t)cache, flags);
}


void
object_cache_free(object_cache* cache, void* object, uint32 flags)
{
	return free_etc(object, flags);
}


status_t
object_cache_reserve(object_cache* cache, size_t objectCount, uint32 flags)
{
	return B_OK;
}


void
object_cache_get_usage(object_cache* cache, size_t* _allocatedMemory)
{
	*_allocatedMemory = 0;
}


void
slab_init(kernel_args* args)
{
}


void
slab_init_post_area()
{
}


void
slab_init_post_sem()
{
}


void
slab_init_post_thread()
{
}


#endif	// USE_GUARDED_HEAP_FOR_OBJECT_CACHE


#endif	// USE_GUARDED_HEAP_FOR_MALLOC