/*
 * Copyright 2009-2010, Ingo Weinhold, ingo_weinhold@gmx.de
 * Distributed under the terms of the MIT License.
 */

#include <debug_heap.h>

#include <util/AutoLock.h>
#include <vm/vm.h>


#define INITIAL_DEBUG_HEAP_SIZE	B_PAGE_SIZE

static char sInitialHeap[INITIAL_DEBUG_HEAP_SIZE] __attribute__ ((aligned (8)));
static void* sHeapBase = sInitialHeap;
static size_t sHeapSize = INITIAL_DEBUG_HEAP_SIZE;

const kdebug_alloc_t kdebug_alloc = {};
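	// tag object for the debug-heap placement new (see <debug_heap.h>), so
	// callers can write new(kdebug_alloc) Foo rather than calling
	// debug_malloc() directly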


struct allocation_header {
	uint32	size : 31;	// size in allocation_header units
	bool	free : 1;
	uint32	previous;
};

struct free_entry : allocation_header {
	uint32	previous_free;
	uint32	next_free;
};
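
// All sizes and indices below are measured in 8-byte allocation_header
// units. Free entries form a doubly linked list sorted by ascending size,
// so the first fitting entry found during allocation is also the best fit;
// index 0 serves as the null link.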

struct DebugAllocPool {
	void Init(void* heap, size_t heapSize)
	{
		fParent = NULL;
		fChild = NULL;

		uint32 size = heapSize / 8;
		fBase = (allocation_header*)heap - 1;
		fEnd = size + 1;
		fFirstFree = 0;
		fLastFree = 0;

		// add a free entry spanning the whole area
		fBase[1].size = size - 1;
		fBase[1].previous = 0;
		_InsertFreeEntry(1);
	}

	DebugAllocPool* CreateChildPool()
	{
		// do we already have a child pool?
		if (fChild != NULL)
			return NULL;

		// create the pool object
		DebugAllocPool* pool
			= (DebugAllocPool*)Allocate(sizeof(DebugAllocPool));
		if (pool == NULL)
			return NULL;

		// do we have enough free space?
		if (fLastFree == 0 || fBase[fLastFree].size < 2) {
			Free(pool);
			return NULL;
		}

		allocation_header* header = &fBase[fLastFree];
		_RemoveFreeEntry(fLastFree);

		pool->Init(header + 1, header->size * 8);
		pool->fParent = this;

		return fChild = pool;
	}
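
	// Note: since the free list is sorted by ascending size, fLastFree is
	// the largest free entry, i.e. the child pool is carved out of the
	// biggest contiguous free range of its parent.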

	void Destroy()
	{
		if (fParent != NULL) {
			fParent->fChild = NULL;
			fParent->Free(fBase + 1);
		}
	}

	DebugAllocPool* Parent() const
	{
		return fParent;
	}

	void* Allocate(size_t size)
	{
		size = (size + 7) / 8;
		uint32 index = fFirstFree;
		while (index != 0 && fBase[index].size < size)
			index = ((free_entry*)&fBase[index])->next_free;

		if (index == 0)
			return NULL;

		_RemoveFreeEntry(index);

		// if the entry is big enough, we split it
		if (fBase[index].size - size >= 2) {
			uint32 next = index + 1 + size;
			uint32 nextNext = index + 1 + fBase[index].size;
			fBase[next].size = fBase[index].size - size - 1;
			fBase[next].previous = index;
			fBase[index].size = size;
			_InsertFreeEntry(next);

			if (nextNext < fEnd)
				fBase[nextNext].previous = next;
		}

		return &fBase[index + 1];
	}
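
	// Worked example with hypothetical numbers: Allocate(20) rounds up to
	// size = 3 units. Given a smallest fitting free entry of 10 units at
	// index i, that entry keeps 3 units for the allocation and a new 6-unit
	// free entry is created at i + 4; the remaining unit becomes the new
	// entry's own header.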

	void Free(void* address)
	{
		// check the address
		if (((addr_t)address & 7) != 0 || address <= fBase + 1
			|| address >= fBase + fEnd) {
			kprintf("DebugAllocPool::Free(%p): bad address\n", address);
			return;
		}

		// get the header
		allocation_header* header = (allocation_header*)address - 1;
		uint32 index = header - fBase;
		if (header->free) {
			kprintf("DebugAllocPool::Free(%p): double free\n", address);
			return;
		}

		uint32 next = index + 1 + header->size;

		// join with the previous entry, if possible
		if (index > 1 && fBase[header->previous].free) {
			uint32 previous = header->previous;
			_RemoveFreeEntry(previous);

			fBase[previous].size += 1 + header->size;
			fBase[next].previous = previous;

			index = previous;
			header = fBase + index;
		}

		// join with the next entry, if possible
		if (next < fEnd && fBase[next].free) {
			_RemoveFreeEntry(next);

			header->size += 1 + fBase[next].size;

			uint32 nextNext = index + 1 + header->size;
			if (nextNext < fEnd)
				fBase[nextNext].previous = index;
		}

		_InsertFreeEntry(index);
	}
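
	// Note: Free() maintains the invariant that no two adjacent entries are
	// free at the same time -- a freed block is coalesced with both of its
	// neighbors before being reinserted into the free list.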

private:
	void _InsertFreeEntry(uint32 index)
	{
		// find the insertion point -- the list is sorted by ascending size
		uint32 size = fBase[index].size;
		uint32 next = fFirstFree;
		while (next != 0 && size > fBase[next].size)
			next = ((free_entry*)&fBase[next])->next_free;

		uint32 previous;
		if (next != 0) {
			previous = ((free_entry*)&fBase[next])->previous_free;
			((free_entry*)&fBase[next])->previous_free = index;
		} else {
			previous = fLastFree;
			fLastFree = index;
		}

		if (previous != 0)
			((free_entry*)&fBase[previous])->next_free = index;
		else
			fFirstFree = index;

		((free_entry*)&fBase[index])->previous_free = previous;
		((free_entry*)&fBase[index])->next_free = next;

		fBase[index].free = true;
	}
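
	// Index 0 can serve as the null link because fBase points one header
	// before the start of the heap, so no real entry ever has index 0.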

	void _RemoveFreeEntry(uint32 index)
	{
		uint32 previous = ((free_entry*)&fBase[index])->previous_free;
		uint32 next = ((free_entry*)&fBase[index])->next_free;

		if (previous != 0)
			((free_entry*)&fBase[previous])->next_free = next;
		else
			fFirstFree = next;

		if (next != 0)
			((free_entry*)&fBase[next])->previous_free = previous;
		else
			fLastFree = previous;

		fBase[index].free = false;
	}

private:
	DebugAllocPool*		fParent;
	DebugAllocPool*		fChild;
	allocation_header*	fBase;	// actually base - 1, so that index 0 is
								// invalid
	uint32				fEnd;
	uint32				fFirstFree;
	uint32				fLastFree;
};


static DebugAllocPool* sCurrentPool;
static DebugAllocPool sInitialPool;


debug_alloc_pool*
create_debug_alloc_pool()
{
	if (sCurrentPool == NULL) {
		sInitialPool.Init(sHeapBase, sHeapSize);
		sCurrentPool = &sInitialPool;
		return sCurrentPool;
	}

	DebugAllocPool* pool = sCurrentPool->CreateChildPool();
	if (pool == NULL)
		return NULL;

	sCurrentPool = pool;
	return sCurrentPool;
}
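
// The pools form a stack: each successful call pushes a child pool carved
// out of the current one, and delete_debug_alloc_pool() pops back to the
// deleted pool's parent, discarding any pools nested inside it.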


void
delete_debug_alloc_pool(debug_alloc_pool* pool)
{
	if (pool == NULL || sCurrentPool == NULL)
		return;

	// find the pool in the hierarchy
	DebugAllocPool* otherPool = sCurrentPool;
	while (otherPool != NULL && otherPool != pool)
		otherPool = otherPool->Parent();

	if (otherPool == NULL)
		return;

	// destroy the pool
	sCurrentPool = pool->Parent();
	pool->Destroy();

	if (pool != &sInitialPool)
		debug_free(pool);
}


void*
debug_malloc(size_t size)
{
	if (sCurrentPool == NULL)
		return NULL;

	return sCurrentPool->Allocate(size);
}


void*
debug_calloc(size_t num, size_t size)
{
	size_t allocationSize = num * size;
	void* allocation = debug_malloc(allocationSize);
	if (allocation == NULL)
		return NULL;

	memset(allocation, 0, allocationSize);
	return allocation;
}


void
debug_free(void* address)
{
	if (address != NULL && sCurrentPool != NULL)
		sCurrentPool->Free(address);
}
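
// Usage sketch (hypothetical debugger-command code): nesting a pool lets a
// command drop all of its allocations in one go.
//
//	debug_alloc_pool* pool = create_debug_alloc_pool();
//	char* buffer = (char*)debug_malloc(128);
//	// ... use buffer ...
//	delete_debug_alloc_pool(pool);	// also releases buffer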


void
debug_heap_init()
{
	// create the heap area
	void* base;
	virtual_address_restrictions virtualRestrictions = {};
	virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
	physical_address_restrictions physicalRestrictions = {};
	area_id area = create_area_etc(B_SYSTEM_TEAM, "kdebug heap", KDEBUG_HEAP,
		B_FULL_LOCK, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
		CREATE_AREA_DONT_WAIT, 0, &virtualRestrictions, &physicalRestrictions,
		(void**)&base);
	if (area < 0)
		return;

	// switch from the small static buffer to the area
	InterruptsLocker locker;
	sHeapBase = base;
	sHeapSize = KDEBUG_HEAP;
}