vfs: check userland buffers before reading them.
[haiku.git] / src / system / kernel / debug / debug_heap.cpp
blobefdd7345605e706efab337bb0c7e4aaee0d359d9
1 /*
2 * Copyright 2009-2010, Ingo Weinhold, ingo_weinhold@gmx.de
3 * Distributed under the terms of the MIT License.
4 */
7 #include <debug_heap.h>
9 #include <new>
11 #include <util/AutoLock.h>
12 #include <vm/vm.h>
// The initial debug heap is a small static buffer, so that kdebug allocations
// work even before the VM is initialized; debug_heap_init() later switches
// sHeapBase/sHeapSize over to a dedicated kernel area of KDEBUG_HEAP bytes.
// The buffer is 8-byte aligned because the pool manages memory in 8-byte
// allocation_header units (see DebugAllocPool).
#define INITIAL_DEBUG_HEAP_SIZE B_PAGE_SIZE
static char sInitialHeap[INITIAL_DEBUG_HEAP_SIZE] __attribute__ ((aligned (8)));
static void* sHeapBase = sInitialHeap;
static size_t sHeapSize = INITIAL_DEBUG_HEAP_SIZE;
// Tag object for kdebug allocations (e.g. for placement-new style usage).
const kdebug_alloc_t kdebug_alloc = {};
// Header preceding every block in a DebugAllocPool. The pool is managed as an
// array of 8-byte allocation_header slots; "size" and all indices below are
// expressed in those units, with index 0 reserved as the invalid/none value.
struct allocation_header {
	uint32 size : 31; // size in allocation_header units
	bool free : 1; // set while the block is on the free list
	uint32 previous; // index of the preceding block's header (0: none)
// Extended header for free blocks: adds doubly-linked free-list links
// (indices into the pool, 0 terminates the list). Only valid while
// allocation_header::free is set.
struct free_entry : allocation_header {
	uint32 previous_free;
	uint32 next_free;
36 struct DebugAllocPool {
37 void Init(void* heap, size_t heapSize)
39 fParent = NULL;
40 fChild = NULL;
42 uint32 size = heapSize / 8;
43 fBase = (allocation_header*)heap - 1;
44 fEnd = size + 1;
45 fFirstFree = 0;
46 fLastFree = 0;
48 // add free entry spanning the whole area
49 fBase[1].size = size - 1;
50 fBase[1].previous = 0;
51 _InsertFreeEntry(1);
54 DebugAllocPool* CreateChildPool()
56 // do we already have a child pool?
57 if (fChild != NULL)
58 return NULL;
60 // create the pool object
61 DebugAllocPool* pool
62 = (DebugAllocPool*)Allocate(sizeof(DebugAllocPool));
63 if (pool == NULL)
64 return NULL;
66 // do we have enough free space?
67 if (fLastFree == 0 || fBase[fLastFree].size < 2) {
68 Free(pool);
69 return NULL;
72 allocation_header* header = &fBase[fLastFree];
73 _RemoveFreeEntry(fLastFree);
75 pool->Init(header + 1, header->size * 8);
76 pool->fParent = this;
78 return fChild = pool;
81 void Destroy()
83 if (fParent != NULL) {
84 fParent->fChild = NULL;
85 fParent->Free(fBase + 1);
89 DebugAllocPool* Parent() const
91 return fParent;
94 void* Allocate(size_t size)
96 size = (size + 7) / 8;
97 uint32 index = fFirstFree;
98 while (index != 0 && fBase[index].size < size)
99 index = ((free_entry*)&fBase[index])->next_free;
101 if (index == 0)
102 return NULL;
104 _RemoveFreeEntry(index);
106 // if the entry is big enough, we split it
107 if (fBase[index].size - size >= 2) {
108 uint32 next = index + 1 + size;
109 uint32 nextNext = index + 1 + fBase[index].size;
110 fBase[next].size = fBase[index].size - size - 1;
111 fBase[next].previous = index;
112 fBase[index].size = size;
113 _InsertFreeEntry(next);
115 if (nextNext < fEnd)
116 fBase[nextNext].previous = next;
119 return &fBase[index + 1];
122 void Free(void* address)
124 // check address
125 if (((addr_t)address & 7) != 0 || address <= fBase + 1
126 || address >= fBase + fEnd) {
127 kprintf("DebugAllocPool::Free(%p): bad address\n", address);
128 return;
131 // get header
132 allocation_header* header = (allocation_header*)address - 1;
133 uint32 index = header - fBase;
134 if (header->free) {
135 kprintf("DebugAllocPool::Free(%p): double free\n", address);
136 return;
139 uint32 next = index + 1 + header->size;
141 // join with previous, if possible
142 if (index > 1 && fBase[header->previous].free) {
143 uint32 previous = header->previous;
144 _RemoveFreeEntry(previous);
146 fBase[previous].size += 1 + header->size;
147 fBase[next].previous = previous;
149 index = previous;
150 header = fBase + index;
153 // join with next, if possible
154 if (next < fEnd && fBase[next].free) {
155 _RemoveFreeEntry(next);
157 header->size += 1 + fBase[next].size;
159 uint32 nextNext = index + 1 + header->size;
160 if (nextNext < fEnd)
161 fBase[nextNext].previous = index;
164 _InsertFreeEntry(index);
167 private:
168 void _InsertFreeEntry(uint32 index)
170 // find the insertion point -- list is sorted by ascending size
171 uint32 size = fBase[index].size;
172 uint32 next = fFirstFree;
173 while (next != 0 && size > fBase[next].size)
174 next = ((free_entry*)&fBase[next])->next_free;
176 // insert
177 uint32 previous;
178 if (next != 0) {
179 previous = ((free_entry*)&fBase[next])->previous_free;
180 ((free_entry*)&fBase[next])->previous_free = index;
181 } else {
182 previous = fLastFree;
183 fLastFree = index;
186 if (previous != 0)
187 ((free_entry*)&fBase[previous])->next_free = index;
188 else
189 fFirstFree = index;
191 ((free_entry*)&fBase[index])->previous_free = previous;
192 ((free_entry*)&fBase[index])->next_free = next;
194 fBase[index].free = true;
197 void _RemoveFreeEntry(uint32 index)
199 uint32 previous = ((free_entry*)&fBase[index])->previous_free;
200 uint32 next = ((free_entry*)&fBase[index])->next_free;
202 if (previous != 0)
203 ((free_entry*)&fBase[previous])->next_free = next;
204 else
205 fFirstFree = next;
207 if (next != 0)
208 ((free_entry*)&fBase[next])->previous_free = previous;
209 else
210 fLastFree = previous;
212 fBase[index].free = false;
215 private:
216 DebugAllocPool* fParent;
217 DebugAllocPool* fChild;
218 allocation_header* fBase; // actually base - 1, so that index 0 is
219 // invalid
220 uint32 fEnd;
221 uint32 fFirstFree;
222 uint32 fLastFree;
// Innermost (most recently created) pool; its ancestors are reachable via
// Parent(). NULL until create_debug_alloc_pool() lazily sets up sInitialPool.
static DebugAllocPool* sCurrentPool;
static DebugAllocPool sInitialPool;
230 debug_alloc_pool*
231 create_debug_alloc_pool()
233 if (sCurrentPool == NULL) {
234 sInitialPool.Init(sHeapBase, sHeapSize);
235 sCurrentPool = &sInitialPool;
236 return sCurrentPool;
239 DebugAllocPool* pool = sCurrentPool->CreateChildPool();
240 if (pool == NULL)
241 return NULL;
243 sCurrentPool = pool;
244 return sCurrentPool;
248 void
249 delete_debug_alloc_pool(debug_alloc_pool* pool)
251 if (pool == NULL || sCurrentPool == NULL)
252 return;
254 // find the pool in the hierarchy
255 DebugAllocPool* otherPool = sCurrentPool;
256 while (otherPool != NULL && otherPool != pool)
257 otherPool = otherPool->Parent();
259 if (otherPool == NULL)
260 return;
262 // destroy the pool
263 sCurrentPool = pool->Parent();
264 pool->Destroy();
266 if (pool != &sInitialPool)
267 debug_free(pool);
271 void*
272 debug_malloc(size_t size)
274 if (sCurrentPool == NULL)
275 return NULL;
277 return sCurrentPool->Allocate(size);
281 void*
282 debug_calloc(size_t num, size_t size)
284 size_t allocationSize = num * size;
285 void* allocation = debug_malloc(allocationSize);
286 if (allocation == NULL)
287 return NULL;
289 memset(allocation, 0, allocationSize);
290 return allocation;
294 void
295 debug_free(void* address)
297 if (address != NULL && sCurrentPool != NULL)
298 sCurrentPool->Free(address);
302 void
303 debug_heap_init()
305 // create the heap area
306 void* base;
307 virtual_address_restrictions virtualRestrictions = {};
308 virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
309 physical_address_restrictions physicalRestrictions = {};
310 area_id area = create_area_etc(B_SYSTEM_TEAM, "kdebug heap", KDEBUG_HEAP,
311 B_FULL_LOCK, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
312 CREATE_AREA_DONT_WAIT, 0, &virtualRestrictions, &physicalRestrictions,
313 (void**)&base);
314 if (area < 0)
315 return;
317 // switch from the small static buffer to the area
318 InterruptsLocker locker;
319 sHeapBase = base;
320 sHeapSize = KDEBUG_HEAP;