src/kits/media/RealtimeAlloc.cpp

/*
 * Copyright 2009, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 */


/*! A simple allocator that works directly on an area, based on the boot
    loader's heap. See there for more information about its inner workings.
*/
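
/*  Example usage (a sketch, not part of the original sources; the pool
    name and sizes are made up for illustration):

        rtm_pool* pool;
        if (rtm_create_pool(&pool, 64 * 1024, "my pool") == B_OK) {
            void* buffer = rtm_alloc(pool, 256);
            if (buffer != NULL) {
                // ... use the buffer, e.g. from a realtime thread ...
                rtm_free(buffer);
            }
            rtm_delete_pool(pool);
        }
*/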

#include <RealtimeAlloc.h>

#include <pthread.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>

#include <OS.h>

#include <locks.h>
#include <kernel/util/DoublyLinkedList.h>

//#define TRACE_RTM
#ifdef TRACE_RTM
#   define TRACE(x...) printf(x);
#else
#   define TRACE(x...) ;
#endif

class FreeChunk {
public:
    void SetTo(size_t size, FreeChunk* next);

    uint32 Size() const;
    uint32 CompleteSize() const { return fSize; }

    FreeChunk* Next() const { return fNext; }
    void SetNext(FreeChunk* next) { fNext = next; }

    FreeChunk* Split(uint32 splitSize);
    bool IsTouching(FreeChunk* link);
    FreeChunk* Join(FreeChunk* link);
    void Remove(rtm_pool* pool, FreeChunk* previous = NULL);
    void Enqueue(rtm_pool* pool);

    void* AllocatedAddress() const;
    static FreeChunk* SetToAllocated(void* allocated);
    static addr_t NextOffset() { return sizeof(size_t); }

private:
    size_t fSize;
    FreeChunk* fNext;
};
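
// A FreeChunk lives directly in pool memory: the first sizeof(size_t)
// bytes hold fSize, and everything from NextOffset() on belongs to the
// caller, starting at &fNext (see AllocatedAddress()). While a chunk is
// free, that location stores the free list's next pointer instead, so an
// allocated chunk only carries sizeof(size_t) bytes of overhead.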

struct rtm_pool : DoublyLinkedListLinkImpl<rtm_pool> {
    area_id area;
    void* heap_base;
    size_t max_size;
    size_t available;
    FreeChunk free_anchor;
    mutex lock;

    bool Contains(void* buffer) const;
    void Free(void* buffer);
};

typedef DoublyLinkedList<rtm_pool> PoolList;

const static uint32 kAlignment = 256;
    // all memory chunks will be a multiple of this
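    // For example, rtm_alloc() rounds a 1 byte request up to 256 bytes
    // and a 257 byte request up to 512 bytes.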

static mutex sPoolsLock = MUTEX_INITIALIZER("rtm pools");
static PoolList sPools;

void
FreeChunk::SetTo(size_t size, FreeChunk* next)
{
    fSize = size;
    fNext = next;
}

/*! Returns the number of bytes that can be allocated
    in this chunk.
*/
uint32
FreeChunk::Size() const
{
    return fSize - FreeChunk::NextOffset();
}

/*! Splits off the upper part of this chunk at the requested
    size and returns the new chunk covering the remainder.
*/
FreeChunk*
FreeChunk::Split(uint32 splitSize)
{
    splitSize = (splitSize - 1 + kAlignment) & ~(kAlignment - 1);

    FreeChunk* chunk
        = (FreeChunk*)((uint8*)this + FreeChunk::NextOffset() + splitSize);
    chunk->fSize = fSize - splitSize - FreeChunk::NextOffset();
    chunk->fNext = fNext;

    fSize = splitSize + FreeChunk::NextOffset();

    return chunk;
}
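
// For illustration (assuming a 64 bit build, i.e. an 8 byte header):
// splitting a free chunk with fSize 4096 at splitSize 256 leaves this
// chunk with fSize 264 and returns a new chunk with fSize 3832 that
// covers the remainder.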

/*! Checks if the specified chunk touches this chunk, so
    that they could be joined.
*/
bool
FreeChunk::IsTouching(FreeChunk* chunk)
{
    return chunk
        && (((uint8*)this + fSize == (uint8*)chunk)
            || (uint8*)chunk + chunk->fSize == (uint8*)this);
}

/*! Joins the specified chunk with this chunk and returns a pointer to
    the resulting chunk, which is one of the two original chunks.
    Note that the chunks must actually be joinable, or this method
    doesn't work correctly. Use FreeChunk::IsTouching() to check
    whether it can be applied.
*/
FreeChunk*
FreeChunk::Join(FreeChunk* chunk)
{
    if (chunk < this) {
        chunk->fSize += fSize;
        chunk->fNext = fNext;

        return chunk;
    }

    fSize += chunk->fSize;
    fNext = chunk->fNext;

    return this;
}

void
FreeChunk::Remove(rtm_pool* pool, FreeChunk* previous)
{
    if (previous == NULL) {
        // find the previous chunk in the list
        FreeChunk* chunk = pool->free_anchor.fNext;

        while (chunk != NULL && chunk != this) {
            previous = chunk;
            chunk = chunk->fNext;
        }

        if (chunk == NULL)
            return;
    }

    previous->fNext = fNext;
    fNext = NULL;
}

/*! Inserts this chunk into the pool's free list, which is kept sorted
    by ascending chunk size.
*/
void
FreeChunk::Enqueue(rtm_pool* pool)
{
    FreeChunk* chunk = pool->free_anchor.fNext;
    FreeChunk* last = &pool->free_anchor;
    while (chunk && chunk->Size() < fSize) {
        last = chunk;
        chunk = chunk->fNext;
    }

    fNext = chunk;
    last->fNext = this;
}

void*
FreeChunk::AllocatedAddress() const
{
    return (void*)&fNext;
}


FreeChunk*
FreeChunk::SetToAllocated(void* allocated)
{
    return (FreeChunk*)((addr_t)allocated - FreeChunk::NextOffset());
}

// #pragma mark - rtm_pool


bool
rtm_pool::Contains(void* buffer) const
{
    return (addr_t)heap_base <= (addr_t)buffer
        && (addr_t)heap_base - 1 + max_size >= (addr_t)buffer;
}

void
rtm_pool::Free(void* allocated)
{
    FreeChunk* freedChunk = FreeChunk::SetToAllocated(allocated);
    available += freedChunk->CompleteSize();

    // try to join the new free chunk with an existing one;
    // it may be joined with up to two chunks

    FreeChunk* chunk = free_anchor.Next();
    FreeChunk* last = &free_anchor;
    int32 joinCount = 0;

    while (chunk) {
        if (chunk->IsTouching(freedChunk)) {
            // almost "insert" it into the list before joining,
            // because the next pointer is inherited by the chunk
            freedChunk->SetNext(chunk->Next());
            freedChunk = chunk->Join(freedChunk);

            // remove the joined chunk from the list
            last->SetNext(freedChunk->Next());
            chunk = last;

            if (++joinCount == 2)
                break;
        }

        last = chunk;
        chunk = chunk->Next();
    }

    // enqueue the chunk at the right position; the
    // free list is ordered by size

    freedChunk->Enqueue(this);
}

// #pragma mark -


static rtm_pool*
pool_for(void* buffer)
{
    MutexLocker _(&sPoolsLock);

    PoolList::Iterator iterator = sPools.GetIterator();
    while (rtm_pool* pool = iterator.Next()) {
        if (pool->Contains(buffer))
            return pool;
    }

    return NULL;
}

// #pragma mark - public API


status_t
rtm_create_pool(rtm_pool** _pool, size_t totalSize, const char* name)
{
    rtm_pool* pool = (rtm_pool*)malloc(sizeof(rtm_pool));
    if (pool == NULL)
        return B_NO_MEMORY;

    if (name != NULL)
        mutex_init_etc(&pool->lock, name, MUTEX_FLAG_CLONE_NAME);
    else
        mutex_init(&pool->lock, "realtime pool");

    // Allocate enough space for at least one allocation over \a totalSize
    pool->max_size = (totalSize + sizeof(FreeChunk) - 1 + B_PAGE_SIZE)
        & ~(B_PAGE_SIZE - 1);
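    // For illustration: with 4 KB pages and a 16 byte FreeChunk (64 bit
    // build), a totalSize of 4096 rounds max_size up to 8192, so a full
    // 4096 byte allocation still fits after the chunk header is
    // subtracted.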

    area_id area = create_area(name, &pool->heap_base, B_ANY_ADDRESS,
        pool->max_size, B_LAZY_LOCK, B_READ_AREA | B_WRITE_AREA);
    if (area < 0) {
        mutex_destroy(&pool->lock);
        free(pool);
        return area;
    }

    pool->area = area;
    pool->available = pool->max_size - FreeChunk::NextOffset();

    // declare the whole heap as one chunk, and add it
    // to the free list

    FreeChunk* chunk = (FreeChunk*)pool->heap_base;
    chunk->SetTo(pool->max_size, NULL);

    pool->free_anchor.SetTo(0, chunk);

    *_pool = pool;

    MutexLocker _(&sPoolsLock);
    sPools.Add(pool);
    return B_OK;
}

status_t
rtm_delete_pool(rtm_pool* pool)
{
    if (pool == NULL)
        return B_BAD_VALUE;

    mutex_lock(&pool->lock);

    {
        MutexLocker _(&sPoolsLock);
        sPools.Remove(pool);
    }

    delete_area(pool->area);
    mutex_destroy(&pool->lock);
    free(pool);

    return B_OK;
}

void*
rtm_alloc(rtm_pool* pool, size_t size)
{
    if (pool == NULL)
        return malloc(size);

    if (pool->heap_base == NULL || size == 0)
        return NULL;

    MutexLocker _(&pool->lock);

    // align the size requirement to a kAlignment bytes boundary
    size = (size - 1 + kAlignment) & ~(size_t)(kAlignment - 1);

    if (size > pool->available) {
        TRACE("malloc(): Out of memory!\n");
        return NULL;
    }

    FreeChunk* chunk = pool->free_anchor.Next();
    FreeChunk* last = &pool->free_anchor;
    while (chunk && chunk->Size() < size) {
        last = chunk;
        chunk = chunk->Next();
    }
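
    // Since the free list is sorted by ascending size (see
    // FreeChunk::Enqueue()), the first chunk that is large enough is
    // also the smallest one that fits, making this a best-fit search.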

    if (chunk == NULL) {
        // could not find a free chunk as large as needed
        TRACE("malloc(): Out of memory!\n");
        return NULL;
    }

    if (chunk->Size() > size + sizeof(FreeChunk) + kAlignment) {
        // if this chunk is bigger than the requested size,
        // we split it to form two chunks (with a minimal
        // size of kAlignment allocatable bytes)

        FreeChunk* freeChunk = chunk->Split(size);
        last->SetNext(freeChunk);

        // re-enqueue the free chunk at the correct position
        freeChunk->Remove(pool, last);
        freeChunk->Enqueue(pool);
    } else {
        // remove the chunk from the free list

        last->SetNext(chunk->Next());
    }

    pool->available -= size + sizeof(size_t);

    TRACE("malloc(%lu) -> %p\n", size, chunk->AllocatedAddress());
    return chunk->AllocatedAddress();
}

status_t
rtm_free(void* allocated)
{
    if (allocated == NULL)
        return B_OK;

    TRACE("rtm_free(%p)\n", allocated);

    // find pool
    rtm_pool* pool = pool_for(allocated);
    if (pool == NULL) {
        free(allocated);
        return B_OK;
    }

    MutexLocker _(&pool->lock);
    pool->Free(allocated);
    return B_OK;
}

status_t
rtm_realloc(void** _buffer, size_t newSize)
{
    if (_buffer == NULL)
        return B_BAD_VALUE;

    TRACE("rtm_realloc(%p, %lu)\n", *_buffer, newSize);

    void* oldBuffer = *_buffer;

    // find pool
    rtm_pool* pool = pool_for(oldBuffer);
    if (pool == NULL) {
        void* buffer = realloc(oldBuffer, newSize);
        if (buffer != NULL) {
            *_buffer = buffer;
            return B_OK;
        }
        return B_NO_MEMORY;
    }

    MutexLocker _(&pool->lock);

    if (newSize == 0) {
        TRACE("realloc(%p, %lu) -> NULL\n", oldBuffer, newSize);
        pool->Free(oldBuffer);
        *_buffer = NULL;
        return B_OK;
    }

    size_t copySize = newSize;
    if (oldBuffer != NULL) {
        FreeChunk* oldChunk = FreeChunk::SetToAllocated(oldBuffer);

        // Check if the old buffer still fits, and if it makes sense to keep it
        if (oldChunk->Size() >= newSize && newSize > oldChunk->Size() / 3) {
            TRACE("realloc(%p, %lu) old buffer is large enough\n",
                oldBuffer, newSize);
            return B_OK;
        }

        if (copySize > oldChunk->Size())
            copySize = oldChunk->Size();
    }

    void* newBuffer = rtm_alloc(pool, newSize);
    if (newBuffer == NULL)
        return B_NO_MEMORY;

    if (oldBuffer != NULL) {
        memcpy(newBuffer, oldBuffer, copySize);
        pool->Free(oldBuffer);
    }

    TRACE("realloc(%p, %lu) -> %p\n", oldBuffer, newSize, newBuffer);
    *_buffer = newBuffer;
    return B_OK;
}

status_t
rtm_size_for(void* buffer)
{
    if (buffer == NULL)
        return 0;

    FreeChunk* chunk = FreeChunk::SetToAllocated(buffer);
    // TODO: we currently always return the actual chunk size, not the
    // allocated one
    return chunk->Size();
}


status_t
rtm_phys_size_for(void* buffer)
{
    if (buffer == NULL)
        return 0;

    FreeChunk* chunk = FreeChunk::SetToAllocated(buffer);
    return chunk->Size();
}

size_t
rtm_available(rtm_pool* pool)
{
    if (pool == NULL) {
        // whatever - might want to use system_info instead
        return 1024 * 1024;
    }

    return pool->available;
}

rtm_pool*
rtm_default_pool()
{
    // We always return NULL - the default pool will just use malloc()/free()
    return NULL;
}
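
// As a consequence, rtm_alloc(rtm_default_pool(), size) behaves like
// malloc(size), and rtm_free() releases such a buffer via free(), since
// pool_for() finds no pool that contains it.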

#if 0
extern "C" {

// undocumented symbols that BeOS exports
status_t rtm_create_pool_etc(rtm_pool** out_pool, size_t total_size,
    const char* name, int32 param4, int32 param5, ...);
void rtm_get_pool(rtm_pool* pool, void* data, int32 param3, int32 param4,
    ...);

}
#endif