2 * Copyright 2009, Axel Dörfler, axeld@pinc-software.de.
3 * Distributed under the terms of the MIT License.
7 /*! A simple allocator that works directly on an area, based on the boot
8 loader's heap. See there for more information about its inner workings.
12 #include <RealtimeAlloc.h>
22 #include <kernel/util/DoublyLinkedList.h>
27 # define TRACE(x...) printf(x);
29 # define TRACE(x...) ;
// Interior of the FreeChunk class declaration. NOTE(review): the
// "class FreeChunk {" header and the fSize/fNext data members are missing
// from this extraction; fSize/fNext usage below implies a uint32 size field
// and a FreeChunk* successor field — confirm against the complete source.
// A FreeChunk heads every chunk of pool memory; free chunks form a singly
// linked, size-ordered list hanging off each pool's free_anchor.
// Initialize with the chunk's total size and its free-list successor.
35 void SetTo(size_t size
, FreeChunk
* next
);
// Raw total size of the chunk (fSize), including the header overhead.
38 uint32
CompleteSize() const { return fSize
; }
// Free-list linkage accessors.
40 FreeChunk
* Next() const { return fNext
; }
41 void SetNext(FreeChunk
* next
) { fNext
= next
; }
// Carve an allocation out of this chunk / test adjacency / merge chunks.
43 FreeChunk
* Split(uint32 splitSize
);
44 bool IsTouching(FreeChunk
* link
);
45 FreeChunk
* Join(FreeChunk
* link
);
// Unlink from / insert into a pool's free list; Remove() accepts an
// optional known predecessor to avoid a list walk.
46 void Remove(rtm_pool
* pool
,
47 FreeChunk
* previous
= NULL
);
48 void Enqueue(rtm_pool
* pool
);
// Map between the chunk header and the address handed to users: the
// payload starts NextOffset() == sizeof(size_t) bytes into the chunk.
50 void* AllocatedAddress() const;
51 static FreeChunk
* SetToAllocated(void* allocated
);
52 static addr_t
NextOffset() { return sizeof(size_t); }
// A realtime memory pool backed by a single area, linked into the global
// sPools registry via the intrusive DoublyLinkedListLinkImpl base.
// NOTE(review): several data members are missing from this extraction —
// usage elsewhere in the file implies lock, area, heap_base, max_size and
// available fields; confirm against the complete source.
60 struct rtm_pool
: DoublyLinkedListLinkImpl
<rtm_pool
> {
// Zero-sized dummy chunk whose next pointer heads the size-ordered
// free list of this pool.
65 FreeChunk free_anchor
;
// Does \a buffer point into this pool's heap area?
68 bool Contains(void* buffer
) const;
// Return an allocation to this pool (caller holds the pool lock).
69 void Free(void* buffer
);
// List type used for the global registry of all pools (outside the struct).
72 typedef DoublyLinkedList
<rtm_pool
> PoolList
;
// Allocation granularity: every request is rounded up to this many bytes.
75 const static uint32 kAlignment
= 256;
76 // all memory chunks will be a multiple of this
// Guards the global sPools registry below (pool lookup and registration).
78 static mutex sPoolsLock
= MUTEX_INITIALIZER("rtm pools");
// Global list of all live pools, searched by pool_for().
79 static PoolList sPools
;
// FreeChunk::SetTo(): record the chunk's total size and free-list
// successor. NOTE(review): the function body is missing from this
// extraction — presumably it assigns fSize and fNext; confirm against the
// complete source.
83 FreeChunk::SetTo(size_t size
, FreeChunk
* next
)
// Usable payload of this chunk: the total fSize minus the sizeof(size_t)
// header that stays in front of every allocation (cf. NextOffset()).
// NOTE(review): the return-type line is missing from this extraction;
// presumably uint32, matching CompleteSize() — confirm.
90 /*! Returns the amount of bytes that can be allocated
94 FreeChunk::Size() const
96 return fSize
- FreeChunk::NextOffset();
// FreeChunk::Split(): carve an allocation of \a splitSize bytes out of the
// front of this chunk and hand back the remainder as a new FreeChunk. The
// request is first rounded up to a kAlignment multiple; the tail chunk
// inherits this chunk's next pointer, and this chunk's fSize shrinks to
// splitSize plus the header overhead. NOTE(review): the declaration of the
// local assigned below (presumably "FreeChunk* chunk") and the final
// "return chunk;" are missing from this extraction — confirm.
100 /*! Splits the upper half at the requested location
104 FreeChunk::Split(uint32 splitSize
)
// Round the request up to the next kAlignment boundary.
106 splitSize
= (splitSize
- 1 + kAlignment
) & ~(kAlignment
- 1);
// The remainder chunk starts right after the carved-out payload.
109 = (FreeChunk
*)((uint8
*)this + FreeChunk::NextOffset() + splitSize
);
110 chunk
->fSize
= fSize
- splitSize
- FreeChunk::NextOffset();
111 chunk
->fNext
= fNext
;
// This chunk keeps only the carved-out portion plus its header.
113 fSize
= splitSize
+ FreeChunk::NextOffset();
// FreeChunk::IsTouching(): true when \a chunk is directly adjacent to this
// chunk in memory — immediately after it (this + fSize == chunk) or
// immediately before it — so the two could be merged by Join().
// NOTE(review): the first operand of the visible "&&" is missing from this
// extraction (presumably a NULL check on chunk) — confirm.
119 /*! Checks if the specified chunk touches this chunk, so
120 that they could be joined.
123 FreeChunk::IsTouching(FreeChunk
* chunk
)
126 && (((uint8
*)this + fSize
== (uint8
*)chunk
)
127 || (uint8
*)chunk
+ chunk
->fSize
== (uint8
*)this);
// FreeChunk::Join(): merge this chunk with the adjacent \a chunk. The two
// assignment pairs below are the two merge directions (chunk absorbs this,
// or this absorbs chunk); in both cases the survivor takes over this
// chunk's fNext and the combined size. NOTE(review): the if/else that
// selects the branch by address order and the return statements are
// missing from this extraction — confirm against the complete source.
131 /*! Joins the chunk to this chunk and returns the pointer
132 to the new chunk - which will either be one of the
134 Note, the chunks must be joinable, or else this method
135 doesn't work correctly. Use FreeChunk::IsTouching()
136 to check if this method can be applied.
139 FreeChunk::Join(FreeChunk
* chunk
)
// Branch A: \a chunk lies below this chunk and absorbs it.
142 chunk
->fSize
+= fSize
;
143 chunk
->fNext
= fNext
;
// Branch B: this chunk lies below \a chunk and absorbs it.
148 fSize
+= chunk
->fSize
;
149 fNext
= chunk
->fNext
;
// FreeChunk::Remove(): unlink this chunk from \a pool's free list. When the
// caller does not know the predecessor, it is found by walking the list
// from the pool's free_anchor. NOTE(review): the statement that advances
// "previous" inside the loop and any not-found handling are missing from
// this extraction — confirm against the complete source.
156 FreeChunk::Remove(rtm_pool
* pool
, FreeChunk
* previous
)
158 if (previous
== NULL
) {
159 // find the previous chunk in the list
160 FreeChunk
* chunk
= pool
->free_anchor
.fNext
;
162 while (chunk
!= NULL
&& chunk
!= this) {
164 chunk
= chunk
->fNext
;
// Bypass this chunk: the predecessor now points at our successor.
171 previous
->fNext
= fNext
;
// FreeChunk::Enqueue(): insert this chunk into \a pool's free list, which
// is kept sorted by ascending usable size — the walk stops at the first
// chunk at least as large as this one. NOTE(review): the statements that
// actually link this chunk in between "last" and "chunk" are missing from
// this extraction — confirm against the complete source.
177 FreeChunk::Enqueue(rtm_pool
* pool
)
179 FreeChunk
* chunk
= pool
->free_anchor
.fNext
;
180 FreeChunk
* last
= &pool
->free_anchor
;
181 while (chunk
&& chunk
->Size() < fSize
) {
183 chunk
= chunk
->fNext
;
192 FreeChunk::AllocatedAddress() const
194 return (void*)&fNext
;
199 FreeChunk::SetToAllocated(void* allocated
)
201 return (FreeChunk
*)((addr_t
)allocated
- FreeChunk::NextOffset());
205 // #pragma mark - rtm_pool
209 rtm_pool::Contains(void* buffer
) const
211 return (addr_t
)heap_base
<= (addr_t
)buffer
212 && (addr_t
)heap_base
- 1 + max_size
>= (addr_t
)buffer
;
// rtm_pool::Free(): return \a allocated to the pool. The chunk's full size
// is credited back to the available counter, then the free list is scanned
// for chunks physically adjacent to the freed one; up to two neighbors (one
// per side) are merged in before the result is re-enqueued by size.
// NOTE(review): the enclosing scan loop and the declaration of joinCount
// are missing from this extraction — confirm against the complete source.
217 rtm_pool::Free(void* allocated
)
219 FreeChunk
* freedChunk
= FreeChunk::SetToAllocated(allocated
);
220 available
+= freedChunk
->CompleteSize();
222 // try to join the new free chunk with an existing one
223 // it may be joined with up to two chunks
225 FreeChunk
* chunk
= free_anchor
.Next();
226 FreeChunk
* last
= &free_anchor
;
230 if (chunk
->IsTouching(freedChunk
)) {
231 // almost "insert" it into the list before joining
232 // because the next pointer is inherited by the chunk
233 freedChunk
->SetNext(chunk
->Next());
234 freedChunk
= chunk
->Join(freedChunk
);
236 // remove the joined chunk from the list
237 last
->SetNext(freedChunk
->Next());
// A free chunk can touch at most two others — stop after the second merge.
240 if (++joinCount
== 2)
245 chunk
= chunk
->Next();
248 // enqueue the link at the right position; the
249 // free link queue is ordered by size
251 freedChunk
->Enqueue(this);
// pool_for(): find the pool whose area contains \a buffer, iterating the
// global registry under sPoolsLock. NOTE(review): the return statements
// (presumably "return pool;" on a match and NULL otherwise) are missing
// from this extraction — confirm against the complete source.
259 pool_for(void* buffer
)
261 MutexLocker
_(&sPoolsLock
);
263 PoolList::Iterator iterator
= sPools
.GetIterator();
264 while (rtm_pool
* pool
= iterator
.Next()) {
265 if (pool
->Contains(buffer
))
273 // #pragma mark - public API
// rtm_create_pool(): allocate and initialize a new realtime pool. The
// requested size is padded so at least one allocation of \a totalSize plus
// a FreeChunk header fits, rounded up to whole pages, and backed by a
// lazily-locked read/write area. The whole heap starts out as one free
// chunk hanging off free_anchor, and the pool is registered in sPools under
// sPoolsLock. NOTE(review): malloc-failure handling, the conditional
// compilation selecting between the two mutex_init variants, the
// "area < 0" cleanup branch context, and the *_pool assignment/return are
// missing from this extraction — confirm against the complete source.
277 rtm_create_pool(rtm_pool
** _pool
, size_t totalSize
, const char* name
)
279 rtm_pool
* pool
= (rtm_pool
*)malloc(sizeof(rtm_pool
));
// Named variant (presumably the in-tree build) vs. plain variant below.
284 mutex_init_etc(&pool
->lock
, name
, MUTEX_FLAG_CLONE_NAME
);
286 mutex_init(&pool
->lock
, "realtime pool");
288 // Allocate enough space for at least one allocation over \a totalSize
289 pool
->max_size
= (totalSize
+ sizeof(FreeChunk
) - 1 + B_PAGE_SIZE
)
290 & ~(B_PAGE_SIZE
- 1);
292 area_id area
= create_area(name
, &pool
->heap_base
, B_ANY_ADDRESS
,
293 pool
->max_size
, B_LAZY_LOCK
, B_READ_AREA
| B_WRITE_AREA
);
// Failure path: undo the lock initialization when area creation failed.
295 mutex_destroy(&pool
->lock
);
// One size_t header always precedes the first usable byte.
301 pool
->available
= pool
->max_size
- FreeChunk::NextOffset();
303 // declare the whole heap as one chunk, and add it
306 FreeChunk
* chunk
= (FreeChunk
*)pool
->heap_base
;
307 chunk
->SetTo(pool
->max_size
, NULL
);
309 pool
->free_anchor
.SetTo(0, chunk
);
// Register the new pool in the global list.
313 MutexLocker
_(&sPoolsLock
);
// rtm_delete_pool(): tear down \a pool — acquire its lock so no allocation
// is in flight, unregister it from the global list under sPoolsLock, then
// delete the backing area and destroy the lock. NOTE(review): the NULL
// check, the sPools removal call, and the final free(pool)/return are
// missing from this extraction — confirm against the complete source.
320 rtm_delete_pool(rtm_pool
* pool
)
325 mutex_lock(&pool
->lock
);
328 MutexLocker
_(&sPoolsLock
);
332 delete_area(pool
->area
);
333 mutex_destroy(&pool
->lock
);
// rtm_alloc(): allocate \a size bytes from \a pool. The request is rounded
// up to a kAlignment (256-byte) multiple, then the size-ordered free list
// is scanned for the first chunk large enough; an oversized chunk is split
// and its remainder re-sorted into the list. Returns the chunk's payload
// address. NOTE(review): the pool == NULL fallback, the early/OOM return
// statements, and the "last = chunk" loop-advance lines are missing from
// this extraction — confirm against the complete source.
341 rtm_alloc(rtm_pool
* pool
, size_t size
)
346 if (pool
->heap_base
== NULL
|| size
== 0)
349 MutexLocker
_(&pool
->lock
);
351 // align the size requirement to a kAlignment bytes boundary
352 size
= (size
- 1 + kAlignment
) & ~(size_t)(kAlignment
- 1);
// Quick reject: the pool as a whole cannot satisfy the request.
354 if (size
> pool
->available
) {
355 TRACE("malloc(): Out of memory!\n");
// First-fit walk over the size-ordered free list.
359 FreeChunk
* chunk
= pool
->free_anchor
.Next();
360 FreeChunk
* last
= &pool
->free_anchor
;
361 while (chunk
&& chunk
->Size() < size
) {
363 chunk
= chunk
->Next();
367 // could not find a free chunk as large as needed
368 TRACE("malloc(): Out of memory!\n");
// Split only when the leftover can hold a header plus kAlignment bytes.
372 if (chunk
->Size() > size
+ sizeof(FreeChunk
) + kAlignment
) {
373 // if this chunk is bigger than the requested size,
374 // we split it to form two chunks (with a minimal
375 // size of kAlignment allocatable bytes).
377 FreeChunk
* freeChunk
= chunk
->Split(size
);
378 last
->SetNext(freeChunk
);
380 // re-enqueue the free chunk at the correct position
381 freeChunk
->Remove(pool
, last
);
382 freeChunk
->Enqueue(pool
);
384 // remove the chunk from the free list
386 last
->SetNext(chunk
->Next());
// Account for the payload plus its size_t header.
389 pool
->available
-= size
+ sizeof(size_t);
391 TRACE("malloc(%lu) -> %p\n", size
, chunk
->AllocatedAddress());
392 return chunk
->AllocatedAddress();
// rtm_free(): free \a allocated — NULL is a no-op; otherwise the owning
// pool is found via pool_for() and the chunk returned through
// rtm_pool::Free() under the pool's lock. NOTE(review): the handling when
// no pool contains the pointer (presumably a plain free() fallback) and the
// return statements are missing from this extraction — confirm.
397 rtm_free(void* allocated
)
399 if (allocated
== NULL
)
402 TRACE("rtm_free(%p)\n", allocated
);
405 rtm_pool
* pool
= pool_for(allocated
);
411 MutexLocker
_(&pool
->lock
);
412 pool
->Free(allocated
);
// rtm_realloc(): resize *_buffer. Inside a pool: keep the old chunk when it
// is still big enough but not wastefully so (newSize must exceed a third of
// the chunk); otherwise allocate a new block, copy min(newSize, old usable
// size) bytes, free the old block, and store the new pointer in *_buffer.
// NOTE(review): several branch guards are missing from this extraction —
// the realloc() call presumably runs only when pool == NULL, and the
// TRACE-NULL/Free pair presumably belongs to a newSize == 0 path; confirm
// against the complete source.
418 rtm_realloc(void** _buffer
, size_t newSize
)
423 TRACE("rtm_realloc(%p, %lu)\n", *_buffer
, newSize
);
425 void* oldBuffer
= *_buffer
;
428 rtm_pool
* pool
= pool_for(oldBuffer
);
// Fallback path: the buffer is not managed by any realtime pool.
430 void* buffer
= realloc(oldBuffer
, newSize
);
431 if (buffer
!= NULL
) {
438 MutexLocker
_(&pool
->lock
);
441 TRACE("realloc(%p, %lu) -> NULL\n", oldBuffer
, newSize
);
442 pool
->Free(oldBuffer
);
447 size_t copySize
= newSize
;
448 if (oldBuffer
!= NULL
) {
449 FreeChunk
* oldChunk
= FreeChunk::SetToAllocated(oldBuffer
);
451 // Check if the old buffer still fits, and if it makes sense to keep it
452 if (oldChunk
->Size() >= newSize
&& newSize
> oldChunk
->Size() / 3) {
453 TRACE("realloc(%p, %lu) old buffer is large enough\n",
// Never copy more than the old chunk actually holds.
458 if (copySize
> oldChunk
->Size())
459 copySize
= oldChunk
->Size();
462 void* newBuffer
= rtm_alloc(pool
, newSize
);
463 if (newBuffer
== NULL
)
466 if (oldBuffer
!= NULL
) {
467 memcpy(newBuffer
, oldBuffer
, copySize
);
468 pool
->Free(oldBuffer
);
471 TRACE("realloc(%p, %lu) -> %p\n", oldBuffer
, newSize
, newBuffer
);
472 *_buffer
= newBuffer
;
// rtm_size_for(): usable size of the allocation at \a buffer, read from the
// chunk header in front of it. NOTE(review): the NULL-buffer guard lines
// are missing from this extraction — confirm against the complete source.
478 rtm_size_for(void* buffer
)
483 FreeChunk
* chunk
= FreeChunk::SetToAllocated(buffer
);
484 // TODO: we currently always return the actual chunk size, not the allocated
486 return chunk
->Size();
// rtm_phys_size_for(): physical (chunk) size of the allocation at
// \a buffer; within the visible code this is identical to rtm_size_for().
491 rtm_phys_size_for(void* buffer
)
496 FreeChunk
* chunk
= FreeChunk::SetToAllocated(buffer
);
497 return chunk
->Size();
// rtm_available(): bytes still allocatable from \a pool. NOTE(review): the
// branch the "whatever" comment belongs to (presumably a pool == NULL /
// default-pool case returning a fixed value) is missing from this
// extraction — confirm against the complete source.
502 rtm_available(rtm_pool
* pool
)
505 // whatever - might want to use system_info instead
509 return pool
->available
;
// NOTE(review): the comment below is detached from its function — the code
// it documents (presumably the default-pool accessor) is missing from this
// extraction.
516 // We always return NULL - the default pool will just use malloc()/free()
// BeOS binary-compatibility prototypes; the int32 parameters are
// unidentified ("param4"/"param5") and both functions are variadic.
524 // undocumented symbols that BeOS exports
525 status_t
rtm_create_pool_etc(rtm_pool
** out_pool
, size_t total_size
, const char * name
, int32 param4
, int32 param5
, ...);
526 void rtm_get_pool(rtm_pool
*pool
,void *data
,int32 param3
,int32 param4
, ...);