2 * Copyright 2003-2013, Axel Dörfler, axeld@pinc-software.de.
3 * Distributed under the terms of the MIT License.
7 #include "runtime_loader_private.h"
19 #include <util/SplayTree.h>
22 /*! This is a very simple malloc()/free() implementation - it only
24 After heap_init() is called, all free memory is contained in one
25 big chunk, the only entry in the free link list (which is a single
27 When memory is allocated, the smallest free chunk that contains
28 the requested size is split (or taken as a whole if it can't be
29 split any further), and its lower half will be removed from the
31 The free list is ordered by size, starting with the smallest
32 free chunk available. When a chunk is freed, it will be joined
33 with its predecessor or successor, if possible.
34 To ease list handling, the list anchor itself is a free chunk with
35 size 0 that can't be allocated.
// Every chunk handed out by this allocator is sized to a multiple of this
// (power-of-two) value.
static const size_t kAlignment = 8;

// Size of the very first heap area, created when the heap is initialized.
static const size_t kInitialHeapSize = 64 * 1024;

// When the heap has to grow, the new area size is rounded up to a multiple
// of this granularity.
static const size_t kHeapGrowthAlignment = 32 * 1024;
// NOTE(review): fragment of the Chunk base class — the class header, the
// union overlaying fSize with fAlignment, and the CompleteSize() body were
// lost in extraction; restore them from the full source before editing.
// Accessor for the chunk's complete size (header included) — body missing here.
46 size_t CompleteSize() const
// Pads/aligns the chunk header to kAlignment bytes so payloads stay aligned.
54 char fAlignment
[kAlignment
];
// Per-free-chunk link data: the intrusive splay-tree link plus a singly
// linked list link used by the iteratable tree.
// NOTE(review): the bodies of Next()/NextLink() and the fNext member were
// lost in extraction — presumably they return fNext / &fNext; confirm
// against the full source.
62 struct FreeChunkData
: SplayTreeLink
<FreeChunk
> {
// Returns the next free chunk in the intrusive list.
64 FreeChunk
* Next() const
// Returns the address of the next-link for intrusive list manipulation.
69 FreeChunk
** NextLink()
// A free chunk of heap memory: combines the Chunk header (size) with the
// tree/list links from FreeChunkData.
// NOTE(review): the access-specifier lines, the Size() declaration, and the
// closing "};" were lost in extraction.
79 class FreeChunk
: public Chunk
, public FreeChunkData
{
// Initializes a raw memory region as one free chunk of the given total size.
81 void SetTo(size_t size
);
// Carves off the (aligned) upper part at splitSize and returns it.
85 FreeChunk
* Split(size_t splitSize
);
// True when this chunk and "link" are directly adjacent in memory.
86 bool IsTouching(FreeChunk
* link
);
// Merges this chunk with an adjacent one; returns the surviving chunk.
87 FreeChunk
* Join(FreeChunk
* link
);
// The address handed out to the application (start of the payload).
89 void* AllocatedAddress() const;
// Maps an allocated payload address back to its FreeChunk.
90 static FreeChunk
* SetToAllocated(void* allocated
);
// Lookup key for the free-chunk tree: orders by chunk size first, then by
// chunk address, so equal-sized chunks remain totally ordered.
// NOTE(review): the struct header, the size-only constructor's initializer
// list, the fChunk(chunk) initializer, the "fChunk == chunk -> 0" equality
// case in Compare(), and the fSize member were lost in extraction.
// Key for a size-only query (no specific chunk).
95 FreeChunkKey(size_t size
)
// Key describing an existing chunk (its size plus its identity).
102 FreeChunkKey(const FreeChunk
* chunk
)
104 fSize(chunk
->Size()),
// Three-way comparison against a chunk: by size, then by pointer order.
109 int Compare(const FreeChunk
* chunk
) const
111 size_t chunkSize
= chunk
->Size();
112 if (chunkSize
!= fSize
)
113 return fSize
< chunkSize
? -1 : 1;
// Sizes are equal: fall back to address order to break the tie.
117 return fChunk
< chunk
? -1 : 1;
122 const FreeChunk
* fChunk
;
// Glue type adapting FreeChunk/FreeChunkKey to the SplayTree template
// interface: key extraction, link access, comparison, and list link.
// NOTE(review): the body of GetLink() (presumably "return node;") and the
// struct's closing brace were lost in extraction.
126 struct FreeChunkTreeDefinition
{
127 typedef FreeChunkKey KeyType
;
128 typedef FreeChunk NodeType
;
// Builds the (size, address) key for a node.
130 static FreeChunkKey
GetKey(const FreeChunk
* node
)
132 return FreeChunkKey(node
);
// Exposes the node's intrusive SplayTreeLink.
135 static SplayTreeLink
<FreeChunk
>* GetLink(FreeChunk
* node
)
// Orders a key against a node via FreeChunkKey::Compare().
140 static int Compare(const FreeChunkKey
& key
, const FreeChunk
* node
)
142 return key
.Compare(node
);
// Intrusive list link used by the iteratable tree variant.
145 static FreeChunk
** GetListLink(FreeChunk
* node
)
147 return node
->NextLink();
// The heap's free-chunk container: an iteratable splay tree over the
// definition above.
152 typedef IteratableSplayTree
<FreeChunkTreeDefinition
> FreeChunkTree
;
// Total usable bytes currently held in free chunks; maintained by the
// add_area()/malloc()/free() accounting below.
155 static size_t sAvailable
;
// All free chunks, ordered by size then address, supporting closest-fit
// lookups via FindClosest().
156 static FreeChunkTree sFreeChunkTree
;
160 align(size_t size
, size_t alignment
= kAlignment
)
162 return (size
+ alignment
- 1) & ~(alignment
- 1);
// Initializes a raw memory region as a free chunk of the given total size.
// NOTE(review): the return-type line and the entire method body (presumably
// setting fSize and clearing fNext) were lost in extraction — restore them
// from the full source before relying on this definition.
167 FreeChunk::SetTo(size_t size
)
174 /*! Returns the amount of bytes that can be allocated
178 FreeChunk::Size() const
180 return (addr_t
)this + fSize
- (addr_t
)AllocatedAddress();
184 /*! Splits the upper half at the requested location and returns it. This chunk
185 will no longer be a valid FreeChunk object; only its fSize will be valid.
// NOTE(review): the tail of this method (presumably shrinking this chunk to
// newSize and returning the upper chunk) was lost in extraction — restore it
// from the full source.
188 FreeChunk::Split(size_t splitSize
)
// Round the split point up so both halves stay kAlignment-aligned.
190 splitSize
= align(splitSize
);
// The new (upper) chunk begins splitSize bytes into this chunk's payload.
192 FreeChunk
* chunk
= (FreeChunk
*)((addr_t
)AllocatedAddress() + splitSize
);
// newSize = bytes this (lower) chunk keeps, header included.
193 size_t newSize
= (addr_t
)chunk
- (addr_t
)this;
// The upper chunk receives whatever remains of the original extent.
194 chunk
->fSize
= fSize
- newSize
;
203 /*! Checks if the specified chunk touches this chunk, so
204 that they could be joined.
// NOTE(review): the first line of the return expression (presumably the
// NULL guard "return chunk") was lost in extraction.
207 FreeChunk::IsTouching(FreeChunk
* chunk
)
// True when "chunk" starts exactly where this chunk ends ...
210 && (((uint8
*)this + fSize
== (uint8
*)chunk
)
// ... or when this chunk starts exactly where "chunk" ends.
211 || (uint8
*)chunk
+ chunk
->fSize
== (uint8
*)this);
215 /*! Joins the chunk to this chunk and returns the pointer
216 to the new chunk - which will either be one of the
218 Note, the chunks must be joinable, or else this method
219 doesn't work correctly. Use FreeChunk::IsTouching()
220 to check if this method can be applied.
// NOTE(review): the branch structure was lost in extraction — presumably an
// address comparison selects which of the two blocks below runs, and each
// branch returns the surviving (lower-addressed) chunk. Confirm against the
// full source.
223 FreeChunk::Join(FreeChunk
* chunk
)
// Branch where "chunk" is the lower neighbour: it absorbs this chunk's
// size and takes over this chunk's list link.
226 chunk
->fSize
+= fSize
;
227 chunk
->fNext
= fNext
;
// Branch where this chunk is the lower neighbour: it absorbs "chunk".
232 fSize
+= chunk
->fSize
;
233 fNext
= chunk
->fNext
;
240 FreeChunk::AllocatedAddress() const
242 return (void*)static_cast<const FreeChunkData
*>(this);
247 FreeChunk::SetToAllocated(void* allocated
)
249 return static_cast<FreeChunk
*>((FreeChunkData
*)allocated
);
// Creates a new kernel area of "size" bytes and adds it to the heap as one
// big free chunk.
// NOTE(review): the return-type line, the declaration of "base", the error
// check on "area", the chunk->SetTo(size) initialization and the final
// return were lost in extraction — restore from the full source.
257 add_area(size_t size
)
// Ask the kernel for a readable/writable, unlocked area at a randomized
// address.
260 area_id area
= _kern_create_area("rld heap", &base
,
261 B_RANDOMIZED_ANY_ADDRESS
, size
, B_NO_LOCK
, B_READ_AREA
| B_WRITE_AREA
);
265 // declare the whole area as one chunk, and add it to the free tree
266 FreeChunk
* chunk
= (FreeChunk
*)base
;
268 sFreeChunkTree
.Insert(chunk
);
// Account the new chunk's usable bytes as available.
270 sAvailable
+= chunk
->Size();
276 grow_heap(size_t bytes
)
278 return add_area(align(align(sizeof(Chunk
)) + bytes
, kHeapGrowthAlignment
));
// heap_init() body fragment: bootstrap the heap with one area of
// kInitialHeapSize bytes.
// NOTE(review): the function's signature lines were lost in extraction;
// only this body line survives.
288 return add_area(kInitialHeapSize
);
// Debug dump fragment: walks every free chunk in tree order and prints its
// address, usable size, end address and list successor.
// NOTE(review): the enclosing function's signature and the printf's final
// argument line were lost in extraction.
296 FreeChunk
* chunk
= sFreeChunkTree
.FindMin();
// FindMin() yields the smallest chunk; Next() advances through the list.
297 while (chunk
!= NULL
) {
298 printf("\t%p: chunk size = %ld, end = %p, next = %p\n", chunk
,
299 chunk
->Size(), (uint8
*)chunk
+ chunk
->CompleteSize(),
301 chunk
= chunk
->Next();
// malloc() body fragment.
// NOTE(review): the function signature, the zero-size guard, the
// "size = align(size)" rounding step, the early NULL returns after failed
// growth/lookup, and several braces were lost in extraction — restore from
// the full source.
313 // align the size requirement to a kAlignment bytes boundary
// Never allocate less than sizeof(FreeChunkData): presumably the block must
// be able to hold the free-list links again once freed — confirm.
314 if (size
< sizeof(FreeChunkData
))
315 size
= sizeof(FreeChunkData
);
// Cheap aggregate check: if even the total free byte count is too small,
// grow the heap before searching.
318 if (size
> sAvailable
) {
319 // try to enlarge heap
320 if (grow_heap(size
) != B_OK
)
// Closest-fit lookup in the size-ordered tree — presumably the smallest
// chunk with Size() >= size; verify FindClosest() semantics in SplayTree.h.
324 FreeChunkKey
key(size
);
325 FreeChunk
* chunk
= sFreeChunkTree
.FindClosest(key
, true, true);
327 // could not find a free chunk as large as needed
// sAvailable was only large enough in aggregate — grow once and retry.
328 if (grow_heap(size
) != B_OK
)
331 chunk
= sFreeChunkTree
.FindClosest(key
, true, true);
333 TRACE(("no allocation chunk found after growing the heap\n"));
// Take the chosen chunk out of the free tree and the accounting.
338 sFreeChunkTree
.Remove(chunk
);
339 sAvailable
-= chunk
->Size();
341 void* allocatedAddress
= chunk
->AllocatedAddress();
343 // If this chunk is bigger than the requested size and there's enough space
344 // left over for a new chunk, we split it.
345 if (chunk
->Size() >= size
+ align(sizeof(FreeChunk
))) {
// Return the remainder to the free tree and the accounting.
346 FreeChunk
* freeChunk
= chunk
->Split(size
);
347 sFreeChunkTree
.Insert(freeChunk
);
348 sAvailable
+= freeChunk
->Size();
351 TRACE(("malloc(%lu) -> %p\n", size
, allocatedAddress
));
352 return allocatedAddress
;
// realloc() body fragment.
// NOTE(review): the return-type line, the newSize == 0 branch around the
// first TRACE (presumably freeing oldBuffer and returning NULL), the
// declaration of "oldSize", and the return statements were lost in
// extraction — restore from the full source.
357 realloc(void* oldBuffer
, size_t newSize
)
// Part of the dropped newSize == 0 branch: trace before bailing out.
360 TRACE(("realloc(%p, %lu) -> NULL\n", oldBuffer
, newSize
));
// With an existing buffer, recover its chunk to learn the usable size.
366 if (oldBuffer
!= NULL
) {
367 FreeChunk
* oldChunk
= FreeChunk::SetToAllocated(oldBuffer
);
368 oldSize
= oldChunk
->Size();
370 // Check if the old buffer still fits, and if it makes sense to keep it.
// Keep the old block when it is big enough and not wastefully oversized
// (small blocks always kept; large ones only if over a third is used).
371 if (oldSize
>= newSize
372 && (oldSize
< 128 || newSize
> oldSize
/ 3)) {
373 TRACE(("realloc(%p, %lu) old buffer is large enough\n",
374 oldBuffer
, newSize
));
// Otherwise allocate fresh storage of the requested size.
379 void* newBuffer
= malloc(newSize
);
380 if (newBuffer
== NULL
)
// Copy over the still-valid prefix and release the old block.
383 if (oldBuffer
!= NULL
) {
384 memcpy(newBuffer
, oldBuffer
, std::min(oldSize
, newSize
));
388 TRACE(("realloc(%p, %lu) -> %p\n", oldBuffer
, newSize
, newBuffer
));
// free() body fragment.
// NOTE(review): the return-type line, the early "return" for NULL input,
// the "joinCount" declaration, the loop header iterating the free chunks,
// the "chunk = nextChunk" advance, and several braces were lost in
// extraction — restore from the full source.
394 free(void* allocated
)
// free(NULL) is a no-op (guard's return statement missing here).
396 if (allocated
== NULL
)
399 TRACE(("free(%p)\n", allocated
));
// Recover the chunk metadata that precedes the payload address.
402 FreeChunk
* freedChunk
= FreeChunk::SetToAllocated(allocated
);
404 // try to join the new free chunk with an existing one
405 // it may be joined with up to two chunks
407 FreeChunk
* chunk
= sFreeChunkTree
.FindMin();
// Remember the successor before possibly removing "chunk" from the tree.
411 FreeChunk
* nextChunk
= chunk
->Next();
// Coalesce with any directly adjacent free chunk.
413 if (chunk
->IsTouching(freedChunk
)) {
414 sFreeChunkTree
.Remove(chunk
);
415 sAvailable
-= chunk
->Size();
417 freedChunk
= chunk
->Join(freedChunk
);
// A chunk can have at most two free neighbours; stop after both merged.
419 if (++joinCount
== 2)
// Finally re-insert the (possibly merged) chunk and update accounting.
426 sFreeChunkTree
.Insert(freedChunk
);
427 sAvailable
+= freedChunk
->Size();