///-*-C++-*-//////////////////////////////////////////////////////////////////
//
// Hoard: A Fast, Scalable, and Memory-Efficient Allocator
//        for Shared-Memory Multiprocessors
// Contact author: Emery Berger, http://www.cs.utexas.edu/users/emery
//
// Copyright (c) 1998-2000, The University of Texas at Austin.
//
// This library is free software; you can redistribute it and/or modify
// it under the terms of the GNU Library General Public License as
// published by the Free Software Foundation, http://www.fsf.org.
//
// This library is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// Library General Public License for more details.
//
//////////////////////////////////////////////////////////////////////////////

#include "arch-specific.h"
#include "heap.h"

#include <OS.h>
#include <Debug.h>
#include <syscalls.h>

#include <libroot_private.h>

#include <stdlib.h>
#include <unistd.h>

//#define TRACE_CHUNKS
#ifdef TRACE_CHUNKS
#	define CTRACE(x) debug_printf x
#else
#	define CTRACE(x) ;
#endif

using namespace BPrivate;


// A free chunk lives directly in the freed memory itself: this header
// doubles as the list link.
struct free_chunk {
	free_chunk	*next;
	size_t		size;
};

static const size_t kInitialHeapSize = 64 * B_PAGE_SIZE;
	// that's about what hoard allocates anyway (should be kHeapIncrement
	// aligned)

static const size_t kHeapIncrement = 16 * B_PAGE_SIZE;
	// the steps in which to increase the heap size (must be a power of 2)

#ifdef B_HAIKU_64_BIT
static const addr_t kHeapReservationBase = 0x1000000000;
static const addr_t kHeapReservationSize = 0x1000000000;
#else
static const addr_t kHeapReservationBase = 0x18000000;
static const addr_t kHeapReservationSize = 0x48000000;
#endif
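
// Note: on 32-bit targets the reservation places the heap at 0x18000000
// (384 MB) and reserves the 0x48000000 bytes (1152 MB) after it, matching
// the comment in the init function below; the 64-bit values put both the
// base and the reservation size at 64 GB.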

static area_id sHeapArea;
static hoardLockType sHeapLock;
static void *sHeapBase;
static addr_t sFreeHeapBase;
static size_t sFreeHeapSize, sHeapAreaSize;
static free_chunk *sFreeChunks;
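
// sFreeChunks is a singly linked list of free chunks kept sorted by
// ascending size (see insert_chunk() below); hoardSbrk() returns the first
// chunk that is large enough, which in a size-sorted list is also the
// best fit.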


void
__init_after_fork(void)
{
	// find the heap area
	sHeapArea = area_for((void*)sFreeHeapBase);
	if (sHeapArea < 0) {
		// Where is it gone?
		debug_printf("hoard: init_after_fork(): thread %" B_PRId32 ", Heap "
			"area not found! Base address: %p\n", find_thread(NULL),
			(void*)sFreeHeapBase);
		exit(1);
	}
}


extern "C" status_t
__init_heap(void)
{
	hoardHeap::initNumProcs();

	// This will locate the heap base at 384 MB and reserve the next 1152 MB
	// for it. They may get reclaimed by other areas, though, but the maximum
	// size of the heap is guaranteed until the space is really needed.
	sHeapBase = (void *)kHeapReservationBase;
	status_t status = _kern_reserve_address_range((addr_t *)&sHeapBase,
		B_RANDOMIZED_BASE_ADDRESS, kHeapReservationSize);
	if (status != B_OK)
		sHeapBase = NULL;

	uint32 protection = B_READ_AREA | B_WRITE_AREA;
	if (__gABIVersion < B_HAIKU_ABI_GCC_2_HAIKU)
		protection |= B_EXECUTE_AREA;
	sHeapArea = create_area("heap", (void **)&sHeapBase,
		status == B_OK ? B_EXACT_ADDRESS : B_RANDOMIZED_BASE_ADDRESS,
		kInitialHeapSize, B_NO_LOCK, protection);
	if (sHeapArea < B_OK)
		return sHeapArea;

	sFreeHeapBase = (addr_t)sHeapBase;
	sHeapAreaSize = kInitialHeapSize;

	hoardLockInit(sHeapLock, "heap");

	return B_OK;
}


extern "C" void
__heap_terminate_after()
{
	// nothing to do
}


static void
insert_chunk(free_chunk *newChunk)
{
	// find the last chunk that is smaller than the new one
	free_chunk *chunk = (free_chunk *)sFreeChunks, *smaller = NULL;
	for (; chunk != NULL; chunk = chunk->next) {
		if (chunk->size < newChunk->size)
			smaller = chunk;
	}

	if (smaller != NULL) {
		newChunk->next = smaller->next;
		smaller->next = newChunk;
	} else {
		newChunk->next = sFreeChunks;
		sFreeChunks = newChunk;
	}
}


namespace BPrivate {

void *
hoardSbrk(long size)
{
	assert(size > 0);
	CTRACE(("sbrk: size = %ld\n", size));

	// align size request
	size = (size + hoardHeap::ALIGNMENT - 1) & ~(hoardHeap::ALIGNMENT - 1);
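
	// For illustration: if hoardHeap::ALIGNMENT is 8, a 13-byte request is
	// rounded up to 16, while already aligned requests pass through
	// unchanged; the mask trick requires ALIGNMENT to be a power of two.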

	// choose correct protection flags
	uint32 protection = B_READ_AREA | B_WRITE_AREA;
	if (__gABIVersion < B_HAIKU_ABI_GCC_2_HAIKU)
		protection |= B_EXECUTE_AREA;

	hoardLock(sHeapLock);

	// find chunk in free list
	free_chunk *chunk = sFreeChunks, *last = NULL;
	for (; chunk != NULL; chunk = chunk->next) {
		CTRACE(("  chunk %p (%ld)\n", chunk, chunk->size));

		if (chunk->size < (size_t)size) {
			// too small - remember it and continue with the next one
			last = chunk;
			continue;
		}

		// this chunk is large enough to satisfy the request

		SERIAL_PRINT(("HEAP-%ld: found free chunk to hold %ld bytes\n",
			find_thread(NULL), size));

		void *address = (void *)chunk;

		if (chunk->size > (size_t)size + sizeof(free_chunk)) {
			// divide this chunk into smaller bits
			size_t newSize = chunk->size - size;
			free_chunk *next = chunk->next;

			chunk = (free_chunk *)((addr_t)chunk + size);
			chunk->next = next;
			chunk->size = newSize;

			if (last != NULL)
				last->next = chunk;
			else
				sFreeChunks = chunk;
		} else {
			// take the whole chunk and unlink it from the list
			if (last != NULL)
				last->next = chunk->next;
			else
				sFreeChunks = chunk->next;
		}

		hoardUnlock(sHeapLock);
		return address;
	}
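
	// In the split case above the returned address is the start of the old
	// chunk and the remainder becomes the new, smaller list node; the
	// size + sizeof(free_chunk) test guarantees that this remainder can
	// still hold a free_chunk header.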

	// There was no chunk, let's see if the area is large enough

	size_t oldHeapSize = sFreeHeapSize;
	sFreeHeapSize += size;

	// round to next heap increment aligned size
	size_t incrementAlignedSize = (sFreeHeapSize + kHeapIncrement - 1)
		& ~(kHeapIncrement - 1);
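
	// For illustration, assuming 4 KB pages: kHeapIncrement is then 64 KB,
	// so a sFreeHeapSize of 70000 bytes rounds up to 131072 (2 * 64 KB);
	// the heap area only ever grows in whole increments.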

	if (incrementAlignedSize <= sHeapAreaSize) {
		SERIAL_PRINT(("HEAP-%ld: heap area large enough for %ld\n",
			find_thread(NULL), size));
		// the area is large enough already
		hoardUnlock(sHeapLock);
		return (void *)(sFreeHeapBase + oldHeapSize);
	}

	// We need to grow the area

	SERIAL_PRINT(("HEAP-%ld: need to resize heap area to %ld (%ld requested)\n",
		find_thread(NULL), incrementAlignedSize, size));

	status_t status = resize_area(sHeapArea, incrementAlignedSize);
	if (status != B_OK) {
		// Either the system is out of memory or another area is in the way and
		// prevents ours from being resized. As a special case of the latter
		// the user might have mmap()ed something over malloc()ed memory. This
		// splits the heap area in two, the first one retaining the original
		// area ID. In either case, if there's still memory, it is a good idea
		// to try and allocate a new area.
		sFreeHeapSize = oldHeapSize;

		if (status == B_NO_MEMORY) {
			hoardUnlock(sHeapLock);
			return NULL;
		}

		size_t newHeapSize = (size + kHeapIncrement - 1) / kHeapIncrement
			* kHeapIncrement;

		// First try at the location directly after the current heap area, if
		// that is still in the reserved memory region.
		area_id area = -1;
		void* base = (void*)(sFreeHeapBase + sHeapAreaSize);
		if (sHeapBase != NULL && (addr_t)base >= (addr_t)sHeapBase
			&& (addr_t)base + newHeapSize
				<= (addr_t)sHeapBase + kHeapReservationSize) {
			area = create_area("heap", &base, B_EXACT_ADDRESS, newHeapSize,
				B_NO_LOCK, protection);

			if (area == B_NO_MEMORY) {
				hoardUnlock(sHeapLock);
				return NULL;
			}
		}

		// If we don't have an area yet, try again with a free location
		if (area < 0) {
			base = (void*)(sFreeHeapBase + sHeapAreaSize);
			area = create_area("heap", &base, B_RANDOMIZED_BASE_ADDRESS,
				newHeapSize, B_NO_LOCK, protection);
		}

		if (area < 0) {
			hoardUnlock(sHeapLock);
			return NULL;
		}

		// We have a new area, so make it the new heap area.
		sHeapArea = area;
		sFreeHeapBase = (addr_t)base;
		sHeapAreaSize = newHeapSize;
		sFreeHeapSize = size;
		oldHeapSize = 0;
	} else
		sHeapAreaSize = incrementAlignedSize;

	hoardUnlock(sHeapLock);
	return (void *)(sFreeHeapBase + oldHeapSize);
}


void
hoardUnsbrk(void *ptr, long size)
{
	CTRACE(("unsbrk: %p, %ld!\n", ptr, size));

	hoardLock(sHeapLock);

	// TODO: hoard always allocates and frees in typical sizes, so we could
	//	save a lot of effort if we just had a similar mechanism

	// We add this chunk to our free list - first, try to find an adjacent
	// chunk, so that we can merge them together

	free_chunk *chunk = (free_chunk *)sFreeChunks, *last = NULL,
		*smaller = NULL;
	for (; chunk != NULL; chunk = chunk->next) {
		if ((addr_t)chunk + chunk->size == (addr_t)ptr
			|| (addr_t)ptr + size == (addr_t)chunk) {
			// chunks are adjacent - merge them

			CTRACE(("  found adjacent chunks: %p, %ld\n", chunk, chunk->size));

			// unlink the existing chunk; it is reinserted after the merge
			if (last != NULL)
				last->next = chunk->next;
			else
				sFreeChunks = chunk->next;

			if ((addr_t)chunk < (addr_t)ptr)
				chunk->size += size;
			else {
				free_chunk *newChunk = (free_chunk *)ptr;
				newChunk->next = chunk->next;
				newChunk->size = size + chunk->size;
				chunk = newChunk;
			}

			insert_chunk(chunk);

			hoardUnlock(sHeapLock);
			return;
		}

		last = chunk;
		if (chunk->size < (size_t)size)
			smaller = chunk;
	}
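
	// Merging handles both orientations: if the existing chunk immediately
	// precedes ptr, its size is extended in place; if ptr immediately
	// precedes the chunk, a new header is written at ptr that covers both.
	// The merged chunk is then re-sorted into the list by insert_chunk().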

	// we didn't find an adjacent chunk, so insert the new chunk into the list

	free_chunk *newChunk = (free_chunk *)ptr;
	newChunk->size = size;
	if (smaller != NULL) {
		newChunk->next = smaller->next;
		smaller->next = newChunk;
	} else {
		newChunk->next = sFreeChunks;
		sFreeChunks = newChunk;
	}

	hoardUnlock(sHeapLock);
}
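
// Note that hoardUnsbrk() only returns memory to the allocator's own free
// list; the underlying heap area is never shrunk or deleted, so freed
// chunks simply remain available for later hoardSbrk() calls.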


void
hoardLockInit(hoardLockType &lock, const char *name)
{
	mutex_init_etc(&lock, name, MUTEX_FLAG_ADAPTIVE);
}


void
hoardLock(hoardLockType &lock)
{
	mutex_lock(&lock);
}


void
hoardUnlock(hoardLockType &lock)
{
	mutex_unlock(&lock);
}


void
hoardYield(void)
{
	_kern_thread_yield();
}
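
// hoardYield() simply asks the kernel to reschedule, giving other threads -
// for instance the current holder of a contended lock - a chance to run.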

}	// namespace BPrivate