haiku.git: src/system/libroot/posix/malloc/arch-specific.cpp
///-*-C++-*-//////////////////////////////////////////////////////////////////
//
// Hoard: A Fast, Scalable, and Memory-Efficient Allocator
//        for Shared-Memory Multiprocessors
// Contact author: Emery Berger, http://www.cs.utexas.edu/users/emery
//
// Copyright (c) 1998-2000, The University of Texas at Austin.
//
// This library is free software; you can redistribute it and/or modify
// it under the terms of the GNU Library General Public License as
// published by the Free Software Foundation, http://www.fsf.org.
//
// This library is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Library General Public License for more details.
//
//////////////////////////////////////////////////////////////////////////////

#include "arch-specific.h"
#include "heap.h"

#include <OS.h>
#include <Debug.h>

#include <syscalls.h>

#include <libroot_private.h>

#include <stdlib.h>
#include <unistd.h>

//#define TRACE_CHUNKS
#ifdef TRACE_CHUNKS
#    define CTRACE(x) debug_printf x
#else
#    define CTRACE(x) ;
#endif

using namespace BPrivate;

struct free_chunk {
    free_chunk  *next;
    size_t      size;
};
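
// Ranges returned through hoardUnsbrk() are kept in a singly linked free
// list; the free_chunk header is stored in the freed memory itself, and
// insert_chunk() keeps the list sorted by ascending chunk size.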

static const size_t kInitialHeapSize = 64 * B_PAGE_SIZE;
    // that's about what hoard allocates anyway (should be kHeapIncrement
    // aligned)

static const size_t kHeapIncrement = 16 * B_PAGE_SIZE;
    // the steps in which to increase the heap size (must be a power of 2)

#if B_HAIKU_64_BIT
static const addr_t kHeapReservationBase = 0x1000000000;
static const addr_t kHeapReservationSize = 0x1000000000;
#else
static const addr_t kHeapReservationBase = 0x18000000;
static const addr_t kHeapReservationSize = 0x48000000;
#endif
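
// sHeapBase points to the reserved address range (NULL if reserving it
// failed); sHeapArea, sFreeHeapBase and sHeapAreaSize describe the area
// currently backing the heap; sFreeHeapSize is how much of it has been handed
// out; sFreeChunks heads the free list. All of this is guarded by sHeapLock.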

static area_id sHeapArea;
static hoardLockType sHeapLock;
static void *sHeapBase;
static addr_t sFreeHeapBase;
static size_t sFreeHeapSize, sHeapAreaSize;
static free_chunk *sFreeChunks;
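
// __init_after_fork() runs in the fork()ed child, where the copied heap area
// has an area ID of its own, so the area is looked up again by its base
// address.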

void
__init_after_fork(void)
{
    // find the heap area
    sHeapArea = area_for((void*)sFreeHeapBase);
    if (sHeapArea < 0) {
        // Where has it gone?
        debug_printf("hoard: init_after_fork(): thread %" B_PRId32 ", Heap "
            "area not found! Base address: %p\n", find_thread(NULL),
            sHeapBase);
        exit(1);
    }
}
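
// __init_heap() sets up the allocator's backing store: it reserves a large
// address range so the heap can later grow contiguously, then creates the
// first, small "heap" area inside it. If reserving the range fails, the area
// is simply created at a randomized address instead.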

extern "C" status_t
__init_heap(void)
{
    hoardHeap::initNumProcs();

    // This locates the heap base at kHeapReservationBase and reserves
    // kHeapReservationSize bytes for it (384 MB and 1152 MB on 32-bit).
    // Parts of the reservation may still be reclaimed by other areas, but
    // until the space is actually needed elsewhere, the heap is guaranteed
    // room to grow to that maximum size.
    sHeapBase = (void *)kHeapReservationBase;
    status_t status = _kern_reserve_address_range((addr_t *)&sHeapBase,
        B_RANDOMIZED_BASE_ADDRESS, kHeapReservationSize);
    if (status != B_OK)
        sHeapBase = NULL;

    uint32 protection = B_READ_AREA | B_WRITE_AREA;
    if (__gABIVersion < B_HAIKU_ABI_GCC_2_HAIKU)
        protection |= B_EXECUTE_AREA;
    sHeapArea = create_area("heap", (void **)&sHeapBase,
        status == B_OK ? B_EXACT_ADDRESS : B_RANDOMIZED_BASE_ADDRESS,
        kInitialHeapSize, B_NO_LOCK, protection);
    if (sHeapArea < B_OK)
        return sHeapArea;

    sFreeHeapBase = (addr_t)sHeapBase;
    sHeapAreaSize = kInitialHeapSize;

    hoardLockInit(sHeapLock, "heap");

    return B_OK;
}

extern "C" void
__heap_terminate_after()
{
    // nothing to do
}
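
// insert_chunk() links newChunk into sFreeChunks, keeping the free list
// sorted by ascending chunk size.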

static void
insert_chunk(free_chunk *newChunk)
{
    free_chunk *chunk = (free_chunk *)sFreeChunks, *smaller = NULL;
    for (; chunk != NULL; chunk = chunk->next) {
        if (chunk->size < newChunk->size)
            smaller = chunk;
        else
            break;
    }

    if (smaller) {
        newChunk->next = smaller->next;
        smaller->next = newChunk;
    } else {
        newChunk->next = sFreeChunks;
        sFreeChunks = newChunk;
    }
}

namespace BPrivate {
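
// hoardSbrk() grows the heap on behalf of hoard. A request is served, in this
// order, from the free chunk list, from the unused tail of the current heap
// area, by resizing that area, and, failing all that, by creating a new heap
// area (preferably right behind the old one, inside the reserved range).
//
// A rough usage sketch (hypothetical caller, for illustration only):
//
//    void *memory = BPrivate::hoardSbrk(64 * 1024);
//    if (memory != NULL) {
//        // ... carve allocations out of the 64 KB ...
//        BPrivate::hoardUnsbrk(memory, 64 * 1024);
//    }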

void *
hoardSbrk(long size)
{
    assert(size > 0);
    CTRACE(("sbrk: size = %ld\n", size));

    // align size request
    size = (size + hoardHeap::ALIGNMENT - 1) & ~(hoardHeap::ALIGNMENT - 1);

    // choose correct protection flags
    uint32 protection = B_READ_AREA | B_WRITE_AREA;
    if (__gABIVersion < B_HAIKU_ABI_GCC_2_HAIKU)
        protection |= B_EXECUTE_AREA;

    hoardLock(sHeapLock);

    // find chunk in free list
    free_chunk *chunk = sFreeChunks, *last = NULL;
    for (; chunk != NULL; chunk = chunk->next) {
        CTRACE(("  chunk %p (%ld)\n", chunk, chunk->size));

        if (chunk->size < (size_t)size) {
            last = chunk;
            continue;
        }

        // this chunk is large enough to satisfy the request

        SERIAL_PRINT(("HEAP-%ld: found free chunk to hold %ld bytes\n",
            find_thread(NULL), size));

        void *address = (void *)chunk;

        if (chunk->size > (size_t)size + sizeof(free_chunk)) {
            // divide this chunk into smaller bits
            size_t newSize = chunk->size - size;
            free_chunk *next = chunk->next;

            chunk = (free_chunk *)((addr_t)chunk + size);
            chunk->next = next;
            chunk->size = newSize;

            if (last != NULL) {
                last->next = next;
                insert_chunk(chunk);
            } else
                sFreeChunks = chunk;
        } else {
            chunk = chunk->next;

            if (last != NULL)
                last->next = chunk;
            else
                sFreeChunks = chunk;
        }

        hoardUnlock(sHeapLock);
        return address;
    }

    // There was no chunk, let's see if the area is large enough

    size_t oldHeapSize = sFreeHeapSize;
    sFreeHeapSize += size;

    // round to next heap increment aligned size
    size_t incrementAlignedSize = (sFreeHeapSize + kHeapIncrement - 1)
        & ~(kHeapIncrement - 1);

    if (incrementAlignedSize <= sHeapAreaSize) {
        SERIAL_PRINT(("HEAP-%ld: heap area large enough for %ld\n",
            find_thread(NULL), size));
        // the area is large enough already
        hoardUnlock(sHeapLock);
        return (void *)(sFreeHeapBase + oldHeapSize);
    }

    // We need to grow the area

    SERIAL_PRINT(("HEAP-%ld: need to resize heap area to %ld (%ld requested)\n",
        find_thread(NULL), incrementAlignedSize, size));

    status_t status = resize_area(sHeapArea, incrementAlignedSize);
    if (status != B_OK) {
        // Either the system is out of memory or another area is in the way and
        // prevents ours from being resized. As a special case of the latter
        // the user might have mmap()ed something over malloc()ed memory. This
        // splits the heap area in two, the first one retaining the original
        // area ID. In either case, if there's still memory, it is a good idea
        // to try and allocate a new area.
        sFreeHeapSize = oldHeapSize;

        if (status == B_NO_MEMORY) {
            hoardUnlock(sHeapLock);
            return NULL;
        }

        size_t newHeapSize = (size + kHeapIncrement - 1) / kHeapIncrement
            * kHeapIncrement;

        // First try at the location directly after the current heap area, if
        // that is still in the reserved memory region.
        void* base = (void*)(sFreeHeapBase + sHeapAreaSize);
        area_id area = -1;
        if (sHeapBase != NULL
            && base >= sHeapBase
            && (addr_t)base + newHeapSize
                <= (addr_t)sHeapBase + kHeapReservationSize) {
            area = create_area("heap", &base, B_EXACT_ADDRESS, newHeapSize,
                B_NO_LOCK, protection);

            if (area == B_NO_MEMORY) {
                hoardUnlock(sHeapLock);
                return NULL;
            }
        }

        // If we don't have an area yet, try again with a free location
        // allocation.
        if (area < 0) {
            base = (void*)(sFreeHeapBase + sHeapAreaSize);
            area = create_area("heap", &base, B_RANDOMIZED_BASE_ADDRESS,
                newHeapSize, B_NO_LOCK, protection);
        }

        if (area < 0) {
            hoardUnlock(sHeapLock);
            return NULL;
        }

        // We have a new area, so make it the new heap area.
        sHeapArea = area;
        sFreeHeapBase = (addr_t)base;
        sHeapAreaSize = newHeapSize;
        sFreeHeapSize = size;
        oldHeapSize = 0;
    } else
        sHeapAreaSize = incrementAlignedSize;

    hoardUnlock(sHeapLock);
    return (void *)(sFreeHeapBase + oldHeapSize);
}
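
// hoardUnsbrk() gives a range back to the allocator. If it borders an
// existing free chunk the two are merged, otherwise it becomes a chunk of its
// own; either way the memory stays mapped and is only recycled, never
// returned to the system here.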

void
hoardUnsbrk(void *ptr, long size)
{
    CTRACE(("unsbrk: %p, %ld!\n", ptr, size));

    hoardLock(sHeapLock);

    // TODO: hoard always allocates and frees in typical sizes, so we could
    // save a lot of effort if we just had a similar mechanism

    // We add this chunk to our free list - first, try to find an adjacent
    // chunk, so that we can merge them together

    free_chunk *chunk = (free_chunk *)sFreeChunks, *last = NULL, *smaller = NULL;
    for (; chunk != NULL; chunk = chunk->next) {
        if ((addr_t)chunk + chunk->size == (addr_t)ptr
            || (addr_t)ptr + size == (addr_t)chunk) {
            // chunks are adjacent - merge them

            CTRACE(("  found adjacent chunks: %p, %ld\n", chunk, chunk->size));
            if (last)
                last->next = chunk->next;
            else
                sFreeChunks = chunk->next;

            if ((addr_t)chunk < (addr_t)ptr)
                chunk->size += size;
            else {
                free_chunk *newChunk = (free_chunk *)ptr;
                newChunk->next = chunk->next;
                newChunk->size = size + chunk->size;
                chunk = newChunk;
            }

            insert_chunk(chunk);
            hoardUnlock(sHeapLock);
            return;
        }

        last = chunk;

        if (chunk->size < (size_t)size)
            smaller = chunk;
    }

    // we didn't find an adjacent chunk, so insert the new chunk into the list

    free_chunk *newChunk = (free_chunk *)ptr;
    newChunk->size = size;
    if (smaller) {
        newChunk->next = smaller->next;
        smaller->next = newChunk;
    } else {
        newChunk->next = sFreeChunks;
        sFreeChunks = newChunk;
    }

    hoardUnlock(sHeapLock);
}
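
// The hoard locking hooks below map directly onto libroot's adaptive mutexes,
// and the yield hook onto the kernel's thread yield syscall.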

void
hoardLockInit(hoardLockType &lock, const char *name)
{
    mutex_init_etc(&lock, name, MUTEX_FLAG_ADAPTIVE);
}


void
hoardLock(hoardLockType &lock)
{
    mutex_lock(&lock);
}


void
hoardUnlock(hoardLockType &lock)
{
    mutex_unlock(&lock);
}


void
hoardYield(void)
{
    _kern_thread_yield();
}

}   // namespace BPrivate