/*
 * Copyright 2010, Ingo Weinhold <ingo_weinhold@gmx.de>.
 * Copyright 2007, Hugo Santos. All Rights Reserved.
 * Distributed under the terms of the MIT License.
 */
8 #include "slab_private.h"
17 #include <kernel.h> // for ROUNDUP
20 #include <vm/VMAddressSpace.h>
22 #include "ObjectCache.h"
23 #include "MemoryManager.h"
26 #define DEBUG_ALLOCATOR
27 //#define TEST_ALL_CACHES_DURING_BOOT
// Object-cache-backed allocation sizes, ascending, 0-terminated. The
// terminating 0 is required: the init/test loops iterate with
// `kBlockSizes[index] != 0` as their stop condition.
static const size_t kBlockSizes[] = {
	16, 24, 32, 48, 64, 80, 96, 112,
	128, 160, 192, 224, 256, 320, 384, 448,
	512, 640, 768, 896, 1024, 1280, 1536, 1792,
	2048, 2560, 3072, 3584, 4096, 4608, 5120, 5632,
	6144, 6656, 7168, 7680, 8192,
	0
};

// Number of real block sizes, i.e. excluding the terminating 0.
static const size_t kNumBlockSizes = sizeof(kBlockSizes) / sizeof(size_t) - 1;
40 static object_cache
* sBlockCaches
[kNumBlockSizes
];
42 static addr_t sBootStrapMemory
= 0;
43 static size_t sBootStrapMemorySize
= 0;
44 static size_t sUsedBootStrapMemory
= 0;
47 RANGE_MARKER_FUNCTION_BEGIN(slab_allocator
)
/*!	Maps an allocation \a size to the index of the smallest block cache
	(in kBlockSizes) whose object size is >= \a size.
	Returns -1 if \a size exceeds the largest cached block size (8192),
	i.e. the allocation must go to the memory manager directly.
	The per-range formulas compute, within each size class, the offset of
	the first kBlockSizes entry >= size (e.g. sizes 17..32 map to
	indices 1..2 in steps of 8).
*/
static inline int
size_to_index(size_t size)
{
	if (size <= 16)
		return 0;
	if (size <= 32)
		return 1 + (size - 16 - 1) / 8;
	if (size <= 128)
		return 3 + (size - 32 - 1) / 16;
	if (size <= 256)
		return 9 + (size - 128 - 1) / 32;
	if (size <= 512)
		return 13 + (size - 256 - 1) / 64;
	if (size <= 1024)
		return 17 + (size - 512 - 1) / 128;
	if (size <= 2048)
		return 21 + (size - 1024 - 1) / 256;
	if (size <= 8192)
		return 25 + (size - 2048 - 1) / 512;

	return -1;
}
75 block_alloc(size_t size
, size_t alignment
, uint32 flags
)
77 if (alignment
> kMinObjectAlignment
) {
78 // Make size >= alignment and a power of two. This is sufficient, since
79 // all of our object caches with power of two sizes are aligned. We may
80 // waste quite a bit of memory, but memalign() is very rarely used
81 // in the kernel and always with power of two size == alignment anyway.
82 ASSERT((alignment
& (alignment
- 1)) == 0);
83 while (alignment
< size
)
87 // If we're not using an object cache, make sure that the memory
88 // manager knows it has to align the allocation.
89 if (size
> kBlockSizes
[kNumBlockSizes
])
90 flags
|= CACHE_ALIGN_ON_SIZE
;
93 // allocate from the respective object cache, if any
94 int index
= size_to_index(size
);
96 return object_cache_alloc(sBlockCaches
[index
], flags
);
98 // the allocation is too large for our object caches -- ask the memory
101 if (MemoryManager::AllocateRaw(size
, flags
, block
) != B_OK
)
109 block_alloc_early(size_t size
)
111 int index
= size_to_index(size
);
112 if (index
>= 0 && sBlockCaches
[index
] != NULL
)
113 return object_cache_alloc(sBlockCaches
[index
], CACHE_DURING_BOOT
);
115 if (size
> SLAB_CHUNK_SIZE_SMALL
) {
116 // This is a sufficiently large allocation -- just ask the memory
119 if (MemoryManager::AllocateRaw(size
, 0, block
) != B_OK
)
125 // A small allocation, but no object cache yet. Use the bootstrap memory.
126 // This allocation must never be freed!
127 if (sBootStrapMemorySize
- sUsedBootStrapMemory
< size
) {
128 // We need more memory.
130 if (MemoryManager::AllocateRaw(SLAB_CHUNK_SIZE_SMALL
, 0, block
) != B_OK
)
132 sBootStrapMemory
= (addr_t
)block
;
133 sBootStrapMemorySize
= SLAB_CHUNK_SIZE_SMALL
;
134 sUsedBootStrapMemory
= 0;
137 size_t neededSize
= ROUNDUP(size
, sizeof(double));
138 if (sUsedBootStrapMemory
+ neededSize
> sBootStrapMemorySize
)
140 void* block
= (void*)(sBootStrapMemory
+ sUsedBootStrapMemory
);
141 sUsedBootStrapMemory
+= neededSize
;
148 block_free(void* block
, uint32 flags
)
153 ObjectCache
* cache
= MemoryManager::FreeRawOrReturnCache(block
, flags
);
155 // a regular small allocation
156 ASSERT(cache
->object_size
>= kBlockSizes
[0]);
157 ASSERT(cache
->object_size
<= kBlockSizes
[kNumBlockSizes
- 1]);
158 ASSERT(cache
== sBlockCaches
[size_to_index(cache
->object_size
)]);
159 object_cache_free(cache
, block
, flags
);
165 block_allocator_init_boot()
167 for (int index
= 0; kBlockSizes
[index
] != 0; index
++) {
169 snprintf(name
, sizeof(name
), "block allocator: %lu",
172 uint32 flags
= CACHE_DURING_BOOT
;
173 size_t size
= kBlockSizes
[index
];
175 // align the power of two objects to their size
176 size_t alignment
= (size
& (size
- 1)) == 0 ? size
: 0;
178 // For the larger allocation sizes disable the object depot, so we don't
179 // keep lot's of unused objects around.
181 flags
|= CACHE_NO_DEPOT
;
183 sBlockCaches
[index
] = create_object_cache_etc(name
, size
, alignment
, 0,
184 0, 0, flags
, NULL
, NULL
, NULL
, NULL
);
185 if (sBlockCaches
[index
] == NULL
)
186 panic("allocator: failed to init block cache");
/*!	Late-boot initialization hook. Currently only exercises every block
	cache once when TEST_ALL_CACHES_DURING_BOOT is defined (off by default).
*/
void
block_allocator_init_rest()
{
#ifdef TEST_ALL_CACHES_DURING_BOOT
	// NOTE(review): this dead test path was reconciled with the current
	// block_alloc(size, alignment, flags)/block_free(block, flags)
	// signatures -- verify before enabling the define.
	for (int index = 0; kBlockSizes[index] != 0; index++) {
		block_free(block_alloc(kBlockSizes[index] - sizeof(boundary_tag), 0,
			0), 0);
	}
#endif
}
203 // #pragma mark - public API
206 #if USE_SLAB_ALLOCATOR_FOR_MALLOC
210 memalign(size_t alignment
, size_t size
)
212 return block_alloc(size
, alignment
, 0);
217 memalign_etc(size_t alignment
, size_t size
, uint32 flags
)
219 return block_alloc(size
, alignment
, flags
& CACHE_ALLOC_FLAGS
);
224 free_etc(void *address
, uint32 flags
)
226 block_free(address
, flags
& CACHE_ALLOC_FLAGS
);
233 return block_alloc(size
, 0, 0);
240 block_free(address
, 0);
245 realloc(void* address
, size_t newSize
)
248 block_free(address
, 0);
253 return block_alloc(newSize
, 0, 0);
256 ObjectCache
* cache
= MemoryManager::GetAllocationInfo(address
, oldSize
);
257 if (cache
== NULL
&& oldSize
== 0) {
258 panic("block_realloc(): allocation %p not known", address
);
262 if (oldSize
== newSize
)
265 void* newBlock
= block_alloc(newSize
, 0, 0);
266 if (newBlock
== NULL
)
269 memcpy(newBlock
, address
, std::min(oldSize
, newSize
));
271 block_free(address
, 0);
277 #endif // USE_SLAB_ALLOCATOR_FOR_MALLOC
280 RANGE_MARKER_FUNCTION_END(slab_allocator
)