/*
 * Copyright 2010, Ingo Weinhold <ingo_weinhold@gmx.de>.
 * Copyright 2007, Hugo Santos. All Rights Reserved.
 * Distributed under the terms of the MIT License.
 */


#include "slab_private.h"

#include <stdio.h>
#include <string.h>

#include <algorithm>

#include <debug.h>
#include <heap.h>
#include <kernel.h>	// for ROUNDUP
#include <malloc.h>
#include <vm/vm.h>
#include <vm/VMAddressSpace.h>

#include "ObjectCache.h"
#include "MemoryManager.h"


#define DEBUG_ALLOCATOR
//#define TEST_ALL_CACHES_DURING_BOOT
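
// Object sizes of the block caches. The list is 0-terminated; kNumBlockSizes
// below counts only the real entries (the "- 1" drops the terminator).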
static const size_t kBlockSizes[] = {
	16, 24, 32, 48, 64, 80, 96, 112,
	128, 160, 192, 224, 256, 320, 384, 448,
	512, 640, 768, 896, 1024, 1280, 1536, 1792,
	2048, 2560, 3072, 3584, 4096, 4608, 5120, 5632,
	6144, 6656, 7168, 7680, 8192,
	0
};

static const size_t kNumBlockSizes = sizeof(kBlockSizes) / sizeof(size_t) - 1;
static object_cache* sBlockCaches[kNumBlockSizes];

static addr_t sBootStrapMemory = 0;
static size_t sBootStrapMemorySize = 0;
static size_t sUsedBootStrapMemory = 0;


RANGE_MARKER_FUNCTION_BEGIN(slab_allocator)
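

// Maps an allocation size to an index into kBlockSizes/sBlockCaches; the
// bracket arithmetic picks the smallest cache whose object size fits. For
// example, size_to_index(100) takes the "size <= 128" branch and returns
// 3 + (100 - 32 - 1) / 16 = 7, i.e. the 112 byte cache. Sizes above 8192
// yield -1.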
static int
size_to_index(size_t size)
{
	if (size <= 16)
		return 0;
	if (size <= 32)
		return 1 + (size - 16 - 1) / 8;
	if (size <= 128)
		return 3 + (size - 32 - 1) / 16;
	if (size <= 256)
		return 9 + (size - 128 - 1) / 32;
	if (size <= 512)
		return 13 + (size - 256 - 1) / 64;
	if (size <= 1024)
		return 17 + (size - 512 - 1) / 128;
	if (size <= 2048)
		return 21 + (size - 1024 - 1) / 256;
	if (size <= 8192)
		return 25 + (size - 2048 - 1) / 512;

	return -1;
}
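

// Serves an allocation from the block cache whose object size fits, or asks
// the memory manager directly when the request exceeds the largest cache.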
void*
block_alloc(size_t size, size_t alignment, uint32 flags)
{
	if (alignment > kMinObjectAlignment) {
		// Make size >= alignment and a power of two. This is sufficient, since
		// all of our object caches with power of two sizes are aligned. We may
		// waste quite a bit of memory, but memalign() is very rarely used
		// in the kernel and always with power of two size == alignment anyway.
		ASSERT((alignment & (alignment - 1)) == 0);
		while (alignment < size)
			alignment <<= 1;
		size = alignment;
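		// E.g. for alignment 64 and size 100 the loop doubles alignment to
		// 128, and the allocation is served from the 128 byte cache.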

		// If we're not using an object cache, make sure that the memory
		// manager knows it has to align the allocation. (The comparison is
		// against the largest cache size; indexing with kNumBlockSizes would
		// hit the 0 terminator and always be true.)
		if (size > kBlockSizes[kNumBlockSizes - 1])
			flags |= CACHE_ALIGN_ON_SIZE;
	}

	// allocate from the respective object cache, if any
	int index = size_to_index(size);
	if (index >= 0)
		return object_cache_alloc(sBlockCaches[index], flags);

	// the allocation is too large for our object caches -- ask the memory
	// manager
	void* block;
	if (MemoryManager::AllocateRaw(size, flags, block) != B_OK)
		return NULL;

	return block;
}
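

// Early-boot variant of block_alloc(): used before the block caches have been
// created. Small requests are cut from a bootstrap chunk that is never freed;
// anything larger than SLAB_CHUNK_SIZE_SMALL goes straight to the memory
// manager.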
void*
block_alloc_early(size_t size)
{
	int index = size_to_index(size);
	if (index >= 0 && sBlockCaches[index] != NULL)
		return object_cache_alloc(sBlockCaches[index], CACHE_DURING_BOOT);

	if (size > SLAB_CHUNK_SIZE_SMALL) {
		// This is a sufficiently large allocation -- just ask the memory
		// manager directly.
		void* block;
		if (MemoryManager::AllocateRaw(size, 0, block) != B_OK)
			return NULL;

		return block;
	}

	// A small allocation, but no object cache yet. Use the bootstrap memory.
	// This allocation must never be freed!
	if (sBootStrapMemorySize - sUsedBootStrapMemory < size) {
		// We need more memory.
		void* block;
		if (MemoryManager::AllocateRaw(SLAB_CHUNK_SIZE_SMALL, 0, block) != B_OK)
			return NULL;
		sBootStrapMemory = (addr_t)block;
		sBootStrapMemorySize = SLAB_CHUNK_SIZE_SMALL;
		sUsedBootStrapMemory = 0;
	}
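
	// Rounding to sizeof(double) presumably keeps each bootstrap allocation
	// suitably aligned for the small objects handed out here.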
	size_t neededSize = ROUNDUP(size, sizeof(double));
	if (sUsedBootStrapMemory + neededSize > sBootStrapMemorySize)
		return NULL;
	void* block = (void*)(sBootStrapMemory + sUsedBootStrapMemory);
	sUsedBootStrapMemory += neededSize;

	return block;
}
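

// Releases a block obtained from block_alloc(). Judging from its use here,
// MemoryManager::FreeRawOrReturnCache() frees a raw (large) allocation itself
// and returns NULL, while for a cache-backed block it returns the owning
// ObjectCache so the block can be freed below.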
void
block_free(void* block, uint32 flags)
{
	if (block == NULL)
		return;

	ObjectCache* cache = MemoryManager::FreeRawOrReturnCache(block, flags);
	if (cache != NULL) {
		// a regular small allocation
		ASSERT(cache->object_size >= kBlockSizes[0]);
		ASSERT(cache->object_size <= kBlockSizes[kNumBlockSizes - 1]);
		ASSERT(cache == sBlockCaches[size_to_index(cache->object_size)]);
		object_cache_free(cache, block, flags);
	}
}


void
block_allocator_init_boot()
{
	for (int index = 0; kBlockSizes[index] != 0; index++) {
		char name[32];
		snprintf(name, sizeof(name), "block allocator: %lu",
			kBlockSizes[index]);

		uint32 flags = CACHE_DURING_BOOT;
		size_t size = kBlockSizes[index];

		// align the power of two objects to their size
		size_t alignment = (size & (size - 1)) == 0 ? size : 0;
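		// ((size & (size - 1)) == 0 is the usual power-of-two test: e.g. 64
		// gets alignment 64, while 96 falls back to the default of 0.)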

		// For the larger allocation sizes disable the object depot, so we
		// don't keep lots of unused objects around.
		if (size > 2048)
			flags |= CACHE_NO_DEPOT;

		sBlockCaches[index] = create_object_cache_etc(name, size, alignment, 0,
			0, 0, flags, NULL, NULL, NULL, NULL);
		if (sBlockCaches[index] == NULL)
			panic("allocator: failed to init block cache");
	}
}


void
block_allocator_init_rest()
{
#ifdef TEST_ALL_CACHES_DURING_BOOT
	for (int index = 0; kBlockSizes[index] != 0; index++) {
		block_free(block_alloc(kBlockSizes[index] - sizeof(boundary_tag), 0,
			0), 0);
	}
#endif
}


// #pragma mark - public API


#if USE_SLAB_ALLOCATOR_FOR_MALLOC


void*
memalign(size_t alignment, size_t size)
{
	return block_alloc(size, alignment, 0);
}


void *
memalign_etc(size_t alignment, size_t size, uint32 flags)
{
	return block_alloc(size, alignment, flags & CACHE_ALLOC_FLAGS);
}


void
free_etc(void *address, uint32 flags)
{
	block_free(address, flags & CACHE_ALLOC_FLAGS);
}


void*
malloc(size_t size)
{
	return block_alloc(size, 0, 0);
}


void
free(void* address)
{
	block_free(address, 0);
}
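

// Note: realloc() has no in-place resize path; unless the size is unchanged
// it always allocates a new block, copies min(oldSize, newSize) bytes and
// frees the old allocation.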
void*
realloc(void* address, size_t newSize)
{
	if (newSize == 0) {
		block_free(address, 0);
		return NULL;
	}

	if (address == NULL)
		return block_alloc(newSize, 0, 0);

	size_t oldSize;
	ObjectCache* cache = MemoryManager::GetAllocationInfo(address, oldSize);
	if (cache == NULL && oldSize == 0) {
		panic("block_realloc(): allocation %p not known", address);
		return NULL;
	}

	if (oldSize == newSize)
		return address;

	void* newBlock = block_alloc(newSize, 0, 0);
	if (newBlock == NULL)
		return NULL;

	memcpy(newBlock, address, std::min(oldSize, newSize));

	block_free(address, 0);

	return newBlock;
}


#endif	// USE_SLAB_ALLOCATOR_FOR_MALLOC


RANGE_MARKER_FUNCTION_END(slab_allocator)