src/system/kernel/slab/ObjectCache.cpp
/*
 * Copyright 2008-2010, Axel Dörfler. All Rights Reserved.
 * Copyright 2007, Hugo Santos. All Rights Reserved.
 *
 * Distributed under the terms of the MIT License.
 */


#include "ObjectCache.h"

#include <string.h>

#include <util/AutoLock.h>
#include <vm/vm.h>
#include <vm/VMAddressSpace.h>

#include "MemoryManager.h"
#include "slab_private.h"

RANGE_MARKER_FUNCTION_BEGIN(SlabObjectCache)


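// Callback handed to the object depot: returns an object evicted from a
// depot magazine back to its slab, taking the cache lock first.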
static void
object_cache_return_object_wrapper(object_depot* depot, void* cookie,
	void* object, uint32 flags)
{
	ObjectCache* cache = (ObjectCache*)cookie;

	MutexLocker _(cache->lock);
	cache->ReturnObjectToSlab(cache->ObjectSlab(object), object, flags);
}


// #pragma mark -

ObjectCache::~ObjectCache()
{
}


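// Initializes the cache: rounds the object size up to the requested
// alignment, resets all bookkeeping counters, and (on SMP systems) sets up
// the per-CPU object depot with the given or derived magazine configuration.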
status_t
ObjectCache::Init(const char* name, size_t objectSize, size_t alignment,
	size_t maximum, size_t magazineCapacity, size_t maxMagazineCount,
	uint32 flags, void* cookie, object_cache_constructor constructor,
	object_cache_destructor destructor, object_cache_reclaimer reclaimer)
{
	strlcpy(this->name, name, sizeof(this->name));

	mutex_init(&lock, this->name);

	if (objectSize < sizeof(object_link))
		objectSize = sizeof(object_link);

	if (alignment < kMinObjectAlignment)
		alignment = kMinObjectAlignment;

	if (alignment > 0 && (objectSize & (alignment - 1)))
		object_size = objectSize + alignment - (objectSize & (alignment - 1));
	else
		object_size = objectSize;

	TRACE_CACHE(this, "init %lu, %lu -> %lu", objectSize, alignment,
		object_size);

	this->alignment = alignment;
	cache_color_cycle = 0;
	total_objects = 0;
	used_count = 0;
	empty_count = 0;
	pressure = 0;
	min_object_reserve = 0;

	maintenance_pending = false;
	maintenance_in_progress = false;
	maintenance_resize = false;
	maintenance_delete = false;

	usage = 0;
	this->maximum = maximum;

	this->flags = flags;

	resize_request = NULL;
	resize_entry_can_wait = NULL;
	resize_entry_dont_wait = NULL;

	// no gain in using the depot in single cpu setups
	if (smp_get_num_cpus() == 1)
		this->flags |= CACHE_NO_DEPOT;

	if (!(this->flags & CACHE_NO_DEPOT)) {
		// Determine usable magazine configuration values if none have been
		// given.
		if (magazineCapacity == 0) {
			magazineCapacity = objectSize < 256
				? 32 : (objectSize < 512 ? 16 : 8);
		}
		if (maxMagazineCount == 0)
			maxMagazineCount = magazineCapacity / 2;

		status_t status = object_depot_init(&depot, magazineCapacity,
			maxMagazineCount, flags, this, object_cache_return_object_wrapper);
		if (status != B_OK) {
			mutex_destroy(&lock);
			return status;
		}
	}

	this->cookie = cookie;
	this->constructor = constructor;
	this->destructor = destructor;
	this->reclaimer = reclaimer;

	return B_OK;
}


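// Prepares a freshly allocated page range as a slab: computes how many
// objects fit, applies the current cache color as the starting offset, runs
// the constructor on every object, and pushes each one onto the slab's free
// list. Returns NULL (after destructing the already constructed objects) if
// a constructor fails.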
slab*
ObjectCache::InitSlab(slab* slab, void* pages, size_t byteCount, uint32 flags)
{
	TRACE_CACHE(this, "construct (%p, %p .. %p, %lu)", slab, pages,
		((uint8*)pages) + byteCount, byteCount);

	slab->pages = pages;
	slab->count = slab->size = byteCount / object_size;
	slab->free = NULL;

	size_t spareBytes = byteCount - (slab->size * object_size);

	slab->offset = cache_color_cycle;

	cache_color_cycle += alignment;
	if (cache_color_cycle > spareBytes)
		cache_color_cycle = 0;

	TRACE_CACHE(this, "  %lu objects, %lu spare bytes, offset %lu",
		slab->size, spareBytes, slab->offset);

	uint8* data = ((uint8*)pages) + slab->offset;

	CREATE_PARANOIA_CHECK_SET(slab, "slab");

	for (size_t i = 0; i < slab->size; i++) {
		status_t status = B_OK;
		if (constructor)
			status = constructor(cookie, data);

		if (status != B_OK) {
			data = ((uint8*)pages) + slab->offset;
			for (size_t j = 0; j < i; j++) {
				if (destructor)
					destructor(cookie, data);
				data += object_size;
			}

			DELETE_PARANOIA_CHECK_SET(slab);

			return NULL;
		}

		_push(slab->free, object_to_link(data, object_size));

		ADD_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, slab,
			&object_to_link(data, object_size)->next, sizeof(void*));

		data += object_size;
	}

	return slab;
}


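// Tears down a slab that is about to be released: all objects must be free
// (count == size), the cache's usage and object totals are adjusted, and the
// destructor is run on every object.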
void
ObjectCache::UninitSlab(slab* slab)
{
	TRACE_CACHE(this, "destruct %p", slab);

	if (slab->count != slab->size)
		panic("cache: destroying a slab which isn't empty.");

	usage -= slab_size;
	total_objects -= slab->size;

	DELETE_PARANOIA_CHECK_SET(slab);

	uint8* data = ((uint8*)slab->pages) + slab->offset;

	for (size_t i = 0; i < slab->size; i++) {
		if (destructor)
			destructor(cookie, data);
		data += object_size;
	}
}


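// Returns an object to its source slab's free list and moves the slab between
// the full, partial, and empty lists as needed. A slab that becomes completely
// free is either kept on the empty list (while empty_count is below the
// current pressure and the reserve of free objects is still met) or handed
// back via ReturnSlab().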
void
ObjectCache::ReturnObjectToSlab(slab* source, void* object, uint32 flags)
{
	if (source == NULL) {
		panic("object_cache: free'd object %p has no slab", object);
		return;
	}

	ParanoiaChecker _(source);

#if KDEBUG >= 1
	uint8* objectsStart = (uint8*)source->pages + source->offset;
	if (object < objectsStart
		|| object >= objectsStart + source->size * object_size
		|| ((uint8*)object - objectsStart) % object_size != 0) {
		panic("object_cache: tried to free invalid object pointer %p", object);
		return;
	}
#endif // KDEBUG

	object_link* link = object_to_link(object, object_size);

	TRACE_CACHE(this, "returning %p (%p) to %p, %lu used (%lu empty slabs).",
		object, link, source, source->size - source->count,
		empty_count);

	_push(source->free, link);
	source->count++;
	used_count--;

	ADD_PARANOIA_CHECK(PARANOIA_SUSPICIOUS, source, &link->next, sizeof(void*));

	if (source->count == source->size) {
		partial.Remove(source);

		if (empty_count < pressure
			&& total_objects - used_count - source->size
				>= min_object_reserve) {
			empty_count++;
			empty.Add(source);
		} else {
			ReturnSlab(source, flags);
		}
	} else if (source->count == 1) {
		full.Remove(source);
		partial.Add(source);
	}
}


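// Returns a pointer to the object with the given index within the slab,
// taking the slab's color offset into account.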
void*
ObjectCache::ObjectAtIndex(slab* source, int32 index) const
{
	return (uint8*)source->pages + source->offset + index * object_size;
}


#if PARANOID_KERNEL_FREE

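// Sanity check used by the paranoid free path: panics if the object's slab is
// not on the cache's partial or full list, or if the object is already on the
// slab's free list (i.e. a double free). Returns true if the object looks
// valid.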
bool
ObjectCache::AssertObjectNotFreed(void* object)
{
	MutexLocker locker(lock);

	slab* source = ObjectSlab(object);
	if (!partial.Contains(source) && !full.Contains(source)) {
		panic("object_cache: to be freed object %p: slab not part of cache!",
			object);
		return false;
	}

	object_link* link = object_to_link(object, object_size);
	for (object_link* freeLink = source->free; freeLink != NULL;
			freeLink = freeLink->next) {
		if (freeLink == link) {
			panic("object_cache: double free of %p (slab %p, cache %p)",
				object, source, this);
			return false;
		}
	}

	return true;
}

#endif // PARANOID_KERNEL_FREE


#if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING

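// Allocates one AllocationTrackingInfo per object in the slab directly from
// the memory manager and clears each entry.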
status_t
ObjectCache::AllocateTrackingInfos(slab* slab, size_t byteCount, uint32 flags)
{
	void* pages;
	size_t objectCount = byteCount / object_size;
	status_t result = MemoryManager::AllocateRaw(
		objectCount * sizeof(AllocationTrackingInfo), flags, pages);
	if (result == B_OK) {
		slab->tracking = (AllocationTrackingInfo*)pages;
		for (size_t i = 0; i < objectCount; i++)
			slab->tracking[i].Clear();
	}

	return result;
}


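// Releases the tracking info array that AllocateTrackingInfos() set up for
// the slab.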
void
ObjectCache::FreeTrackingInfos(slab* slab, uint32 flags)
{
	MemoryManager::FreeRawOrReturnCache(slab->tracking, flags);
}


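// Maps an object back to its tracking entry by computing the object's index
// within its slab.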
AllocationTrackingInfo*
ObjectCache::TrackingInfoFor(void* object) const
{
	slab* objectSlab = ObjectSlab(object);
	return &objectSlab->tracking[((addr_t)object - objectSlab->offset
		- (addr_t)objectSlab->pages) / object_size];
}

#endif // SLAB_OBJECT_CACHE_ALLOCATION_TRACKING


RANGE_MARKER_FUNCTION_END(SlabObjectCache)