haiku.git: src/system/kernel/slab/HashedObjectCache.cpp
/*
 * Copyright 2008, Axel Dörfler. All Rights Reserved.
 * Copyright 2007, Hugo Santos. All Rights Reserved.
 *
 * Distributed under the terms of the MIT License.
 */


#include "HashedObjectCache.h"

#include "MemoryManager.h"
#include "slab_private.h"


RANGE_MARKER_FUNCTION_BEGIN(SlabHashedObjectCache)
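
// Returns the index of the highest set bit in "value" (i.e. floor(log2)),
// or -1 if no bit is set at all.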
static inline int
__fls0(size_t value)
{
	if (value == 0)
		return -1;

	int bit;
	for (bit = 0; value != 1; bit++)
		value >>= 1;
	return bit;
}
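
// HashedSlab headers are not embedded in the slab's own pages; they are
// allocated from the internal slab heap and located through the cache's
// hash table (see ObjectSlab() below).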
static HashedSlab*
allocate_slab(uint32 flags)
{
	return (HashedSlab*)slab_internal_alloc(sizeof(HashedSlab), flags);
}


static void
free_slab(HashedSlab* slab, uint32 flags)
{
	slab_internal_free(slab, flags);
}


// #pragma mark -


HashedObjectCache::HashedObjectCache()
	:
	hash_table(this)
{
}
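
// Creates and initializes a new cache: the cache header and the initial
// hash table both come from the internal slab heap, and the slab size is
// derived from the object size, then rounded to a chunk size the
// MemoryManager will accept.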
/*static*/ HashedObjectCache*
HashedObjectCache::Create(const char* name, size_t object_size,
	size_t alignment, size_t maximum, size_t magazineCapacity,
	size_t maxMagazineCount, uint32 flags, void* cookie,
	object_cache_constructor constructor, object_cache_destructor destructor,
	object_cache_reclaimer reclaimer)
{
	void* buffer = slab_internal_alloc(sizeof(HashedObjectCache), flags);
	if (buffer == NULL)
		return NULL;

	HashedObjectCache* cache = new(buffer) HashedObjectCache();

	// init the hash table
	size_t hashSize = cache->hash_table.ResizeNeeded();
	buffer = slab_internal_alloc(hashSize, flags);
	if (buffer == NULL) {
		cache->Delete();
		return NULL;
	}

	cache->hash_table.Resize(buffer, hashSize, true);

	if (cache->Init(name, object_size, alignment, maximum, magazineCapacity,
			maxMagazineCount, flags, cookie, constructor, destructor,
			reclaimer) != B_OK) {
		cache->Delete();
		return NULL;
	}

	if ((flags & CACHE_LARGE_SLAB) != 0)
		cache->slab_size = 128 * object_size;
	else
		cache->slab_size = 8 * object_size;

	cache->slab_size = MemoryManager::AcceptableChunkSize(cache->slab_size);
	cache->lower_boundary = __fls0(cache->slab_size);

	return cache;
}
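
// The cache was constructed via placement new in internally allocated
// memory, so tear-down likewise calls the destructor and frees the memory
// by hand.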
void
HashedObjectCache::Delete()
{
	this->~HashedObjectCache();
	slab_internal_free(this, 0);
}
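
// Allocates and initializes a new (empty) slab. The cache lock is dropped
// while the slab header, its pages, and the tracking infos are allocated,
// and reacquired before the slab is inserted into the hash table.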
slab*
HashedObjectCache::CreateSlab(uint32 flags)
{
	if (!check_cache_quota(this))
		return NULL;

	Unlock();

	HashedSlab* slab = allocate_slab(flags);
	if (slab != NULL) {
		void* pages = NULL;
		if (MemoryManager::Allocate(this, flags, pages) == B_OK
			&& AllocateTrackingInfos(slab, slab_size, flags) == B_OK) {
			Lock();
			if (InitSlab(slab, pages, slab_size, flags)) {
				hash_table.InsertUnchecked(slab);
				_ResizeHashTableIfNeeded(flags);
				return slab;
			}
			Unlock();
			FreeTrackingInfos(slab, flags);
		}

		if (pages != NULL)
			MemoryManager::Free(pages, flags);

		free_slab(slab, flags);
	}

	Lock();
	return NULL;
}
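
// Unhashes the slab and releases its resources; as in CreateSlab(), the
// lock is dropped around the actual freeing.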
void
HashedObjectCache::ReturnSlab(slab* _slab, uint32 flags)
{
	HashedSlab* slab = static_cast<HashedSlab*>(_slab);

	hash_table.RemoveUnchecked(slab);
	_ResizeHashTableIfNeeded(flags);

	UninitSlab(slab);

	Unlock();
	FreeTrackingInfos(slab, flags);
	MemoryManager::Free(slab->pages, flags);
	free_slab(slab, flags);
	Lock();
}
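
// Finds the slab an object belongs to. The object's address is rounded
// down to the slab boundary (that is what ::lower_boundary() is assumed to
// do here) and the resulting base address is looked up in the hash table.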
slab*
HashedObjectCache::ObjectSlab(void* object) const
{
	ASSERT_LOCKED_MUTEX(&lock);

	HashedSlab* slab = hash_table.Lookup(::lower_boundary(object, slab_size));
	if (slab == NULL) {
		panic("hash object cache %p: unknown object %p", this, object);
		return NULL;
	}

	return slab;
}
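
// Grows the hash table when its load requires it. Since the new table is
// allocated with the cache unlocked, the required size is re-checked after
// relocking before the tables are actually swapped.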
void
HashedObjectCache::_ResizeHashTableIfNeeded(uint32 flags)
{
	size_t hashSize = hash_table.ResizeNeeded();
	if (hashSize != 0) {
		Unlock();
		void* buffer = slab_internal_alloc(hashSize, flags);
		Lock();

		if (buffer != NULL) {
			if (hash_table.ResizeNeeded() == hashSize) {
				void* oldHash;
				hash_table.Resize(buffer, hashSize, true, &oldHash);
				if (oldHash != NULL) {
					Unlock();
					slab_internal_free(oldHash, flags);
					Lock();
				}
			} else {
				// The required size changed while the lock was dropped;
				// free the now unneeded buffer instead of leaking it.
				slab_internal_free(buffer, flags);
			}
		}
	}
}

RANGE_MARKER_FUNCTION_END(SlabHashedObjectCache)