/*
 * Copyright 2008, Axel Dörfler. All Rights Reserved.
 * Copyright 2007, Hugo Santos. All Rights Reserved.
 *
 * Distributed under the terms of the MIT License.
 */


#include "HashedObjectCache.h"

#include "MemoryManager.h"
#include "slab_private.h"


RANGE_MARKER_FUNCTION_BEGIN(SlabHashedObjectCache)
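

// Returns the index of the highest set bit in value, i.e. floor(log2(value)),
// or -1 if value is 0. Used below to derive the cache's lower_boundary from
// its slab size.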
static inline int
__fls0(size_t value)
{
	if (value == 0)
		return -1;

	int bit;
	for (bit = 0; value != 1; bit++)
		value >>= 1;
	return bit;
}
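

// HashedSlab headers are kept outside the slab's pages (which is what the
// hash table is for), so they are allocated from the internal slab heap.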
static HashedSlab*
allocate_slab(uint32 flags)
{
	return (HashedSlab*)slab_internal_alloc(sizeof(HashedSlab), flags);
}


static void
free_slab(HashedSlab* slab, uint32 flags)
{
	slab_internal_free(slab, flags);
}


HashedObjectCache::HashedObjectCache()
	:
	hash_table(this)
{
}
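

// Allocates and initializes a new HashedObjectCache. Returns NULL if any
// allocation or the Init() call fails; a partially constructed cache is
// torn down again with Delete().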
/*static*/ HashedObjectCache*
HashedObjectCache::Create(const char* name, size_t object_size,
	size_t alignment, size_t maximum, size_t magazineCapacity,
	size_t maxMagazineCount, uint32 flags, void* cookie,
	object_cache_constructor constructor, object_cache_destructor destructor,
	object_cache_reclaimer reclaimer)
{
	void* buffer = slab_internal_alloc(sizeof(HashedObjectCache), flags);
	if (buffer == NULL)
		return NULL;

	HashedObjectCache* cache = new(buffer) HashedObjectCache();

	// init the hash table
	size_t hashSize = cache->hash_table.ResizeNeeded();
	buffer = slab_internal_alloc(hashSize, flags);
	if (buffer == NULL) {
		cache->Delete();
		return NULL;
	}

	cache->hash_table.Resize(buffer, hashSize, true);

	if (cache->Init(name, object_size, alignment, maximum, magazineCapacity,
			maxMagazineCount, flags, cookie, constructor, destructor,
			reclaimer) != B_OK) {
		cache->Delete();
		return NULL;
	}

	if ((flags & CACHE_LARGE_SLAB) != 0)
		cache->slab_size = 128 * object_size;
	else
		cache->slab_size = 8 * object_size;

	cache->slab_size = MemoryManager::AcceptableChunkSize(cache->slab_size);
	cache->lower_boundary = __fls0(cache->slab_size);

	return cache;
}


void
HashedObjectCache::Delete()
{
	// the cache was constructed in place with placement new, so run the
	// destructor manually and free the raw buffer
	this->~HashedObjectCache();
	slab_internal_free(this, 0);
}
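

// Allocates a new slab, backs it with pages from the MemoryManager, and
// links it into the hash table. On any failure the partially set up slab
// and its pages are released again and NULL is returned.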
slab*
HashedObjectCache::CreateSlab(uint32 flags)
{
	if (!check_cache_quota(this))
		return NULL;

	Unlock();

	HashedSlab* slab = allocate_slab(flags);
	if (slab != NULL) {
		void* pages = NULL;
		if (MemoryManager::Allocate(this, flags, pages) == B_OK
			&& AllocateTrackingInfos(slab, slab_size, flags) == B_OK) {
			Lock();
			if (InitSlab(slab, pages, slab_size, flags)) {
				hash_table.InsertUnchecked(slab);
				_ResizeHashTableIfNeeded(flags);
				return slab;
			}
			Unlock();
			FreeTrackingInfos(slab, flags);
		}

		if (pages != NULL)
			MemoryManager::Free(pages, flags);

		free_slab(slab, flags);
	}

	Lock();
	return NULL;
}
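

// Unlinks the slab from the hash table and releases its tracking infos, its
// pages, and the slab header itself.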
void
HashedObjectCache::ReturnSlab(slab* _slab, uint32 flags)
{
	HashedSlab* slab = static_cast<HashedSlab*>(_slab);

	hash_table.RemoveUnchecked(slab);
	_ResizeHashTableIfNeeded(flags);

	UninitSlab(slab);

	Unlock();
	FreeTrackingInfos(slab, flags);
	MemoryManager::Free(slab->pages, flags);
	free_slab(slab, flags);
	Lock();
}
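

// Finds the slab an object belongs to by looking up the object's address,
// rounded down to the slab size boundary, in the hash table. Panics if the
// object is unknown to this cache.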
slab*
HashedObjectCache::ObjectSlab(void* object) const
{
	ASSERT_LOCKED_MUTEX(&lock);

	HashedSlab* slab = hash_table.Lookup(::lower_boundary(object, slab_size));
	if (slab == NULL) {
		panic("hash object cache %p: unknown object %p", this, object);
	}

	return slab;
}
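

// Grows the hash table when the number of slabs warrants it. The new table
// buffer is allocated with the cache unlocked, so the required size is
// checked again after reacquiring the lock before the resize is committed.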
void
HashedObjectCache::_ResizeHashTableIfNeeded(uint32 flags)
{
	size_t hashSize = hash_table.ResizeNeeded();
	if (hashSize != 0) {
		Unlock();
		void* buffer = slab_internal_alloc(hashSize, flags);
		Lock();

		if (buffer != NULL) {
			if (hash_table.ResizeNeeded() == hashSize) {
				void* oldHash;
				hash_table.Resize(buffer, hashSize, true, &oldHash);
				if (oldHash != NULL) {
					Unlock();
					slab_internal_free(oldHash, flags);
					Lock();
				}
			}
		}
	}
}


RANGE_MARKER_FUNCTION_END(SlabHashedObjectCache)