#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/list_bl.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/mbcache.h>

/*
 * Mbcache is a simple key-value store. Keys need not be unique, however
 * key-value pairs are expected to be unique (we use this fact in
 * mb_cache_entry_delete_block()).
 *
 * Ext2 and ext4 use this cache for deduplication of extended attribute blocks.
 * They use hash of a block contents as a key and block number as a value.
 * That's why keys need not be unique (different xattr blocks may end up having
 * the same hash). However block number always uniquely identifies a cache
 * entry.
 *
 * We provide functions for creation and removal of entries, search by key,
 * and a special "delete entry with given key-value pair" operation. Fixed
 * size hash table is used for fast key lookups.
 */

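/*
 * Typical usage (an illustrative sketch, not part of the original file):
 * a filesystem keeps one cache per superblock, records the content hash of
 * every xattr block it writes, and searches the cache by that hash before
 * allocating a new block so an identical existing block can be shared.
 * The helpers sb_xattr_cache() and xattr_hash() are hypothetical; a matching
 * search loop is sketched after mb_cache_entry_find_next() below. -EBUSY
 * from mb_cache_entry_create() only means the pair is already cached.
 *
 *	struct mb_cache *cache = sb_xattr_cache(sb);
 *	u32 hash = xattr_hash(bh->b_data);
 *	int err;
 *
 *	err = mb_cache_entry_create(cache, GFP_NOFS, hash, bh->b_blocknr, 1);
 *	if (err && err != -EBUSY)
 *		return err;
 */
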
struct mb_cache {
	/* Hash table of entries */
	struct hlist_bl_head	*c_hash;
	/* log2 of hash table size */
	int			c_bucket_bits;
	/* Maximum entries in cache to avoid degrading hash too much */
	unsigned long		c_max_entries;
	/* Protects c_list, c_entry_count */
	spinlock_t		c_list_lock;
	struct list_head	c_list;
	/* Number of entries in cache */
	unsigned long		c_entry_count;
	struct shrinker		c_shrink;
	/* Work for shrinking when the cache has too many entries */
	struct work_struct	c_shrink_work;
};

static struct kmem_cache *mb_entry_cache;

static unsigned long mb_cache_shrink(struct mb_cache *cache,
				     unsigned long nr_to_scan);

static inline struct hlist_bl_head *mb_cache_entry_head(struct mb_cache *cache,
							 u32 key)
{
	return &cache->c_hash[hash_32(key, cache->c_bucket_bits)];
}

/*
 * Number of entries to reclaim synchronously when there are too many entries
 * in cache
 */
#define SYNC_SHRINK_BATCH 64

/*
 * mb_cache_entry_create - create entry in cache
 * @cache - cache where the entry should be created
 * @mask - gfp mask with which the entry should be allocated
 * @key - key of the entry
 * @block - block that contains data
 * @reusable - is the block reusable by other inodes?
 *
 * Creates entry in @cache with key @key and records that data is stored in
 * block @block. The function returns -EBUSY if entry with the same key
 * and for the same block already exists in cache. Otherwise 0 is returned.
 */
int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
			  sector_t block, bool reusable)
{
	struct mb_cache_entry *entry, *dup;
	struct hlist_bl_node *dup_node;
	struct hlist_bl_head *head;

	/* Schedule background reclaim if there are too many entries */
	if (cache->c_entry_count >= cache->c_max_entries)
		schedule_work(&cache->c_shrink_work);
	/* Do some sync reclaim if background reclaim cannot keep up */
	if (cache->c_entry_count >= 2 * cache->c_max_entries)
		mb_cache_shrink(cache, SYNC_SHRINK_BATCH);

	entry = kmem_cache_alloc(mb_entry_cache, mask);
	if (!entry)
		return -ENOMEM;

	INIT_LIST_HEAD(&entry->e_list);
	/* One ref for hash, one ref returned */
	atomic_set(&entry->e_refcnt, 1);
	entry->e_key = key;
	entry->e_block = block;
	entry->e_reusable = reusable;
	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	hlist_bl_for_each_entry(dup, dup_node, head, e_hash_list) {
		if (dup->e_key == key && dup->e_block == block) {
			hlist_bl_unlock(head);
			kmem_cache_free(mb_entry_cache, entry);
			return -EBUSY;
		}
	}
	hlist_bl_add_head(&entry->e_hash_list, head);
	hlist_bl_unlock(head);

	spin_lock(&cache->c_list_lock);
	list_add_tail(&entry->e_list, &cache->c_list);
	/* Grab ref for LRU list */
	atomic_inc(&entry->e_refcnt);
	cache->c_entry_count++;
	spin_unlock(&cache->c_list_lock);

	return 0;
}
EXPORT_SYMBOL(mb_cache_entry_create);

void __mb_cache_entry_free(struct mb_cache_entry *entry)
{
	kmem_cache_free(mb_entry_cache, entry);
}
EXPORT_SYMBOL(__mb_cache_entry_free);

static struct mb_cache_entry *__entry_find(struct mb_cache *cache,
					   struct mb_cache_entry *entry,
					   u32 key)
{
	struct mb_cache_entry *old_entry = entry;
	struct hlist_bl_node *node;
	struct hlist_bl_head *head;

	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	if (entry && !hlist_bl_unhashed(&entry->e_hash_list))
		node = entry->e_hash_list.next;
	else
		node = hlist_bl_first(head);
	while (node) {
		entry = hlist_bl_entry(node, struct mb_cache_entry,
				       e_hash_list);
		if (entry->e_key == key && entry->e_reusable) {
			atomic_inc(&entry->e_refcnt);
			goto out;
		}
		node = node->next;
	}
	entry = NULL;
out:
	hlist_bl_unlock(head);
	if (old_entry)
		mb_cache_entry_put(cache, old_entry);

	return entry;
}

/*
 * mb_cache_entry_find_first - find the first reusable entry with the given key
 * @cache: cache where we should search
 * @key: key to look for
 *
 * Search in @cache for a reusable entry with key @key. Grabs reference to the
 * first reusable entry found and returns the entry.
 */
struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache,
						 u32 key)
{
	return __entry_find(cache, NULL, key);
}
EXPORT_SYMBOL(mb_cache_entry_find_first);

/*
 * mb_cache_entry_find_next - find next reusable entry with the same key
 * @cache: cache where we should search
 * @entry: entry to start search from
 *
 * Finds next reusable entry in the hash chain which has the same key as @entry.
 * If @entry is unhashed (which can happen when deletion of entry races with the
 * search), finds the first reusable entry in the hash chain. The function drops
 * reference to @entry and returns with a reference to the found entry.
 */
struct mb_cache_entry *mb_cache_entry_find_next(struct mb_cache *cache,
						struct mb_cache_entry *entry)
{
	return __entry_find(cache, entry, entry->e_key);
}
EXPORT_SYMBOL(mb_cache_entry_find_next);

/*
 * mb_cache_entry_get - get a cache entry by block number (and key)
 * @cache - cache we work with
 * @key - key of block number @block
 * @block - block number
 */
struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key,
					  sector_t block)
{
	struct hlist_bl_node *node;
	struct hlist_bl_head *head;
	struct mb_cache_entry *entry;

	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
		if (entry->e_key == key && entry->e_block == block) {
			atomic_inc(&entry->e_refcnt);
			goto out;
		}
	}
	entry = NULL;
out:
	hlist_bl_unlock(head);
	return entry;
}
EXPORT_SYMBOL(mb_cache_entry_get);

/* mb_cache_entry_delete_block - remove information about block from cache
 * @cache - cache we work with
 * @key - key of block @block
 * @block - block number
 *
 * Remove entry from cache @cache with key @key with data stored in @block.
 */
void mb_cache_entry_delete_block(struct mb_cache *cache, u32 key,
				 sector_t block)
{
	struct hlist_bl_node *node;
	struct hlist_bl_head *head;
	struct mb_cache_entry *entry;

	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
		if (entry->e_key == key && entry->e_block == block) {
			/* We keep hash list reference to keep entry alive */
			hlist_bl_del_init(&entry->e_hash_list);
			hlist_bl_unlock(head);
			spin_lock(&cache->c_list_lock);
			if (!list_empty(&entry->e_list)) {
				list_del_init(&entry->e_list);
				cache->c_entry_count--;
				atomic_dec(&entry->e_refcnt);
			}
			spin_unlock(&cache->c_list_lock);
			mb_cache_entry_put(cache, entry);
			return;
		}
	}
	hlist_bl_unlock(head);
}
EXPORT_SYMBOL(mb_cache_entry_delete_block);

/* mb_cache_entry_touch - cache entry got used
 * @cache - cache the entry belongs to
 * @entry - entry that got used
 *
 * Marks entry as used to give it higher chances of surviving in cache.
 */
void mb_cache_entry_touch(struct mb_cache *cache,
			  struct mb_cache_entry *entry)
{
	entry->e_referenced = 1;
}
EXPORT_SYMBOL(mb_cache_entry_touch);

static unsigned long mb_cache_count(struct shrinker *shrink,
				    struct shrink_control *sc)
{
	struct mb_cache *cache = container_of(shrink, struct mb_cache,
					      c_shrink);

	return cache->c_entry_count;
}

/* Shrink number of entries in cache */
static unsigned long mb_cache_shrink(struct mb_cache *cache,
				     unsigned long nr_to_scan)
{
	struct mb_cache_entry *entry;
	struct hlist_bl_head *head;
	unsigned long shrunk = 0;

	spin_lock(&cache->c_list_lock);
	while (nr_to_scan-- && !list_empty(&cache->c_list)) {
		entry = list_first_entry(&cache->c_list,
					 struct mb_cache_entry, e_list);
		if (entry->e_referenced) {
			entry->e_referenced = 0;
			list_move_tail(&entry->e_list, &cache->c_list);
			continue;
		}
		list_del_init(&entry->e_list);
		cache->c_entry_count--;
		/*
		 * We keep LRU list reference so that entry doesn't go away
		 * from under us.
		 */
		spin_unlock(&cache->c_list_lock);
		head = mb_cache_entry_head(cache, entry->e_key);
		hlist_bl_lock(head);
		if (!hlist_bl_unhashed(&entry->e_hash_list)) {
			hlist_bl_del_init(&entry->e_hash_list);
			atomic_dec(&entry->e_refcnt);
		}
		hlist_bl_unlock(head);
		if (mb_cache_entry_put(cache, entry))
			shrunk++;
		cond_resched();
		spin_lock(&cache->c_list_lock);
	}
	spin_unlock(&cache->c_list_lock);

	return shrunk;
}

static unsigned long mb_cache_scan(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	struct mb_cache *cache = container_of(shrink, struct mb_cache,
					      c_shrink);
	return mb_cache_shrink(cache, sc->nr_to_scan);
}

/* We shrink 1/X of the cache when we have too many entries in it */
#define SHRINK_DIVISOR 16

static void mb_cache_shrink_worker(struct work_struct *work)
{
	struct mb_cache *cache = container_of(work, struct mb_cache,
					      c_shrink_work);
	mb_cache_shrink(cache, cache->c_max_entries / SHRINK_DIVISOR);
}

/*
 * mb_cache_create - create cache
 * @bucket_bits: log2 of the hash table size
 *
 * Create cache for keys with 2^bucket_bits hash entries.
 */
struct mb_cache *mb_cache_create(int bucket_bits)
{
	struct mb_cache *cache;
	unsigned long bucket_count = 1UL << bucket_bits;
	unsigned long i;

	cache = kzalloc(sizeof(struct mb_cache), GFP_KERNEL);
	if (!cache)
		goto err_out;
	cache->c_bucket_bits = bucket_bits;
	cache->c_max_entries = bucket_count << 4;
	INIT_LIST_HEAD(&cache->c_list);
	spin_lock_init(&cache->c_list_lock);
	cache->c_hash = kmalloc(bucket_count * sizeof(struct hlist_bl_head),
				GFP_KERNEL);
	if (!cache->c_hash) {
		kfree(cache);
		goto err_out;
	}
	for (i = 0; i < bucket_count; i++)
		INIT_HLIST_BL_HEAD(&cache->c_hash[i]);

	cache->c_shrink.count_objects = mb_cache_count;
	cache->c_shrink.scan_objects = mb_cache_scan;
	cache->c_shrink.seeks = DEFAULT_SEEKS;
	if (register_shrinker(&cache->c_shrink)) {
		kfree(cache->c_hash);
		kfree(cache);
		goto err_out;
	}

	INIT_WORK(&cache->c_shrink_work, mb_cache_shrink_worker);

	return cache;

err_out:
	return NULL;
}
EXPORT_SYMBOL(mb_cache_create);

/*
 * mb_cache_destroy - destroy cache
 * @cache: the cache to destroy
 *
 * Free all entries in cache and cache itself. Caller must make sure nobody
 * (except shrinker) can reach @cache when calling this.
 */
void mb_cache_destroy(struct mb_cache *cache)
{
	struct mb_cache_entry *entry, *next;

	unregister_shrinker(&cache->c_shrink);

	/*
	 * We don't bother with any locking. Cache must not be used at this
	 * point.
	 */
	list_for_each_entry_safe(entry, next, &cache->c_list, e_list) {
		if (!hlist_bl_unhashed(&entry->e_hash_list)) {
			hlist_bl_del_init(&entry->e_hash_list);
			atomic_dec(&entry->e_refcnt);
		}
		list_del(&entry->e_list);
		WARN_ON(atomic_read(&entry->e_refcnt) != 1);
		mb_cache_entry_put(cache, entry);
	}
	kfree(cache->c_hash);
	kfree(cache);
}
EXPORT_SYMBOL(mb_cache_destroy);

static int __init mbcache_init(void)
{
	mb_entry_cache = kmem_cache_create("mbcache",
				sizeof(struct mb_cache_entry), 0,
				SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
	if (!mb_entry_cache)
		return -ENOMEM;
	return 0;
}

static void __exit mbcache_exit(void)
{
	kmem_cache_destroy(mb_entry_cache);
}

module_init(mbcache_init)
module_exit(mbcache_exit)

MODULE_AUTHOR("Jan Kara <jack@suse.cz>");
MODULE_DESCRIPTION("Meta block cache (for extended attributes)");
MODULE_LICENSE("GPL");