// SPDX-License-Identifier: GPL-2.0-only
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/list_bl.h>
#include <linux/hash.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/mbcache.h>
/*
 * Mbcache is a simple key-value store. Keys need not be unique, however
 * key-value pairs are expected to be unique (we use this fact in
 * mb_cache_entry_delete_or_get()).
 *
 * Ext2 and ext4 use this cache for deduplication of extended attribute blocks.
 * Ext4 also uses it for deduplication of xattr values stored in inodes.
 * They use hash of data as a key and provide a value that may represent a
 * block or inode number. That's why keys need not be unique (hash of different
 * data may be the same). However, the user-provided value always uniquely
 * identifies a cache entry.
 *
 * We provide functions for creation and removal of entries, search by key,
 * and a special "delete entry with given key-value pair" operation. Fixed
 * size hash table is used for fast key lookups.
 */
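
/*
 * A minimal usage sketch (illustrative only; "hash" and "block_nr" are
 * assumed caller-side variables, not part of this file):
 *
 *	struct mb_cache *cache = mb_cache_create(10);
 *	struct mb_cache_entry *entry;
 *
 *	if (mb_cache_entry_create(cache, GFP_NOFS, hash, block_nr, true) == 0) {
 *		entry = mb_cache_entry_find_first(cache, hash);
 *		if (entry) {
 *			mb_cache_entry_touch(cache, entry);
 *			mb_cache_entry_put(cache, entry);
 *		}
 *	}
 *	mb_cache_destroy(cache);
 */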

struct mb_cache {
	/* Hash table of entries */
	struct hlist_bl_head	*c_hash;
	/* log2 of hash table size */
	int			c_bucket_bits;
	/* Maximum entries in cache to avoid degrading hash too much */
	unsigned long		c_max_entries;
	/* Protects c_list, c_entry_count */
	spinlock_t		c_list_lock;
	struct list_head	c_list;
	/* Number of entries in cache */
	unsigned long		c_entry_count;
	struct shrinker		*c_shrink;
	/* Work for shrinking when the cache has too many entries */
	struct work_struct	c_shrink_work;
};

static struct kmem_cache *mb_entry_cache;

static unsigned long	mb_cache_shrink(struct mb_cache *cache,
					unsigned long nr_to_scan);

static inline struct hlist_bl_head *mb_cache_entry_head(struct mb_cache *cache,
							u32 key)
{
	return &cache->c_hash[hash_32(key, cache->c_bucket_bits)];
}

/*
 * Number of entries to reclaim synchronously when there are too many entries
 * in cache
 */
#define SYNC_SHRINK_BATCH 64

/*
 * mb_cache_entry_create - create entry in cache
 * @cache - cache where the entry should be created
 * @mask - gfp mask with which the entry should be allocated
 * @key - key of the entry
 * @value - value of the entry
 * @reusable - is the entry reusable by others?
 *
 * Creates entry in @cache with key @key and value @value. The function returns
 * -EBUSY if entry with the same key and value already exists in cache.
 * Otherwise 0 is returned.
 */
int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
			  u64 value, bool reusable)
{
	struct mb_cache_entry *entry, *dup;
	struct hlist_bl_node *dup_node;
	struct hlist_bl_head *head;

	/* Schedule background reclaim if there are too many entries */
	if (cache->c_entry_count >= cache->c_max_entries)
		schedule_work(&cache->c_shrink_work);
	/* Do some sync reclaim if background reclaim cannot keep up */
	if (cache->c_entry_count >= 2*cache->c_max_entries)
		mb_cache_shrink(cache, SYNC_SHRINK_BATCH);

	entry = kmem_cache_alloc(mb_entry_cache, mask);
	if (!entry)
		return -ENOMEM;

	INIT_LIST_HEAD(&entry->e_list);
	/*
	 * We create entry with two references. One reference is kept by the
	 * hash table, the other reference is used to protect us from
	 * mb_cache_entry_delete_or_get() until the entry is fully setup. This
	 * avoids nesting of cache->c_list_lock into hash table bit locks which
	 * is problematic for RT.
	 */
	atomic_set(&entry->e_refcnt, 2);
	entry->e_key = key;
	entry->e_value = value;
	entry->e_flags = 0;
	if (reusable)
		set_bit(MBE_REUSABLE_B, &entry->e_flags);
	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	hlist_bl_for_each_entry(dup, dup_node, head, e_hash_list) {
		if (dup->e_key == key && dup->e_value == value) {
			hlist_bl_unlock(head);
			kmem_cache_free(mb_entry_cache, entry);
			return -EBUSY;
		}
	}
	hlist_bl_add_head(&entry->e_hash_list, head);
	hlist_bl_unlock(head);
	spin_lock(&cache->c_list_lock);
	list_add_tail(&entry->e_list, &cache->c_list);
	cache->c_entry_count++;
	spin_unlock(&cache->c_list_lock);
	mb_cache_entry_put(cache, entry);

	return 0;
}
EXPORT_SYMBOL(mb_cache_entry_create);
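
/*
 * Note for callers: -EBUSY from mb_cache_entry_create() means the (key,
 * value) pair is already cached, which callers typically treat as success.
 * A caller-side sketch ("hash" and "block_nr" are assumed variables):
 *
 *	err = mb_cache_entry_create(cache, GFP_NOFS, hash, block_nr, true);
 *	if (err && err != -EBUSY)
 *		return err;
 */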

void __mb_cache_entry_free(struct mb_cache *cache, struct mb_cache_entry *entry)
{
	struct hlist_bl_head *head;

	head = mb_cache_entry_head(cache, entry->e_key);
	hlist_bl_lock(head);
	hlist_bl_del(&entry->e_hash_list);
	hlist_bl_unlock(head);
	kmem_cache_free(mb_entry_cache, entry);
}
EXPORT_SYMBOL(__mb_cache_entry_free);

/*
 * mb_cache_entry_wait_unused - wait to be the last user of the entry
 *
 * @entry - entry to work on
 *
 * Wait to be the last user of the entry. The wait finishes once e_refcnt
 * drops to 2: one reference held by the hash table plus the one held by
 * the caller.
 */
void mb_cache_entry_wait_unused(struct mb_cache_entry *entry)
{
	wait_var_event(&entry->e_refcnt, atomic_read(&entry->e_refcnt) <= 2);
}
EXPORT_SYMBOL(mb_cache_entry_wait_unused);
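
/*
 * Find the next reusable entry with the given key. When @entry is NULL, the
 * search starts at the head of the hash chain; otherwise it continues right
 * after @entry (or from the chain head if @entry was unhashed in the
 * meantime). The reference to @entry is dropped and a reference to the
 * returned entry is grabbed.
 */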
static struct mb_cache_entry *__entry_find(struct mb_cache *cache,
					   struct mb_cache_entry *entry,
					   u32 key)
{
	struct mb_cache_entry *old_entry = entry;
	struct hlist_bl_node *node;
	struct hlist_bl_head *head;

	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	if (entry && !hlist_bl_unhashed(&entry->e_hash_list))
		node = entry->e_hash_list.next;
	else
		node = hlist_bl_first(head);
	while (node) {
		entry = hlist_bl_entry(node, struct mb_cache_entry,
				       e_hash_list);
		if (entry->e_key == key &&
		    test_bit(MBE_REUSABLE_B, &entry->e_flags) &&
		    atomic_inc_not_zero(&entry->e_refcnt))
			goto out;
		node = node->next;
	}
	entry = NULL;
out:
	hlist_bl_unlock(head);
	if (old_entry)
		mb_cache_entry_put(cache, old_entry);

	return entry;
}

/*
 * mb_cache_entry_find_first - find the first reusable entry with the given key
 * @cache: cache where we should search
 * @key: key to look for
 *
 * Search in @cache for a reusable entry with key @key. Grabs reference to the
 * first reusable entry found and returns the entry.
 */
struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache,
						 u32 key)
{
	return __entry_find(cache, NULL, key);
}
EXPORT_SYMBOL(mb_cache_entry_find_first);

/*
 * mb_cache_entry_find_next - find next reusable entry with the same key
 * @cache: cache where we should search
 * @entry: entry to start search from
 *
 * Finds next reusable entry in the hash chain which has the same key as @entry.
 * If @entry is unhashed (which can happen when deletion of entry races with the
 * search), finds the first reusable entry in the hash chain. The function drops
 * reference to @entry and returns with a reference to the found entry.
 */
struct mb_cache_entry *mb_cache_entry_find_next(struct mb_cache *cache,
						struct mb_cache_entry *entry)
{
	return __entry_find(cache, entry, entry->e_key);
}
EXPORT_SYMBOL(mb_cache_entry_find_next);
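
/*
 * Sketch of scanning all reusable entries with one key (illustrative;
 * try_to_reuse() is a made-up caller-side predicate):
 *
 *	entry = mb_cache_entry_find_first(cache, key);
 *	while (entry) {
 *		if (try_to_reuse(entry->e_value))
 *			break;
 *		entry = mb_cache_entry_find_next(cache, entry);
 *	}
 *	if (entry)
 *		mb_cache_entry_put(cache, entry);
 */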

/*
 * mb_cache_entry_get - get a cache entry by value (and key)
 * @cache - cache we work with
 * @key - key
 * @value - value
 */
struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key,
					  u64 value)
{
	struct hlist_bl_node *node;
	struct hlist_bl_head *head;
	struct mb_cache_entry *entry;

	head = mb_cache_entry_head(cache, key);
	hlist_bl_lock(head);
	hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
		if (entry->e_key == key && entry->e_value == value &&
		    atomic_inc_not_zero(&entry->e_refcnt))
			goto out;
	}
	entry = NULL;
out:
	hlist_bl_unlock(head);
	return entry;
}
EXPORT_SYMBOL(mb_cache_entry_get);

/* mb_cache_entry_delete_or_get - remove a cache entry if it has no users
 * @cache - cache we work with
 * @key - key
 * @value - value
 *
 * Remove entry from cache @cache with key @key and value @value. The removal
 * happens only if the entry is unused. The function returns NULL in case the
 * entry was successfully removed or there's no entry in cache. Otherwise the
 * function grabs reference of the entry that we failed to delete because it
 * still has users and returns it.
 */
struct mb_cache_entry *mb_cache_entry_delete_or_get(struct mb_cache *cache,
						    u32 key, u64 value)
{
	struct mb_cache_entry *entry;

	entry = mb_cache_entry_get(cache, key, value);
	if (!entry)
		return NULL;

	/*
	 * Drop the ref we got from mb_cache_entry_get() and the initial hash
	 * ref if we are the last user
	 */
	if (atomic_cmpxchg(&entry->e_refcnt, 2, 0) != 2)
		return entry;

	spin_lock(&cache->c_list_lock);
	if (!list_empty(&entry->e_list))
		list_del_init(&entry->e_list);
	cache->c_entry_count--;
	spin_unlock(&cache->c_list_lock);
	__mb_cache_entry_free(cache, entry);
	return NULL;
}
EXPORT_SYMBOL(mb_cache_entry_delete_or_get);
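
/*
 * Typical deletion pattern combining this with mb_cache_entry_wait_unused()
 * (a sketch modelled on how ext4 drops xattr blocks; not code from this
 * file):
 *
 *	entry = mb_cache_entry_delete_or_get(cache, hash, block_nr);
 *	if (entry) {
 *		mb_cache_entry_wait_unused(entry);
 *		mb_cache_entry_put(cache, entry);
 *	}
 */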

/* mb_cache_entry_touch - cache entry got used
 * @cache - cache the entry belongs to
 * @entry - entry that got used
 *
 * Marks entry as used to give it higher chances of surviving in cache.
 */
void mb_cache_entry_touch(struct mb_cache *cache,
			  struct mb_cache_entry *entry)
{
	set_bit(MBE_REFERENCED_B, &entry->e_flags);
}
EXPORT_SYMBOL(mb_cache_entry_touch);
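
/*
 * Shrinker interface: mb_cache_count() reports how many entries the cache
 * holds and mb_cache_shrink()/mb_cache_scan() evict entries from the head
 * of the LRU-ish c_list, giving recently touched entries a second chance.
 */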
static unsigned long mb_cache_count(struct shrinker *shrink,
				    struct shrink_control *sc)
{
	struct mb_cache *cache = shrink->private_data;

	return cache->c_entry_count;
}

/* Shrink number of entries in cache */
static unsigned long mb_cache_shrink(struct mb_cache *cache,
				     unsigned long nr_to_scan)
{
	struct mb_cache_entry *entry;
	unsigned long shrunk = 0;

	spin_lock(&cache->c_list_lock);
	while (nr_to_scan-- && !list_empty(&cache->c_list)) {
		entry = list_first_entry(&cache->c_list,
					 struct mb_cache_entry, e_list);
		/* Drop initial hash reference if there is no user */
		if (test_bit(MBE_REFERENCED_B, &entry->e_flags) ||
		    atomic_cmpxchg(&entry->e_refcnt, 1, 0) != 1) {
			clear_bit(MBE_REFERENCED_B, &entry->e_flags);
			list_move_tail(&entry->e_list, &cache->c_list);
			continue;
		}
		list_del_init(&entry->e_list);
		cache->c_entry_count--;
		spin_unlock(&cache->c_list_lock);
		__mb_cache_entry_free(cache, entry);
		shrunk++;
		cond_resched();
		spin_lock(&cache->c_list_lock);
	}
	spin_unlock(&cache->c_list_lock);

	return shrunk;
}

static unsigned long mb_cache_scan(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	struct mb_cache *cache = shrink->private_data;

	return mb_cache_shrink(cache, sc->nr_to_scan);
}

/* We shrink 1/X of the cache when we have too many entries in it */
#define SHRINK_DIVISOR 16

static void mb_cache_shrink_worker(struct work_struct *work)
{
	struct mb_cache *cache = container_of(work, struct mb_cache,
					      c_shrink_work);
	mb_cache_shrink(cache, cache->c_max_entries / SHRINK_DIVISOR);
}

/*
 * mb_cache_create - create cache
 * @bucket_bits: log2 of the hash table size
 *
 * Create cache for keys with 2^bucket_bits hash entries.
 */
struct mb_cache *mb_cache_create(int bucket_bits)
{
	struct mb_cache *cache;
	unsigned long bucket_count = 1UL << bucket_bits;
	unsigned long i;

	cache = kzalloc(sizeof(struct mb_cache), GFP_KERNEL);
	if (!cache)
		goto err_out;
	cache->c_bucket_bits = bucket_bits;
	cache->c_max_entries = bucket_count << 4;
	INIT_LIST_HEAD(&cache->c_list);
	spin_lock_init(&cache->c_list_lock);
	cache->c_hash = kmalloc_array(bucket_count,
				      sizeof(struct hlist_bl_head),
				      GFP_KERNEL);
	if (!cache->c_hash) {
		kfree(cache);
		goto err_out;
	}
	for (i = 0; i < bucket_count; i++)
		INIT_HLIST_BL_HEAD(&cache->c_hash[i]);

	cache->c_shrink = shrinker_alloc(0, "mbcache-shrinker");
	if (!cache->c_shrink) {
		kfree(cache->c_hash);
		kfree(cache);
		goto err_out;
	}

	cache->c_shrink->count_objects = mb_cache_count;
	cache->c_shrink->scan_objects = mb_cache_scan;
	cache->c_shrink->private_data = cache;

	shrinker_register(cache->c_shrink);

	INIT_WORK(&cache->c_shrink_work, mb_cache_shrink_worker);

	return cache;

err_out:
	return NULL;
}
EXPORT_SYMBOL(mb_cache_create);

/*
 * mb_cache_destroy - destroy cache
 * @cache: the cache to destroy
 *
 * Free all entries in cache and cache itself. Caller must make sure nobody
 * (except shrinker) can reach @cache when calling this.
 */
void mb_cache_destroy(struct mb_cache *cache)
{
	struct mb_cache_entry *entry, *next;

	shrinker_free(cache->c_shrink);

	/*
	 * We don't bother with any locking. Cache must not be used at this
	 * point.
	 */
	list_for_each_entry_safe(entry, next, &cache->c_list, e_list) {
		list_del(&entry->e_list);
		WARN_ON(atomic_read(&entry->e_refcnt) != 1);
		mb_cache_entry_put(cache, entry);
	}
	kfree(cache->c_hash);
	kfree(cache);
}
EXPORT_SYMBOL(mb_cache_destroy);

static int __init mbcache_init(void)
{
	mb_entry_cache = KMEM_CACHE(mb_cache_entry, SLAB_RECLAIM_ACCOUNT);
	if (!mb_entry_cache)
		return -ENOMEM;
	return 0;
}

static void __exit mbcache_exit(void)
{
	kmem_cache_destroy(mb_entry_cache);
}

module_init(mbcache_init)
module_exit(mbcache_exit)

MODULE_AUTHOR("Jan Kara <jack@suse.cz>");
MODULE_DESCRIPTION("Meta block cache (for extended attributes)");
MODULE_LICENSE("GPL");