Merge tag 'nfs-for-3.13-2' of git://git.linux-nfs.org/projects/trondmy/linux-nfs
[linux/fpc-iii.git] / drivers / md / dm-cache-policy-cleaner.c
blobb04d1f904d0763c43870e38c857fcd25baad4b7e
1 /*
2 * Copyright (C) 2012 Red Hat. All rights reserved.
4 * writeback cache policy supporting flushing out dirty cache blocks.
6 * This file is released under the GPL.
7 */
9 #include "dm-cache-policy.h"
10 #include "dm.h"
12 #include <linux/hash.h>
13 #include <linux/module.h>
14 #include <linux/slab.h>
15 #include <linux/vmalloc.h>
17 /*----------------------------------------------------------------*/
19 #define DM_MSG_PREFIX "cache cleaner"
21 /* Cache entry struct. */
/*
 * One entry per cache block.  An entry lives on exactly one of the
 * policy's queues (free/clean/clean_pending/dirty) via 'list' and,
 * while mapped, is hashed by origin block via 'hlist'.
 */
struct wb_cache_entry {
	struct list_head list;		/* membership in one policy queue */
	struct hlist_node hlist;	/* hash-bucket chain, keyed by oblock */

	dm_oblock_t oblock;		/* origin-device block this entry maps */
	dm_cblock_t cblock;		/* cache-device block backing oblock */
	bool dirty:1;			/* cache copy differs from origin */
	bool pending:1;			/* cleared together with 'dirty'; set elsewhere — not written in this file */
};
/* Simple chained hash table mapping oblock -> wb_cache_entry. */
struct hash {
	struct hlist_head *table;	/* vzalloc'd array of nr_buckets heads */
	dm_block_t hash_bits;		/* log2(nr_buckets), fed to hash_64() */
	unsigned nr_buckets;		/* always a power of two (see alloc_hash) */
};
/*
 * Policy instance state.  'policy' must stay first so to_policy() can
 * container_of() back from the embedded dm_cache_policy.  'lock'
 * guards all the lists, the hash and nr_cblocks_allocated.
 */
struct policy {
	struct dm_cache_policy policy;
	spinlock_t lock;

	struct list_head free;		/* unallocated entries */
	struct list_head clean;		/* mapped, in sync with origin */
	struct list_head clean_pending;	/* handed out for writeback */
	struct list_head dirty;		/* mapped, awaiting writeback */

	/*
	 * We know exactly how many cblocks will be needed,
	 * so we can allocate them up front.
	 */
	dm_cblock_t cache_size, nr_cblocks_allocated;
	struct wb_cache_entry *cblocks;	/* one contiguous vzalloc'd array */
	struct hash chash;		/* oblock -> entry lookup */
};
56 /*----------------------------------------------------------------------------*/
59 * Low-level functions.
/* Smallest power of two >= max(n, min). */
static unsigned next_power(unsigned n, unsigned min)
{
	unsigned v = max(n, min);

	return roundup_pow_of_two(v);
}
/* Recover our private struct policy from the embedded dm_cache_policy. */
static struct policy *to_policy(struct dm_cache_policy *p)
{
	return container_of(p, struct policy, policy);
}
/*
 * Detach and return the first element of q.
 * Caller must ensure q is non-empty and hold p->lock.
 */
static struct list_head *list_pop(struct list_head *q)
{
	struct list_head *r = q->next;

	list_del(r);

	return r;
}
80 /*----------------------------------------------------------------------------*/
82 /* Allocate/free various resources. */
/*
 * Size the hash at roughly one bucket per 16 entries (minimum 16
 * buckets, rounded up to a power of two so hash_64() can use
 * hash_bits).  Returns 0 or -ENOMEM.
 */
static int alloc_hash(struct hash *hash, unsigned elts)
{
	hash->nr_buckets = next_power(elts >> 4, 16);
	hash->hash_bits = ffs(hash->nr_buckets) - 1;	/* log2 of a power of two */
	hash->table = vzalloc(sizeof(*hash->table) * hash->nr_buckets);

	return hash->table ? 0 : -ENOMEM;
}
/* Release the bucket array allocated by alloc_hash(). */
static void free_hash(struct hash *hash)
{
	vfree(hash->table);
}
97 static int alloc_cache_blocks_with_hash(struct policy *p, dm_cblock_t cache_size)
99 int r = -ENOMEM;
101 p->cblocks = vzalloc(sizeof(*p->cblocks) * from_cblock(cache_size));
102 if (p->cblocks) {
103 unsigned u = from_cblock(cache_size);
105 while (u--)
106 list_add(&p->cblocks[u].list, &p->free);
108 p->nr_cblocks_allocated = 0;
110 /* Cache entries hash. */
111 r = alloc_hash(&p->chash, from_cblock(cache_size));
112 if (r)
113 vfree(p->cblocks);
116 return r;
/* Tear down everything alloc_cache_blocks_with_hash() set up. */
static void free_cache_blocks_and_hash(struct policy *p)
{
	free_hash(&p->chash);
	vfree(p->cblocks);
}
/*
 * Take an entry off the preallocated free list.  Never fails: the
 * BUG_ON guarantees the free list still has entries (we allocated
 * exactly cache_size of them up front).  Caller holds p->lock.
 */
static struct wb_cache_entry *alloc_cache_entry(struct policy *p)
{
	struct wb_cache_entry *e;

	BUG_ON(from_cblock(p->nr_cblocks_allocated) >= from_cblock(p->cache_size));

	e = list_entry(list_pop(&p->free), struct wb_cache_entry, list);
	p->nr_cblocks_allocated = to_cblock(from_cblock(p->nr_cblocks_allocated) + 1);

	return e;
}
137 /*----------------------------------------------------------------------------*/
139 /* Hash functions (lookup, insert, remove). */
/*
 * Find the entry mapping oblock, or NULL if it isn't cached.
 * Caller holds p->lock.
 */
static struct wb_cache_entry *lookup_cache_entry(struct policy *p, dm_oblock_t oblock)
{
	struct hash *hash = &p->chash;
	unsigned h = hash_64(from_oblock(oblock), hash->hash_bits);
	struct wb_cache_entry *cur;
	struct hlist_head *bucket = &hash->table[h];

	hlist_for_each_entry(cur, bucket, hlist) {
		if (cur->oblock == oblock) {
			/* Move to the front of the bucket for faster repeat lookups. */
			hlist_del(&cur->hlist);
			hlist_add_head(&cur->hlist, bucket);
			return cur;
		}
	}

	return NULL;
}
/* Hash e by its origin block.  Caller holds p->lock. */
static void insert_cache_hash_entry(struct policy *p, struct wb_cache_entry *e)
{
	unsigned h = hash_64(from_oblock(e->oblock), p->chash.hash_bits);

	hlist_add_head(&e->hlist, &p->chash.table[h]);
}
/* Unhash e.  Caller holds p->lock. */
static void remove_cache_hash_entry(struct wb_cache_entry *e)
{
	hlist_del(&e->hlist);
}
/* Public interface (see dm-cache-policy.h). */

/*
 * Map a bio: report HIT if oblock is already cached, MISS otherwise.
 * This policy never promotes/demotes, so the only possible ops are
 * POLICY_HIT and POLICY_MISS.  If we may not block and the lock is
 * contended, return -EWOULDBLOCK so the caller can retry.
 */
static int wb_map(struct dm_cache_policy *pe, dm_oblock_t oblock,
		  bool can_block, bool can_migrate, bool discarded_oblock,
		  struct bio *bio, struct policy_result *result)
{
	struct policy *p = to_policy(pe);
	struct wb_cache_entry *e;
	unsigned long flags;

	result->op = POLICY_MISS;

	if (can_block)
		spin_lock_irqsave(&p->lock, flags);

	else if (!spin_trylock_irqsave(&p->lock, flags))
		return -EWOULDBLOCK;

	e = lookup_cache_entry(p, oblock);
	if (e) {
		result->op = POLICY_HIT;
		result->cblock = e->cblock;
	}

	spin_unlock_irqrestore(&p->lock, flags);

	return 0;
}
/*
 * Non-blocking lookup of oblock's cache block.  Returns 0 and fills
 * *cblock on a hit, -ENOENT on a miss, -EWOULDBLOCK if the lock is
 * contended (this path never sleeps).
 */
static int wb_lookup(struct dm_cache_policy *pe, dm_oblock_t oblock, dm_cblock_t *cblock)
{
	int r;
	struct policy *p = to_policy(pe);
	struct wb_cache_entry *e;
	unsigned long flags;

	if (!spin_trylock_irqsave(&p->lock, flags))
		return -EWOULDBLOCK;

	e = lookup_cache_entry(p, oblock);
	if (e) {
		*cblock = e->cblock;
		r = 0;

	} else
		r = -ENOENT;

	spin_unlock_irqrestore(&p->lock, flags);

	return r;
}
/*
 * Move oblock's entry between the dirty and clean queues, keeping the
 * dirty/pending flags in step.  The entry must exist (BUG otherwise).
 * Caller holds p->lock.
 */
static void __set_clear_dirty(struct dm_cache_policy *pe, dm_oblock_t oblock, bool set)
{
	struct policy *p = to_policy(pe);
	struct wb_cache_entry *e;

	e = lookup_cache_entry(p, oblock);
	BUG_ON(!e);

	if (set) {
		if (!e->dirty) {
			e->dirty = true;
			list_move(&e->list, &p->dirty);
		}

	} else {
		if (e->dirty) {
			/* Writeback completed (or block overwritten): no longer pending. */
			e->pending = false;
			e->dirty = false;
			list_move(&e->list, &p->clean);
		}
	}
}
/* Mark oblock dirty (queue it for writeback), taking the policy lock. */
static void wb_set_dirty(struct dm_cache_policy *pe, dm_oblock_t oblock)
{
	struct policy *p = to_policy(pe);
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);
	__set_clear_dirty(pe, oblock, true);
	spin_unlock_irqrestore(&p->lock, flags);
}
/* Mark oblock clean (writeback done), taking the policy lock. */
static void wb_clear_dirty(struct dm_cache_policy *pe, dm_oblock_t oblock)
{
	struct policy *p = to_policy(pe);
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);
	__set_clear_dirty(pe, oblock, false);
	spin_unlock_irqrestore(&p->lock, flags);
}
266 static void add_cache_entry(struct policy *p, struct wb_cache_entry *e)
268 insert_cache_hash_entry(p, e);
269 if (e->dirty)
270 list_add(&e->list, &p->dirty);
271 else
272 list_add(&e->list, &p->clean);
/*
 * Recreate one oblock->cblock mapping from the on-disk metadata during
 * resume.  hint/hint_valid are ignored (this policy stores no hints).
 * Returns 0 on success, -ENOMEM if no entry could be allocated
 * (alloc_cache_entry currently BUG()s before returning NULL, so the
 * error path looks defensive — TODO confirm).
 */
static int wb_load_mapping(struct dm_cache_policy *pe,
			   dm_oblock_t oblock, dm_cblock_t cblock,
			   uint32_t hint, bool hint_valid)
{
	int r;
	struct policy *p = to_policy(pe);
	struct wb_cache_entry *e = alloc_cache_entry(p);

	if (e) {
		e->cblock = cblock;
		e->oblock = oblock;
		e->dirty = false; /* blocks default to clean */
		add_cache_entry(p, e);
		r = 0;

	} else
		r = -ENOMEM;

	return r;
}
/* Free everything wb_create() allocated. */
static void wb_destroy(struct dm_cache_policy *pe)
{
	struct policy *p = to_policy(pe);

	free_cache_blocks_and_hash(p);
	kfree(p);
}
/*
 * Unhash and unlink oblock's entry (which must exist) and return it so
 * the caller can recycle or refile it.  Caller holds p->lock.
 */
static struct wb_cache_entry *__wb_force_remove_mapping(struct policy *p, dm_oblock_t oblock)
{
	struct wb_cache_entry *r = lookup_cache_entry(p, oblock);

	BUG_ON(!r);

	remove_cache_hash_entry(r);
	list_del(&r->list);

	return r;
}
/* Drop oblock's mapping entirely, returning its entry to the free list. */
static void wb_remove_mapping(struct dm_cache_policy *pe, dm_oblock_t oblock)
{
	struct policy *p = to_policy(pe);
	struct wb_cache_entry *e;
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);
	e = __wb_force_remove_mapping(p, oblock);
	list_add_tail(&e->list, &p->free);
	/* Allocation count must never underflow. */
	BUG_ON(!from_cblock(p->nr_cblocks_allocated));
	p->nr_cblocks_allocated = to_cblock(from_cblock(p->nr_cblocks_allocated) - 1);
	spin_unlock_irqrestore(&p->lock, flags);
}
/*
 * Re-point an existing mapping: the entry for current_oblock is
 * remapped to oblock, keeping its cblock and dirty state.
 */
static void wb_force_mapping(struct dm_cache_policy *pe,
			     dm_oblock_t current_oblock, dm_oblock_t oblock)
{
	struct policy *p = to_policy(pe);
	struct wb_cache_entry *e;
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);
	e = __wb_force_remove_mapping(p, current_oblock);
	e->oblock = oblock;
	add_cache_entry(p, e);
	spin_unlock_irqrestore(&p->lock, flags);
}
344 static struct wb_cache_entry *get_next_dirty_entry(struct policy *p)
346 struct list_head *l;
347 struct wb_cache_entry *r;
349 if (list_empty(&p->dirty))
350 return NULL;
352 l = list_pop(&p->dirty);
353 r = container_of(l, struct wb_cache_entry, list);
354 list_add(l, &p->clean_pending);
356 return r;
/*
 * Hand the caller one dirty block to write back.  Returns 0 and fills
 * *oblock/*cblock when work is available, -ENOENT when nothing is
 * dirty.  The chosen entry moves to clean_pending (see
 * get_next_dirty_entry).
 */
static int wb_writeback_work(struct dm_cache_policy *pe,
			     dm_oblock_t *oblock,
			     dm_cblock_t *cblock)
{
	int r = -ENOENT;
	struct policy *p = to_policy(pe);
	struct wb_cache_entry *e;
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);

	e = get_next_dirty_entry(p);
	if (e) {
		*oblock = e->oblock;
		*cblock = e->cblock;
		r = 0;
	}

	spin_unlock_irqrestore(&p->lock, flags);

	return r;
}
382 static dm_cblock_t wb_residency(struct dm_cache_policy *pe)
384 return to_policy(pe)->nr_cblocks_allocated;
387 /* Init the policy plugin interface function pointers. */
/*
 * Init the policy plugin interface function pointers.  walk_mappings
 * and tick are deliberately NULL: this policy keeps no hints and no
 * time-based state.
 */
static void init_policy_functions(struct policy *p)
{
	p->policy.destroy = wb_destroy;
	p->policy.map = wb_map;
	p->policy.lookup = wb_lookup;
	p->policy.set_dirty = wb_set_dirty;
	p->policy.clear_dirty = wb_clear_dirty;
	p->policy.load_mapping = wb_load_mapping;
	p->policy.walk_mappings = NULL;
	p->policy.remove_mapping = wb_remove_mapping;
	p->policy.writeback_work = wb_writeback_work;
	p->policy.force_mapping = wb_force_mapping;
	p->policy.residency = wb_residency;
	p->policy.tick = NULL;
}
/*
 * Policy constructor (dm_cache_policy_type.create).  origin_size and
 * cache_block_size are unused by this policy.  Returns the embedded
 * dm_cache_policy, or NULL on allocation failure.
 */
static struct dm_cache_policy *wb_create(dm_cblock_t cache_size,
					 sector_t origin_size,
					 sector_t cache_block_size)
{
	int r;
	struct policy *p = kzalloc(sizeof(*p), GFP_KERNEL);

	if (!p)
		return NULL;

	init_policy_functions(p);
	INIT_LIST_HEAD(&p->free);
	INIT_LIST_HEAD(&p->clean);
	INIT_LIST_HEAD(&p->clean_pending);
	INIT_LIST_HEAD(&p->dirty);

	p->cache_size = cache_size;
	spin_lock_init(&p->lock);

	/* Allocate cache entry structs and add them to free list. */
	r = alloc_cache_blocks_with_hash(p, cache_size);
	if (!r)
		return &p->policy;

	kfree(p);

	return NULL;
}
432 /*----------------------------------------------------------------------------*/
/* Registration record: "cleaner" policy, no on-disk hints. */
static struct dm_cache_policy_type wb_policy_type = {
	.name = "cleaner",
	.version = {1, 0, 0},
	.hint_size = 0,
	.owner = THIS_MODULE,
	.create = wb_create
};
/* Module init: register the policy with the dm-cache core. */
static int __init wb_init(void)
{
	int r = dm_cache_policy_register(&wb_policy_type);

	if (r < 0)
		DMERR("register failed %d", r);
	else
		DMINFO("version %u.%u.%u loaded",
		       wb_policy_type.version[0],
		       wb_policy_type.version[1],
		       wb_policy_type.version[2]);

	return r;
}
/* Module exit: unregister the policy. */
static void __exit wb_exit(void)
{
	dm_cache_policy_unregister(&wb_policy_type);
}
/* Standard module boilerplate. */
module_init(wb_init);
module_exit(wb_exit);

MODULE_AUTHOR("Heinz Mauelshagen <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("cleaner cache policy");