/*
 * Copyright (C) 2012 Red Hat. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-cache-policy.h"

#include <linux/hash.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#define DM_MSG_PREFIX "cache-policy-mq"

static struct kmem_cache *mq_entry_cache;

/*----------------------------------------------------------------*/
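/*
 * Round n up to the next power of two, but never below min.
 * e.g. next_power(200, 16) == 256, next_power(3, 16) == 16.
 */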
static unsigned next_power(unsigned n, unsigned min)
{
        return roundup_pow_of_two(max(n, min));
}

/*----------------------------------------------------------------*/

static unsigned long *alloc_bitset(unsigned nr_entries)
        size_t s = sizeof(unsigned long) * dm_div_up(nr_entries, BITS_PER_LONG);

static void free_bitset(unsigned long *bits)

/*----------------------------------------------------------------*/
/*
 * Large, sequential ios are probably better left on the origin device since
 * spindles tend to have good bandwidth.
 *
 * The io_tracker tries to spot when the io is in one of these sequential
 * modes.
 *
 * Two thresholds to switch between random and sequential io mode default as
 * follows, and can be adjusted via the constructor and message interfaces.
 */
#define RANDOM_THRESHOLD_DEFAULT 4
#define SEQUENTIAL_THRESHOLD_DEFAULT 512
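/*
 * With the defaults above: roughly 512 contiguous bios in a row put the
 * tracker into sequential mode, and 4 non-contiguous bios in a row put it
 * back into random mode.
 */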
struct io_tracker {
        enum io_pattern pattern;

        unsigned nr_seq_samples;
        unsigned nr_rand_samples;
        unsigned thresholds[2];

        dm_oblock_t last_end_oblock;
};
static void iot_init(struct io_tracker *t,
                     int sequential_threshold, int random_threshold)
{
        t->pattern = PATTERN_RANDOM;
        t->nr_seq_samples = 0;
        t->nr_rand_samples = 0;
        t->last_end_oblock = 0;
        t->thresholds[PATTERN_RANDOM] = random_threshold;
        t->thresholds[PATTERN_SEQUENTIAL] = sequential_threshold;
}
static enum io_pattern iot_pattern(struct io_tracker *t)
{
        return t->pattern;
}

static void iot_update_stats(struct io_tracker *t, struct bio *bio)
{
        if (bio->bi_sector == from_oblock(t->last_end_oblock) + 1)
                t->nr_seq_samples++;
        else {
                /*
                 * Just one non-sequential IO is enough to reset the
                 * counters.
                 */
                if (t->nr_seq_samples) {
                        t->nr_seq_samples = 0;
                        t->nr_rand_samples = 0;
                }

                t->nr_rand_samples++;
        }

        t->last_end_oblock = to_oblock(bio->bi_sector + bio_sectors(bio) - 1);
}
static void iot_check_for_pattern_switch(struct io_tracker *t)
{
        switch (t->pattern) {
        case PATTERN_SEQUENTIAL:
                if (t->nr_rand_samples >= t->thresholds[PATTERN_RANDOM]) {
                        t->pattern = PATTERN_RANDOM;
                        t->nr_seq_samples = t->nr_rand_samples = 0;
                }
                break;

        case PATTERN_RANDOM:
                if (t->nr_seq_samples >= t->thresholds[PATTERN_SEQUENTIAL]) {
                        t->pattern = PATTERN_SEQUENTIAL;
                        t->nr_seq_samples = t->nr_rand_samples = 0;
                }
                break;
        }
}

static void iot_examine_bio(struct io_tracker *t, struct bio *bio)
{
        iot_update_stats(t, bio);
        iot_check_for_pattern_switch(t);
}
/*----------------------------------------------------------------*/

/*
 * This queue is divided up into different levels, allowing us to push
 * entries to the back of any of the levels. Think of it as a partially
 * sorted queue.
 */
#define NR_QUEUE_LEVELS 16u

struct queue {
        struct list_head qs[NR_QUEUE_LEVELS];
};
static void queue_init(struct queue *q)
{
        unsigned i;

        for (i = 0; i < NR_QUEUE_LEVELS; i++)
                INIT_LIST_HEAD(q->qs + i);
}
/*
 * Insert an entry to the back of the given level.
 */
static void queue_push(struct queue *q, unsigned level, struct list_head *elt)
{
        list_add_tail(elt, q->qs + level);
}

static void queue_remove(struct list_head *elt)
{
        list_del(elt);
}
/*
 * Shifts all regions down one level. This has no effect on the order of
 * the queue.
 */
static void queue_shift_down(struct queue *q)
{
        unsigned level;

        for (level = 1; level < NR_QUEUE_LEVELS; level++)
                list_splice_init(q->qs + level, q->qs + level - 1);
}
/*
 * Gives us the oldest entry of the lowest populated level. If the first
 * level is emptied then we shift down one level.
 */
static struct list_head *queue_pop(struct queue *q)
{
        unsigned level;
        struct list_head *r;

        for (level = 0; level < NR_QUEUE_LEVELS; level++)
                if (!list_empty(q->qs + level)) {
                        r = q->qs[level].next;
                        list_del(r);

                        /* have we just emptied the bottom level? */
                        if (level == 0 && list_empty(q->qs))
                                queue_shift_down(q);

                        return r;
                }

        return NULL;
}
static struct list_head *list_pop(struct list_head *lh)
        struct list_head *r = lh->next;

/*----------------------------------------------------------------*/
/*
 * Describes a cache entry. Used in both the cache and the pre_cache.
 */
struct entry {
        struct hlist_node hlist;
        struct list_head list;
        dm_cblock_t cblock;     /* valid iff in_cache */

        /*
         * FIXME: pack these better
         */

struct mq_policy {
        struct dm_cache_policy policy;

        /* protects everything */
        struct mutex lock;
        dm_cblock_t cache_size;
        struct io_tracker tracker;

        /*
         * We maintain two queues of entries. The cache proper contains
         * the currently active mappings, whereas the pre_cache tracks
         * blocks that are being hit frequently and are potential candidates
         * for promotion to the cache.
         */
        struct queue pre_cache;

        /*
         * Keeps track of time, incremented by the core. We use this to
         * avoid attributing multiple hits within the same tick.
         *
         * Access to tick_protected should be done with the spin lock held.
         * It's copied to tick at the start of the map function (within the
         * mutex).
         */
        spinlock_t tick_lock;
        unsigned tick_protected;

        /*
         * A count of the number of times the map function has been called
         * and found an entry in the pre_cache or cache. Currently used to
         * calculate the generation.
         */

        /*
         * A generation is a longish period that is used to trigger some
         * book keeping effects. eg, decrementing hit counts on entries.
         * This is needed to allow the cache to evolve as io patterns
         * change.
         */
        unsigned generation_period; /* in lookups (will probably change) */

        /*
         * Entries in the pre_cache whose hit count passes the promotion
         * threshold move to the cache proper. Working out the correct
         * value for the promotion_threshold is crucial to this policy.
         */
        unsigned promote_threshold;

        /*
         * We need cache_size entries for the cache, and choose to have
         * cache_size entries for the pre_cache too. One motivation for
         * using the same size is to make the hit counts directly
         * comparable between pre_cache and cache.
         */
        unsigned nr_entries_allocated;
        struct list_head free;

        /*
         * Cache blocks may be unallocated. We store this info in a
         * bitset.
         */
        unsigned long *allocation_bitset;
        unsigned nr_cblocks_allocated;
        unsigned find_free_nr_words;
        unsigned find_free_last_word;

        /*
         * The hash table allows us to quickly find an entry by origin
         * block. Both pre_cache and cache entries are in here.
         */
        dm_block_t hash_bits;
        struct hlist_head *table;
};

/*----------------------------------------------------------------*/
/* Free/alloc mq cache entry structures. */
static void takeout_queue(struct list_head *lh, struct queue *q)
{
        unsigned level;

        for (level = 0; level < NR_QUEUE_LEVELS; level++)
                list_splice(q->qs + level, lh);
}
static void free_entries(struct mq_policy *mq)
{
        struct entry *e, *tmp;

        takeout_queue(&mq->free, &mq->pre_cache);
        takeout_queue(&mq->free, &mq->cache);

        list_for_each_entry_safe(e, tmp, &mq->free, list)
                kmem_cache_free(mq_entry_cache, e);
}
static int alloc_entries(struct mq_policy *mq, unsigned elts)
        unsigned u = mq->nr_entries;

        INIT_LIST_HEAD(&mq->free);
        mq->nr_entries_allocated = 0;

        struct entry *e = kmem_cache_zalloc(mq_entry_cache, GFP_KERNEL);

        list_add(&e->list, &mq->free);

/*----------------------------------------------------------------*/
/*
 * Simple hash table implementation. Should replace with the standard hash
 * table that's making its way upstream.
 */
static void hash_insert(struct mq_policy *mq, struct entry *e)
{
        unsigned h = hash_64(from_oblock(e->oblock), mq->hash_bits);

        hlist_add_head(&e->hlist, mq->table + h);
}
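/*
 * Looks up an entry by origin block. A found entry is re-inserted at the
 * head of its bucket (move-to-front), so frequently looked-up entries are
 * found quickly.
 */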
static struct entry *hash_lookup(struct mq_policy *mq, dm_oblock_t oblock)
{
        unsigned h = hash_64(from_oblock(oblock), mq->hash_bits);
        struct hlist_head *bucket = mq->table + h;
        struct entry *e;

        hlist_for_each_entry(e, bucket, hlist)
                if (e->oblock == oblock) {
                        hlist_del(&e->hlist);
                        hlist_add_head(&e->hlist, bucket);
                        return e;
                }

        return NULL;
}

static void hash_remove(struct entry *e)
{
        hlist_del(&e->hlist);
}
/*----------------------------------------------------------------*/

/*
 * Allocates a new entry structure. The memory is allocated in one lump,
 * so we just hand it out here. Returns NULL if all entries have
 * already been allocated. Cannot fail otherwise.
 */
static struct entry *alloc_entry(struct mq_policy *mq)
        struct entry *e;

        if (mq->nr_entries_allocated >= mq->nr_entries) {
                BUG_ON(!list_empty(&mq->free));
                return NULL;
        }

        e = list_entry(list_pop(&mq->free), struct entry, list);
        INIT_LIST_HEAD(&e->list);
        INIT_HLIST_NODE(&e->hlist);

        mq->nr_entries_allocated++;
/*----------------------------------------------------------------*/

/*
 * Mark cache blocks allocated or not in the bitset.
 */
static void alloc_cblock(struct mq_policy *mq, dm_cblock_t cblock)
{
        BUG_ON(from_cblock(cblock) > from_cblock(mq->cache_size));
        BUG_ON(test_bit(from_cblock(cblock), mq->allocation_bitset));

        set_bit(from_cblock(cblock), mq->allocation_bitset);
        mq->nr_cblocks_allocated++;
}

static void free_cblock(struct mq_policy *mq, dm_cblock_t cblock)
{
        BUG_ON(from_cblock(cblock) > from_cblock(mq->cache_size));
        BUG_ON(!test_bit(from_cblock(cblock), mq->allocation_bitset));

        clear_bit(from_cblock(cblock), mq->allocation_bitset);
        mq->nr_cblocks_allocated--;
}

static bool any_free_cblocks(struct mq_policy *mq)
{
        return mq->nr_cblocks_allocated < from_cblock(mq->cache_size);
}
/*
 * Fills result out with a cache block that isn't in use, or returns
 * -ENOSPC. This does _not_ mark the cblock as allocated, the caller is
 * responsible for that.
 */
static int __find_free_cblock(struct mq_policy *mq, unsigned begin, unsigned end,
                              dm_cblock_t *result, unsigned *last_word)
        for (w = begin; w < end; w++) {
                /*
                 * ffz is undefined if no zero exists
                 */
                if (mq->allocation_bitset[w] != ~0UL) {
                        *result = to_cblock((w * BITS_PER_LONG) + ffz(mq->allocation_bitset[w]));
                        if (from_cblock(*result) < from_cblock(mq->cache_size))
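/*
 * Start searching from where the last allocation left off, and wrap around
 * to the beginning of the bitset if nothing free is found there.
 */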
static int find_free_cblock(struct mq_policy *mq, dm_cblock_t *result)
{
        int r;

        if (!any_free_cblocks(mq))
                return -ENOSPC;

        r = __find_free_cblock(mq, mq->find_free_last_word, mq->find_free_nr_words,
                               result, &mq->find_free_last_word);
        if (r == -ENOSPC && mq->find_free_last_word)
                r = __find_free_cblock(mq, 0, mq->find_free_last_word,
                                       result, &mq->find_free_last_word);

        return r;
}

/*----------------------------------------------------------------*/
/*
 * Now we get to the meat of the policy. This section deals with deciding
 * when to add entries to the pre_cache and cache, and move between them.
 */

/*
 * The queue level is based on the log2 of the hit count.
 */
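/*
 * e.g. a hit count of 1 maps to level 0, 2-3 to level 1, 4-7 to level 2,
 * and anything from 32768 up is capped at the top level (15).
 */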
static unsigned queue_level(struct entry *e)
{
        return min((unsigned) ilog2(e->hit_count), NR_QUEUE_LEVELS - 1u);
}
/*
 * Inserts the entry into the pre_cache or the cache. Ensures the cache
 * block is marked as allocated if necc. Inserts into the hash table. Sets the
 * tick which records when the entry was last moved about.
 */
static void push(struct mq_policy *mq, struct entry *e)
{
        e->tick = mq->tick;
        hash_insert(mq, e);

        if (e->in_cache) {
                alloc_cblock(mq, e->cblock);
                queue_push(&mq->cache, queue_level(e), &e->list);
        } else
                queue_push(&mq->pre_cache, queue_level(e), &e->list);
}
/*
 * Removes an entry from pre_cache or cache. Removes from the hash table.
 * Frees off the cache block if necc.
 */
static void del(struct mq_policy *mq, struct entry *e)
{
        queue_remove(&e->list);
        hash_remove(e);
        if (e->in_cache)
                free_cblock(mq, e->cblock);
}
/*
 * Like del, except it removes the first entry in the queue (ie. the least
 * recently used).
 */
static struct entry *pop(struct mq_policy *mq, struct queue *q)
        struct entry *e = container_of(queue_pop(q), struct entry, list);

        if (e->in_cache)
                free_cblock(mq, e->cblock);
/*
 * Has this entry already been updated?
 */
static bool updated_this_tick(struct mq_policy *mq, struct entry *e)
{
        return mq->tick == e->tick;
}
/*
 * The promotion threshold is adjusted every generation, as are the counts.
 *
 * At the moment the threshold is taken by averaging the hit counts of some
 * of the entries in the cache (the first 20 entries of the first level).
 *
 * We can be much cleverer than this though. For example, each promotion
 * could bump up the threshold helping to prevent churn. Much more to do
 * here.
 */
#define MAX_TO_AVERAGE 20
static void check_generation(struct mq_policy *mq)
{
        unsigned total = 0, nr = 0, count = 0, level;
        struct list_head *head;
        struct entry *e;

        if ((mq->hit_count >= mq->generation_period) &&
            (mq->nr_cblocks_allocated == from_cblock(mq->cache_size))) {

                mq->hit_count = 0;
                mq->generation++;

                for (level = 0; level < NR_QUEUE_LEVELS && count < MAX_TO_AVERAGE; level++) {
                        head = mq->cache.qs + level;
                        list_for_each_entry(e, head, list) {
                                nr++;
                                total += e->hit_count;

                                if (++count >= MAX_TO_AVERAGE)
                                        break;
                        }
                }

                mq->promote_threshold = nr ? total / nr : 1;
                if (mq->promote_threshold * nr < total)
                        mq->promote_threshold++;
        }
}
/*
 * Whenever we use an entry we bump up its hit counter, and push it to the
 * back of its current level.
 */
static void requeue_and_update_tick(struct mq_policy *mq, struct entry *e)
        if (updated_this_tick(mq, e))
                return;

        e->hit_count++;
        mq->hit_count++;
        check_generation(mq);

        /* generation adjustment, to stop the counts increasing forever. */
        /* e->hit_count -= min(e->hit_count - 1, mq->generation - e->generation); */
        e->generation = mq->generation;
/*
 * Demote the least recently used entry from the cache to the pre_cache.
 * Returns the new cache entry to use, and the old origin block it was
 * mapped to.
 *
 * We drop the hit count on the demoted entry back to 1 to stop it bouncing
 * straight back into the cache if it's subsequently hit. There are
 * various options here, and more experimentation would be good:
 *
 * - just forget about the demoted entry completely (ie. don't insert it
 *   back into the pre_cache)
 * - divide the hit count rather than setting to some hard coded value.
 * - set the hit count to a hard coded value other than 1, eg, is it better
 *   if it goes in at level 2?
 */
static dm_cblock_t demote_cblock(struct mq_policy *mq, dm_oblock_t *oblock)
        dm_cblock_t result;
        struct entry *demoted = pop(mq, &mq->cache);

        result = demoted->cblock;
        *oblock = demoted->oblock;
        demoted->in_cache = false;
        demoted->hit_count = 1;
/*
 * We modify the basic promotion_threshold depending on the specific io.
 *
 * If the origin block has been discarded then there's no cost to copy it
 * to the cache.
 *
 * We bias towards reads, since they can be demoted at no cost if they
 * haven't been dirtied.
 */
#define DISCARDED_PROMOTE_THRESHOLD 1
#define READ_PROMOTE_THRESHOLD 4
#define WRITE_PROMOTE_THRESHOLD 8
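/*
 * e.g. with promote_threshold at 10, a read needs a hit count of at least
 * 14 to be promoted and a write at least 18, while a write to a discarded
 * block needs only a single hit as long as free cache blocks remain.
 */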
static unsigned adjusted_promote_threshold(struct mq_policy *mq,
                                           bool discarded_oblock, int data_dir)
{
        if (discarded_oblock && any_free_cblocks(mq) && data_dir == WRITE)
                /*
                 * We don't need to do any copying at all, so give this a
                 * very low threshold. In practice this only triggers
                 * during initial population after a format.
                 */
                return DISCARDED_PROMOTE_THRESHOLD;

        return data_dir == READ ?
                (mq->promote_threshold + READ_PROMOTE_THRESHOLD) :
                (mq->promote_threshold + WRITE_PROMOTE_THRESHOLD);
}

static bool should_promote(struct mq_policy *mq, struct entry *e,
                           bool discarded_oblock, int data_dir)
{
        return e->hit_count >=
                adjusted_promote_threshold(mq, discarded_oblock, data_dir);
}
static int cache_entry_found(struct mq_policy *mq,
                             struct entry *e,
                             struct policy_result *result)
        requeue_and_update_tick(mq, e);

        result->op = POLICY_HIT;
        result->cblock = e->cblock;
/*
 * Moves an entry from the pre_cache to the cache. The main work is
 * finding which cache block to use.
 */
static int pre_cache_to_cache(struct mq_policy *mq, struct entry *e,
                              struct policy_result *result)
        dm_cblock_t cblock;

        if (find_free_cblock(mq, &cblock) == -ENOSPC) {
                result->op = POLICY_REPLACE;
                cblock = demote_cblock(mq, &result->old_oblock);
        } else
                result->op = POLICY_NEW;

        result->cblock = e->cblock = cblock;
static int pre_cache_entry_found(struct mq_policy *mq, struct entry *e,
                                 bool can_migrate, bool discarded_oblock,
                                 int data_dir, struct policy_result *result)
        int r = 0;
        bool updated = updated_this_tick(mq, e);

        requeue_and_update_tick(mq, e);

        if ((!discarded_oblock && updated) ||
            !should_promote(mq, e, discarded_oblock, data_dir))
                result->op = POLICY_MISS;
        else if (!can_migrate)
                r = -EWOULDBLOCK;
        else
                r = pre_cache_to_cache(mq, e, result);

        return r;
static void insert_in_pre_cache(struct mq_policy *mq,
                                dm_oblock_t oblock)
        struct entry *e = alloc_entry(mq);

        if (!e)
                /*
                 * There's no spare entry structure, so we grab the least
                 * used one from the pre_cache.
                 */
                e = pop(mq, &mq->pre_cache);

        if (!e) {
                DMWARN("couldn't pop from pre cache");
                return;
        }

        e->generation = mq->generation;
static void insert_in_cache(struct mq_policy *mq, dm_oblock_t oblock,
                            struct policy_result *result)
        struct entry *e;
        dm_cblock_t cblock;

        if (find_free_cblock(mq, &cblock) == -ENOSPC) {
                result->op = POLICY_MISS;
                insert_in_pre_cache(mq, oblock);
                return;
        }

        result->op = POLICY_MISS;

        e->generation = mq->generation;

        result->op = POLICY_NEW;
        result->cblock = e->cblock;
static int no_entry_found(struct mq_policy *mq, dm_oblock_t oblock,
                          bool can_migrate, bool discarded_oblock,
                          int data_dir, struct policy_result *result)
        if (adjusted_promote_threshold(mq, discarded_oblock, data_dir) == 1) {
                insert_in_cache(mq, oblock, result);
        } else {
                insert_in_pre_cache(mq, oblock);
                result->op = POLICY_MISS;
        }
/*
 * Looks the oblock up in the hash table, then decides whether to put in
 * pre_cache, or cache etc.
 */
static int map(struct mq_policy *mq, dm_oblock_t oblock,
               bool can_migrate, bool discarded_oblock,
               int data_dir, struct policy_result *result)
{
        int r = 0;
        struct entry *e = hash_lookup(mq, oblock);

        if (e && e->in_cache)
                r = cache_entry_found(mq, e, result);
        else if (iot_pattern(&mq->tracker) == PATTERN_SEQUENTIAL)
                result->op = POLICY_MISS;
        else if (e)
                r = pre_cache_entry_found(mq, e, can_migrate, discarded_oblock,
                                          data_dir, result);
        else
                r = no_entry_found(mq, oblock, can_migrate, discarded_oblock,
                                   data_dir, result);

        if (r == -EWOULDBLOCK)
                result->op = POLICY_MISS;

        return r;
}

/*----------------------------------------------------------------*/
/*
 * Public interface, via the policy struct. See dm-cache-policy.h for a
 * description of these.
 */

static struct mq_policy *to_mq_policy(struct dm_cache_policy *p)
{
        return container_of(p, struct mq_policy, policy);
}
static void mq_destroy(struct dm_cache_policy *p)
        struct mq_policy *mq = to_mq_policy(p);

        free_bitset(mq->allocation_bitset);
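/*
 * Fold the tick count that the core has been incrementing (tick_protected)
 * into the value the policy works against. Intended to be called from the
 * start of the map function with the mutex held (see the tick_protected
 * comment in struct mq_policy above).
 */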
static void copy_tick(struct mq_policy *mq)
{
        unsigned long flags;

        spin_lock_irqsave(&mq->tick_lock, flags);
        mq->tick = mq->tick_protected;
        spin_unlock_irqrestore(&mq->tick_lock, flags);
}
static int mq_map(struct dm_cache_policy *p, dm_oblock_t oblock,
                  bool can_block, bool can_migrate, bool discarded_oblock,
                  struct bio *bio, struct policy_result *result)
        int r;
        struct mq_policy *mq = to_mq_policy(p);

        result->op = POLICY_MISS;

        if (can_block)
                mutex_lock(&mq->lock);
        else if (!mutex_trylock(&mq->lock))
                return -EWOULDBLOCK;

        iot_examine_bio(&mq->tracker, bio);
        r = map(mq, oblock, can_migrate, discarded_oblock,
                bio_data_dir(bio), result);

        mutex_unlock(&mq->lock);

        return r;
static int mq_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock)
        struct mq_policy *mq = to_mq_policy(p);

        if (!mutex_trylock(&mq->lock))
                return -EWOULDBLOCK;

        e = hash_lookup(mq, oblock);
        if (e && e->in_cache) {
                *cblock = e->cblock;
        }

        mutex_unlock(&mq->lock);
static int mq_load_mapping(struct dm_cache_policy *p,
                           dm_oblock_t oblock, dm_cblock_t cblock,
                           uint32_t hint, bool hint_valid)
        struct mq_policy *mq = to_mq_policy(p);

        e->hit_count = hint_valid ? hint : 1;
        e->generation = mq->generation;
static int mq_walk_mappings(struct dm_cache_policy *p, policy_walk_fn fn,
                            void *context)
        struct mq_policy *mq = to_mq_policy(p);
        unsigned level;
        struct entry *e;

        mutex_lock(&mq->lock);

        for (level = 0; level < NR_QUEUE_LEVELS; level++)
                list_for_each_entry(e, &mq->cache.qs[level], list) {
                        r = fn(context, e->cblock, e->oblock, e->hit_count);
                }

        mutex_unlock(&mq->lock);
static void mq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock)
        struct mq_policy *mq = to_mq_policy(p);

        mutex_lock(&mq->lock);

        e = hash_lookup(mq, oblock);

        BUG_ON(!e || !e->in_cache);

        mutex_unlock(&mq->lock);
static void force_mapping(struct mq_policy *mq,
                          dm_oblock_t current_oblock, dm_oblock_t new_oblock)
        struct entry *e = hash_lookup(mq, current_oblock);

        BUG_ON(!e || !e->in_cache);

        e->oblock = new_oblock;
static void mq_force_mapping(struct dm_cache_policy *p,
                             dm_oblock_t current_oblock, dm_oblock_t new_oblock)
{
        struct mq_policy *mq = to_mq_policy(p);

        mutex_lock(&mq->lock);
        force_mapping(mq, current_oblock, new_oblock);
        mutex_unlock(&mq->lock);
}
static dm_cblock_t mq_residency(struct dm_cache_policy *p)
{
        struct mq_policy *mq = to_mq_policy(p);

        /* FIXME: lock mutex, not sure we can block here */
        return to_cblock(mq->nr_cblocks_allocated);
}
static void mq_tick(struct dm_cache_policy *p)
{
        struct mq_policy *mq = to_mq_policy(p);
        unsigned long flags;

        spin_lock_irqsave(&mq->tick_lock, flags);
        mq->tick_protected++;
        spin_unlock_irqrestore(&mq->tick_lock, flags);
}
static int mq_set_config_value(struct dm_cache_policy *p,
                               const char *key, const char *value)
        struct mq_policy *mq = to_mq_policy(p);
        enum io_pattern pattern;
        unsigned long tmp;

        if (!strcasecmp(key, "random_threshold"))
                pattern = PATTERN_RANDOM;
        else if (!strcasecmp(key, "sequential_threshold"))
                pattern = PATTERN_SEQUENTIAL;
        else
                return -EINVAL;

        if (kstrtoul(value, 10, &tmp))
                return -EINVAL;

        mq->tracker.thresholds[pattern] = tmp;
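/*
 * The random_threshold and sequential_threshold keys above are adjusted
 * from userspace via the cache target's message interface, e.g. something
 * like: dmsetup message <cache-dev> 0 sequential_threshold 1024
 * (illustrative only; the exact message syntax is defined by the dm-cache
 * target, not by this file).
 */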
static int mq_emit_config_values(struct dm_cache_policy *p, char *result, unsigned maxlen)
        struct mq_policy *mq = to_mq_policy(p);

        DMEMIT("4 random_threshold %u sequential_threshold %u",
               mq->tracker.thresholds[PATTERN_RANDOM],
               mq->tracker.thresholds[PATTERN_SEQUENTIAL]);

/* Init the policy plugin interface function pointers. */
static void init_policy_functions(struct mq_policy *mq)
{
        mq->policy.destroy = mq_destroy;
        mq->policy.map = mq_map;
        mq->policy.lookup = mq_lookup;
        mq->policy.load_mapping = mq_load_mapping;
        mq->policy.walk_mappings = mq_walk_mappings;
        mq->policy.remove_mapping = mq_remove_mapping;
        mq->policy.writeback_work = NULL;
        mq->policy.force_mapping = mq_force_mapping;
        mq->policy.residency = mq_residency;
        mq->policy.tick = mq_tick;
        mq->policy.emit_config_values = mq_emit_config_values;
        mq->policy.set_config_value = mq_set_config_value;
}
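/*
 * Note that mq_create below allocates twice cache_size entry structures,
 * so the pre_cache can hold as many entries as the cache itself (see the
 * comment in struct mq_policy above).
 */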
static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
                                         sector_t origin_size,
                                         sector_t cache_block_size)
        int r;
        struct mq_policy *mq = kzalloc(sizeof(*mq), GFP_KERNEL);

        init_policy_functions(mq);
        iot_init(&mq->tracker, SEQUENTIAL_THRESHOLD_DEFAULT, RANDOM_THRESHOLD_DEFAULT);

        mq->cache_size = cache_size;
        mq->tick_protected = 0;
        mq->promote_threshold = 0;
        mutex_init(&mq->lock);
        spin_lock_init(&mq->tick_lock);
        mq->find_free_nr_words = dm_div_up(from_cblock(mq->cache_size), BITS_PER_LONG);
        mq->find_free_last_word = 0;

        queue_init(&mq->pre_cache);
        queue_init(&mq->cache);
        mq->generation_period = max((unsigned) from_cblock(cache_size), 1024U);

        mq->nr_entries = 2 * from_cblock(cache_size);
        r = alloc_entries(mq, mq->nr_entries);
        if (r)
                goto bad_cache_alloc;

        mq->nr_entries_allocated = 0;
        mq->nr_cblocks_allocated = 0;
        mq->nr_buckets = next_power(from_cblock(cache_size) / 2, 16);
        mq->hash_bits = ffs(mq->nr_buckets) - 1;
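        /*
         * e.g. a 16384 block cache gets next_power(8192, 16) == 8192
         * buckets and 13 hash bits.
         */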
        mq->table = kzalloc(sizeof(*mq->table) * mq->nr_buckets, GFP_KERNEL);
        if (!mq->table)
                goto bad_alloc_table;

        mq->allocation_bitset = alloc_bitset(from_cblock(cache_size));
        if (!mq->allocation_bitset)
                goto bad_alloc_bitset;

/*----------------------------------------------------------------*/
static struct dm_cache_policy_type mq_policy_type = {
        .version = {1, 0, 0},
        .owner = THIS_MODULE,
};

static struct dm_cache_policy_type default_policy_type = {
        .version = {1, 0, 0},
        .owner = THIS_MODULE,
};
static int __init mq_init(void)
        int r;

        mq_entry_cache = kmem_cache_create("dm_mq_policy_cache_entry",
                                           sizeof(struct entry),
                                           __alignof__(struct entry),
                                           0, NULL);
        if (!mq_entry_cache)
                return -ENOMEM;

        r = dm_cache_policy_register(&mq_policy_type);
        if (r) {
                DMERR("register failed %d", r);
                goto bad_register_mq;
        }
        r = dm_cache_policy_register(&default_policy_type);
        if (!r) {
                DMINFO("version %u.%u.%u loaded",
                       mq_policy_type.version[0],
                       mq_policy_type.version[1],
                       mq_policy_type.version[2]);
                return 0;
        }

        DMERR("register failed (as default) %d", r);

        dm_cache_policy_unregister(&mq_policy_type);

bad_register_mq:
        kmem_cache_destroy(mq_entry_cache);
static void __exit mq_exit(void)
{
        dm_cache_policy_unregister(&mq_policy_type);
        dm_cache_policy_unregister(&default_policy_type);

        kmem_cache_destroy(mq_entry_cache);
}

module_init(mq_init);
module_exit(mq_exit);

MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("mq cache policy");

MODULE_ALIAS("dm-cache-default");