/*
 * Copyright (C) 2012 Red Hat. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-cache-policy.h"
#include "dm.h"

#include <linux/hash.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#define DM_MSG_PREFIX "cache-policy-mq"

static struct kmem_cache *mq_entry_cache;

/*----------------------------------------------------------------*/

static unsigned next_power(unsigned n, unsigned min)
{
	return roundup_pow_of_two(max(n, min));
}

/*----------------------------------------------------------------*/

/*
 * Large, sequential ios are probably better left on the origin device since
 * spindles tend to have good bandwidth.
 *
 * The io_tracker tries to spot when the io is in one of these sequential
 * modes.
 *
 * The two thresholds used to switch between random and sequential io mode
 * default as follows and can be adjusted via the constructor and message
 * interfaces.
 */
#define RANDOM_THRESHOLD_DEFAULT 4
#define SEQUENTIAL_THRESHOLD_DEFAULT 512

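/*
 * These can be retuned at runtime through the message interface handled
 * by mq_set_config_value() below, e.g. (the device name is only an
 * illustration):
 *
 *   dmsetup message my-cache 0 sequential_threshold 1024
 *   dmsetup message my-cache 0 random_threshold 8
 */
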
enum io_pattern {
	PATTERN_SEQUENTIAL,
	PATTERN_RANDOM
};

struct io_tracker {
	enum io_pattern pattern;

	unsigned nr_seq_samples;
	unsigned nr_rand_samples;
	unsigned thresholds[2];

	dm_oblock_t last_end_oblock;
};

static void iot_init(struct io_tracker *t,
		     int sequential_threshold, int random_threshold)
{
	t->pattern = PATTERN_RANDOM;
	t->nr_seq_samples = 0;
	t->nr_rand_samples = 0;
	t->last_end_oblock = 0;
	t->thresholds[PATTERN_RANDOM] = random_threshold;
	t->thresholds[PATTERN_SEQUENTIAL] = sequential_threshold;
}

static enum io_pattern iot_pattern(struct io_tracker *t)
{
	return t->pattern;
}

static void iot_update_stats(struct io_tracker *t, struct bio *bio)
{
	if (bio->bi_iter.bi_sector == from_oblock(t->last_end_oblock) + 1)
		t->nr_seq_samples++;
	else {
		/*
		 * Just one non-sequential IO is enough to reset the
		 * counters.
		 */
		if (t->nr_seq_samples) {
			t->nr_seq_samples = 0;
			t->nr_rand_samples = 0;
		}

		t->nr_rand_samples++;
	}

	t->last_end_oblock = to_oblock(bio_end_sector(bio) - 1);
}

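/*
 * In other words, a bio only counts as sequential when it starts at the
 * sector immediately following the previous bio's last sector; any other
 * bio bumps the random counter and discards the current sequential run.
 */
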
static void iot_check_for_pattern_switch(struct io_tracker *t)
{
	switch (t->pattern) {
	case PATTERN_SEQUENTIAL:
		if (t->nr_rand_samples >= t->thresholds[PATTERN_RANDOM]) {
			t->pattern = PATTERN_RANDOM;
			t->nr_seq_samples = t->nr_rand_samples = 0;
		}
		break;

	case PATTERN_RANDOM:
		if (t->nr_seq_samples >= t->thresholds[PATTERN_SEQUENTIAL]) {
			t->pattern = PATTERN_SEQUENTIAL;
			t->nr_seq_samples = t->nr_rand_samples = 0;
		}
		break;
	}
}

static void iot_examine_bio(struct io_tracker *t, struct bio *bio)
{
	iot_update_stats(t, bio);
	iot_check_for_pattern_switch(t);
}

/*----------------------------------------------------------------*/

/*
 * This queue is divided up into different levels, allowing us to push
 * entries to the back of any of the levels.  Think of it as a partially
 * sorted queue.
 */
#define NR_QUEUE_LEVELS 16u

struct queue {
	struct list_head qs[NR_QUEUE_LEVELS];
};

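/*
 * The level an entry lives at is derived from the log2 of its hit count
 * (see queue_level() below), so frequently hit entries sit in the higher
 * levels and queue_pop() always evicts from the lowest populated level.
 */
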
static void queue_init(struct queue *q)
{
	unsigned i;

	for (i = 0; i < NR_QUEUE_LEVELS; i++)
		INIT_LIST_HEAD(q->qs + i);
}

/*
 * Checks to see if the queue is empty.
 * FIXME: reduce cpu usage.
 */
static bool queue_empty(struct queue *q)
{
	unsigned i;

	for (i = 0; i < NR_QUEUE_LEVELS; i++)
		if (!list_empty(q->qs + i))
			return false;

	return true;
}

/*
 * Insert an entry to the back of the given level.
 */
static void queue_push(struct queue *q, unsigned level, struct list_head *elt)
{
	list_add_tail(elt, q->qs + level);
}

static void queue_remove(struct list_head *elt)
{
	list_del(elt);
}

/*
 * Shifts all regions down one level.  This has no effect on the order of
 * elements within a level.
 */
static void queue_shift_down(struct queue *q)
{
	unsigned level;

	for (level = 1; level < NR_QUEUE_LEVELS; level++)
		list_splice_init(q->qs + level, q->qs + level - 1);
}

/*
 * Gives us the oldest entry of the lowest populated level.  If the first
 * level is emptied then we shift down one level.
 */
static struct list_head *queue_pop(struct queue *q)
{
	unsigned level;
	struct list_head *r;

	for (level = 0; level < NR_QUEUE_LEVELS; level++)
		if (!list_empty(q->qs + level)) {
			r = q->qs[level].next;
			list_del(r);

			/* have we just emptied the bottom level? */
			if (level == 0 && list_empty(q->qs))
				queue_shift_down(q);

			return r;
		}

	return NULL;
}

static struct list_head *list_pop(struct list_head *lh)
{
	struct list_head *r = lh->next;

	list_del_init(r);

	return r;
}

/*----------------------------------------------------------------*/

/*
 * Describes a cache entry.  Used in both the cache and the pre_cache.
 */
struct entry {
	struct hlist_node hlist;
	struct list_head list;
	dm_oblock_t oblock;

	/*
	 * FIXME: pack these better
	 */
	bool dirty:1;
	unsigned hit_count;
	unsigned generation;
	unsigned tick;
};

/*
 * Rather than storing the cblock in an entry, we allocate all entries in
 * an array, and infer the cblock from the entry position.
 *
 * Free entries are linked together into a list.
 */
struct entry_pool {
	struct entry *entries, *entries_end;
	struct list_head free;
	unsigned nr_allocated;
};

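/*
 * The cblock <-> entry mapping is plain pointer arithmetic: the entry
 * for cblock c lives at entries + from_cblock(c), and infer_cblock()
 * below recovers the cblock as to_cblock(e - entries).
 */
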
static int epool_init(struct entry_pool *ep, unsigned nr_entries)
{
	unsigned i;

	ep->entries = vzalloc(sizeof(struct entry) * nr_entries);
	if (!ep->entries)
		return -ENOMEM;

	ep->entries_end = ep->entries + nr_entries;

	INIT_LIST_HEAD(&ep->free);
	for (i = 0; i < nr_entries; i++)
		list_add(&ep->entries[i].list, &ep->free);

	ep->nr_allocated = 0;

	return 0;
}

static void epool_exit(struct entry_pool *ep)
{
	vfree(ep->entries);
}

static struct entry *alloc_entry(struct entry_pool *ep)
{
	struct entry *e;

	if (list_empty(&ep->free))
		return NULL;

	e = list_entry(list_pop(&ep->free), struct entry, list);
	INIT_LIST_HEAD(&e->list);
	INIT_HLIST_NODE(&e->hlist);
	ep->nr_allocated++;

	return e;
}

/*
 * This assumes the cblock hasn't already been allocated.
 */
static struct entry *alloc_particular_entry(struct entry_pool *ep, dm_cblock_t cblock)
{
	struct entry *e = ep->entries + from_cblock(cblock);

	list_del_init(&e->list);
	INIT_HLIST_NODE(&e->hlist);
	ep->nr_allocated++;

	return e;
}

static void free_entry(struct entry_pool *ep, struct entry *e)
{
	BUG_ON(!ep->nr_allocated);
	ep->nr_allocated--;
	INIT_HLIST_NODE(&e->hlist);
	list_add(&e->list, &ep->free);
}

/*
 * Returns NULL if the entry is free.
 */
static struct entry *epool_find(struct entry_pool *ep, dm_cblock_t cblock)
{
	struct entry *e = ep->entries + from_cblock(cblock);

	return !hlist_unhashed(&e->hlist) ? e : NULL;
}

static bool epool_empty(struct entry_pool *ep)
{
	return list_empty(&ep->free);
}

static bool in_pool(struct entry_pool *ep, struct entry *e)
{
	return e >= ep->entries && e < ep->entries_end;
}

static dm_cblock_t infer_cblock(struct entry_pool *ep, struct entry *e)
{
	return to_cblock(e - ep->entries);
}

/*----------------------------------------------------------------*/

struct mq_policy {
	struct dm_cache_policy policy;

	/* protects everything */
	struct mutex lock;
	dm_cblock_t cache_size;
	struct io_tracker tracker;

	/*
	 * Entries come from two pools, one of pre-cache entries, and one
	 * for the cache proper.
	 */
	struct entry_pool pre_cache_pool;
	struct entry_pool cache_pool;

	/*
	 * We maintain three queues of entries.  The cache proper,
	 * consisting of a clean and dirty queue, contains the currently
	 * active mappings.  Whereas the pre_cache tracks blocks that
	 * are being hit frequently and are potential candidates for
	 * promotion to the cache.
	 */
	struct queue pre_cache;
	struct queue cache_clean;
	struct queue cache_dirty;

	/*
	 * Keeps track of time, incremented by the core.  We use this to
	 * avoid attributing multiple hits within the same tick.
	 *
	 * Access to tick_protected should be done with the spin lock held.
	 * It's copied to tick at the start of the map function (within the
	 * mutex).
	 */
	spinlock_t tick_lock;
	unsigned tick_protected;
	unsigned tick;

	/*
	 * A count of the number of times the map function has been called
	 * and found an entry in the pre_cache or cache.  Currently used to
	 * calculate the generation.
	 */
	unsigned hit_count;

	/*
	 * A generation is a longish period that is used to trigger some
	 * book keeping effects.  eg, decrementing hit counts on entries.
	 * This is needed to allow the cache to evolve as io patterns
	 * change.
	 */
	unsigned generation;
	unsigned generation_period; /* in lookups (will probably change) */

	/*
	 * Entries in the pre_cache whose hit count passes the promotion
	 * threshold move to the cache proper.  Working out the correct
	 * value for the promotion threshold is crucial to this policy.
	 */
	unsigned promote_threshold;

	unsigned discard_promote_adjustment;
	unsigned read_promote_adjustment;
	unsigned write_promote_adjustment;

	/*
	 * The hash table allows us to quickly find an entry by origin
	 * block.  Both pre_cache and cache entries are in here.
	 */
	unsigned nr_buckets;
	dm_block_t hash_bits;
	struct hlist_head *table;
};

#define DEFAULT_DISCARD_PROMOTE_ADJUSTMENT 1
#define DEFAULT_READ_PROMOTE_ADJUSTMENT 4
#define DEFAULT_WRITE_PROMOTE_ADJUSTMENT 8

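/*
 * See adjusted_promote_threshold() below: the read and write adjustments
 * are added to the current promote_threshold, whereas a discarded origin
 * block with a free or clean cblock available is promoted using the
 * discard adjustment alone, since no copying is needed.
 */
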
/*----------------------------------------------------------------*/

/*
 * Simple hash table implementation.  Should replace with the standard hash
 * table that's making its way upstream.
 */
static void hash_insert(struct mq_policy *mq, struct entry *e)
{
	unsigned h = hash_64(from_oblock(e->oblock), mq->hash_bits);

	hlist_add_head(&e->hlist, mq->table + h);
}

static struct entry *hash_lookup(struct mq_policy *mq, dm_oblock_t oblock)
{
	unsigned h = hash_64(from_oblock(oblock), mq->hash_bits);
	struct hlist_head *bucket = mq->table + h;
	struct entry *e;

	hlist_for_each_entry(e, bucket, hlist)
		if (e->oblock == oblock) {
			hlist_del(&e->hlist);
			hlist_add_head(&e->hlist, bucket);
			return e;
		}

	return NULL;
}

static void hash_remove(struct entry *e)
{
	hlist_del(&e->hlist);
}

/*----------------------------------------------------------------*/

static bool any_free_cblocks(struct mq_policy *mq)
{
	return !epool_empty(&mq->cache_pool);
}

static bool any_clean_cblocks(struct mq_policy *mq)
{
	return !queue_empty(&mq->cache_clean);
}

/*----------------------------------------------------------------*/

/*
 * Now we get to the meat of the policy.  This section deals with deciding
 * when to add entries to the pre_cache and cache, and move between
 * them.
 */

/*
 * The queue level is based on the log2 of the hit count.
 */
static unsigned queue_level(struct entry *e)
{
	return min((unsigned) ilog2(e->hit_count), NR_QUEUE_LEVELS - 1u);
}

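/*
 * E.g. hit counts of 1, 2-3, 4-7, ... map to levels 0, 1, 2, ...;
 * anything from 2^15 (32768) hits upwards is clamped to the top level,
 * NR_QUEUE_LEVELS - 1.
 */
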
static bool in_cache(struct mq_policy *mq, struct entry *e)
{
	return in_pool(&mq->cache_pool, e);
}

/*
 * Inserts the entry into the pre_cache or the cache.  Ensures the cache
 * block is marked as allocated if necessary.  Inserts into the hash table.
 * Sets the tick which records when the entry was last moved about.
 */
static void push(struct mq_policy *mq, struct entry *e)
{
	e->tick = mq->tick;
	hash_insert(mq, e);

	if (in_cache(mq, e))
		queue_push(e->dirty ? &mq->cache_dirty : &mq->cache_clean,
			   queue_level(e), &e->list);
	else
		queue_push(&mq->pre_cache, queue_level(e), &e->list);
}

/*
 * Removes an entry from pre_cache or cache.  Removes from the hash table.
 */
static void del(struct mq_policy *mq, struct entry *e)
{
	queue_remove(&e->list);
	hash_remove(e);
}

/*
 * Like del, except it removes the first entry in the queue (ie. the least
 * recently used).
 */
static struct entry *pop(struct mq_policy *mq, struct queue *q)
{
	struct entry *e;
	struct list_head *h = queue_pop(q);

	if (!h)
		return NULL;

	e = container_of(h, struct entry, list);
	hash_remove(e);

	return e;
}

/*
 * Has this entry already been updated?
 */
static bool updated_this_tick(struct mq_policy *mq, struct entry *e)
{
	return mq->tick == e->tick;
}

/*
 * The promotion threshold is adjusted every generation.  As are the counts
 * of the entries.
 *
 * At the moment the threshold is taken by averaging the hit counts of some
 * of the entries in the cache (the first 20 entries across all levels in
 * ascending order, giving preference to the clean entries at each level).
 *
 * We can be much cleverer than this though.  For example, each promotion
 * could bump up the threshold helping to prevent churn.  Much more to do
 * here.
 */

#define MAX_TO_AVERAGE 20

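/*
 * i.e. promote_threshold becomes the mean hit count of the sampled
 * entries, rounded up (or 1 if nothing could be sampled).
 */
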
static void check_generation(struct mq_policy *mq)
{
	unsigned total = 0, nr = 0, count = 0, level;
	struct list_head *head;
	struct entry *e;

	if ((mq->hit_count >= mq->generation_period) && (epool_empty(&mq->cache_pool))) {
		mq->hit_count = 0;
		mq->generation++;

		for (level = 0; level < NR_QUEUE_LEVELS && count < MAX_TO_AVERAGE; level++) {
			head = mq->cache_clean.qs + level;
			list_for_each_entry(e, head, list) {
				nr++;
				total += e->hit_count;

				if (++count >= MAX_TO_AVERAGE)
					break;
			}

			head = mq->cache_dirty.qs + level;
			list_for_each_entry(e, head, list) {
				nr++;
				total += e->hit_count;

				if (++count >= MAX_TO_AVERAGE)
					break;
			}
		}

		mq->promote_threshold = nr ? total / nr : 1;
		if (mq->promote_threshold * nr < total)
			mq->promote_threshold++;
	}
}

/*
 * Whenever we use an entry we bump up its hit counter, and push it to the
 * back of its current level.
 */
static void requeue_and_update_tick(struct mq_policy *mq, struct entry *e)
{
	if (updated_this_tick(mq, e))
		return;

	e->hit_count++;
	mq->hit_count++;
	check_generation(mq);

	/* generation adjustment, to stop the counts increasing forever. */
	/* e->hit_count -= min(e->hit_count - 1, mq->generation - e->generation); */
	e->generation = mq->generation;

	del(mq, e);
	push(mq, e);
}

/*
 * Demote the least recently used entry from the cache to the pre_cache.
 * Returns the new cache entry to use, and the old origin block it was
 * mapped to.
 *
 * We drop the hit count on the demoted entry back to 1 to stop it bouncing
 * straight back into the cache if it's subsequently hit.  There are
 * various options here, and more experimentation would be good:
 *
 * - just forget about the demoted entry completely (ie. don't insert it
 *   into the pre_cache).
 * - divide the hit count rather than setting it to some hard coded value.
 * - set the hit count to a hard coded value other than 1, eg, is it better
 *   if it goes in at level 2?
 */
static int demote_cblock(struct mq_policy *mq, dm_oblock_t *oblock)
{
	struct entry *demoted = pop(mq, &mq->cache_clean);

	if (!demoted)
		/*
		 * We could get a block from mq->cache_dirty, but that
		 * would add extra latency to the triggering bio as it
		 * waits for the writeback.  Better to not promote this
		 * time and hope there's a clean block next time this block
		 * is hit.
		 */
		return -ENOSPC;

	*oblock = demoted->oblock;
	free_entry(&mq->cache_pool, demoted);

	/*
	 * We used to put the demoted block into the pre-cache, but I think
	 * it's simpler to just let it work its way up from zero again.
	 * Stops blocks flickering in and out of the cache.
	 */

	return 0;
}

/*
 * We modify the basic promotion_threshold depending on the specific io.
 *
 * If the origin block has been discarded then there's no cost to copy it
 * to the cache.
 *
 * We bias towards reads, since they can be demoted at no cost if they
 * haven't been dirtied.
 */
static unsigned adjusted_promote_threshold(struct mq_policy *mq,
					   bool discarded_oblock, int data_dir)
{
	if (data_dir == READ)
		return mq->promote_threshold + mq->read_promote_adjustment;

	if (discarded_oblock && (any_free_cblocks(mq) || any_clean_cblocks(mq))) {
		/*
		 * We don't need to do any copying at all, so give this a
		 * very low threshold.
		 */
		return mq->discard_promote_adjustment;
	}

	return mq->promote_threshold + mq->write_promote_adjustment;
}

static bool should_promote(struct mq_policy *mq, struct entry *e,
			   bool discarded_oblock, int data_dir)
{
	return e->hit_count >=
		adjusted_promote_threshold(mq, discarded_oblock, data_dir);
}

static int cache_entry_found(struct mq_policy *mq,
			     struct entry *e,
			     struct policy_result *result)
{
	requeue_and_update_tick(mq, e);

	if (in_cache(mq, e)) {
		result->op = POLICY_HIT;
		result->cblock = infer_cblock(&mq->cache_pool, e);
	}

	return 0;
}

/*
 * Moves an entry from the pre_cache to the cache.  The main work is
 * finding which cache block to use.
 */
static int pre_cache_to_cache(struct mq_policy *mq, struct entry *e,
			      struct policy_result *result)
{
	int r;
	struct entry *new_e;

	/* Ensure there's a free cblock in the cache */
	if (epool_empty(&mq->cache_pool)) {
		result->op = POLICY_REPLACE;
		r = demote_cblock(mq, &result->old_oblock);
		if (r) {
			result->op = POLICY_MISS;
			return 0;
		}

	} else
		result->op = POLICY_NEW;

	new_e = alloc_entry(&mq->cache_pool);
	BUG_ON(!new_e);

	new_e->oblock = e->oblock;
	new_e->dirty = false;
	new_e->hit_count = e->hit_count;
	new_e->generation = e->generation;
	new_e->tick = e->tick;

	del(mq, e);
	free_entry(&mq->pre_cache_pool, e);
	push(mq, new_e);

	result->cblock = infer_cblock(&mq->cache_pool, new_e);

	return 0;
}

static int pre_cache_entry_found(struct mq_policy *mq, struct entry *e,
				 bool can_migrate, bool discarded_oblock,
				 int data_dir, struct policy_result *result)
{
	int r = 0;
	bool updated = updated_this_tick(mq, e);

	if ((!discarded_oblock && updated) ||
	    !should_promote(mq, e, discarded_oblock, data_dir)) {
		requeue_and_update_tick(mq, e);
		result->op = POLICY_MISS;

	} else if (!can_migrate)
		r = -EWOULDBLOCK;

	else {
		requeue_and_update_tick(mq, e);
		r = pre_cache_to_cache(mq, e, result);
	}

	return r;
}

static void insert_in_pre_cache(struct mq_policy *mq,
				dm_oblock_t oblock)
{
	struct entry *e = alloc_entry(&mq->pre_cache_pool);

	if (!e)
		/*
		 * There's no spare entry structure, so we grab the least
		 * used one from the pre_cache.
		 */
		e = pop(mq, &mq->pre_cache);

	if (unlikely(!e)) {
		DMWARN("couldn't pop from pre cache");
		return;
	}

	e->dirty = false;
	e->oblock = oblock;
	e->hit_count = 1;
	e->generation = mq->generation;
	push(mq, e);
}

static void insert_in_cache(struct mq_policy *mq, dm_oblock_t oblock,
			    struct policy_result *result)
{
	int r;
	struct entry *e;

	if (epool_empty(&mq->cache_pool)) {
		result->op = POLICY_REPLACE;
		r = demote_cblock(mq, &result->old_oblock);
		if (unlikely(r)) {
			result->op = POLICY_MISS;
			insert_in_pre_cache(mq, oblock);
			return;
		}

		/*
		 * This will always succeed, since we've just demoted.
		 */
		e = alloc_entry(&mq->cache_pool);
		BUG_ON(!e);

	} else {
		e = alloc_entry(&mq->cache_pool);
		result->op = POLICY_NEW;
	}

	e->oblock = oblock;
	e->dirty = false;
	e->hit_count = 1;
	e->generation = mq->generation;
	push(mq, e);

	result->cblock = infer_cblock(&mq->cache_pool, e);
}

static int no_entry_found(struct mq_policy *mq, dm_oblock_t oblock,
			  bool can_migrate, bool discarded_oblock,
			  int data_dir, struct policy_result *result)
{
	if (adjusted_promote_threshold(mq, discarded_oblock, data_dir) <= 1) {
		if (can_migrate)
			insert_in_cache(mq, oblock, result);
		else
			return -EWOULDBLOCK;
	} else {
		insert_in_pre_cache(mq, oblock);
		result->op = POLICY_MISS;
	}

	return 0;
}

/*
 * Looks the oblock up in the hash table, then decides whether to put in
 * pre_cache, or cache etc.
 */
static int map(struct mq_policy *mq, dm_oblock_t oblock,
	       bool can_migrate, bool discarded_oblock,
	       int data_dir, struct policy_result *result)
{
	int r = 0;
	struct entry *e = hash_lookup(mq, oblock);

	if (e && in_cache(mq, e))
		r = cache_entry_found(mq, e, result);

	else if (iot_pattern(&mq->tracker) == PATTERN_SEQUENTIAL)
		result->op = POLICY_MISS;

	else if (e)
		r = pre_cache_entry_found(mq, e, can_migrate, discarded_oblock,
					  data_dir, result);

	else
		r = no_entry_found(mq, oblock, can_migrate, discarded_oblock,
				   data_dir, result);

	if (r == -EWOULDBLOCK)
		result->op = POLICY_MISS;

	return r;
}

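/*
 * To summarise the decision above: a block already in the cache is a
 * straight hit; sequential io is always left on the origin as a miss;
 * a pre_cache hit may be promoted via pre_cache_to_cache(); and an
 * unknown block is added to the pre_cache (or directly to the cache
 * when the adjusted promote threshold is trivially low).
 */
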
/*----------------------------------------------------------------*/

/*
 * Public interface, via the policy struct.  See dm-cache-policy.h for a
 * description of these.
 */

static struct mq_policy *to_mq_policy(struct dm_cache_policy *p)
{
	return container_of(p, struct mq_policy, policy);
}

static void mq_destroy(struct dm_cache_policy *p)
{
	struct mq_policy *mq = to_mq_policy(p);

	kfree(mq->table);
	epool_exit(&mq->cache_pool);
	epool_exit(&mq->pre_cache_pool);
	kfree(mq);
}

static void copy_tick(struct mq_policy *mq)
{
	unsigned long flags;

	spin_lock_irqsave(&mq->tick_lock, flags);
	mq->tick = mq->tick_protected;
	spin_unlock_irqrestore(&mq->tick_lock, flags);
}

static int mq_map(struct dm_cache_policy *p, dm_oblock_t oblock,
		  bool can_block, bool can_migrate, bool discarded_oblock,
		  struct bio *bio, struct policy_result *result)
{
	int r;
	struct mq_policy *mq = to_mq_policy(p);

	result->op = POLICY_MISS;

	if (can_block)
		mutex_lock(&mq->lock);
	else if (!mutex_trylock(&mq->lock))
		return -EWOULDBLOCK;

	copy_tick(mq);

	iot_examine_bio(&mq->tracker, bio);
	r = map(mq, oblock, can_migrate, discarded_oblock,
		bio_data_dir(bio), result);

	mutex_unlock(&mq->lock);

	return r;
}

static int mq_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock)
{
	int r;
	struct mq_policy *mq = to_mq_policy(p);
	struct entry *e;

	if (!mutex_trylock(&mq->lock))
		return -EWOULDBLOCK;

	e = hash_lookup(mq, oblock);
	if (e && in_cache(mq, e)) {
		*cblock = infer_cblock(&mq->cache_pool, e);
		r = 0;
	} else
		r = -ENOENT;

	mutex_unlock(&mq->lock);

	return r;
}

*mq
, dm_oblock_t oblock
, bool set
)
940 e
= hash_lookup(mq
, oblock
);
941 BUG_ON(!e
|| !in_cache(mq
, e
));
static void mq_set_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
{
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	__mq_set_clear_dirty(mq, oblock, true);
	mutex_unlock(&mq->lock);
}

static void mq_clear_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
{
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	__mq_set_clear_dirty(mq, oblock, false);
	mutex_unlock(&mq->lock);
}

static int mq_load_mapping(struct dm_cache_policy *p,
			   dm_oblock_t oblock, dm_cblock_t cblock,
			   uint32_t hint, bool hint_valid)
{
	struct mq_policy *mq = to_mq_policy(p);
	struct entry *e;

	e = alloc_particular_entry(&mq->cache_pool, cblock);
	e->oblock = oblock;
	e->dirty = false;	/* this gets corrected in a minute */
	e->hit_count = hint_valid ? hint : 1;
	e->generation = mq->generation;
	push(mq, e);

	return 0;
}

static int mq_save_hints(struct mq_policy *mq, struct queue *q,
			 policy_walk_fn fn, void *context)
{
	int r;
	unsigned level;
	struct entry *e;

	for (level = 0; level < NR_QUEUE_LEVELS; level++)
		list_for_each_entry(e, q->qs + level, list) {
			r = fn(context, infer_cblock(&mq->cache_pool, e),
			       e->oblock, e->hit_count);
			if (r)
				return r;
		}

	return 0;
}

static int mq_walk_mappings(struct dm_cache_policy *p, policy_walk_fn fn,
			    void *context)
{
	struct mq_policy *mq = to_mq_policy(p);
	int r;

	mutex_lock(&mq->lock);

	r = mq_save_hints(mq, &mq->cache_clean, fn, context);
	if (!r)
		r = mq_save_hints(mq, &mq->cache_dirty, fn, context);

	mutex_unlock(&mq->lock);

	return r;
}

static void __remove_mapping(struct mq_policy *mq, dm_oblock_t oblock)
{
	struct entry *e;

	e = hash_lookup(mq, oblock);
	BUG_ON(!e || !in_cache(mq, e));

	del(mq, e);
	free_entry(&mq->cache_pool, e);
}

static void mq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock)
{
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	__remove_mapping(mq, oblock);
	mutex_unlock(&mq->lock);
}

static int __remove_cblock(struct mq_policy *mq, dm_cblock_t cblock)
{
	struct entry *e = epool_find(&mq->cache_pool, cblock);

	if (!e)
		return -ENODATA;

	del(mq, e);
	free_entry(&mq->cache_pool, e);

	return 0;
}

static int mq_remove_cblock(struct dm_cache_policy *p, dm_cblock_t cblock)
{
	int r;
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	r = __remove_cblock(mq, cblock);
	mutex_unlock(&mq->lock);

	return r;
}

static int __mq_writeback_work(struct mq_policy *mq, dm_oblock_t *oblock,
			       dm_cblock_t *cblock)
{
	struct entry *e = pop(mq, &mq->cache_dirty);

	if (!e)
		return -ENODATA;

	*oblock = e->oblock;
	*cblock = infer_cblock(&mq->cache_pool, e);
	e->dirty = false;
	push(mq, e);

	return 0;
}

static int mq_writeback_work(struct dm_cache_policy *p, dm_oblock_t *oblock,
			     dm_cblock_t *cblock)
{
	int r;
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	r = __mq_writeback_work(mq, oblock, cblock);
	mutex_unlock(&mq->lock);

	return r;
}

static void __force_mapping(struct mq_policy *mq,
			    dm_oblock_t current_oblock, dm_oblock_t new_oblock)
{
	struct entry *e = hash_lookup(mq, current_oblock);

	if (e && in_cache(mq, e)) {
		del(mq, e);
		e->oblock = new_oblock;
		e->dirty = true;
		push(mq, e);
	}
}

static void mq_force_mapping(struct dm_cache_policy *p,
			     dm_oblock_t current_oblock, dm_oblock_t new_oblock)
{
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	__force_mapping(mq, current_oblock, new_oblock);
	mutex_unlock(&mq->lock);
}

static dm_cblock_t mq_residency(struct dm_cache_policy *p)
{
	dm_cblock_t r;
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	r = to_cblock(mq->cache_pool.nr_allocated);
	mutex_unlock(&mq->lock);

	return r;
}

static void mq_tick(struct dm_cache_policy *p)
{
	struct mq_policy *mq = to_mq_policy(p);
	unsigned long flags;

	spin_lock_irqsave(&mq->tick_lock, flags);
	mq->tick_protected++;
	spin_unlock_irqrestore(&mq->tick_lock, flags);
}

static int mq_set_config_value(struct dm_cache_policy *p,
			       const char *key, const char *value)
{
	struct mq_policy *mq = to_mq_policy(p);
	unsigned long tmp;

	if (kstrtoul(value, 10, &tmp))
		return -EINVAL;

	if (!strcasecmp(key, "random_threshold")) {
		mq->tracker.thresholds[PATTERN_RANDOM] = tmp;

	} else if (!strcasecmp(key, "sequential_threshold")) {
		mq->tracker.thresholds[PATTERN_SEQUENTIAL] = tmp;

	} else if (!strcasecmp(key, "discard_promote_adjustment"))
		mq->discard_promote_adjustment = tmp;

	else if (!strcasecmp(key, "read_promote_adjustment"))
		mq->read_promote_adjustment = tmp;

	else if (!strcasecmp(key, "write_promote_adjustment"))
		mq->write_promote_adjustment = tmp;

	else
		return -EINVAL;

	return 0;
}

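/*
 * Status reporting below: the leading "10" in the DMEMIT format is the
 * number of words that follow (five key/value pairs), since the policy
 * is expected to emit a count followed by that many arguments.
 */
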
static int mq_emit_config_values(struct dm_cache_policy *p, char *result, unsigned maxlen)
{
	ssize_t sz = 0;
	struct mq_policy *mq = to_mq_policy(p);

	DMEMIT("10 random_threshold %u "
	       "sequential_threshold %u "
	       "discard_promote_adjustment %u "
	       "read_promote_adjustment %u "
	       "write_promote_adjustment %u",
	       mq->tracker.thresholds[PATTERN_RANDOM],
	       mq->tracker.thresholds[PATTERN_SEQUENTIAL],
	       mq->discard_promote_adjustment,
	       mq->read_promote_adjustment,
	       mq->write_promote_adjustment);

	return 0;
}

/* Init the policy plugin interface function pointers. */
static void init_policy_functions(struct mq_policy *mq)
{
	mq->policy.destroy = mq_destroy;
	mq->policy.map = mq_map;
	mq->policy.lookup = mq_lookup;
	mq->policy.set_dirty = mq_set_dirty;
	mq->policy.clear_dirty = mq_clear_dirty;
	mq->policy.load_mapping = mq_load_mapping;
	mq->policy.walk_mappings = mq_walk_mappings;
	mq->policy.remove_mapping = mq_remove_mapping;
	mq->policy.remove_cblock = mq_remove_cblock;
	mq->policy.writeback_work = mq_writeback_work;
	mq->policy.force_mapping = mq_force_mapping;
	mq->policy.residency = mq_residency;
	mq->policy.tick = mq_tick;
	mq->policy.emit_config_values = mq_emit_config_values;
	mq->policy.set_config_value = mq_set_config_value;
}

static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
					 sector_t origin_size,
					 sector_t cache_block_size)
{
	struct mq_policy *mq = kzalloc(sizeof(*mq), GFP_KERNEL);

	if (!mq)
		return NULL;

	init_policy_functions(mq);
	iot_init(&mq->tracker, SEQUENTIAL_THRESHOLD_DEFAULT, RANDOM_THRESHOLD_DEFAULT);
	mq->cache_size = cache_size;

	if (epool_init(&mq->pre_cache_pool, from_cblock(cache_size))) {
		DMERR("couldn't initialize pool of pre-cache entries");
		goto bad_pre_cache_init;
	}

	if (epool_init(&mq->cache_pool, from_cblock(cache_size))) {
		DMERR("couldn't initialize pool of cache entries");
		goto bad_cache_init;
	}

	mq->tick_protected = 0;
	mq->tick = 0;
	mq->hit_count = 0;
	mq->generation = 0;
	mq->promote_threshold = 0;
	mq->discard_promote_adjustment = DEFAULT_DISCARD_PROMOTE_ADJUSTMENT;
	mq->read_promote_adjustment = DEFAULT_READ_PROMOTE_ADJUSTMENT;
	mq->write_promote_adjustment = DEFAULT_WRITE_PROMOTE_ADJUSTMENT;
	mutex_init(&mq->lock);
	spin_lock_init(&mq->tick_lock);

	queue_init(&mq->pre_cache);
	queue_init(&mq->cache_clean);
	queue_init(&mq->cache_dirty);

	mq->generation_period = max((unsigned) from_cblock(cache_size), 1024U);

	mq->nr_buckets = next_power(from_cblock(cache_size) / 2, 16);
	mq->hash_bits = ffs(mq->nr_buckets) - 1;
	mq->table = kzalloc(sizeof(*mq->table) * mq->nr_buckets, GFP_KERNEL);
	if (!mq->table)
		goto bad_alloc_table;

	return &mq->policy;

bad_alloc_table:
	epool_exit(&mq->cache_pool);
bad_cache_init:
	epool_exit(&mq->pre_cache_pool);
bad_pre_cache_init:
	kfree(mq);

	return NULL;
}

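/*
 * Hash table sizing above: for example, a 65536 block cache gets
 * next_power(32768, 16) = 32768 buckets and ffs(32768) - 1 = 15 hash
 * bits for hash_64().
 */
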
/*----------------------------------------------------------------*/

static struct dm_cache_policy_type mq_policy_type = {
	.name = "mq",
	.version = {1, 2, 0},
	.hint_size = 4,
	.owner = THIS_MODULE,
	.create = mq_create
};

static struct dm_cache_policy_type default_policy_type = {
	.name = "default",
	.version = {1, 2, 0},
	.hint_size = 4,
	.owner = THIS_MODULE,
	.create = mq_create,
	.real = &mq_policy_type
};

static int __init mq_init(void)
{
	int r;

	mq_entry_cache = kmem_cache_create("dm_mq_policy_cache_entry",
					   sizeof(struct entry),
					   __alignof__(struct entry),
					   0, NULL);
	if (!mq_entry_cache)
		return -ENOMEM;

	r = dm_cache_policy_register(&mq_policy_type);
	if (r) {
		DMERR("register failed %d", r);
		goto bad_register_mq;
	}

	r = dm_cache_policy_register(&default_policy_type);
	if (!r) {
		DMINFO("version %u.%u.%u loaded",
		       mq_policy_type.version[0],
		       mq_policy_type.version[1],
		       mq_policy_type.version[2]);
		return 0;
	}

	DMERR("register failed (as default) %d", r);
	dm_cache_policy_unregister(&mq_policy_type);

bad_register_mq:
	kmem_cache_destroy(mq_entry_cache);

	return -ENOMEM;
}

static void __exit mq_exit(void)
{
	dm_cache_policy_unregister(&mq_policy_type);
	dm_cache_policy_unregister(&default_policy_type);

	kmem_cache_destroy(mq_entry_cache);
}

module_init(mq_init);
module_exit(mq_exit);

MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("mq cache policy");

MODULE_ALIAS("dm-cache-default");