/*
 * Copyright (C) 2012 Red Hat. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-cache-policy.h"
#include "dm.h"

#include <linux/hash.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#define DM_MSG_PREFIX "cache-policy-mq"

static struct kmem_cache *mq_entry_cache;

/*----------------------------------------------------------------*/
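/*
 * Round n up to a power of two, but no lower than min.  Used below to
 * size the hash table.  For example, next_power(10, 16) == 16 and
 * next_power(100, 16) == 128.
 */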
static unsigned next_power(unsigned n, unsigned min)
{
        return roundup_pow_of_two(max(n, min));
}

/*----------------------------------------------------------------*/
/*
 * Large, sequential ios are probably better left on the origin device since
 * spindles tend to have good bandwidth.
 *
 * The io_tracker tries to spot when the io is in one of these sequential
 * modes.
 *
 * Two thresholds are used to switch between the random and sequential io
 * modes.  They default as follows and can be adjusted via the constructor
 * and message interfaces.
 */
#define RANDOM_THRESHOLD_DEFAULT 4
#define SEQUENTIAL_THRESHOLD_DEFAULT 512
enum io_pattern {
        PATTERN_SEQUENTIAL,
        PATTERN_RANDOM
};

struct io_tracker {
        enum io_pattern pattern;

        unsigned nr_seq_samples;
        unsigned nr_rand_samples;
        unsigned thresholds[2];

        dm_oblock_t last_end_oblock;
};
static void iot_init(struct io_tracker *t,
                     int sequential_threshold, int random_threshold)
{
        t->pattern = PATTERN_RANDOM;
        t->nr_seq_samples = 0;
        t->nr_rand_samples = 0;
        t->last_end_oblock = 0;
        t->thresholds[PATTERN_RANDOM] = random_threshold;
        t->thresholds[PATTERN_SEQUENTIAL] = sequential_threshold;
}
static enum io_pattern iot_pattern(struct io_tracker *t)
{
        return t->pattern;
}
static void iot_update_stats(struct io_tracker *t, struct bio *bio)
{
        if (bio->bi_iter.bi_sector == from_oblock(t->last_end_oblock) + 1)
                t->nr_seq_samples++;
        else {
                /*
                 * Just one non-sequential IO is enough to reset the
                 * counters.
                 */
                if (t->nr_seq_samples) {
                        t->nr_seq_samples = 0;
                        t->nr_rand_samples = 0;
                }

                t->nr_rand_samples++;
        }

        t->last_end_oblock = to_oblock(bio_end_sector(bio) - 1);
}
static void iot_check_for_pattern_switch(struct io_tracker *t)
{
        switch (t->pattern) {
        case PATTERN_SEQUENTIAL:
                if (t->nr_rand_samples >= t->thresholds[PATTERN_RANDOM]) {
                        t->pattern = PATTERN_RANDOM;
                        t->nr_seq_samples = t->nr_rand_samples = 0;
                }
                break;

        case PATTERN_RANDOM:
                if (t->nr_seq_samples >= t->thresholds[PATTERN_SEQUENTIAL]) {
                        t->pattern = PATTERN_SEQUENTIAL;
                        t->nr_seq_samples = t->nr_rand_samples = 0;
                }
                break;
        }
}
static void iot_examine_bio(struct io_tracker *t, struct bio *bio)
{
        iot_update_stats(t, bio);
        iot_check_for_pattern_switch(t);
}
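/*
 * Summary of the tracker behaviour with the default thresholds: it starts
 * in random mode; roughly 512 contiguous ios in a row (any intervening
 * non-contiguous io resets the count) switch it to sequential mode, while
 * a handful of non-contiguous ios (4 by default) switch it back.  map()
 * below treats sequential mode as a policy miss, so large sequential
 * streams stay on the origin device.
 */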
/*----------------------------------------------------------------*/
/*
 * This queue is divided up into different levels, allowing us to push
 * entries to the back of any of the levels.  Think of it as a partially
 * sorted queue.
 */
#define NR_QUEUE_LEVELS 16u

struct queue {
        struct list_head qs[NR_QUEUE_LEVELS];
};
static void queue_init(struct queue *q)
{
        unsigned i;

        for (i = 0; i < NR_QUEUE_LEVELS; i++)
                INIT_LIST_HEAD(q->qs + i);
}
/*
 * Checks to see if the queue is empty.
 * FIXME: reduce cpu usage.
 */
static bool queue_empty(struct queue *q)
{
        unsigned i;

        for (i = 0; i < NR_QUEUE_LEVELS; i++)
                if (!list_empty(q->qs + i))
                        return false;

        return true;
}
/*
 * Insert an entry to the back of the given level.
 */
static void queue_push(struct queue *q, unsigned level, struct list_head *elt)
{
        list_add_tail(elt, q->qs + level);
}
static void queue_remove(struct list_head *elt)
{
        list_del(elt);
}
/*
 * Shifts all regions down one level.  This has no effect on the order of
 * the queue.
 */
static void queue_shift_down(struct queue *q)
{
        unsigned level;

        for (level = 1; level < NR_QUEUE_LEVELS; level++)
                list_splice_init(q->qs + level, q->qs + level - 1);
}
/*
 * Gives us the oldest entry of the lowest populated level.  If the first
 * level is emptied then we shift down one level.
 */
static struct list_head *queue_peek(struct queue *q)
{
        unsigned level;

        for (level = 0; level < NR_QUEUE_LEVELS; level++)
                if (!list_empty(q->qs + level))
                        return q->qs[level].next;

        return NULL;
}
static struct list_head *queue_pop(struct queue *q)
{
        struct list_head *r = queue_peek(q);

        if (r) {
                list_del(r);

                /* have we just emptied the bottom level? */
                if (list_empty(q->qs))
                        queue_shift_down(q);
        }

        return r;
}
static struct list_head *list_pop(struct list_head *lh)
{
        struct list_head *r = lh->next;

        BUG_ON(!r);
        list_del_init(r);

        return r;
}
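/*
 * Taken together these helpers implement the "multiqueue": an entry sits
 * in one of NR_QUEUE_LEVELS fifo lists, with hotter entries (higher hit
 * counts, see queue_level() below) living in higher levels.  queue_peek()
 * and queue_pop() always return the oldest entry of the lowest populated
 * level, i.e. the coldest entry, and queue_shift_down() ages everything
 * by one level once the bottom level empties.
 */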
/*----------------------------------------------------------------*/
/*
 * Describes a cache entry.  Used in both the cache and the pre_cache.
 */
struct entry {
        struct hlist_node hlist;
        struct list_head list;
        dm_oblock_t oblock;

        /*
         * FIXME: pack these better
         */
        bool dirty:1;
        unsigned hit_count;
        unsigned generation;
        unsigned tick;
};
/*
 * Rather than storing the cblock in an entry, we allocate all entries in
 * an array, and infer the cblock from the entry position.
 *
 * Free entries are linked together into a list.
 */
struct entry_pool {
        struct entry *entries, *entries_end;
        struct list_head free;
        unsigned nr_allocated;
};
static int epool_init(struct entry_pool *ep, unsigned nr_entries)
{
        unsigned i;

        ep->entries = vzalloc(sizeof(struct entry) * nr_entries);
        if (!ep->entries)
                return -ENOMEM;

        ep->entries_end = ep->entries + nr_entries;

        INIT_LIST_HEAD(&ep->free);
        for (i = 0; i < nr_entries; i++)
                list_add(&ep->entries[i].list, &ep->free);

        ep->nr_allocated = 0;

        return 0;
}
static void epool_exit(struct entry_pool *ep)
{
        vfree(ep->entries);
}
static struct entry *alloc_entry(struct entry_pool *ep)
{
        struct entry *e;

        if (list_empty(&ep->free))
                return NULL;

        e = list_entry(list_pop(&ep->free), struct entry, list);
        INIT_LIST_HEAD(&e->list);
        INIT_HLIST_NODE(&e->hlist);
        ep->nr_allocated++;

        return e;
}
/*
 * This assumes the cblock hasn't already been allocated.
 */
static struct entry *alloc_particular_entry(struct entry_pool *ep, dm_cblock_t cblock)
{
        struct entry *e = ep->entries + from_cblock(cblock);

        list_del_init(&e->list);
        INIT_HLIST_NODE(&e->hlist);
        ep->nr_allocated++;

        return e;
}
static void free_entry(struct entry_pool *ep, struct entry *e)
{
        BUG_ON(!ep->nr_allocated);
        ep->nr_allocated--;
        INIT_HLIST_NODE(&e->hlist);
        list_add(&e->list, &ep->free);
}
/*
 * Returns NULL if the entry is free.
 */
static struct entry *epool_find(struct entry_pool *ep, dm_cblock_t cblock)
{
        struct entry *e = ep->entries + from_cblock(cblock);

        return !hlist_unhashed(&e->hlist) ? e : NULL;
}
static bool epool_empty(struct entry_pool *ep)
{
        return list_empty(&ep->free);
}
static bool in_pool(struct entry_pool *ep, struct entry *e)
{
        return e >= ep->entries && e < ep->entries_end;
}
static dm_cblock_t infer_cblock(struct entry_pool *ep, struct entry *e)
{
        return to_cblock(e - ep->entries);
}
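/*
 * Note on the pool layout: because the entries are a flat array sized to
 * the cache, the cache block number is simply the array index, which is
 * what infer_cblock() and alloc_particular_entry() rely on.  Whether an
 * entry is currently allocated is tracked implicitly by its hash list
 * membership (see epool_find() above), and free entries are threaded
 * through ep->free via their list field.
 */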
/*----------------------------------------------------------------*/
struct mq_policy {
        struct dm_cache_policy policy;

        /* protects everything */
        struct mutex lock;
        dm_cblock_t cache_size;
        struct io_tracker tracker;

        /*
         * Entries come from two pools, one of pre-cache entries, and one
         * for the cache proper.
         */
        struct entry_pool pre_cache_pool;
        struct entry_pool cache_pool;

        /*
         * We maintain three queues of entries.  The cache proper,
         * consisting of a clean and dirty queue, contains the currently
         * active mappings.  Whereas the pre_cache tracks blocks that
         * are being hit frequently and are potential candidates for
         * promotion to the cache.
         */
        struct queue pre_cache;
        struct queue cache_clean;
        struct queue cache_dirty;

        /*
         * Keeps track of time, incremented by the core.  We use this to
         * avoid attributing multiple hits within the same tick.
         *
         * Access to tick_protected should be done with the spin lock held.
         * It's copied to tick at the start of the map function (within the
         * mutex).
         */
        spinlock_t tick_lock;
        unsigned tick_protected;
        unsigned tick;

        /*
         * A count of the number of times the map function has been called
         * and found an entry in the pre_cache or cache.  Currently used to
         * calculate the generation.
         */
        unsigned hit_count;

        /*
         * A generation is a longish period that is used to trigger some
         * book keeping effects.  eg, decrementing hit counts on entries.
         * This is needed to allow the cache to evolve as io patterns
         * change.
         */
        unsigned generation;
        unsigned generation_period; /* in lookups (will probably change) */

        unsigned discard_promote_adjustment;
        unsigned read_promote_adjustment;
        unsigned write_promote_adjustment;

        /*
         * The hash table allows us to quickly find an entry by origin
         * block.  Both pre_cache and cache entries are in here.
         */
        unsigned nr_buckets;
        dm_block_t hash_bits;
        struct hlist_head *table;
};

#define DEFAULT_DISCARD_PROMOTE_ADJUSTMENT 1
#define DEFAULT_READ_PROMOTE_ADJUSTMENT 4
#define DEFAULT_WRITE_PROMOTE_ADJUSTMENT 8
#define DISCOURAGE_DEMOTING_DIRTY_THRESHOLD 128

/*----------------------------------------------------------------*/
/*
 * Simple hash table implementation.  Should replace with the standard hash
 * table that's making its way upstream.
 */
static void hash_insert(struct mq_policy *mq, struct entry *e)
{
        unsigned h = hash_64(from_oblock(e->oblock), mq->hash_bits);

        hlist_add_head(&e->hlist, mq->table + h);
}
static struct entry *hash_lookup(struct mq_policy *mq, dm_oblock_t oblock)
{
        unsigned h = hash_64(from_oblock(oblock), mq->hash_bits);
        struct hlist_head *bucket = mq->table + h;
        struct entry *e;

        hlist_for_each_entry(e, bucket, hlist)
                if (e->oblock == oblock) {
                        hlist_del(&e->hlist);
                        hlist_add_head(&e->hlist, bucket);
                        return e;
                }

        return NULL;
}
static void hash_remove(struct entry *e)
{
        hlist_del(&e->hlist);
}

/*----------------------------------------------------------------*/
static bool any_free_cblocks(struct mq_policy *mq)
{
        return !epool_empty(&mq->cache_pool);
}
static bool any_clean_cblocks(struct mq_policy *mq)
{
        return !queue_empty(&mq->cache_clean);
}

/*----------------------------------------------------------------*/
/*
 * Now we get to the meat of the policy.  This section deals with deciding
 * when to add entries to the pre_cache and cache, and move between
 * them.
 */

/*
 * The queue level is based on the log2 of the hit count.
 */
static unsigned queue_level(struct entry *e)
{
        return min((unsigned) ilog2(e->hit_count), NR_QUEUE_LEVELS - 1u);
}
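/*
 * For example, an entry with a hit count of 1 sits in level 0, a count of
 * 8..15 maps to level 3, and anything at or above 2^15 is clamped to the
 * top level (15).
 */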
static bool in_cache(struct mq_policy *mq, struct entry *e)
{
        return in_pool(&mq->cache_pool, e);
}
/*
 * Inserts the entry into the pre_cache or the cache.  Ensures the cache
 * block is marked as allocated if necessary.  Inserts into the hash table.
 * Sets the tick which records when the entry was last moved about.
 */
static void push(struct mq_policy *mq, struct entry *e)
{
        e->tick = mq->tick;
        hash_insert(mq, e);

        if (in_cache(mq, e))
                queue_push(e->dirty ? &mq->cache_dirty : &mq->cache_clean,
                           queue_level(e), &e->list);
        else
                queue_push(&mq->pre_cache, queue_level(e), &e->list);
}
/*
 * Removes an entry from the pre_cache or cache.  Removes from the hash table.
 */
static void del(struct mq_policy *mq, struct entry *e)
{
        queue_remove(&e->list);
        hash_remove(e);
}
/*
 * Like del, except it removes the first entry in the queue (ie. the least
 * recently used).
 */
static struct entry *pop(struct mq_policy *mq, struct queue *q)
{
        struct entry *e;
        struct list_head *h = queue_pop(q);

        if (!h)
                return NULL;

        e = container_of(h, struct entry, list);
        hash_remove(e);

        return e;
}
static struct entry *peek(struct queue *q)
{
        struct list_head *h = queue_peek(q);
        return h ? container_of(h, struct entry, list) : NULL;
}
/*
 * Has this entry already been updated?
 */
static bool updated_this_tick(struct mq_policy *mq, struct entry *e)
{
        return mq->tick == e->tick;
}
/*
 * The promotion threshold is adjusted every generation.  As are the counts
 * of the entries.
 *
 * At the moment the threshold is taken by averaging the hit counts of some
 * of the entries in the cache (the first 20 entries across all levels in
 * ascending order, giving preference to the clean entries at each level).
 *
 * We can be much cleverer than this though.  For example, each promotion
 * could bump up the threshold, helping to prevent churn.  Much more to do
 * here.
 */

#define MAX_TO_AVERAGE 20

static void check_generation(struct mq_policy *mq)
{
        unsigned total = 0, nr = 0, count = 0, level;
        struct list_head *head;
        struct entry *e;

        if ((mq->hit_count >= mq->generation_period) && (epool_empty(&mq->cache_pool))) {
                mq->hit_count = 0;
                mq->generation++;

                for (level = 0; level < NR_QUEUE_LEVELS && count < MAX_TO_AVERAGE; level++) {
                        head = mq->cache_clean.qs + level;
                        list_for_each_entry(e, head, list) {
                                nr++;
                                total += e->hit_count;

                                if (++count >= MAX_TO_AVERAGE)
                                        break;
                        }

                        head = mq->cache_dirty.qs + level;
                        list_for_each_entry(e, head, list) {
                                nr++;
                                total += e->hit_count;

                                if (++count >= MAX_TO_AVERAGE)
                                        break;
                        }
                }
        }
}
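/*
 * In other words: a generation only rolls over once the cache pool is full
 * and mq->hit_count reaches generation_period.  Entries record the
 * generation at which they were last requeued (see
 * requeue_and_update_tick() below), which provides a hook for decaying
 * hit counts over time (currently commented out).
 */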
/*
 * Whenever we use an entry we bump up its hit counter, and push it to the
 * back of its current level.
 */
static void requeue_and_update_tick(struct mq_policy *mq, struct entry *e)
{
        if (updated_this_tick(mq, e))
                return;

        e->hit_count++;
        mq->hit_count++;
        check_generation(mq);

        /* generation adjustment, to stop the counts increasing forever. */
        /* e->hit_count -= min(e->hit_count - 1, mq->generation - e->generation); */
        e->generation = mq->generation;

        del(mq, e);
        push(mq, e);
}
/*
 * Demote the least recently used entry from the cache to the pre_cache.
 * Returns the new cache entry to use, and the old origin block it was
 * mapped to.
 *
 * We drop the hit count on the demoted entry back to 1 to stop it bouncing
 * straight back into the cache if it's subsequently hit.  There are
 * various options here, and more experimentation would be good:
 *
 * - just forget about the demoted entry completely (ie. don't insert it
 *   into the pre_cache).
 * - divide the hit count rather than setting it to some hard coded value.
 * - set the hit count to a hard coded value other than 1, eg, is it better
 *   if it goes in at level 2?
 */
static int demote_cblock(struct mq_policy *mq, dm_oblock_t *oblock)
{
        struct entry *demoted = pop(mq, &mq->cache_clean);

        if (!demoted)
                /*
                 * We could get a block from mq->cache_dirty, but that
                 * would add extra latency to the triggering bio as it
                 * waits for the writeback.  Better to not promote this
                 * time and hope there's a clean block next time this block
                 * is hit.
                 */
                return -ENOSPC;

        *oblock = demoted->oblock;
        free_entry(&mq->cache_pool, demoted);

        /*
         * We used to put the demoted block into the pre-cache, but I think
         * it's simpler to just let it work its way up from zero again.
         * Stops blocks flickering in and out of the cache.
         */

        return 0;
}
/*
 * Entries in the pre_cache whose hit count passes the promotion
 * threshold move to the cache proper.  Working out the correct
 * value for the promotion_threshold is crucial to this policy.
 */
static unsigned promote_threshold(struct mq_policy *mq)
{
        struct entry *e;

        if (any_free_cblocks(mq))
                return 0;

        e = peek(&mq->cache_clean);
        if (e)
                return e->hit_count;

        e = peek(&mq->cache_dirty);
        if (e)
                return e->hit_count + DISCOURAGE_DEMOTING_DIRTY_THRESHOLD;

        /* This should never happen */
        return 0;
}
/*
 * We modify the basic promotion_threshold depending on the specific io.
 *
 * If the origin block has been discarded then there's no cost to copy it
 * to the cache.
 *
 * We bias towards reads, since they can be demoted at no cost if they
 * haven't been dirtied.
 */
static unsigned adjusted_promote_threshold(struct mq_policy *mq,
                                           bool discarded_oblock, int data_dir)
{
        if (data_dir == READ)
                return promote_threshold(mq) + mq->read_promote_adjustment;

        if (discarded_oblock && (any_free_cblocks(mq) || any_clean_cblocks(mq))) {
                /*
                 * We don't need to do any copying at all, so give this a
                 * very low threshold.
                 */
                return mq->discard_promote_adjustment;
        }

        return promote_threshold(mq) + mq->write_promote_adjustment;
}
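/*
 * Worked example with the default adjustments, assuming a full cache
 * whose coldest clean entry has a hit count of 7: a read of a pre_cache
 * block must reach 7 + 4 = 11 hits before it is promoted, a write must
 * reach 7 + 8 = 15, but a write to a discarded block (with a clean or
 * free block available) only needs to clear the discard adjustment of 1,
 * which no_entry_found() below treats as "promote on first touch".
 */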
static bool should_promote(struct mq_policy *mq, struct entry *e,
                           bool discarded_oblock, int data_dir)
{
        return e->hit_count >=
                adjusted_promote_threshold(mq, discarded_oblock, data_dir);
}
static int cache_entry_found(struct mq_policy *mq,
                             struct entry *e,
                             struct policy_result *result)
{
        requeue_and_update_tick(mq, e);

        if (in_cache(mq, e)) {
                result->op = POLICY_HIT;
                result->cblock = infer_cblock(&mq->cache_pool, e);
        }

        return 0;
}
/*
 * Moves an entry from the pre_cache to the cache.  The main work is
 * finding which cache block to use.
 */
static int pre_cache_to_cache(struct mq_policy *mq, struct entry *e,
                              struct policy_result *result)
{
        int r;
        struct entry *new_e;

        /* Ensure there's a free cblock in the cache */
        if (epool_empty(&mq->cache_pool)) {
                result->op = POLICY_REPLACE;
                r = demote_cblock(mq, &result->old_oblock);
                if (r) {
                        result->op = POLICY_MISS;
                        return 0;
                }

        } else
                result->op = POLICY_NEW;

        new_e = alloc_entry(&mq->cache_pool);
        BUG_ON(!new_e);

        new_e->oblock = e->oblock;
        new_e->dirty = false;
        new_e->hit_count = e->hit_count;
        new_e->generation = e->generation;
        new_e->tick = e->tick;

        del(mq, e);
        free_entry(&mq->pre_cache_pool, e);
        push(mq, new_e);

        result->cblock = infer_cblock(&mq->cache_pool, new_e);

        return 0;
}
static int pre_cache_entry_found(struct mq_policy *mq, struct entry *e,
                                 bool can_migrate, bool discarded_oblock,
                                 int data_dir, struct policy_result *result)
{
        int r = 0;
        bool updated = updated_this_tick(mq, e);

        if ((!discarded_oblock && updated) ||
            !should_promote(mq, e, discarded_oblock, data_dir)) {
                requeue_and_update_tick(mq, e);
                result->op = POLICY_MISS;

        } else if (!can_migrate)
                r = -EWOULDBLOCK;

        else {
                requeue_and_update_tick(mq, e);
                r = pre_cache_to_cache(mq, e, result);
        }

        return r;
}
static void insert_in_pre_cache(struct mq_policy *mq,
                                dm_oblock_t oblock)
{
        struct entry *e = alloc_entry(&mq->pre_cache_pool);

        if (!e) {
                /*
                 * There's no spare entry structure, so we grab the least
                 * used one from the pre_cache.
                 */
                e = pop(mq, &mq->pre_cache);
                if (!e) {
                        DMWARN("couldn't pop from pre cache");
                        return;
                }
        }

        e->dirty = false;
        e->oblock = oblock;
        e->hit_count = 1;
        e->generation = mq->generation;
        push(mq, e);
}
static void insert_in_cache(struct mq_policy *mq, dm_oblock_t oblock,
                            struct policy_result *result)
{
        int r;
        struct entry *e;

        if (epool_empty(&mq->cache_pool)) {
                result->op = POLICY_REPLACE;
                r = demote_cblock(mq, &result->old_oblock);
                if (r) {
                        result->op = POLICY_MISS;
                        insert_in_pre_cache(mq, oblock);
                        return;
                }

                /*
                 * This will always succeed, since we've just demoted.
                 */
                e = alloc_entry(&mq->cache_pool);
                BUG_ON(!e);

        } else {
                e = alloc_entry(&mq->cache_pool);
                result->op = POLICY_NEW;
        }

        e->oblock = oblock;
        e->dirty = false;
        e->hit_count = 1;
        e->generation = mq->generation;
        push(mq, e);

        result->cblock = infer_cblock(&mq->cache_pool, e);
}
static int no_entry_found(struct mq_policy *mq, dm_oblock_t oblock,
                          bool can_migrate, bool discarded_oblock,
                          int data_dir, struct policy_result *result)
{
        if (adjusted_promote_threshold(mq, discarded_oblock, data_dir) <= 1) {
                if (can_migrate)
                        insert_in_cache(mq, oblock, result);
                else
                        return -EWOULDBLOCK;
        } else {
                insert_in_pre_cache(mq, oblock);
                result->op = POLICY_MISS;
        }

        return 0;
}
/*
 * Looks the oblock up in the hash table, then decides whether to put in
 * pre_cache, or cache etc.
 */
static int map(struct mq_policy *mq, dm_oblock_t oblock,
               bool can_migrate, bool discarded_oblock,
               int data_dir, struct policy_result *result)
{
        int r = 0;
        struct entry *e = hash_lookup(mq, oblock);

        if (e && in_cache(mq, e))
                r = cache_entry_found(mq, e, result);

        else if (mq->tracker.thresholds[PATTERN_SEQUENTIAL] &&
                 iot_pattern(&mq->tracker) == PATTERN_SEQUENTIAL)
                result->op = POLICY_MISS;

        else if (e)
                r = pre_cache_entry_found(mq, e, can_migrate, discarded_oblock,
                                          data_dir, result);

        else
                r = no_entry_found(mq, oblock, can_migrate, discarded_oblock,
                                   data_dir, result);

        if (r == -EWOULDBLOCK)
                result->op = POLICY_MISS;

        return r;
}
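/*
 * To summarise the decision tree above: a block already in the cache is a
 * POLICY_HIT; sequential io (when sequential_threshold is non-zero) is
 * left on the origin as a POLICY_MISS; a block in the pre_cache is
 * promoted only once it clears the adjusted threshold; and an unknown
 * block either goes straight to the cache (threshold <= 1, e.g. a
 * recently discarded block) or starts its life in the pre_cache.
 * -EWOULDBLOCK from the helpers is reported to the caller but still
 * surfaces as a miss.
 */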
/*----------------------------------------------------------------*/
/*
 * Public interface, via the policy struct.  See dm-cache-policy.h for a
 * description of these.
 */

static struct mq_policy *to_mq_policy(struct dm_cache_policy *p)
{
        return container_of(p, struct mq_policy, policy);
}
static void mq_destroy(struct dm_cache_policy *p)
{
        struct mq_policy *mq = to_mq_policy(p);

        vfree(mq->table);
        epool_exit(&mq->cache_pool);
        epool_exit(&mq->pre_cache_pool);
        kfree(mq);
}
static void copy_tick(struct mq_policy *mq)
{
        unsigned long flags;

        spin_lock_irqsave(&mq->tick_lock, flags);
        mq->tick = mq->tick_protected;
        spin_unlock_irqrestore(&mq->tick_lock, flags);
}
static int mq_map(struct dm_cache_policy *p, dm_oblock_t oblock,
                  bool can_block, bool can_migrate, bool discarded_oblock,
                  struct bio *bio, struct policy_result *result)
{
        int r;
        struct mq_policy *mq = to_mq_policy(p);

        result->op = POLICY_MISS;

        if (can_block)
                mutex_lock(&mq->lock);
        else if (!mutex_trylock(&mq->lock))
                return -EWOULDBLOCK;

        copy_tick(mq);

        iot_examine_bio(&mq->tracker, bio);
        r = map(mq, oblock, can_migrate, discarded_oblock,
                bio_data_dir(bio), result);

        mutex_unlock(&mq->lock);

        return r;
}
static int mq_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock)
{
        int r;
        struct mq_policy *mq = to_mq_policy(p);
        struct entry *e;

        if (!mutex_trylock(&mq->lock))
                return -EWOULDBLOCK;

        e = hash_lookup(mq, oblock);
        if (e && in_cache(mq, e)) {
                *cblock = infer_cblock(&mq->cache_pool, e);
                r = 0;
        } else
                r = -ENOENT;

        mutex_unlock(&mq->lock);

        return r;
}
static void __mq_set_clear_dirty(struct mq_policy *mq, dm_oblock_t oblock, bool set)
{
        struct entry *e;

        e = hash_lookup(mq, oblock);
        BUG_ON(!e || !in_cache(mq, e));

        del(mq, e);
        e->dirty = set;
        push(mq, e);
}
static void mq_set_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
{
        struct mq_policy *mq = to_mq_policy(p);

        mutex_lock(&mq->lock);
        __mq_set_clear_dirty(mq, oblock, true);
        mutex_unlock(&mq->lock);
}
static void mq_clear_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
{
        struct mq_policy *mq = to_mq_policy(p);

        mutex_lock(&mq->lock);
        __mq_set_clear_dirty(mq, oblock, false);
        mutex_unlock(&mq->lock);
}
static int mq_load_mapping(struct dm_cache_policy *p,
                           dm_oblock_t oblock, dm_cblock_t cblock,
                           uint32_t hint, bool hint_valid)
{
        struct mq_policy *mq = to_mq_policy(p);
        struct entry *e;

        e = alloc_particular_entry(&mq->cache_pool, cblock);
        e->oblock = oblock;
        e->dirty = false;       /* this gets corrected in a minute */
        e->hit_count = hint_valid ? hint : 1;
        e->generation = mq->generation;
        push(mq, e);

        return 0;
}
static int mq_save_hints(struct mq_policy *mq, struct queue *q,
                         policy_walk_fn fn, void *context)
{
        int r;
        unsigned level;
        struct entry *e;

        for (level = 0; level < NR_QUEUE_LEVELS; level++)
                list_for_each_entry(e, q->qs + level, list) {
                        r = fn(context, infer_cblock(&mq->cache_pool, e),
                               e->oblock, e->hit_count);
                        if (r)
                                return r;
                }

        return 0;
}
static int mq_walk_mappings(struct dm_cache_policy *p, policy_walk_fn fn,
                            void *context)
{
        struct mq_policy *mq = to_mq_policy(p);
        int r;

        mutex_lock(&mq->lock);

        r = mq_save_hints(mq, &mq->cache_clean, fn, context);
        if (!r)
                r = mq_save_hints(mq, &mq->cache_dirty, fn, context);

        mutex_unlock(&mq->lock);

        return r;
}
static void __remove_mapping(struct mq_policy *mq, dm_oblock_t oblock)
{
        struct entry *e;

        e = hash_lookup(mq, oblock);
        BUG_ON(!e || !in_cache(mq, e));

        del(mq, e);
        free_entry(&mq->cache_pool, e);
}
static void mq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock)
{
        struct mq_policy *mq = to_mq_policy(p);

        mutex_lock(&mq->lock);
        __remove_mapping(mq, oblock);
        mutex_unlock(&mq->lock);
}
static int __remove_cblock(struct mq_policy *mq, dm_cblock_t cblock)
{
        struct entry *e = epool_find(&mq->cache_pool, cblock);

        if (!e)
                return -ENODATA;

        del(mq, e);
        free_entry(&mq->cache_pool, e);

        return 0;
}
static int mq_remove_cblock(struct dm_cache_policy *p, dm_cblock_t cblock)
{
        int r;
        struct mq_policy *mq = to_mq_policy(p);

        mutex_lock(&mq->lock);
        r = __remove_cblock(mq, cblock);
        mutex_unlock(&mq->lock);

        return r;
}
static int __mq_writeback_work(struct mq_policy *mq, dm_oblock_t *oblock,
                               dm_cblock_t *cblock)
{
        struct entry *e = pop(mq, &mq->cache_dirty);

        if (!e)
                return -ENODATA;

        *oblock = e->oblock;
        *cblock = infer_cblock(&mq->cache_pool, e);
        e->dirty = false;
        push(mq, e);

        return 0;
}
static int mq_writeback_work(struct dm_cache_policy *p, dm_oblock_t *oblock,
                             dm_cblock_t *cblock)
{
        int r;
        struct mq_policy *mq = to_mq_policy(p);

        mutex_lock(&mq->lock);
        r = __mq_writeback_work(mq, oblock, cblock);
        mutex_unlock(&mq->lock);

        return r;
}
static void __force_mapping(struct mq_policy *mq,
                            dm_oblock_t current_oblock, dm_oblock_t new_oblock)
{
        struct entry *e = hash_lookup(mq, current_oblock);

        if (e && in_cache(mq, e)) {
                del(mq, e);
                e->oblock = new_oblock;
                e->dirty = true;
                push(mq, e);
        }
}
static void mq_force_mapping(struct dm_cache_policy *p,
                             dm_oblock_t current_oblock, dm_oblock_t new_oblock)
{
        struct mq_policy *mq = to_mq_policy(p);

        mutex_lock(&mq->lock);
        __force_mapping(mq, current_oblock, new_oblock);
        mutex_unlock(&mq->lock);
}
static dm_cblock_t mq_residency(struct dm_cache_policy *p)
{
        dm_cblock_t r;
        struct mq_policy *mq = to_mq_policy(p);

        mutex_lock(&mq->lock);
        r = to_cblock(mq->cache_pool.nr_allocated);
        mutex_unlock(&mq->lock);

        return r;
}
static void mq_tick(struct dm_cache_policy *p)
{
        struct mq_policy *mq = to_mq_policy(p);
        unsigned long flags;

        spin_lock_irqsave(&mq->tick_lock, flags);
        mq->tick_protected++;
        spin_unlock_irqrestore(&mq->tick_lock, flags);
}
static int mq_set_config_value(struct dm_cache_policy *p,
                               const char *key, const char *value)
{
        struct mq_policy *mq = to_mq_policy(p);
        unsigned long tmp;

        if (kstrtoul(value, 10, &tmp))
                return -EINVAL;

        if (!strcasecmp(key, "random_threshold")) {
                mq->tracker.thresholds[PATTERN_RANDOM] = tmp;

        } else if (!strcasecmp(key, "sequential_threshold")) {
                mq->tracker.thresholds[PATTERN_SEQUENTIAL] = tmp;

        } else if (!strcasecmp(key, "discard_promote_adjustment"))
                mq->discard_promote_adjustment = tmp;

        else if (!strcasecmp(key, "read_promote_adjustment"))
                mq->read_promote_adjustment = tmp;

        else if (!strcasecmp(key, "write_promote_adjustment"))
                mq->write_promote_adjustment = tmp;

        else
                return -EINVAL;

        return 0;
}
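/*
 * The tunables accepted above are: random_threshold, sequential_threshold,
 * discard_promote_adjustment, read_promote_adjustment and
 * write_promote_adjustment, all parsed as base-10 unsigned values; any
 * other key is rejected.  Setting sequential_threshold to 0 disables the
 * sequential io bypass in map().
 */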
static int mq_emit_config_values(struct dm_cache_policy *p, char *result, unsigned maxlen)
{
        ssize_t sz = 0;
        struct mq_policy *mq = to_mq_policy(p);

        DMEMIT("10 random_threshold %u "
               "sequential_threshold %u "
               "discard_promote_adjustment %u "
               "read_promote_adjustment %u "
               "write_promote_adjustment %u",
               mq->tracker.thresholds[PATTERN_RANDOM],
               mq->tracker.thresholds[PATTERN_SEQUENTIAL],
               mq->discard_promote_adjustment,
               mq->read_promote_adjustment,
               mq->write_promote_adjustment);

        return 0;
}
/* Init the policy plugin interface function pointers. */
static void init_policy_functions(struct mq_policy *mq)
{
        mq->policy.destroy = mq_destroy;
        mq->policy.map = mq_map;
        mq->policy.lookup = mq_lookup;
        mq->policy.set_dirty = mq_set_dirty;
        mq->policy.clear_dirty = mq_clear_dirty;
        mq->policy.load_mapping = mq_load_mapping;
        mq->policy.walk_mappings = mq_walk_mappings;
        mq->policy.remove_mapping = mq_remove_mapping;
        mq->policy.remove_cblock = mq_remove_cblock;
        mq->policy.writeback_work = mq_writeback_work;
        mq->policy.force_mapping = mq_force_mapping;
        mq->policy.residency = mq_residency;
        mq->policy.tick = mq_tick;
        mq->policy.emit_config_values = mq_emit_config_values;
        mq->policy.set_config_value = mq_set_config_value;
}
static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
                                         sector_t origin_size,
                                         sector_t cache_block_size)
{
        struct mq_policy *mq = kzalloc(sizeof(*mq), GFP_KERNEL);

        if (!mq)
                return NULL;

        init_policy_functions(mq);
        iot_init(&mq->tracker, SEQUENTIAL_THRESHOLD_DEFAULT, RANDOM_THRESHOLD_DEFAULT);
        mq->cache_size = cache_size;

        if (epool_init(&mq->pre_cache_pool, from_cblock(cache_size))) {
                DMERR("couldn't initialize pool of pre-cache entries");
                goto bad_pre_cache_init;
        }

        if (epool_init(&mq->cache_pool, from_cblock(cache_size))) {
                DMERR("couldn't initialize pool of cache entries");
                goto bad_cache_init;
        }

        mq->tick_protected = 0;
        mq->tick = 0;
        mq->hit_count = 0;
        mq->generation = 0;
        mq->discard_promote_adjustment = DEFAULT_DISCARD_PROMOTE_ADJUSTMENT;
        mq->read_promote_adjustment = DEFAULT_READ_PROMOTE_ADJUSTMENT;
        mq->write_promote_adjustment = DEFAULT_WRITE_PROMOTE_ADJUSTMENT;
        mutex_init(&mq->lock);
        spin_lock_init(&mq->tick_lock);

        queue_init(&mq->pre_cache);
        queue_init(&mq->cache_clean);
        queue_init(&mq->cache_dirty);

        mq->generation_period = max((unsigned) from_cblock(cache_size), 1024U);

        mq->nr_buckets = next_power(from_cblock(cache_size) / 2, 16);
        mq->hash_bits = ffs(mq->nr_buckets) - 1;
        mq->table = vzalloc(sizeof(*mq->table) * mq->nr_buckets);
        if (!mq->table)
                goto bad_alloc_table;

        return &mq->policy;

bad_alloc_table:
        epool_exit(&mq->cache_pool);
bad_cache_init:
        epool_exit(&mq->pre_cache_pool);
bad_pre_cache_init:
        kfree(mq);

        return NULL;
}
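/*
 * Sizing example: for a cache of 16384 blocks the constructor above picks
 * nr_buckets = next_power(16384 / 2, 16) = 8192 hash buckets, hash_bits =
 * ffs(8192) - 1 = 13, and a generation_period of max(16384, 1024) = 16384
 * lookups.
 */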
/*----------------------------------------------------------------*/
static struct dm_cache_policy_type mq_policy_type = {
        .name = "mq",
        .version = {1, 3, 0},
        .hint_size = 4,
        .owner = THIS_MODULE,
        .create = mq_create
};

static struct dm_cache_policy_type default_policy_type = {
        .name = "default",
        .version = {1, 3, 0},
        .hint_size = 4,
        .owner = THIS_MODULE,
        .create = mq_create,
        .real = &mq_policy_type
};
static int __init mq_init(void)
{
        int r;

        mq_entry_cache = kmem_cache_create("dm_mq_policy_cache_entry",
                                           sizeof(struct entry),
                                           __alignof__(struct entry),
                                           0, NULL);
        if (!mq_entry_cache)
                goto bad;

        r = dm_cache_policy_register(&mq_policy_type);
        if (r) {
                DMERR("register failed %d", r);
                goto bad_register_mq;
        }

        r = dm_cache_policy_register(&default_policy_type);
        if (!r) {
                DMINFO("version %u.%u.%u loaded",
                       mq_policy_type.version[0],
                       mq_policy_type.version[1],
                       mq_policy_type.version[2]);
                return 0;
        }

        DMERR("register failed (as default) %d", r);

        dm_cache_policy_unregister(&mq_policy_type);

bad_register_mq:
        kmem_cache_destroy(mq_entry_cache);

bad:
        return -ENOMEM;
}
static void __exit mq_exit(void)
{
        dm_cache_policy_unregister(&mq_policy_type);
        dm_cache_policy_unregister(&default_policy_type);

        kmem_cache_destroy(mq_entry_cache);
}
module_init(mq_init);
module_exit(mq_exit);

MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("mq cache policy");

MODULE_ALIAS("dm-cache-default");