/*
 * Copyright (C) 2012 Red Hat. All rights reserved.
 *
 * This file is released under the GPL.
 */
#include "dm-cache-policy.h"

#include <linux/hash.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#define DM_MSG_PREFIX "cache-policy-mq"
static struct kmem_cache *mq_entry_cache;

/*----------------------------------------------------------------*/

static unsigned next_power(unsigned n, unsigned min)
{
	return roundup_pow_of_two(max(n, min));
}

/*----------------------------------------------------------------*/
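/*
 * Illustrative sketch (not part of the driver): next_power() is used
 * further down by mq_create() to size the hash table.  The hypothetical
 * helper below just shows the arithmetic for a cache of 300 blocks:
 * 300 / 2 = 150 is rounded up to 256 buckets, giving ffs(256) - 1 = 8
 * hash bits.
 */
#if 0
static void example_hash_sizing(void)
{
	unsigned nr_buckets = next_power(300 / 2, 16);	/* == 256 */
	unsigned hash_bits = ffs(nr_buckets) - 1;	/* == 8 */

	(void) hash_bits;
}
#endif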
/*
 * Large, sequential ios are probably better left on the origin device since
 * spindles tend to have good bandwidth.
 *
 * The io_tracker tries to spot when the io is in one of these sequential
 * modes.
 *
 * The two thresholds that switch between random and sequential io mode
 * default as follows and can be adjusted via the constructor and message
 * interfaces.
 */
#define RANDOM_THRESHOLD_DEFAULT 4
#define SEQUENTIAL_THRESHOLD_DEFAULT 512
enum io_pattern {
	PATTERN_SEQUENTIAL,
	PATTERN_RANDOM
};

struct io_tracker {
	enum io_pattern pattern;

	unsigned nr_seq_samples;
	unsigned nr_rand_samples;
	unsigned thresholds[2];

	dm_oblock_t last_end_oblock;
};
static void iot_init(struct io_tracker *t,
		     int sequential_threshold, int random_threshold)
{
	t->pattern = PATTERN_RANDOM;
	t->nr_seq_samples = 0;
	t->nr_rand_samples = 0;
	t->last_end_oblock = 0;
	t->thresholds[PATTERN_RANDOM] = random_threshold;
	t->thresholds[PATTERN_SEQUENTIAL] = sequential_threshold;
}

static enum io_pattern iot_pattern(struct io_tracker *t)
{
	return t->pattern;
}

static void iot_update_stats(struct io_tracker *t, struct bio *bio)
{
	if (bio->bi_iter.bi_sector == from_oblock(t->last_end_oblock) + 1)
		t->nr_seq_samples++;
	else {
		/*
		 * Just one non-sequential IO is enough to reset the
		 * counters.
		 */
		if (t->nr_seq_samples) {
			t->nr_seq_samples = 0;
			t->nr_rand_samples = 0;
		}

		t->nr_rand_samples++;
	}

	t->last_end_oblock = to_oblock(bio_end_sector(bio) - 1);
}

static void iot_check_for_pattern_switch(struct io_tracker *t)
{
	switch (t->pattern) {
	case PATTERN_SEQUENTIAL:
		if (t->nr_rand_samples >= t->thresholds[PATTERN_RANDOM]) {
			t->pattern = PATTERN_RANDOM;
			t->nr_seq_samples = t->nr_rand_samples = 0;
		}
		break;

	case PATTERN_RANDOM:
		if (t->nr_seq_samples >= t->thresholds[PATTERN_SEQUENTIAL]) {
			t->pattern = PATTERN_SEQUENTIAL;
			t->nr_seq_samples = t->nr_rand_samples = 0;
		}
		break;
	}
}

static void iot_examine_bio(struct io_tracker *t, struct bio *bio)
{
	iot_update_stats(t, bio);
	iot_check_for_pattern_switch(t);
}

/*----------------------------------------------------------------*/
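/*
 * Illustrative sketch (not part of the driver): with the default
 * thresholds above, the tracker flips to PATTERN_SEQUENTIAL after 512
 * contiguous bios and back to PATTERN_RANDOM after 4 discontiguous
 * ones.  The helper below is hypothetical and only shows how a caller
 * such as map() consults the tracker to leave sequential io on the
 * origin device.
 */
#if 0
static bool example_should_bypass_cache(struct io_tracker *t, struct bio *bio)
{
	iot_examine_bio(t, bio);	/* update counters, maybe switch pattern */

	return iot_pattern(t) == PATTERN_SEQUENTIAL;
}
#endif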
/*
 * This queue is divided up into different levels, allowing us to push
 * entries to the back of any of the levels.  Think of it as a partially
 * sorted queue.
 */
#define NR_QUEUE_LEVELS 16u
#define NR_SENTINELS NR_QUEUE_LEVELS * 3

#define WRITEBACK_PERIOD HZ
struct queue {
	unsigned nr_elts;
	bool current_writeback_sentinels;
	unsigned long next_writeback;
	struct list_head qs[NR_QUEUE_LEVELS];
	struct list_head sentinels[NR_SENTINELS];
};
static void queue_init(struct queue *q)
{
	unsigned i;

	q->nr_elts = 0;
	q->current_writeback_sentinels = false;
	q->next_writeback = 0;
	for (i = 0; i < NR_QUEUE_LEVELS; i++) {
		INIT_LIST_HEAD(q->qs + i);
		INIT_LIST_HEAD(q->sentinels + i);
		INIT_LIST_HEAD(q->sentinels + NR_QUEUE_LEVELS + i);
		INIT_LIST_HEAD(q->sentinels + (2 * NR_QUEUE_LEVELS) + i);
	}
}

static unsigned queue_size(struct queue *q)
{
	return q->nr_elts;
}

static bool queue_empty(struct queue *q)
{
	return q->nr_elts == 0;
}
/*
 * Insert an entry to the back of the given level.
 */
static void queue_push(struct queue *q, unsigned level, struct list_head *elt)
{
	q->nr_elts++;
	list_add_tail(elt, q->qs + level);
}

static void queue_remove(struct queue *q, struct list_head *elt)
{
	q->nr_elts--;
	list_del(elt);
}
static bool is_sentinel(struct queue *q, struct list_head *h)
{
	return (h >= q->sentinels) && (h < (q->sentinels + NR_SENTINELS));
}
/*
 * Gives us the oldest entry of the lowest populated level.  If the first
 * level is emptied then we shift down one level.
 */
static struct list_head *queue_peek(struct queue *q)
{
	unsigned level;
	struct list_head *h;

	for (level = 0; level < NR_QUEUE_LEVELS; level++)
		list_for_each(h, q->qs + level)
			if (!is_sentinel(q, h))
				return h;

	return NULL;
}
static struct list_head *queue_pop(struct queue *q)
{
	struct list_head *r = queue_peek(q);

	if (r) {
		q->nr_elts--;
		list_del(r);
	}

	return r;
}

/*
 * Pops an entry from a level that is not past a sentinel.
 */
static struct list_head *queue_pop_old(struct queue *q)
{
	unsigned level;
	struct list_head *h;

	for (level = 0; level < NR_QUEUE_LEVELS; level++)
		list_for_each(h, q->qs + level) {
			if (is_sentinel(q, h))
				break;

			q->nr_elts--;
			list_del(h);
			return h;
		}

	return NULL;
}

static struct list_head *list_pop(struct list_head *lh)
{
	struct list_head *r = lh->next;

	BUG_ON(!r);
	list_del_init(r);

	return r;
}
static struct list_head *writeback_sentinel(struct queue *q, unsigned level)
{
	if (q->current_writeback_sentinels)
		return q->sentinels + NR_QUEUE_LEVELS + level;

	return q->sentinels + 2 * NR_QUEUE_LEVELS + level;
}
static void queue_update_writeback_sentinels(struct queue *q)
{
	unsigned i;
	struct list_head *h;

	if (time_after(jiffies, q->next_writeback)) {
		for (i = 0; i < NR_QUEUE_LEVELS; i++) {
			h = writeback_sentinel(q, i);
			list_del(h);
			list_add_tail(h, q->qs + i);
		}

		q->next_writeback = jiffies + WRITEBACK_PERIOD;
		q->current_writeback_sentinels = !q->current_writeback_sentinels;
	}
}
/*
 * Sometimes we want to iterate through entries that have been pushed since
 * a certain event.  We use sentinel entries on the queues to delimit these
 * 'tick' events.
 */
static void queue_tick(struct queue *q)
{
	unsigned i;

	for (i = 0; i < NR_QUEUE_LEVELS; i++) {
		list_del(q->sentinels + i);
		list_add_tail(q->sentinels + i, q->qs + i);
	}
}

typedef void (*iter_fn)(struct list_head *, void *);
static void queue_iterate_tick(struct queue *q, iter_fn fn, void *context)
{
	unsigned i;
	struct list_head *h;

	for (i = 0; i < NR_QUEUE_LEVELS; i++) {
		list_for_each_prev(h, q->qs + i) {
			if (is_sentinel(q, h))
				break;

			fn(h, context);
		}
	}
}

/*----------------------------------------------------------------*/
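/*
 * Illustrative sketch (not part of the driver): the per-level sentinels
 * mark where a level stood at the last tick, so queue_pop_old() only
 * returns entries pushed before that tick.  The hypothetical snippet
 * below shows the intended calling pattern.
 */
#if 0
static void example_tick_usage(struct queue *q)
{
	struct list_head *old;

	queue_tick(q);			/* drop a sentinel at the back of each level */

	/* entries pushed after this point sit behind the sentinels */

	while ((old = queue_pop_old(q)))
		;			/* only entries older than the sentinel */
}
#endif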
/*
 * Describes a cache entry.  Used in both the cache and the pre_cache.
 */
struct entry {
	struct hlist_node hlist;
	struct list_head list;
	dm_oblock_t oblock;

	/*
	 * FIXME: pack these better
	 */
	bool dirty:1;
	unsigned hit_count;
};

/*
 * Rather than storing the cblock in an entry, we allocate all entries in
 * an array, and infer the cblock from the entry position.
 *
 * Free entries are linked together into a list.
 */
struct entry_pool {
	struct entry *entries, *entries_end;
	struct list_head free;
	unsigned nr_allocated;
};
static int epool_init(struct entry_pool *ep, unsigned nr_entries)
{
	unsigned i;

	ep->entries = vzalloc(sizeof(struct entry) * nr_entries);
	if (!ep->entries)
		return -ENOMEM;

	ep->entries_end = ep->entries + nr_entries;

	INIT_LIST_HEAD(&ep->free);
	for (i = 0; i < nr_entries; i++)
		list_add(&ep->entries[i].list, &ep->free);

	ep->nr_allocated = 0;

	return 0;
}

static void epool_exit(struct entry_pool *ep)
{
	vfree(ep->entries);
}

static struct entry *alloc_entry(struct entry_pool *ep)
{
	struct entry *e;

	if (list_empty(&ep->free))
		return NULL;

	e = list_entry(list_pop(&ep->free), struct entry, list);
	INIT_LIST_HEAD(&e->list);
	INIT_HLIST_NODE(&e->hlist);
	ep->nr_allocated++;

	return e;
}
/*
 * This assumes the cblock hasn't already been allocated.
 */
static struct entry *alloc_particular_entry(struct entry_pool *ep, dm_cblock_t cblock)
{
	struct entry *e = ep->entries + from_cblock(cblock);

	list_del_init(&e->list);
	INIT_HLIST_NODE(&e->hlist);
	ep->nr_allocated++;

	return e;
}

static void free_entry(struct entry_pool *ep, struct entry *e)
{
	BUG_ON(!ep->nr_allocated);
	ep->nr_allocated--;
	INIT_HLIST_NODE(&e->hlist);
	list_add(&e->list, &ep->free);
}
/*
 * Returns NULL if the entry is free.
 */
static struct entry *epool_find(struct entry_pool *ep, dm_cblock_t cblock)
{
	struct entry *e = ep->entries + from_cblock(cblock);
	return !hlist_unhashed(&e->hlist) ? e : NULL;
}

static bool epool_empty(struct entry_pool *ep)
{
	return list_empty(&ep->free);
}

static bool in_pool(struct entry_pool *ep, struct entry *e)
{
	return e >= ep->entries && e < ep->entries_end;
}

static dm_cblock_t infer_cblock(struct entry_pool *ep, struct entry *e)
{
	return to_cblock(e - ep->entries);
}

/*----------------------------------------------------------------*/
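/*
 * Illustrative sketch (not part of the driver): because entries live in
 * one array per pool, the cache block number is simply the entry's
 * index, so no cblock field is stored.  Hypothetical usage:
 */
#if 0
static void example_epool_usage(struct entry_pool *ep)
{
	struct entry *e = alloc_entry(ep);		/* take an entry off the free list */

	if (e) {
		dm_cblock_t cblock = infer_cblock(ep, e); /* index of e in ep->entries */

		free_entry(ep, e);			/* back onto the free list */
		(void) cblock;
	}
}
#endif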
struct mq_policy {
	struct dm_cache_policy policy;

	/* protects everything */
	struct mutex lock;
	dm_cblock_t cache_size;
	struct io_tracker tracker;

	/*
	 * Entries come from two pools, one of pre-cache entries, and one
	 * for the cache proper.
	 */
	struct entry_pool pre_cache_pool;
	struct entry_pool cache_pool;

	/*
	 * We maintain three queues of entries.  The cache proper,
	 * consisting of a clean and dirty queue, contains the currently
	 * active mappings.  Whereas the pre_cache tracks blocks that
	 * are being hit frequently and are potential candidates for
	 * promotion to the cache.
	 */
	struct queue pre_cache;
	struct queue cache_clean;
	struct queue cache_dirty;

	/*
	 * Keeps track of time, incremented by the core.  We use this to
	 * avoid attributing multiple hits within the same tick.
	 *
	 * Access to tick_protected should be done with the spin lock held.
	 * It's copied to tick at the start of the map function (within the
	 * mutex).
	 */
	spinlock_t tick_lock;
	unsigned tick_protected;
	unsigned tick;

	/*
	 * A count of the number of times the map function has been called
	 * and found an entry in the pre_cache or cache.  Currently used to
	 * calculate the generation.
	 */
	unsigned hit_count;

	/*
	 * A generation is a longish period that is used to trigger some
	 * bookkeeping effects, eg, decrementing hit counts on entries.
	 * This is needed to allow the cache to evolve as io patterns
	 * change.
	 */
	unsigned generation;
	unsigned generation_period; /* in lookups (will probably change) */

	unsigned discard_promote_adjustment;
	unsigned read_promote_adjustment;
	unsigned write_promote_adjustment;

	/*
	 * The hash table allows us to quickly find an entry by origin
	 * block.  Both pre_cache and cache entries are in here.
	 */
	unsigned nr_buckets;
	dm_block_t hash_bits;
	struct hlist_head *table;
};

#define DEFAULT_DISCARD_PROMOTE_ADJUSTMENT 1
#define DEFAULT_READ_PROMOTE_ADJUSTMENT 4
#define DEFAULT_WRITE_PROMOTE_ADJUSTMENT 8
#define DISCOURAGE_DEMOTING_DIRTY_THRESHOLD 128
/*----------------------------------------------------------------*/

/*
 * Simple hash table implementation.  Should replace with the standard hash
 * table that's making its way upstream.
 */
static void hash_insert(struct mq_policy *mq, struct entry *e)
{
	unsigned h = hash_64(from_oblock(e->oblock), mq->hash_bits);

	hlist_add_head(&e->hlist, mq->table + h);
}
static struct entry *hash_lookup(struct mq_policy *mq, dm_oblock_t oblock)
{
	unsigned h = hash_64(from_oblock(oblock), mq->hash_bits);
	struct hlist_head *bucket = mq->table + h;
	struct entry *e;

	hlist_for_each_entry(e, bucket, hlist)
		if (e->oblock == oblock) {
			hlist_del(&e->hlist);
			hlist_add_head(&e->hlist, bucket);
			return e;
		}

	return NULL;
}

static void hash_remove(struct entry *e)
{
	hlist_del(&e->hlist);
}

/*----------------------------------------------------------------*/
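/*
 * Illustrative sketch (not part of the driver): hash_lookup() moves a
 * found entry to the head of its bucket, so repeatedly looked-up blocks
 * stay cheap to find.  The helper below is hypothetical and only shows
 * how an origin block is tested for membership in either pool.
 */
#if 0
static bool example_is_tracked(struct mq_policy *mq, dm_oblock_t oblock)
{
	return hash_lookup(mq, oblock) != NULL;	/* pre_cache or cache entry */
}
#endif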
static bool any_free_cblocks(struct mq_policy *mq)
{
	return !epool_empty(&mq->cache_pool);
}

static bool any_clean_cblocks(struct mq_policy *mq)
{
	return !queue_empty(&mq->cache_clean);
}

/*----------------------------------------------------------------*/
/*
 * Now we get to the meat of the policy.  This section deals with deciding
 * when to add entries to the pre_cache and cache, and move between
 * them.
 */

/*
 * The queue level is based on the log2 of the hit count.
 */
static unsigned queue_level(struct entry *e)
{
	return min((unsigned) ilog2(e->hit_count), NR_QUEUE_LEVELS - 1u);
}
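/*
 * Illustrative worked example (not part of the driver): hit counts of
 * 1, 2-3, 4-7, ... map to levels 0, 1, 2, ...; anything at or above
 * 2^15 hits is clamped to the top level, 15.
 */
#if 0
static void example_queue_levels(void)
{
	struct entry e = { .hit_count = 6 };

	BUG_ON(queue_level(&e) != 2);			 /* ilog2(6) == 2 */

	e.hit_count = 1u << 20;
	BUG_ON(queue_level(&e) != NR_QUEUE_LEVELS - 1u); /* clamped to 15 */
}
#endif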
static bool in_cache(struct mq_policy *mq, struct entry *e)
{
	return in_pool(&mq->cache_pool, e);
}
/*
 * Inserts the entry into the pre_cache or the cache.  Ensures the cache
 * block is marked as allocated if necessary.  Inserts into the hash table.
 * Sets the tick which records when the entry was last moved about.
 */
static void push(struct mq_policy *mq, struct entry *e)
{
	hash_insert(mq, e);

	if (in_cache(mq, e))
		queue_push(e->dirty ? &mq->cache_dirty : &mq->cache_clean,
			   queue_level(e), &e->list);
	else
		queue_push(&mq->pre_cache, queue_level(e), &e->list);
}

/*
 * Removes an entry from pre_cache or cache.  Removes from the hash table.
 */
static void del(struct mq_policy *mq, struct entry *e)
{
	if (in_cache(mq, e))
		queue_remove(e->dirty ? &mq->cache_dirty : &mq->cache_clean, &e->list);
	else
		queue_remove(&mq->pre_cache, &e->list);

	hash_remove(e);
}
/*
 * Like del, except it removes the first entry in the queue (ie. the least
 * recently used).
 */
static struct entry *pop(struct mq_policy *mq, struct queue *q)
{
	struct entry *e;
	struct list_head *h = queue_pop(q);

	if (!h)
		return NULL;

	e = container_of(h, struct entry, list);
	hash_remove(e);

	return e;
}

static struct entry *pop_old(struct mq_policy *mq, struct queue *q)
{
	struct entry *e;
	struct list_head *h = queue_pop_old(q);

	if (!h)
		return NULL;

	e = container_of(h, struct entry, list);
	hash_remove(e);

	return e;
}

static struct entry *peek(struct queue *q)
{
	struct list_head *h = queue_peek(q);
	return h ? container_of(h, struct entry, list) : NULL;
}
/*
 * The promotion threshold is adjusted every generation.  As are the counts
 * of the entries.
 *
 * At the moment the threshold is taken by averaging the hit counts of some
 * of the entries in the cache (the first 20 entries across all levels in
 * ascending order, giving preference to the clean entries at each level).
 *
 * We can be much cleverer than this though.  For example, each promotion
 * could bump up the threshold, helping to prevent churn.  Much more to do
 * here.
 */

#define MAX_TO_AVERAGE 20
static void check_generation(struct mq_policy *mq)
{
	unsigned total = 0, nr = 0, count = 0, level;
	struct list_head *head;
	struct entry *e;

	if ((mq->hit_count >= mq->generation_period) && (epool_empty(&mq->cache_pool))) {
		mq->hit_count = 0;
		mq->generation++;

		for (level = 0; level < NR_QUEUE_LEVELS && count < MAX_TO_AVERAGE; level++) {
			head = mq->cache_clean.qs + level;
			list_for_each_entry(e, head, list) {
				nr++;
				total += e->hit_count;

				if (++count >= MAX_TO_AVERAGE)
					break;
			}

			head = mq->cache_dirty.qs + level;
			list_for_each_entry(e, head, list) {
				nr++;
				total += e->hit_count;

				if (++count >= MAX_TO_AVERAGE)
					break;
			}
		}
	}
}
/*
 * Whenever we use an entry we bump up its hit counter, and push it to the
 * back of its current level.
 */
static void requeue(struct mq_policy *mq, struct entry *e)
{
	check_generation(mq);
	del(mq, e);
	push(mq, e);
}
/*
 * Demote the least recently used entry from the cache to the pre_cache.
 * Returns the new cache entry to use, and the old origin block it was
 * mapped to.
 *
 * We drop the hit count on the demoted entry back to 1 to stop it bouncing
 * straight back into the cache if it's subsequently hit.  There are
 * various options here, and more experimentation would be good:
 *
 * - just forget about the demoted entry completely (ie. don't insert it
 *   into the pre_cache).
 * - divide the hit count rather than setting it to some hard coded value.
 * - set the hit count to a hard coded value other than 1, eg, is it better
 *   if it goes in at level 2?
 */
static int demote_cblock(struct mq_policy *mq,
			 struct policy_locker *locker, dm_oblock_t *oblock)
{
	struct entry *demoted = peek(&mq->cache_clean);

	if (!demoted)
		/*
		 * We could get a block from mq->cache_dirty, but that
		 * would add extra latency to the triggering bio as it
		 * waits for the writeback.  Better to not promote this
		 * time and hope there's a clean block next time this block
		 * ends up in the cache.
		 */
		return -ENOSPC;

	if (locker->fn(locker, demoted->oblock))
		/*
		 * We couldn't lock the demoted block.
		 */
		return -EBUSY;

	del(mq, demoted);
	*oblock = demoted->oblock;
	free_entry(&mq->cache_pool, demoted);

	/*
	 * We used to put the demoted block into the pre-cache, but I think
	 * it's simpler to just let it work its way up from zero again.
	 * Stops blocks flickering in and out of the cache.
	 */

	return 0;
}
/*
 * Entries in the pre_cache whose hit count passes the promotion
 * threshold move to the cache proper.  Working out the correct
 * value for the promotion_threshold is crucial to this policy.
 */
static unsigned promote_threshold(struct mq_policy *mq)
{
	struct entry *e;

	if (any_free_cblocks(mq))
		return 0;

	e = peek(&mq->cache_clean);
	if (e)
		return e->hit_count;

	e = peek(&mq->cache_dirty);
	if (e)
		return e->hit_count + DISCOURAGE_DEMOTING_DIRTY_THRESHOLD;

	/* This should never happen */
	return 0;
}
/*
 * We modify the basic promotion_threshold depending on the specific io.
 *
 * If the origin block has been discarded then there's no cost to copy it
 * to the cache.
 *
 * We bias towards reads, since they can be demoted at no cost if they
 * haven't been dirtied.
 */
static unsigned adjusted_promote_threshold(struct mq_policy *mq,
					   bool discarded_oblock, int data_dir)
{
	if (data_dir == READ)
		return promote_threshold(mq) + mq->read_promote_adjustment;

	if (discarded_oblock && (any_free_cblocks(mq) || any_clean_cblocks(mq))) {
		/*
		 * We don't need to do any copying at all, so give this a
		 * very low threshold.
		 */
		return mq->discard_promote_adjustment;
	}

	return promote_threshold(mq) + mq->write_promote_adjustment;
}
static bool should_promote(struct mq_policy *mq, struct entry *e,
			   bool discarded_oblock, int data_dir)
{
	return e->hit_count >=
		adjusted_promote_threshold(mq, discarded_oblock, data_dir);
}
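/*
 * Illustrative worked example (not part of the driver), using the
 * default adjustments: if the cache is full and the oldest clean entry
 * has a hit count of 7, promote_threshold() returns 7, so a read of a
 * non-discarded origin block needs 7 + 4 = 11 hits in the pre_cache and
 * a write needs 7 + 8 = 15.  A discarded block with a free or clean
 * cblock available only needs 1 hit.  The helper below is hypothetical.
 */
#if 0
static bool example_would_promote_read(struct mq_policy *mq, struct entry *pre_cache_e)
{
	return should_promote(mq, pre_cache_e, false, READ);
}
#endif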
static int cache_entry_found(struct mq_policy *mq,
			     struct entry *e,
			     struct policy_result *result)
{
	requeue(mq, e);

	if (in_cache(mq, e)) {
		result->op = POLICY_HIT;
		result->cblock = infer_cblock(&mq->cache_pool, e);
	}

	return 0;
}
/*
 * Moves an entry from the pre_cache to the cache.  The main work is
 * finding which cache block to use.
 */
static int pre_cache_to_cache(struct mq_policy *mq, struct entry *e,
			      struct policy_locker *locker,
			      struct policy_result *result)
{
	int r;
	struct entry *new_e;

	/* Ensure there's a free cblock in the cache */
	if (epool_empty(&mq->cache_pool)) {
		result->op = POLICY_REPLACE;
		r = demote_cblock(mq, locker, &result->old_oblock);
		if (r) {
			result->op = POLICY_MISS;
			return 0;
		}

	} else
		result->op = POLICY_NEW;

	new_e = alloc_entry(&mq->cache_pool);
	BUG_ON(!new_e);

	new_e->oblock = e->oblock;
	new_e->dirty = false;
	new_e->hit_count = e->hit_count;

	del(mq, e);
	free_entry(&mq->pre_cache_pool, e);
	push(mq, new_e);

	result->cblock = infer_cblock(&mq->cache_pool, new_e);

	return 0;
}
static int pre_cache_entry_found(struct mq_policy *mq, struct entry *e,
				 bool can_migrate, bool discarded_oblock,
				 int data_dir, struct policy_locker *locker,
				 struct policy_result *result)
{
	int r = 0;

	if (!should_promote(mq, e, discarded_oblock, data_dir)) {
		requeue(mq, e);
		result->op = POLICY_MISS;

	} else if (!can_migrate)
		r = -EWOULDBLOCK;

	else {
		requeue(mq, e);
		r = pre_cache_to_cache(mq, e, locker, result);
	}

	return r;
}
static void insert_in_pre_cache(struct mq_policy *mq,
				dm_oblock_t oblock)
{
	struct entry *e = alloc_entry(&mq->pre_cache_pool);

	if (!e)
		/*
		 * There's no spare entry structure, so we grab the least
		 * used one from the pre_cache.
		 */
		e = pop(mq, &mq->pre_cache);

	if (unlikely(!e)) {
		DMWARN("couldn't pop from pre cache");
		return;
	}

	e->dirty = false;
	e->oblock = oblock;
	e->hit_count = 1;
	push(mq, e);
}

static void insert_in_cache(struct mq_policy *mq, dm_oblock_t oblock,
			    struct policy_locker *locker,
			    struct policy_result *result)
{
	int r;
	struct entry *e;

	if (epool_empty(&mq->cache_pool)) {
		result->op = POLICY_REPLACE;
		r = demote_cblock(mq, locker, &result->old_oblock);
		if (unlikely(r)) {
			result->op = POLICY_MISS;
			insert_in_pre_cache(mq, oblock);
			return;
		}

		/*
		 * This will always succeed, since we've just demoted.
		 */
		e = alloc_entry(&mq->cache_pool);
		BUG_ON(!e);

	} else {
		e = alloc_entry(&mq->cache_pool);
		result->op = POLICY_NEW;
	}

	e->oblock = oblock;
	e->dirty = false;
	e->hit_count = 1;
	push(mq, e);

	result->cblock = infer_cblock(&mq->cache_pool, e);
}
static int no_entry_found(struct mq_policy *mq, dm_oblock_t oblock,
			  bool can_migrate, bool discarded_oblock,
			  int data_dir, struct policy_locker *locker,
			  struct policy_result *result)
{
	if (adjusted_promote_threshold(mq, discarded_oblock, data_dir) <= 1) {
		if (can_migrate)
			insert_in_cache(mq, oblock, locker, result);
		else
			return -EWOULDBLOCK;
	} else {
		insert_in_pre_cache(mq, oblock);
		result->op = POLICY_MISS;
	}

	return 0;
}
/*
 * Looks the oblock up in the hash table, then decides whether to put it in
 * the pre_cache, or cache etc.
 */
static int map(struct mq_policy *mq, dm_oblock_t oblock,
	       bool can_migrate, bool discarded_oblock,
	       int data_dir, struct policy_locker *locker,
	       struct policy_result *result)
{
	int r = 0;
	struct entry *e = hash_lookup(mq, oblock);

	if (e && in_cache(mq, e))
		r = cache_entry_found(mq, e, result);

	else if (mq->tracker.thresholds[PATTERN_SEQUENTIAL] &&
		 iot_pattern(&mq->tracker) == PATTERN_SEQUENTIAL)
		result->op = POLICY_MISS;

	else if (e)
		r = pre_cache_entry_found(mq, e, can_migrate, discarded_oblock,
					  data_dir, locker, result);

	else
		r = no_entry_found(mq, oblock, can_migrate, discarded_oblock,
				   data_dir, locker, result);

	if (r == -EWOULDBLOCK)
		result->op = POLICY_MISS;

	return r;
}

/*----------------------------------------------------------------*/
/*
 * Public interface, via the policy struct.  See dm-cache-policy.h for a
 * description of these.
 */

static struct mq_policy *to_mq_policy(struct dm_cache_policy *p)
{
	return container_of(p, struct mq_policy, policy);
}

static void mq_destroy(struct dm_cache_policy *p)
{
	struct mq_policy *mq = to_mq_policy(p);

	vfree(mq->table);
	epool_exit(&mq->cache_pool);
	epool_exit(&mq->pre_cache_pool);
	kfree(mq);
}
static void update_pre_cache_hits(struct list_head *h, void *context)
{
	struct entry *e = container_of(h, struct entry, list);
	e->hit_count++;
}

static void update_cache_hits(struct list_head *h, void *context)
{
	struct mq_policy *mq = context;
	struct entry *e = container_of(h, struct entry, list);
	e->hit_count++;
	mq->hit_count++;
}
static void copy_tick(struct mq_policy *mq)
{
	unsigned long flags, tick;

	spin_lock_irqsave(&mq->tick_lock, flags);
	tick = mq->tick_protected;
	if (tick != mq->tick) {
		queue_iterate_tick(&mq->pre_cache, update_pre_cache_hits, mq);
		queue_iterate_tick(&mq->cache_dirty, update_cache_hits, mq);
		queue_iterate_tick(&mq->cache_clean, update_cache_hits, mq);
		mq->tick = tick;
	}

	queue_tick(&mq->pre_cache);
	queue_tick(&mq->cache_dirty);
	queue_tick(&mq->cache_clean);
	queue_update_writeback_sentinels(&mq->cache_dirty);
	spin_unlock_irqrestore(&mq->tick_lock, flags);
}
static int mq_map(struct dm_cache_policy *p, dm_oblock_t oblock,
		  bool can_block, bool can_migrate, bool discarded_oblock,
		  struct bio *bio, struct policy_locker *locker,
		  struct policy_result *result)
{
	int r;
	struct mq_policy *mq = to_mq_policy(p);

	result->op = POLICY_MISS;

	if (can_block)
		mutex_lock(&mq->lock);
	else if (!mutex_trylock(&mq->lock))
		return -EWOULDBLOCK;

	copy_tick(mq);

	iot_examine_bio(&mq->tracker, bio);
	r = map(mq, oblock, can_migrate, discarded_oblock,
		bio_data_dir(bio), locker, result);

	mutex_unlock(&mq->lock);

	return r;
}
static int mq_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock)
{
	int r;
	struct mq_policy *mq = to_mq_policy(p);
	struct entry *e;

	if (!mutex_trylock(&mq->lock))
		return -EWOULDBLOCK;

	e = hash_lookup(mq, oblock);
	if (e && in_cache(mq, e)) {
		*cblock = infer_cblock(&mq->cache_pool, e);
		r = 0;
	} else
		r = -ENOENT;

	mutex_unlock(&mq->lock);

	return r;
}
static void __mq_set_clear_dirty(struct mq_policy *mq, dm_oblock_t oblock, bool set)
{
	struct entry *e;

	e = hash_lookup(mq, oblock);
	BUG_ON(!e || !in_cache(mq, e));

	del(mq, e);
	e->dirty = set;
	push(mq, e);
}

static void mq_set_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
{
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	__mq_set_clear_dirty(mq, oblock, true);
	mutex_unlock(&mq->lock);
}

static void mq_clear_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
{
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	__mq_set_clear_dirty(mq, oblock, false);
	mutex_unlock(&mq->lock);
}
static int mq_load_mapping(struct dm_cache_policy *p,
			   dm_oblock_t oblock, dm_cblock_t cblock,
			   uint32_t hint, bool hint_valid)
{
	struct mq_policy *mq = to_mq_policy(p);
	struct entry *e;

	e = alloc_particular_entry(&mq->cache_pool, cblock);
	e->oblock = oblock;
	e->dirty = false;	/* this gets corrected in a minute */
	e->hit_count = hint_valid ? hint : 1;
	push(mq, e);

	return 0;
}
static int mq_save_hints(struct mq_policy *mq, struct queue *q,
			 policy_walk_fn fn, void *context)
{
	int r;
	unsigned level;
	struct list_head *h;
	struct entry *e;

	for (level = 0; level < NR_QUEUE_LEVELS; level++)
		list_for_each(h, q->qs + level) {
			if (is_sentinel(q, h))
				continue;

			e = container_of(h, struct entry, list);
			r = fn(context, infer_cblock(&mq->cache_pool, e),
			       e->oblock, e->hit_count);
			if (r)
				return r;
		}

	return 0;
}

static int mq_walk_mappings(struct dm_cache_policy *p, policy_walk_fn fn,
			    void *context)
{
	struct mq_policy *mq = to_mq_policy(p);
	int r = 0;

	mutex_lock(&mq->lock);

	r = mq_save_hints(mq, &mq->cache_clean, fn, context);
	if (!r)
		r = mq_save_hints(mq, &mq->cache_dirty, fn, context);

	mutex_unlock(&mq->lock);

	return r;
}
static void __remove_mapping(struct mq_policy *mq, dm_oblock_t oblock)
{
	struct entry *e;

	e = hash_lookup(mq, oblock);
	BUG_ON(!e || !in_cache(mq, e));

	del(mq, e);
	free_entry(&mq->cache_pool, e);
}

static void mq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock)
{
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	__remove_mapping(mq, oblock);
	mutex_unlock(&mq->lock);
}

static int __remove_cblock(struct mq_policy *mq, dm_cblock_t cblock)
{
	struct entry *e = epool_find(&mq->cache_pool, cblock);

	if (!e)
		return -ENODATA;

	del(mq, e);
	free_entry(&mq->cache_pool, e);

	return 0;
}

static int mq_remove_cblock(struct dm_cache_policy *p, dm_cblock_t cblock)
{
	int r;
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	r = __remove_cblock(mq, cblock);
	mutex_unlock(&mq->lock);

	return r;
}
#define CLEAN_TARGET_PERCENTAGE 25

static bool clean_target_met(struct mq_policy *mq)
{
	/*
	 * Cache entries may not be populated, so we cannot rely on the
	 * size of the clean queue.
	 */
	unsigned nr_clean = from_cblock(mq->cache_size) - queue_size(&mq->cache_dirty);
	unsigned target = from_cblock(mq->cache_size) * CLEAN_TARGET_PERCENTAGE / 100;

	return nr_clean >= target;
}
static int __mq_writeback_work(struct mq_policy *mq, dm_oblock_t *oblock,
			       dm_cblock_t *cblock)
{
	struct entry *e = pop_old(mq, &mq->cache_dirty);

	if (!e && !clean_target_met(mq))
		e = pop(mq, &mq->cache_dirty);

	if (!e)
		return -ENODATA;

	*oblock = e->oblock;
	*cblock = infer_cblock(&mq->cache_pool, e);
	e->dirty = false;
	push(mq, e);

	return 0;
}
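/*
 * Illustrative worked example (not part of the driver): for a cache of
 * 1000 blocks the clean target is 1000 * 25 / 100 = 250 clean blocks.
 * __mq_writeback_work() first offers dirty blocks that have sat behind a
 * writeback sentinel for at least one WRITEBACK_PERIOD; only if none are
 * that old and fewer than 250 blocks are clean does it fall back to a
 * recently dirtied block.  The hypothetical loop below just shows the
 * calling pattern.
 */
#if 0
static void example_writeback_loop(struct mq_policy *mq)
{
	dm_oblock_t oblock;
	dm_cblock_t cblock;

	while (!__mq_writeback_work(mq, &oblock, &cblock))
		;	/* caller would copy cblock back to oblock on the origin */
}
#endif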
static int mq_writeback_work(struct dm_cache_policy *p, dm_oblock_t *oblock,
			     dm_cblock_t *cblock, bool critical_only)
{
	int r;
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	r = __mq_writeback_work(mq, oblock, cblock);
	mutex_unlock(&mq->lock);

	return r;
}
static void __force_mapping(struct mq_policy *mq,
			    dm_oblock_t current_oblock, dm_oblock_t new_oblock)
{
	struct entry *e = hash_lookup(mq, current_oblock);

	if (e && in_cache(mq, e)) {
		del(mq, e);
		e->oblock = new_oblock;
		e->dirty = true;
		push(mq, e);
	}
}

static void mq_force_mapping(struct dm_cache_policy *p,
			     dm_oblock_t current_oblock, dm_oblock_t new_oblock)
{
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	__force_mapping(mq, current_oblock, new_oblock);
	mutex_unlock(&mq->lock);
}
static dm_cblock_t mq_residency(struct dm_cache_policy *p)
{
	dm_cblock_t r;
	struct mq_policy *mq = to_mq_policy(p);

	mutex_lock(&mq->lock);
	r = to_cblock(mq->cache_pool.nr_allocated);
	mutex_unlock(&mq->lock);

	return r;
}
static void mq_tick(struct dm_cache_policy *p, bool can_block)
{
	struct mq_policy *mq = to_mq_policy(p);
	unsigned long flags;

	spin_lock_irqsave(&mq->tick_lock, flags);
	mq->tick_protected++;
	spin_unlock_irqrestore(&mq->tick_lock, flags);

	if (can_block) {
		mutex_lock(&mq->lock);
		copy_tick(mq);
		mutex_unlock(&mq->lock);
	}
}
static int mq_set_config_value(struct dm_cache_policy *p,
			       const char *key, const char *value)
{
	struct mq_policy *mq = to_mq_policy(p);
	unsigned long tmp;

	if (kstrtoul(value, 10, &tmp))
		return -EINVAL;

	if (!strcasecmp(key, "random_threshold")) {
		mq->tracker.thresholds[PATTERN_RANDOM] = tmp;

	} else if (!strcasecmp(key, "sequential_threshold")) {
		mq->tracker.thresholds[PATTERN_SEQUENTIAL] = tmp;

	} else if (!strcasecmp(key, "discard_promote_adjustment"))
		mq->discard_promote_adjustment = tmp;

	else if (!strcasecmp(key, "read_promote_adjustment"))
		mq->read_promote_adjustment = tmp;

	else if (!strcasecmp(key, "write_promote_adjustment"))
		mq->write_promote_adjustment = tmp;

	else
		return -EINVAL;

	return 0;
}
static int mq_emit_config_values(struct dm_cache_policy *p, char *result,
				 unsigned maxlen, ssize_t *sz_ptr)
{
	ssize_t sz = *sz_ptr;
	struct mq_policy *mq = to_mq_policy(p);

	DMEMIT("10 random_threshold %u "
	       "sequential_threshold %u "
	       "discard_promote_adjustment %u "
	       "read_promote_adjustment %u "
	       "write_promote_adjustment %u ",
	       mq->tracker.thresholds[PATTERN_RANDOM],
	       mq->tracker.thresholds[PATTERN_SEQUENTIAL],
	       mq->discard_promote_adjustment,
	       mq->read_promote_adjustment,
	       mq->write_promote_adjustment);

	*sz_ptr = sz;
	return 0;
}
/* Init the policy plugin interface function pointers. */
static void init_policy_functions(struct mq_policy *mq)
{
	mq->policy.destroy = mq_destroy;
	mq->policy.map = mq_map;
	mq->policy.lookup = mq_lookup;
	mq->policy.set_dirty = mq_set_dirty;
	mq->policy.clear_dirty = mq_clear_dirty;
	mq->policy.load_mapping = mq_load_mapping;
	mq->policy.walk_mappings = mq_walk_mappings;
	mq->policy.remove_mapping = mq_remove_mapping;
	mq->policy.remove_cblock = mq_remove_cblock;
	mq->policy.writeback_work = mq_writeback_work;
	mq->policy.force_mapping = mq_force_mapping;
	mq->policy.residency = mq_residency;
	mq->policy.tick = mq_tick;
	mq->policy.emit_config_values = mq_emit_config_values;
	mq->policy.set_config_value = mq_set_config_value;
}
static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
					 sector_t origin_size,
					 sector_t cache_block_size)
{
	struct mq_policy *mq = kzalloc(sizeof(*mq), GFP_KERNEL);

	if (!mq)
		return NULL;

	init_policy_functions(mq);
	iot_init(&mq->tracker, SEQUENTIAL_THRESHOLD_DEFAULT, RANDOM_THRESHOLD_DEFAULT);
	mq->cache_size = cache_size;

	if (epool_init(&mq->pre_cache_pool, from_cblock(cache_size))) {
		DMERR("couldn't initialize pool of pre-cache entries");
		goto bad_pre_cache_init;
	}

	if (epool_init(&mq->cache_pool, from_cblock(cache_size))) {
		DMERR("couldn't initialize pool of cache entries");
		goto bad_cache_init;
	}

	mq->tick_protected = 0;
	mq->tick = 0;
	mq->hit_count = 0;
	mq->generation = 0;
	mq->discard_promote_adjustment = DEFAULT_DISCARD_PROMOTE_ADJUSTMENT;
	mq->read_promote_adjustment = DEFAULT_READ_PROMOTE_ADJUSTMENT;
	mq->write_promote_adjustment = DEFAULT_WRITE_PROMOTE_ADJUSTMENT;
	mutex_init(&mq->lock);
	spin_lock_init(&mq->tick_lock);

	queue_init(&mq->pre_cache);
	queue_init(&mq->cache_clean);
	queue_init(&mq->cache_dirty);

	mq->generation_period = max((unsigned) from_cblock(cache_size), 1024U);

	mq->nr_buckets = next_power(from_cblock(cache_size) / 2, 16);
	mq->hash_bits = ffs(mq->nr_buckets) - 1;
	mq->table = vzalloc(sizeof(*mq->table) * mq->nr_buckets);
	if (!mq->table)
		goto bad_alloc_table;

	return &mq->policy;

bad_alloc_table:
	epool_exit(&mq->cache_pool);
bad_cache_init:
	epool_exit(&mq->pre_cache_pool);
bad_pre_cache_init:
	kfree(mq);

	return NULL;
}
/*----------------------------------------------------------------*/

static struct dm_cache_policy_type mq_policy_type = {
	.name = "mq",
	.version = {1, 4, 0},
	.hint_size = 4,
	.owner = THIS_MODULE,
	.create = mq_create
};
static int __init mq_init(void)
{
	int r;

	mq_entry_cache = kmem_cache_create("dm_mq_policy_cache_entry",
					   sizeof(struct entry),
					   __alignof__(struct entry),
					   0, NULL);
	if (!mq_entry_cache)
		return -ENOMEM;

	r = dm_cache_policy_register(&mq_policy_type);
	if (r) {
		DMERR("register failed %d", r);
		kmem_cache_destroy(mq_entry_cache);
		return -ENOMEM;
	}

	return 0;
}

static void __exit mq_exit(void)
{
	dm_cache_policy_unregister(&mq_policy_type);

	kmem_cache_destroy(mq_entry_cache);
}
module_init(mq_init);
module_exit(mq_exit);

MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("mq cache policy");