/*
 * Copyright (C) 2015 Red Hat. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-cache-background-tracker.h"
#include "dm-cache-policy-internal.h"
#include "dm-cache-policy.h"

#include <linux/hash.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/math64.h>

#define DM_MSG_PREFIX "cache-policy-smq"
/*----------------------------------------------------------------*/

/*
 * Safe division functions that return zero on divide by zero.
 */
static unsigned safe_div(unsigned n, unsigned d)
{
        return d ? n / d : 0u;
}

static unsigned safe_mod(unsigned n, unsigned d)
{
        return d ? n % d : 0u;
}
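/*
 * For example, safe_div(7u, 2u) == 3u, while safe_div(7u, 0u) and
 * safe_mod(7u, 0u) both return 0u rather than faulting.  Callers such as
 * q_set_targets_subrange_() below rely on this when a level range is empty.
 */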
/*----------------------------------------------------------------*/

struct entry {
        unsigned hash_next:28;
        unsigned prev:28;
        unsigned next:28;
        unsigned level:6;
        bool dirty:1;
        bool allocated:1;
        bool sentinel:1;
        bool pending_work:1;

        dm_oblock_t oblock;
};

/*----------------------------------------------------------------*/

#define INDEXER_NULL ((1u << 28u) - 1u)
/*
 * An entry_space manages a set of entries that we use for the queues.
 * The clean and dirty queues share entries, so this object is separate
 * from the queue itself.
 */
struct entry_space {
        struct entry *begin;
        struct entry *end;
};

static int space_init(struct entry_space *es, unsigned nr_entries)
{
        if (!nr_entries) {
                es->begin = es->end = NULL;
                return 0;
        }

        es->begin = vzalloc(sizeof(struct entry) * nr_entries);
        if (!es->begin)
                return -ENOMEM;

        es->end = es->begin + nr_entries;
        return 0;
}
static void space_exit(struct entry_space *es)
{
        vfree(es->begin);
}
static struct entry *__get_entry(struct entry_space *es, unsigned block)
{
        struct entry *e;

        e = es->begin + block;
        BUG_ON(e >= es->end);

        return e;
}
static unsigned to_index(struct entry_space *es, struct entry *e)
{
        BUG_ON(e < es->begin || e >= es->end);
        return e - es->begin;
}
static struct entry *to_entry(struct entry_space *es, unsigned block)
{
        if (block == INDEXER_NULL)
                return NULL;

        return __get_entry(es, block);
}
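/*
 * Worked example of the index encoding above: the entry at es->begin + 42
 * has to_index() == 42, and to_entry(es, 42) maps back to the same pointer.
 * INDEXER_NULL ((1 << 28) - 1) stands in for a NULL pointer, which is why
 * the index fields in struct entry are 28 bits wide.
 */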
/*----------------------------------------------------------------*/

struct ilist {
        unsigned nr_elts;       /* excluding sentinel entries */
        unsigned head, tail;
};
static void l_init(struct ilist *l)
{
        l->nr_elts = 0;
        l->head = l->tail = INDEXER_NULL;
}
static struct entry *l_head(struct entry_space *es, struct ilist *l)
{
        return to_entry(es, l->head);
}

static struct entry *l_tail(struct entry_space *es, struct ilist *l)
{
        return to_entry(es, l->tail);
}

static struct entry *l_next(struct entry_space *es, struct entry *e)
{
        return to_entry(es, e->next);
}

static struct entry *l_prev(struct entry_space *es, struct entry *e)
{
        return to_entry(es, e->prev);
}

static bool l_empty(struct ilist *l)
{
        return l->head == INDEXER_NULL;
}
static void l_add_head(struct entry_space *es, struct ilist *l, struct entry *e)
{
        struct entry *head = l_head(es, l);

        e->next = l->head;
        e->prev = INDEXER_NULL;

        if (head)
                head->prev = l->head = to_index(es, e);
        else
                l->head = l->tail = to_index(es, e);

        if (!e->sentinel)
                l->nr_elts++;
}
static void l_add_tail(struct entry_space *es, struct ilist *l, struct entry *e)
{
        struct entry *tail = l_tail(es, l);

        e->next = INDEXER_NULL;
        e->prev = l->tail;

        if (tail)
                tail->next = l->tail = to_index(es, e);
        else
                l->head = l->tail = to_index(es, e);

        if (!e->sentinel)
                l->nr_elts++;
}
static void l_add_before(struct entry_space *es, struct ilist *l,
                         struct entry *old, struct entry *e)
{
        struct entry *prev = l_prev(es, old);

        if (!prev)
                l_add_head(es, l, e);

        else {
                e->prev = old->prev;
                e->next = to_index(es, old);
                prev->next = old->prev = to_index(es, e);

                if (!e->sentinel)
                        l->nr_elts++;
        }
}
static void l_del(struct entry_space *es, struct ilist *l, struct entry *e)
{
        struct entry *prev = l_prev(es, e);
        struct entry *next = l_next(es, e);

        if (prev)
                prev->next = e->next;
        else
                l->head = e->next;

        if (next)
                next->prev = e->prev;
        else
                l->tail = e->prev;

        if (!e->sentinel)
                l->nr_elts--;
}
static struct entry *l_pop_head(struct entry_space *es, struct ilist *l)
{
        struct entry *e;

        for (e = l_head(es, l); e; e = l_next(es, e))
                if (!e->sentinel) {
                        l_del(es, l, e);
                        return e;
                }

        return NULL;
}

static struct entry *l_pop_tail(struct entry_space *es, struct ilist *l)
{
        struct entry *e;

        for (e = l_tail(es, l); e; e = l_prev(es, e))
                if (!e->sentinel) {
                        l_del(es, l, e);
                        return e;
                }

        return NULL;
}
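/*
 * Illustrative sketch only (not used by the policy): how the index based
 * list primitives above fit together.  It assumes the caller already
 * initialised a small entry_space (at least three entries) with
 * space_init().
 */
static void __maybe_unused ilist_example(struct entry_space *es)
{
        struct ilist l;
        struct entry *e;
        unsigned i;

        l_init(&l);

        /* Queue the first three entries of the space, oldest first. */
        for (i = 0; i < 3; i++)
                l_add_tail(es, &l, __get_entry(es, i));

        /* Drain in FIFO order; l_pop_head() skips sentinel entries. */
        while ((e = l_pop_head(es, &l)))
                ;       /* entries come back in the order 0, 1, 2 */
}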
/*----------------------------------------------------------------*/

/*
 * The stochastic-multi-queue is a set of lru lists stacked into levels.
 * Entries are moved up levels when they are used, which loosely orders the
 * most accessed entries in the top levels and least in the bottom.  This
 * structure is *much* better than a single lru list.
 */
#define MAX_LEVELS 64u

struct queue {
        struct entry_space *es;

        unsigned nr_elts;
        unsigned nr_levels;
        struct ilist qs[MAX_LEVELS];

        /*
         * We maintain a count of the number of entries we would like in each
         * level.
         */
        unsigned last_target_nr_elts;
        unsigned nr_top_levels;
        unsigned nr_in_top_levels;
        unsigned target_count[MAX_LEVELS];
};
static void q_init(struct queue *q, struct entry_space *es, unsigned nr_levels)
{
        unsigned i;

        q->es = es;
        q->nr_elts = 0;
        q->nr_levels = nr_levels;

        for (i = 0; i < q->nr_levels; i++) {
                l_init(q->qs + i);
                q->target_count[i] = 0u;
        }

        q->last_target_nr_elts = 0u;
        q->nr_top_levels = 0u;
        q->nr_in_top_levels = 0u;
}
static unsigned q_size(struct queue *q)
{
        return q->nr_elts;
}
/*
 * Insert an entry to the back of the given level.
 */
static void q_push(struct queue *q, struct entry *e)
{
        BUG_ON(e->pending_work);

        if (!e->sentinel)
                q->nr_elts++;

        l_add_tail(q->es, q->qs + e->level, e);
}
static void q_push_front(struct queue *q, struct entry *e)
{
        BUG_ON(e->pending_work);

        if (!e->sentinel)
                q->nr_elts++;

        l_add_head(q->es, q->qs + e->level, e);
}
static void q_push_before(struct queue *q, struct entry *old, struct entry *e)
{
        BUG_ON(e->pending_work);

        if (!e->sentinel)
                q->nr_elts++;

        l_add_before(q->es, q->qs + e->level, old, e);
}
static void q_del(struct queue *q, struct entry *e)
{
        l_del(q->es, q->qs + e->level, e);
        if (!e->sentinel)
                q->nr_elts--;
}
/*
 * Return the oldest entry of the lowest populated level.
 */
static struct entry *q_peek(struct queue *q, unsigned max_level, bool can_cross_sentinel)
{
        unsigned level;
        struct entry *e;

        max_level = min(max_level, q->nr_levels);

        for (level = 0; level < max_level; level++)
                for (e = l_head(q->es, q->qs + level); e; e = l_next(q->es, e)) {
                        if (e->sentinel) {
                                if (can_cross_sentinel)
                                        continue;
                                else
                                        break;
                        }

                        return e;
                }

        return NULL;
}
static struct entry *q_pop(struct queue *q)
{
        struct entry *e = q_peek(q, q->nr_levels, true);

        if (e)
                q_del(q, e);

        return e;
}
/*
 * This function assumes there is a non-sentinel entry to pop.  It's only
 * used by redistribute, so we know this is true.  It also doesn't adjust
 * the q->nr_elts count.
 */
static struct entry *__redist_pop_from(struct queue *q, unsigned level)
{
        struct entry *e;

        for (; level < q->nr_levels; level++)
                for (e = l_head(q->es, q->qs + level); e; e = l_next(q->es, e))
                        if (!e->sentinel) {
                                l_del(q->es, q->qs + e->level, e);
                                return e;
                        }

        return NULL;
}
static void q_set_targets_subrange_(struct queue *q, unsigned nr_elts, unsigned lbegin, unsigned lend)
{
        unsigned level, nr_levels, entries_per_level, remainder;

        BUG_ON(lbegin > lend);
        BUG_ON(lend > q->nr_levels);
        nr_levels = lend - lbegin;
        entries_per_level = safe_div(nr_elts, nr_levels);
        remainder = safe_mod(nr_elts, nr_levels);

        for (level = lbegin; level < lend; level++)
                q->target_count[level] =
                        (level < (lbegin + remainder)) ? entries_per_level + 1u : entries_per_level;
}
/*
 * Typically we have fewer elements in the top few levels which allows us
 * to adjust the promote threshold nicely.
 */
static void q_set_targets(struct queue *q)
{
        if (q->last_target_nr_elts == q->nr_elts)
                return;

        q->last_target_nr_elts = q->nr_elts;

        if (q->nr_top_levels > q->nr_levels)
                q_set_targets_subrange_(q, q->nr_elts, 0, q->nr_levels);

        else {
                q_set_targets_subrange_(q, q->nr_in_top_levels,
                                        q->nr_levels - q->nr_top_levels, q->nr_levels);

                if (q->nr_in_top_levels < q->nr_elts)
                        q_set_targets_subrange_(q, q->nr_elts - q->nr_in_top_levels,
                                                0, q->nr_levels - q->nr_top_levels);
                else
                        q_set_targets_subrange_(q, 0, 0, q->nr_levels - q->nr_top_levels);
        }
}
static void q_redistribute(struct queue *q)
{
        unsigned target, level;
        struct ilist *l, *l_above;
        struct entry *e;

        q_set_targets(q);

        for (level = 0u; level < q->nr_levels - 1u; level++) {
                l = q->qs + level;
                target = q->target_count[level];

                /*
                 * Pull down some entries from the level above.
                 */
                while (l->nr_elts < target) {
                        e = __redist_pop_from(q, level + 1u);
                        if (!e)
                                break;

                        e->level = level;
                        l_add_tail(q->es, l, e);
                }

                /*
                 * Push some entries up.
                 */
                l_above = q->qs + level + 1u;
                while (l->nr_elts > target) {
                        e = l_pop_tail(q->es, l);
                        if (!e)
                                break;

                        e->level = level + 1u;
                        l_add_tail(q->es, l_above, e);
                }
        }
}
static void q_requeue(struct queue *q, struct entry *e, unsigned extra_levels,
                      struct entry *s1, struct entry *s2)
{
        struct entry *de;
        unsigned sentinels_passed = 0;
        unsigned new_level = min(q->nr_levels - 1u, e->level + extra_levels);

        /* try and find an entry to swap with */
        if (extra_levels && (e->level < q->nr_levels - 1u)) {
                for (de = l_head(q->es, q->qs + new_level); de && de->sentinel; de = l_next(q->es, de))
                        sentinels_passed++;

                if (de) {
                        q_del(q, de);
                        de->level = e->level;
                        if (s1) {
                                switch (sentinels_passed) {
                                case 0:
                                        q_push_before(q, s1, de);
                                        break;

                                case 1:
                                        q_push_before(q, s2, de);
                                        break;

                                default:
                                        q_push(q, de);
                                }
                        } else
                                q_push(q, de);
                }
        }

        q_del(q, e);
        e->level = new_level;
        q_push(q, e);
}
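/*
 * Illustrative sketch only (not used directly by the policy): promoting an
 * entry up the multiqueue on a hit.  It assumes the entry was previously
 * added with q_push() and that no sentinels are involved (s1/s2 NULL).
 */
static void __maybe_unused q_requeue_example(struct queue *q, struct entry *e)
{
        /*
         * A hit on a level 3 entry with a jump of 2 moves it to level 5
         * (clamped to q->nr_levels - 1); any non-sentinel entry found at
         * level 5 is swapped down to level 3 in its place.
         */
        q_requeue(q, e, 2u, NULL, NULL);
}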
/*----------------------------------------------------------------*/

#define FP_SHIFT 8u
#define SIXTEENTH (1u << (FP_SHIFT - 4u))
#define EIGHTH (1u << (FP_SHIFT - 3u))

struct stats {
        unsigned hit_threshold;
        unsigned hits;
        unsigned misses;
};

enum performance {
        Q_POOR,
        Q_FAIR,
        Q_WELL
};
static void stats_init(struct stats *s, unsigned nr_levels)
{
        s->hit_threshold = (nr_levels * 3u) / 4u;
        s->hits = 0u;
        s->misses = 0u;
}

static void stats_reset(struct stats *s)
{
        s->hits = s->misses = 0u;
}
static void stats_level_accessed(struct stats *s, unsigned level)
{
        if (level >= s->hit_threshold)
                s->hits++;
        else
                s->misses++;
}

static void stats_miss(struct stats *s)
{
        s->misses++;
}
/*
 * There are times when we don't have any confidence in the hotspot queue.
 * Such as when a fresh cache is created and the blocks have been spread
 * out across the levels, or if an io load changes.  We detect this by
 * seeing how often a lookup is in the top levels of the hotspot queue.
 */
static enum performance stats_assess(struct stats *s)
{
        unsigned confidence = safe_div(s->hits << FP_SHIFT, s->hits + s->misses);

        if (confidence < SIXTEENTH)
                return Q_POOR;

        else if (confidence < EIGHTH)
                return Q_FAIR;

        else
                return Q_WELL;
}
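/*
 * Worked example of the fixed point arithmetic above, assuming FP_SHIFT is
 * 8 (so confidence is scaled by 256, SIXTEENTH == 16 and EIGHTH == 32):
 * 5 hits / 95 misses gives (5 << 8) / 100 == 12, i.e. poor; 10 / 90 gives
 * 25, i.e. fair; 50 / 50 gives 128, i.e. performing well.
 */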
/*----------------------------------------------------------------*/

struct smq_hash_table {
        struct entry_space *es;
        unsigned long long hash_bits;
        unsigned *buckets;
};
/*
 * All cache entries are stored in a chained hash table.  To save space we
 * use indexing again, and only store indexes to the next entry.
 */
static int h_init(struct smq_hash_table *ht, struct entry_space *es, unsigned nr_entries)
{
        unsigned i, nr_buckets;

        ht->es = es;
        nr_buckets = roundup_pow_of_two(max(nr_entries / 4u, 16u));
        ht->hash_bits = __ffs(nr_buckets);

        ht->buckets = vmalloc(sizeof(*ht->buckets) * nr_buckets);
        if (!ht->buckets)
                return -ENOMEM;

        for (i = 0; i < nr_buckets; i++)
                ht->buckets[i] = INDEXER_NULL;

        return 0;
}
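/*
 * Worked example of the bucket sizing above: with nr_entries == 1000 we
 * get max(1000 / 4, 16) == 250, which rounds up to 256 buckets, so
 * hash_bits == __ffs(256) == 8 and hash_64() yields an 8 bit bucket index.
 */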
static void h_exit(struct smq_hash_table *ht)
{
        vfree(ht->buckets);
}

static struct entry *h_head(struct smq_hash_table *ht, unsigned bucket)
{
        return to_entry(ht->es, ht->buckets[bucket]);
}

static struct entry *h_next(struct smq_hash_table *ht, struct entry *e)
{
        return to_entry(ht->es, e->hash_next);
}

static void __h_insert(struct smq_hash_table *ht, unsigned bucket, struct entry *e)
{
        e->hash_next = ht->buckets[bucket];
        ht->buckets[bucket] = to_index(ht->es, e);
}

static void h_insert(struct smq_hash_table *ht, struct entry *e)
{
        unsigned h = hash_64(from_oblock(e->oblock), ht->hash_bits);

        __h_insert(ht, h, e);
}
static struct entry *__h_lookup(struct smq_hash_table *ht, unsigned h, dm_oblock_t oblock,
                                struct entry **prev)
{
        struct entry *e;

        *prev = NULL;
        for (e = h_head(ht, h); e; e = h_next(ht, e)) {
                if (e->oblock == oblock)
                        return e;

                *prev = e;
        }

        return NULL;
}
static void __h_unlink(struct smq_hash_table *ht, unsigned h,
                       struct entry *e, struct entry *prev)
{
        if (prev)
                prev->hash_next = e->hash_next;
        else
                ht->buckets[h] = e->hash_next;
}
/*
 * Also moves each entry to the front of the bucket.
 */
static struct entry *h_lookup(struct smq_hash_table *ht, dm_oblock_t oblock)
{
        struct entry *e, *prev;
        unsigned h = hash_64(from_oblock(oblock), ht->hash_bits);

        e = __h_lookup(ht, h, oblock, &prev);
        if (e && prev) {
                /*
                 * Move to the front because this entry is likely
                 * to be hit again.
                 */
                __h_unlink(ht, h, e, prev);
                __h_insert(ht, h, e);
        }

        return e;
}
static void h_remove(struct smq_hash_table *ht, struct entry *e)
{
        unsigned h = hash_64(from_oblock(e->oblock), ht->hash_bits);
        struct entry *prev;

        /*
         * The down side of using a singly linked list is we have to
         * iterate the bucket to remove an item.
         */
        e = __h_lookup(ht, h, e->oblock, &prev);
        if (e)
                __h_unlink(ht, h, e, prev);
}
/*----------------------------------------------------------------*/

struct entry_alloc {
        struct entry_space *es;
        unsigned begin;

        unsigned nr_allocated;
        struct ilist free;
};
static void init_allocator(struct entry_alloc *ea, struct entry_space *es,
                           unsigned begin, unsigned end)
{
        unsigned i;

        ea->es = es;
        ea->nr_allocated = 0u;
        ea->begin = begin;

        l_init(&ea->free);
        for (i = begin; i != end; i++)
                l_add_tail(ea->es, &ea->free, __get_entry(ea->es, i));
}
static void init_entry(struct entry *e)
{
        /*
         * We can't memset because that would clear the hotspot and
         * sentinel bits which remain constant.
         */
        e->hash_next = INDEXER_NULL;
        e->next = INDEXER_NULL;
        e->prev = INDEXER_NULL;
        e->level = 0u;
        e->dirty = true;        /* FIXME: audit */
        e->allocated = true;
        e->pending_work = false;
}
static struct entry *alloc_entry(struct entry_alloc *ea)
{
        struct entry *e;

        if (l_empty(&ea->free))
                return NULL;

        e = l_pop_head(ea->es, &ea->free);
        init_entry(e);
        ea->nr_allocated++;

        return e;
}
/*
 * This assumes the cblock hasn't already been allocated.
 */
static struct entry *alloc_particular_entry(struct entry_alloc *ea, unsigned i)
{
        struct entry *e = __get_entry(ea->es, ea->begin + i);

        BUG_ON(e->allocated);

        l_del(ea->es, &ea->free, e);
        init_entry(e);
        ea->nr_allocated++;

        return e;
}
static void free_entry(struct entry_alloc *ea, struct entry *e)
{
        BUG_ON(!ea->nr_allocated);
        BUG_ON(!e->allocated);

        ea->nr_allocated--;
        e->allocated = false;
        l_add_tail(ea->es, &ea->free, e);
}
static bool allocator_empty(struct entry_alloc *ea)
{
        return l_empty(&ea->free);
}

static unsigned get_index(struct entry_alloc *ea, struct entry *e)
{
        return to_index(ea->es, e) - ea->begin;
}

static struct entry *get_entry(struct entry_alloc *ea, unsigned index)
{
        return __get_entry(ea->es, ea->begin + index);
}
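/*
 * Worked example of the allocator indexing above: if an allocator was
 * initialised with begin == 256, the entry at absolute index 263 in the
 * entry_space has get_index() == 7, and get_entry(ea, 7) maps back to it.
 * infer_cblock() below relies on exactly this offset to turn a cache
 * entry into its cblock.
 */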
/*----------------------------------------------------------------*/

#define NR_HOTSPOT_LEVELS 64u
#define NR_CACHE_LEVELS 64u

#define WRITEBACK_PERIOD (10ul * HZ)
#define DEMOTE_PERIOD (60ul * HZ)

#define HOTSPOT_UPDATE_PERIOD (HZ)
#define CACHE_UPDATE_PERIOD (60ul * HZ)
struct smq_policy {
        struct dm_cache_policy policy;

        /* protects everything */
        spinlock_t lock;

        dm_cblock_t cache_size;
        sector_t cache_block_size;

        sector_t hotspot_block_size;
        unsigned nr_hotspot_blocks;
        unsigned cache_blocks_per_hotspot_block;
        unsigned hotspot_level_jump;

        struct entry_space es;
        struct entry_alloc writeback_sentinel_alloc;
        struct entry_alloc demote_sentinel_alloc;
        struct entry_alloc hotspot_alloc;
        struct entry_alloc cache_alloc;

        unsigned long *hotspot_hit_bits;
        unsigned long *cache_hit_bits;

        /*
         * We maintain three queues of entries.  The cache proper,
         * consisting of a clean and dirty queue, containing the currently
         * active mappings.  The hotspot queue uses a larger block size to
         * track blocks that are being hit frequently and potential
         * candidates for promotion to the cache.
         */
        struct queue hotspot;
        struct queue clean;
        struct queue dirty;

        struct stats hotspot_stats;
        struct stats cache_stats;

        /*
         * Keeps track of time, incremented by the core.  We use this to
         * avoid attributing multiple hits within the same tick.
         */
        unsigned tick;

        /*
         * The hash tables allow us to quickly find an entry by origin
         * block.  Both the mapping and hotspot tables.
         */
        struct smq_hash_table table;
        struct smq_hash_table hotspot_table;

        bool current_writeback_sentinels;
        unsigned long next_writeback_period;

        bool current_demote_sentinels;
        unsigned long next_demote_period;

        unsigned write_promote_level;
        unsigned read_promote_level;

        unsigned long next_hotspot_period;
        unsigned long next_cache_period;

        struct background_tracker *bg_work;

        bool migrations_allowed;
};
/*----------------------------------------------------------------*/

static struct entry *get_sentinel(struct entry_alloc *ea, unsigned level, bool which)
{
        return get_entry(ea, which ? level : NR_CACHE_LEVELS + level);
}

static struct entry *writeback_sentinel(struct smq_policy *mq, unsigned level)
{
        return get_sentinel(&mq->writeback_sentinel_alloc, level, mq->current_writeback_sentinels);
}

static struct entry *demote_sentinel(struct smq_policy *mq, unsigned level)
{
        return get_sentinel(&mq->demote_sentinel_alloc, level, mq->current_demote_sentinels);
}
static void __update_writeback_sentinels(struct smq_policy *mq)
{
        unsigned level;
        struct queue *q = &mq->dirty;
        struct entry *sentinel;

        for (level = 0; level < q->nr_levels; level++) {
                sentinel = writeback_sentinel(mq, level);
                q_del(q, sentinel);
                q_push(q, sentinel);
        }
}
static void __update_demote_sentinels(struct smq_policy *mq)
{
        unsigned level;
        struct queue *q = &mq->clean;
        struct entry *sentinel;

        for (level = 0; level < q->nr_levels; level++) {
                sentinel = demote_sentinel(mq, level);
                q_del(q, sentinel);
                q_push(q, sentinel);
        }
}
static void update_sentinels(struct smq_policy *mq)
{
        if (time_after(jiffies, mq->next_writeback_period)) {
                mq->next_writeback_period = jiffies + WRITEBACK_PERIOD;
                mq->current_writeback_sentinels = !mq->current_writeback_sentinels;
                __update_writeback_sentinels(mq);
        }

        if (time_after(jiffies, mq->next_demote_period)) {
                mq->next_demote_period = jiffies + DEMOTE_PERIOD;
                mq->current_demote_sentinels = !mq->current_demote_sentinels;
                __update_demote_sentinels(mq);
        }
}
static void __sentinels_init(struct smq_policy *mq)
{
        unsigned level;
        struct entry *sentinel;

        for (level = 0; level < NR_CACHE_LEVELS; level++) {
                sentinel = writeback_sentinel(mq, level);
                sentinel->level = level;
                q_push(&mq->dirty, sentinel);

                sentinel = demote_sentinel(mq, level);
                sentinel->level = level;
                q_push(&mq->clean, sentinel);
        }
}
static void sentinels_init(struct smq_policy *mq)
{
        mq->next_writeback_period = jiffies + WRITEBACK_PERIOD;
        mq->next_demote_period = jiffies + DEMOTE_PERIOD;

        mq->current_writeback_sentinels = false;
        mq->current_demote_sentinels = false;
        __sentinels_init(mq);

        mq->current_writeback_sentinels = !mq->current_writeback_sentinels;
        mq->current_demote_sentinels = !mq->current_demote_sentinels;
        __sentinels_init(mq);
}
/*----------------------------------------------------------------*/

static void del_queue(struct smq_policy *mq, struct entry *e)
{
        q_del(e->dirty ? &mq->dirty : &mq->clean, e);
}
static void push_queue(struct smq_policy *mq, struct entry *e)
{
        if (e->dirty)
                q_push(&mq->dirty, e);
        else
                q_push(&mq->clean, e);
}
// !h, !q, a -> h, q, a
static void push(struct smq_policy *mq, struct entry *e)
{
        h_insert(&mq->table, e);
        if (!e->pending_work)
                push_queue(mq, e);
}
*mq
, struct entry
*e
)
974 q_push_front(&mq
->dirty
, e
);
976 q_push_front(&mq
->clean
, e
);
static void push_front(struct smq_policy *mq, struct entry *e)
{
        h_insert(&mq->table, e);
        if (!e->pending_work)
                push_queue_front(mq, e);
}
static dm_cblock_t infer_cblock(struct smq_policy *mq, struct entry *e)
{
        return to_cblock(get_index(&mq->cache_alloc, e));
}
static void requeue(struct smq_policy *mq, struct entry *e)
{
        /*
         * Pending work has temporarily been taken out of the queues.
         */
        if (e->pending_work)
                return;

        if (!test_and_set_bit(from_cblock(infer_cblock(mq, e)), mq->cache_hit_bits)) {
                if (!e->dirty) {
                        q_requeue(&mq->clean, e, 1u, NULL, NULL);
                        return;
                }

                q_requeue(&mq->dirty, e, 1u,
                          get_sentinel(&mq->writeback_sentinel_alloc, e->level, !mq->current_writeback_sentinels),
                          get_sentinel(&mq->writeback_sentinel_alloc, e->level, mq->current_writeback_sentinels));
        }
}
static unsigned default_promote_level(struct smq_policy *mq)
{
        /*
         * The promote level depends on the current performance of the
         * cache.
         *
         * If the cache is performing badly, then we can't afford
         * to promote much without causing performance to drop below that
         * of the origin device.
         *
         * If the cache is performing well, then we don't need to promote
         * much.  If it isn't broken, don't fix it.
         *
         * If the cache is middling then we promote more.
         *
         * This scheme reminds me of a graph of entropy vs probability of a
         * binary variable.
         */
        static unsigned table[] = {1, 1, 1, 2, 4, 6, 7, 8, 7, 6, 4, 4, 3, 3, 2, 2, 1};

        unsigned hits = mq->cache_stats.hits;
        unsigned misses = mq->cache_stats.misses;
        unsigned index = safe_div(hits << 4u, hits + misses);

        return table[index];
}
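/*
 * Worked example of the table lookup above: with 300 hits and 700 misses
 * the index is (300 << 4) / 1000 == 4, giving a promote level of 4; a
 * roughly 50/50 hit rate lands near the peak of the table where promotion
 * is most aggressive, while an all-hit or all-miss cache drops back to 1.
 */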
static void update_promote_levels(struct smq_policy *mq)
{
        /*
         * If there are unused cache entries then we want to be really
         * eager to promote.
         */
        unsigned threshold_level = allocator_empty(&mq->cache_alloc) ?
                default_promote_level(mq) : (NR_HOTSPOT_LEVELS / 2u);

        threshold_level = max(threshold_level, NR_HOTSPOT_LEVELS);

        /*
         * If the hotspot queue is performing badly then we have little
         * confidence that we know which blocks to promote.  So we cut down
         * the amount of promotions.
         */
        switch (stats_assess(&mq->hotspot_stats)) {
        case Q_POOR:
                threshold_level /= 4u;
                break;

        case Q_FAIR:
                threshold_level /= 2u;
                break;

        case Q_WELL:
                break;
        }

        mq->read_promote_level = NR_HOTSPOT_LEVELS - threshold_level;
        mq->write_promote_level = (NR_HOTSPOT_LEVELS - threshold_level);
}
/*
 * If the hotspot queue is performing badly, then we try and move entries
 * around more quickly.
 */
static void update_level_jump(struct smq_policy *mq)
{
        switch (stats_assess(&mq->hotspot_stats)) {
        case Q_POOR:
                mq->hotspot_level_jump = 4u;
                break;

        case Q_FAIR:
                mq->hotspot_level_jump = 2u;
                break;

        case Q_WELL:
                mq->hotspot_level_jump = 1u;
                break;
        }
}
static void end_hotspot_period(struct smq_policy *mq)
{
        clear_bitset(mq->hotspot_hit_bits, mq->nr_hotspot_blocks);
        update_promote_levels(mq);

        if (time_after(jiffies, mq->next_hotspot_period)) {
                update_level_jump(mq);
                q_redistribute(&mq->hotspot);
                stats_reset(&mq->hotspot_stats);
                mq->next_hotspot_period = jiffies + HOTSPOT_UPDATE_PERIOD;
        }
}
static void end_cache_period(struct smq_policy *mq)
{
        if (time_after(jiffies, mq->next_cache_period)) {
                clear_bitset(mq->cache_hit_bits, from_cblock(mq->cache_size));

                q_redistribute(&mq->dirty);
                q_redistribute(&mq->clean);
                stats_reset(&mq->cache_stats);

                mq->next_cache_period = jiffies + CACHE_UPDATE_PERIOD;
        }
}
/*----------------------------------------------------------------*/

/*
 * Targets are given as a percentage.
 */
#define CLEAN_TARGET 25u
#define FREE_TARGET 25u

static unsigned percent_to_target(struct smq_policy *mq, unsigned p)
{
        return from_cblock(mq->cache_size) * p / 100u;
}

static bool clean_target_met(struct smq_policy *mq, bool idle)
{
        /*
         * Cache entries may not be populated.  So we cannot rely on the
         * size of the clean queue.
         */
        if (idle) {
                /*
                 * We'd like to clean everything.
                 */
                return q_size(&mq->dirty) == 0u;
        }

        /*
         * If we're busy we don't worry about cleaning at all.
         */
        return true;
}

static bool free_target_met(struct smq_policy *mq)
{
        unsigned nr_free;

        nr_free = from_cblock(mq->cache_size) - mq->cache_alloc.nr_allocated;
        return (nr_free + btracker_nr_demotions_queued(mq->bg_work)) >=
                percent_to_target(mq, FREE_TARGET);
}
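/*
 * Worked example of the free target above: with a 1000 block cache and a
 * FREE_TARGET of 25%, percent_to_target() gives 250.  If 800 blocks are
 * allocated there are 200 free, so at least 50 demotions must already be
 * queued with the background tracker before free_target_met() is true.
 */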
/*----------------------------------------------------------------*/

static void mark_pending(struct smq_policy *mq, struct entry *e)
{
        BUG_ON(e->sentinel);
        BUG_ON(!e->allocated);
        BUG_ON(e->pending_work);
        e->pending_work = true;
}

static void clear_pending(struct smq_policy *mq, struct entry *e)
{
        BUG_ON(!e->pending_work);
        e->pending_work = false;
}
static void queue_writeback(struct smq_policy *mq, bool idle)
{
        int r;
        struct policy_work work;
        struct entry *e;

        e = q_peek(&mq->dirty, mq->dirty.nr_levels, idle);
        if (e) {
                mark_pending(mq, e);
                q_del(&mq->dirty, e);

                work.op = POLICY_WRITEBACK;
                work.oblock = e->oblock;
                work.cblock = infer_cblock(mq, e);

                r = btracker_queue(mq->bg_work, &work, NULL);
                if (r) {
                        clear_pending(mq, e);
                        q_push_front(&mq->dirty, e);
                }
        }
}
static void queue_demotion(struct smq_policy *mq)
{
        int r;
        struct policy_work work;
        struct entry *e;

        if (unlikely(WARN_ON_ONCE(!mq->migrations_allowed)))
                return;

        e = q_peek(&mq->clean, mq->clean.nr_levels / 2, true);
        if (!e) {
                if (!clean_target_met(mq, true))
                        queue_writeback(mq, false);
                return;
        }

        mark_pending(mq, e);
        q_del(&mq->clean, e);

        work.op = POLICY_DEMOTE;
        work.oblock = e->oblock;
        work.cblock = infer_cblock(mq, e);
        r = btracker_queue(mq->bg_work, &work, NULL);
        if (r) {
                clear_pending(mq, e);
                q_push_front(&mq->clean, e);
        }
}
static void queue_promotion(struct smq_policy *mq, dm_oblock_t oblock,
                            struct policy_work **workp)
{
        int r;
        struct entry *e;
        struct policy_work work;

        if (!mq->migrations_allowed)
                return;

        if (allocator_empty(&mq->cache_alloc)) {
                /*
                 * We always claim to be 'idle' to ensure some demotions happen
                 * with continuous loads.
                 */
                if (!free_target_met(mq))
                        queue_demotion(mq);
                return;
        }

        if (btracker_promotion_already_present(mq->bg_work, oblock))
                return;

        /*
         * We allocate the entry now to reserve the cblock.  If the
         * background work is aborted we must remember to free it.
         */
        e = alloc_entry(&mq->cache_alloc);
        BUG_ON(!e);
        e->pending_work = true;
        work.op = POLICY_PROMOTE;
        work.oblock = oblock;
        work.cblock = infer_cblock(mq, e);
        r = btracker_queue(mq->bg_work, &work, workp);
        if (r)
                free_entry(&mq->cache_alloc, e);
}
/*----------------------------------------------------------------*/

enum promote_result {
        PROMOTE_NOT,
        PROMOTE_TEMPORARY,
        PROMOTE_PERMANENT
};

/*
 * Converts a boolean into a promote result.
 */
static enum promote_result maybe_promote(bool promote)
{
        return promote ? PROMOTE_PERMANENT : PROMOTE_NOT;
}
static enum promote_result should_promote(struct smq_policy *mq, struct entry *hs_e,
                                          int data_dir, bool fast_promote)
{
        if (data_dir == WRITE) {
                if (!allocator_empty(&mq->cache_alloc) && fast_promote)
                        return PROMOTE_TEMPORARY;

                return maybe_promote(hs_e->level >= mq->write_promote_level);
        }

        return maybe_promote(hs_e->level >= mq->read_promote_level);
}
static dm_oblock_t to_hblock(struct smq_policy *mq, dm_oblock_t b)
{
        sector_t r = from_oblock(b);

        (void) sector_div(r, mq->cache_blocks_per_hotspot_block);
        return to_oblock(r);
}
static struct entry *update_hotspot_queue(struct smq_policy *mq, dm_oblock_t b)
{
        unsigned hi;
        dm_oblock_t hb = to_hblock(mq, b);
        struct entry *e = h_lookup(&mq->hotspot_table, hb);

        if (e) {
                stats_level_accessed(&mq->hotspot_stats, e->level);

                hi = get_index(&mq->hotspot_alloc, e);
                q_requeue(&mq->hotspot, e,
                          test_and_set_bit(hi, mq->hotspot_hit_bits) ?
                          0u : mq->hotspot_level_jump,
                          NULL, NULL);

        } else {
                stats_miss(&mq->hotspot_stats);

                e = alloc_entry(&mq->hotspot_alloc);
                if (!e) {
                        e = q_pop(&mq->hotspot);
                        if (e) {
                                h_remove(&mq->hotspot_table, e);
                                hi = get_index(&mq->hotspot_alloc, e);
                                clear_bit(hi, mq->hotspot_hit_bits);
                        }
                }

                if (e) {
                        e->oblock = hb;
                        q_push(&mq->hotspot, e);
                        h_insert(&mq->hotspot_table, e);
                }
        }

        return e;
}
/*----------------------------------------------------------------*/

/*
 * Public interface, via the policy struct.  See dm-cache-policy.h for a
 * description of these.
 */

static struct smq_policy *to_smq_policy(struct dm_cache_policy *p)
{
        return container_of(p, struct smq_policy, policy);
}

static void smq_destroy(struct dm_cache_policy *p)
{
        struct smq_policy *mq = to_smq_policy(p);

        btracker_destroy(mq->bg_work);
        h_exit(&mq->hotspot_table);
        h_exit(&mq->table);
        free_bitset(mq->hotspot_hit_bits);
        free_bitset(mq->cache_hit_bits);
        space_exit(&mq->es);
        kfree(mq);
}
/*----------------------------------------------------------------*/

static int __lookup(struct smq_policy *mq, dm_oblock_t oblock, dm_cblock_t *cblock,
                    int data_dir, bool fast_copy,
                    struct policy_work **work, bool *background_work)
{
        struct entry *e, *hs_e;
        enum promote_result pr;

        *background_work = false;

        e = h_lookup(&mq->table, oblock);
        if (e) {
                stats_level_accessed(&mq->cache_stats, e->level);

                requeue(mq, e);
                *cblock = infer_cblock(mq, e);
                return 0;
        }

        stats_miss(&mq->cache_stats);

        /*
         * The hotspot queue only gets updated with misses.
         */
        hs_e = update_hotspot_queue(mq, oblock);

        pr = should_promote(mq, hs_e, data_dir, fast_copy);
        if (pr != PROMOTE_NOT) {
                queue_promotion(mq, oblock, work);
                *background_work = true;
        }

        return -ENOENT;
}
static int smq_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock,
                      int data_dir, bool fast_copy,
                      bool *background_work)
{
        int r;
        unsigned long flags;
        struct smq_policy *mq = to_smq_policy(p);

        spin_lock_irqsave(&mq->lock, flags);
        r = __lookup(mq, oblock, cblock,
                     data_dir, fast_copy,
                     NULL, background_work);
        spin_unlock_irqrestore(&mq->lock, flags);

        return r;
}
static int smq_lookup_with_work(struct dm_cache_policy *p,
                                dm_oblock_t oblock, dm_cblock_t *cblock,
                                int data_dir, bool fast_copy,
                                struct policy_work **work)
{
        int r;
        bool background_queued;
        unsigned long flags;
        struct smq_policy *mq = to_smq_policy(p);

        spin_lock_irqsave(&mq->lock, flags);
        r = __lookup(mq, oblock, cblock, data_dir, fast_copy, work, &background_queued);
        spin_unlock_irqrestore(&mq->lock, flags);

        return r;
}
static int smq_get_background_work(struct dm_cache_policy *p, bool idle,
                                   struct policy_work **result)
{
        int r;
        unsigned long flags;
        struct smq_policy *mq = to_smq_policy(p);

        spin_lock_irqsave(&mq->lock, flags);
        r = btracker_issue(mq->bg_work, result);
        if (r == -ENODATA) {
                if (!clean_target_met(mq, idle)) {
                        queue_writeback(mq, idle);
                        r = btracker_issue(mq->bg_work, result);
                }
        }
        spin_unlock_irqrestore(&mq->lock, flags);

        return r;
}
/*
 * We need to clear any pending work flags that have been set, and in the
 * case of promotion free the entry for the destination cblock.
 */
static void __complete_background_work(struct smq_policy *mq,
                                       struct policy_work *work,
                                       bool success)
{
        struct entry *e = get_entry(&mq->cache_alloc,
                                    from_cblock(work->cblock));

        switch (work->op) {
        case POLICY_PROMOTE:
                clear_pending(mq, e);
                if (success) {
                        e->oblock = work->oblock;
                        e->level = NR_CACHE_LEVELS - 1;
                        push(mq, e);
                } else
                        free_entry(&mq->cache_alloc, e);
                break;

        case POLICY_DEMOTE:
                if (success) {
                        h_remove(&mq->table, e);
                        free_entry(&mq->cache_alloc, e);
                } else {
                        clear_pending(mq, e);
                        push_queue(mq, e);
                }
                break;

        case POLICY_WRITEBACK:
                clear_pending(mq, e);
                push_queue(mq, e);
                break;
        }

        btracker_complete(mq->bg_work, work);
}
static void smq_complete_background_work(struct dm_cache_policy *p,
                                         struct policy_work *work,
                                         bool success)
{
        unsigned long flags;
        struct smq_policy *mq = to_smq_policy(p);

        spin_lock_irqsave(&mq->lock, flags);
        __complete_background_work(mq, work, success);
        spin_unlock_irqrestore(&mq->lock, flags);
}
// in_hash(oblock) -> in_hash(oblock)
static void __smq_set_clear_dirty(struct smq_policy *mq, dm_cblock_t cblock, bool set)
{
        struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock));

        if (e->pending_work)
                e->dirty = set;
        else {
                del_queue(mq, e);
                e->dirty = set;
                push_queue(mq, e);
        }
}
static void smq_set_dirty(struct dm_cache_policy *p, dm_cblock_t cblock)
{
        unsigned long flags;
        struct smq_policy *mq = to_smq_policy(p);

        spin_lock_irqsave(&mq->lock, flags);
        __smq_set_clear_dirty(mq, cblock, true);
        spin_unlock_irqrestore(&mq->lock, flags);
}

static void smq_clear_dirty(struct dm_cache_policy *p, dm_cblock_t cblock)
{
        struct smq_policy *mq = to_smq_policy(p);
        unsigned long flags;

        spin_lock_irqsave(&mq->lock, flags);
        __smq_set_clear_dirty(mq, cblock, false);
        spin_unlock_irqrestore(&mq->lock, flags);
}
static unsigned random_level(dm_cblock_t cblock)
{
        return hash_32(from_cblock(cblock), 9) & (NR_CACHE_LEVELS - 1);
}
static int smq_load_mapping(struct dm_cache_policy *p,
                            dm_oblock_t oblock, dm_cblock_t cblock,
                            bool dirty, uint32_t hint, bool hint_valid)
{
        struct smq_policy *mq = to_smq_policy(p);
        struct entry *e;

        e = alloc_particular_entry(&mq->cache_alloc, from_cblock(cblock));
        e->oblock = oblock;
        e->dirty = dirty;
        e->level = hint_valid ? min(hint, NR_CACHE_LEVELS - 1) : random_level(cblock);
        e->pending_work = false;

        /*
         * When we load mappings we push ahead of both sentinels in order to
         * allow demotions and cleaning to occur immediately.
         */
        push_front(mq, e);

        return 0;
}
static int smq_invalidate_mapping(struct dm_cache_policy *p, dm_cblock_t cblock)
{
        struct smq_policy *mq = to_smq_policy(p);
        struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock));

        if (!e->allocated)
                return -ENODATA;

        // FIXME: what if this block has pending background work?
        del_queue(mq, e);
        h_remove(&mq->table, e);
        free_entry(&mq->cache_alloc, e);

        return 0;
}
static uint32_t smq_get_hint(struct dm_cache_policy *p, dm_cblock_t cblock)
{
        struct smq_policy *mq = to_smq_policy(p);
        struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock));

        if (!e->allocated)
                return 0;

        return e->level;
}
static dm_cblock_t smq_residency(struct dm_cache_policy *p)
{
        dm_cblock_t r;
        unsigned long flags;
        struct smq_policy *mq = to_smq_policy(p);

        spin_lock_irqsave(&mq->lock, flags);
        r = to_cblock(mq->cache_alloc.nr_allocated);
        spin_unlock_irqrestore(&mq->lock, flags);

        return r;
}
static void smq_tick(struct dm_cache_policy *p, bool can_block)
{
        struct smq_policy *mq = to_smq_policy(p);
        unsigned long flags;

        spin_lock_irqsave(&mq->lock, flags);
        mq->tick++;
        update_sentinels(mq);
        end_hotspot_period(mq);
        end_cache_period(mq);
        spin_unlock_irqrestore(&mq->lock, flags);
}
static void smq_allow_migrations(struct dm_cache_policy *p, bool allow)
{
        struct smq_policy *mq = to_smq_policy(p);

        mq->migrations_allowed = allow;
}
/*
 * smq has no config values, but the old mq policy did.  To avoid breaking
 * software we continue to accept these configurables for the mq policy,
 * but they have no effect.
 */
static int mq_set_config_value(struct dm_cache_policy *p,
                               const char *key, const char *value)
{
        unsigned long tmp;

        if (kstrtoul(value, 10, &tmp))
                return -EINVAL;

        if (!strcasecmp(key, "random_threshold") ||
            !strcasecmp(key, "sequential_threshold") ||
            !strcasecmp(key, "discard_promote_adjustment") ||
            !strcasecmp(key, "read_promote_adjustment") ||
            !strcasecmp(key, "write_promote_adjustment")) {
                DMWARN("tunable '%s' no longer has any effect, mq policy is now an alias for smq", key);
                return 0;
        }

        return -EINVAL;
}
static int mq_emit_config_values(struct dm_cache_policy *p, char *result,
                                 unsigned maxlen, ssize_t *sz_ptr)
{
        ssize_t sz = *sz_ptr;

        DMEMIT("10 random_threshold 0 "
               "sequential_threshold 0 "
               "discard_promote_adjustment 0 "
               "read_promote_adjustment 0 "
               "write_promote_adjustment 0 ");

        *sz_ptr = sz;
        return 0;
}
/* Init the policy plugin interface function pointers. */
static void init_policy_functions(struct smq_policy *mq, bool mimic_mq)
{
        mq->policy.destroy = smq_destroy;
        mq->policy.lookup = smq_lookup;
        mq->policy.lookup_with_work = smq_lookup_with_work;
        mq->policy.get_background_work = smq_get_background_work;
        mq->policy.complete_background_work = smq_complete_background_work;
        mq->policy.set_dirty = smq_set_dirty;
        mq->policy.clear_dirty = smq_clear_dirty;
        mq->policy.load_mapping = smq_load_mapping;
        mq->policy.invalidate_mapping = smq_invalidate_mapping;
        mq->policy.get_hint = smq_get_hint;
        mq->policy.residency = smq_residency;
        mq->policy.tick = smq_tick;
        mq->policy.allow_migrations = smq_allow_migrations;

        if (mimic_mq) {
                mq->policy.set_config_value = mq_set_config_value;
                mq->policy.emit_config_values = mq_emit_config_values;
        }
}
static bool too_many_hotspot_blocks(sector_t origin_size,
                                    sector_t hotspot_block_size,
                                    unsigned nr_hotspot_blocks)
{
        return (hotspot_block_size * nr_hotspot_blocks) > origin_size;
}

static void calc_hotspot_params(sector_t origin_size,
                                sector_t cache_block_size,
                                unsigned nr_cache_blocks,
                                sector_t *hotspot_block_size,
                                unsigned *nr_hotspot_blocks)
{
        *hotspot_block_size = cache_block_size * 16u;
        *nr_hotspot_blocks = max(nr_cache_blocks / 4u, 1024u);

        while ((*hotspot_block_size > cache_block_size) &&
               too_many_hotspot_blocks(origin_size, *hotspot_block_size, *nr_hotspot_blocks))
                *hotspot_block_size /= 2u;
}
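/*
 * Worked example of the sizing above: with 64 sector cache blocks and
 * 10000 cache blocks, the hotspot block size starts at 1024 sectors and
 * nr_hotspot_blocks is 2500.  If the origin is only 2000000 sectors the
 * first check fails (1024 * 2500 > 2000000), so the hotspot block size is
 * halved to 512 sectors, which fits, giving hotspot blocks that each
 * cover eight cache blocks.
 */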
static struct dm_cache_policy *__smq_create(dm_cblock_t cache_size,
                                            sector_t origin_size,
                                            sector_t cache_block_size,
                                            bool mimic_mq,
                                            bool migrations_allowed)
{
        unsigned i;
        unsigned nr_sentinels_per_queue = 2u * NR_CACHE_LEVELS;
        unsigned total_sentinels = 2u * nr_sentinels_per_queue;
        struct smq_policy *mq = kzalloc(sizeof(*mq), GFP_KERNEL);

        if (!mq)
                return NULL;

        init_policy_functions(mq, mimic_mq);
        mq->cache_size = cache_size;
        mq->cache_block_size = cache_block_size;

        calc_hotspot_params(origin_size, cache_block_size, from_cblock(cache_size),
                            &mq->hotspot_block_size, &mq->nr_hotspot_blocks);

        mq->cache_blocks_per_hotspot_block = div64_u64(mq->hotspot_block_size, mq->cache_block_size);
        mq->hotspot_level_jump = 1u;
        if (space_init(&mq->es, total_sentinels + mq->nr_hotspot_blocks + from_cblock(cache_size))) {
                DMERR("couldn't initialize entry space");
                goto bad_pool_init;
        }

        init_allocator(&mq->writeback_sentinel_alloc, &mq->es, 0, nr_sentinels_per_queue);
        for (i = 0; i < nr_sentinels_per_queue; i++)
                get_entry(&mq->writeback_sentinel_alloc, i)->sentinel = true;

        init_allocator(&mq->demote_sentinel_alloc, &mq->es, nr_sentinels_per_queue, total_sentinels);
        for (i = 0; i < nr_sentinels_per_queue; i++)
                get_entry(&mq->demote_sentinel_alloc, i)->sentinel = true;

        init_allocator(&mq->hotspot_alloc, &mq->es, total_sentinels,
                       total_sentinels + mq->nr_hotspot_blocks);

        init_allocator(&mq->cache_alloc, &mq->es,
                       total_sentinels + mq->nr_hotspot_blocks,
                       total_sentinels + mq->nr_hotspot_blocks + from_cblock(cache_size));

        mq->hotspot_hit_bits = alloc_bitset(mq->nr_hotspot_blocks);
        if (!mq->hotspot_hit_bits) {
                DMERR("couldn't allocate hotspot hit bitset");
                goto bad_hotspot_hit_bits;
        }
        clear_bitset(mq->hotspot_hit_bits, mq->nr_hotspot_blocks);

        if (from_cblock(cache_size)) {
                mq->cache_hit_bits = alloc_bitset(from_cblock(cache_size));
                if (!mq->cache_hit_bits) {
                        DMERR("couldn't allocate cache hit bitset");
                        goto bad_cache_hit_bits;
                }
                clear_bitset(mq->cache_hit_bits, from_cblock(mq->cache_size));
        } else
                mq->cache_hit_bits = NULL;

        mq->tick = 0;
        spin_lock_init(&mq->lock);

        q_init(&mq->hotspot, &mq->es, NR_HOTSPOT_LEVELS);
        mq->hotspot.nr_top_levels = 8;
        mq->hotspot.nr_in_top_levels = min(mq->nr_hotspot_blocks / NR_HOTSPOT_LEVELS,
                                           from_cblock(mq->cache_size) / mq->cache_blocks_per_hotspot_block);

        q_init(&mq->clean, &mq->es, NR_CACHE_LEVELS);
        q_init(&mq->dirty, &mq->es, NR_CACHE_LEVELS);

        stats_init(&mq->hotspot_stats, NR_HOTSPOT_LEVELS);
        stats_init(&mq->cache_stats, NR_CACHE_LEVELS);

        if (h_init(&mq->table, &mq->es, from_cblock(cache_size)))
                goto bad_alloc_table;

        if (h_init(&mq->hotspot_table, &mq->es, mq->nr_hotspot_blocks))
                goto bad_alloc_hotspot_table;

        sentinels_init(mq);
        mq->write_promote_level = mq->read_promote_level = NR_HOTSPOT_LEVELS;

        mq->next_hotspot_period = jiffies;
        mq->next_cache_period = jiffies;

        mq->bg_work = btracker_create(4096); /* FIXME: hard coded value */
        if (!mq->bg_work)
                goto bad_btracker;

        mq->migrations_allowed = migrations_allowed;

        return &mq->policy;

bad_btracker:
        h_exit(&mq->hotspot_table);
bad_alloc_hotspot_table:
        h_exit(&mq->table);
bad_alloc_table:
        free_bitset(mq->cache_hit_bits);
bad_cache_hit_bits:
        free_bitset(mq->hotspot_hit_bits);
bad_hotspot_hit_bits:
        space_exit(&mq->es);
bad_pool_init:
        kfree(mq);

        return NULL;
}
static struct dm_cache_policy *smq_create(dm_cblock_t cache_size,
                                          sector_t origin_size,
                                          sector_t cache_block_size)
{
        return __smq_create(cache_size, origin_size, cache_block_size, false, true);
}

static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
                                         sector_t origin_size,
                                         sector_t cache_block_size)
{
        return __smq_create(cache_size, origin_size, cache_block_size, true, true);
}

static struct dm_cache_policy *cleaner_create(dm_cblock_t cache_size,
                                              sector_t origin_size,
                                              sector_t cache_block_size)
{
        return __smq_create(cache_size, origin_size, cache_block_size, false, false);
}
/*----------------------------------------------------------------*/

static struct dm_cache_policy_type smq_policy_type = {
        .name = "smq",
        .version = {2, 0, 0},
        .hint_size = 4,
        .owner = THIS_MODULE,
        .create = smq_create
};

static struct dm_cache_policy_type mq_policy_type = {
        .name = "mq",
        .version = {2, 0, 0},
        .hint_size = 4,
        .owner = THIS_MODULE,
        .create = mq_create,
};

static struct dm_cache_policy_type cleaner_policy_type = {
        .name = "cleaner",
        .version = {2, 0, 0},
        .hint_size = 4,
        .owner = THIS_MODULE,
        .create = cleaner_create,
};

static struct dm_cache_policy_type default_policy_type = {
        .name = "default",
        .version = {2, 0, 0},
        .hint_size = 4,
        .owner = THIS_MODULE,
        .create = smq_create,
        .real = &smq_policy_type
};
static int __init smq_init(void)
{
        int r;

        r = dm_cache_policy_register(&smq_policy_type);
        if (r) {
                DMERR("register failed %d", r);
                return -ENOMEM;
        }

        r = dm_cache_policy_register(&mq_policy_type);
        if (r) {
                DMERR("register failed (as mq) %d", r);
                goto out_mq;
        }

        r = dm_cache_policy_register(&cleaner_policy_type);
        if (r) {
                DMERR("register failed (as cleaner) %d", r);
                goto out_cleaner;
        }

        r = dm_cache_policy_register(&default_policy_type);
        if (r) {
                DMERR("register failed (as default) %d", r);
                goto out_default;
        }

        return 0;

out_default:
        dm_cache_policy_unregister(&cleaner_policy_type);
out_cleaner:
        dm_cache_policy_unregister(&mq_policy_type);
out_mq:
        dm_cache_policy_unregister(&smq_policy_type);

        return -ENOMEM;
}
static void __exit smq_exit(void)
{
        dm_cache_policy_unregister(&cleaner_policy_type);
        dm_cache_policy_unregister(&smq_policy_type);
        dm_cache_policy_unregister(&mq_policy_type);
        dm_cache_policy_unregister(&default_policy_type);
}
module_init(smq_init);
module_exit(smq_exit);

MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("smq cache policy");

MODULE_ALIAS("dm-cache-default");
MODULE_ALIAS("dm-cache-mq");
MODULE_ALIAS("dm-cache-cleaner");