// SPDX-License-Identifier: GPL-2.0
/*
 * Primary bucket allocation code
 *
 * Copyright 2012 Google, Inc.
 *
 * Allocation in bcache is done in terms of buckets:
 *
 * Each bucket has associated an 8 bit gen; this gen corresponds to the gen in
 * btree pointers - they must match for the pointer to be considered valid.
 *
 * Thus (assuming a bucket has no dirty data or metadata in it) we can reuse a
 * bucket simply by incrementing its gen.
 *
 * The gens (along with the priorities; it's really the gens that are important
 * but the code is named as if it's the priorities) are written in an arbitrary
 * list of buckets on disk, with a pointer to them in the journal header.
 *
 * When we invalidate a bucket, we have to write its new gen to disk and wait
 * for that write to complete before we use it - otherwise after a crash we
 * could have pointers that appeared to be good but pointed to data that had
 * been overwritten.
 *
 * Since the gens and priorities are all stored contiguously on disk, we can
 * batch this up: We fill up the free_inc list with freshly invalidated buckets,
 * call prio_write(), and when prio_write() finishes we pull buckets off the
 * free_inc list and optionally discard them.
 *
 * free_inc isn't the only freelist - if it was, we'd often have to sleep while
 * priorities and gens were being written before we could allocate. c->free is a
 * smaller freelist, and buckets on that list are always ready to be used.
 *
 * If we've got discards enabled, the discard happens when a bucket moves from
 * the free_inc list to the free list.
 *
 * There is another freelist, because sometimes we have buckets that we know
 * have nothing pointing into them - these we can reuse without waiting for
 * priorities to be rewritten. These come from freed btree nodes and buckets
 * that garbage collection discovered no longer had valid keys pointing into
 * them (because they were overwritten). That's the unused list - buckets on the
 * unused list move to the free list, optionally being discarded in the process.
 *
 * It's also important to ensure that gens don't wrap around - with respect to
 * either the oldest gen in the btree or the gen on disk. This is quite
 * difficult to do in practice, but we explicitly guard against it anyway - if
 * a bucket is in danger of wrapping around we simply skip invalidating it that
 * time around, and we garbage collect or rewrite the priorities sooner than we
 * would have otherwise.
 *
 * bch_bucket_alloc() allocates a single bucket from a specific cache.
 *
 * bch_bucket_alloc_set() allocates one or more buckets from different caches
 * out of a cache set.
 *
 * free_some_buckets() drives all the processes described above. It's called
 * from bch_bucket_alloc() and a few other places that need to make sure free
 * buckets are ready.
 *
 * invalidate_buckets_(lru|fifo)() find buckets that are available to be
 * invalidated, and then invalidate them and stick them on the free_inc list -
 * in either lru or fifo order.
 */
#include "bcache.h"
#include "btree.h"

#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/random.h>
#include <trace/events/bcache.h>

#define MAX_OPEN_BUCKETS 128
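
/*
 * Illustrative sketch, not part of the original file: the gen-matching rule
 * from the header comment, restated as a hypothetical helper. The real check
 * is done via ptr_stale() against struct bucket; this only exists to show
 * that a pointer stays valid exactly as long as the gen it carries equals the
 * bucket's current 8 bit gen, so bumping the bucket's gen invalidates every
 * older pointer at once.
 */
static inline bool example_gen_matches(uint8_t ptr_gen, uint8_t bucket_gen)
{
        return ptr_gen == bucket_gen;
}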
/* Bucket heap / gen */
uint8_t bch_inc_gen(struct cache *ca, struct bucket *b)
{
        uint8_t ret = ++b->gen;

        ca->set->need_gc = max(ca->set->need_gc, bucket_gc_gen(b));
        WARN_ON_ONCE(ca->set->need_gc > BUCKET_GC_GEN_MAX);

        return ret;
}
void bch_rescale_priorities(struct cache_set *c, int sectors)
{
        struct cache *ca;
        struct bucket *b;
        unsigned next = c->nbuckets * c->sb.bucket_size / 1024;
        unsigned i;
        int r;

        atomic_sub(sectors, &c->rescale);

        do {
                r = atomic_read(&c->rescale);

                if (r >= 0)
                        return;
        } while (atomic_cmpxchg(&c->rescale, r, r + next) != r);

        mutex_lock(&c->bucket_lock);

        c->min_prio = USHRT_MAX;

        for_each_cache(ca, c, i)
                for_each_bucket(b, ca)
                        if (b->prio &&
                            b->prio != BTREE_PRIO &&
                            !atomic_read(&b->pin)) {
                                b->prio--;
                                c->min_prio = min(c->min_prio, b->prio);
                        }

        mutex_unlock(&c->bucket_lock);
}
/*
 * Background allocation thread: scans for buckets to be invalidated,
 * invalidates them, rewrites prios/gens (marking them as invalidated on disk),
 * then optionally issues discard commands to the newly free buckets, then puts
 * them on the various freelists.
 */
static inline bool can_inc_bucket_gen(struct bucket *b)
{
        return bucket_gc_gen(b) < BUCKET_GC_GEN_MAX;
}
bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b)
{
        BUG_ON(!ca->set->gc_mark_valid);

        return (!GC_MARK(b) ||
                GC_MARK(b) == GC_MARK_RECLAIMABLE) &&
                !atomic_read(&b->pin) &&
                can_inc_bucket_gen(b);
}
void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
{
        lockdep_assert_held(&ca->set->bucket_lock);
        BUG_ON(GC_MARK(b) && GC_MARK(b) != GC_MARK_RECLAIMABLE);

        if (GC_SECTORS_USED(b))
                trace_bcache_invalidate(ca, b - ca->buckets);

        bch_inc_gen(ca, b);
        b->prio = INITIAL_PRIO;
        atomic_inc(&b->pin);
}
static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
{
        __bch_invalidate_one_bucket(ca, b);

        fifo_push(&ca->free_inc, b - ca->buckets);
}
/*
 * Determines what order we're going to reuse buckets, smallest bucket_prio()
 * first: we also take into account the number of sectors of live data in that
 * bucket, and in order for that multiply to make sense we have to scale bucket
 * priorities.
 *
 * Thus, we scale the bucket priorities so that the bucket with the smallest
 * prio is worth 1/8th of what INITIAL_PRIO is worth.
 */

#define bucket_prio(b)                                                  \
({                                                                      \
        unsigned min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8;    \
                                                                        \
        (b->prio - ca->set->min_prio + min_prio) * GC_SECTORS_USED(b); \
})

#define bucket_max_cmp(l, r)    (bucket_prio(l) < bucket_prio(r))
#define bucket_min_cmp(l, r)    (bucket_prio(l) > bucket_prio(r))
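
/*
 * Worked example of the scaling above (numbers are illustrative, and the
 * INITIAL_PRIO value of 32768 is assumed from bcache.h): if the smallest prio
 * in the set is ca->set->min_prio = 32000, then min_prio scales to
 * (32768 - 32000) / 8 = 96. A bucket with b->prio = 32200 holding 100 live
 * sectors gets bucket_prio = (32200 - 32000 + 96) * 100 = 29600, so buckets
 * that are both cold (low prio) and mostly empty (few live sectors) sort
 * first for reuse.
 */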
static void invalidate_buckets_lru(struct cache *ca)
{
        struct bucket *b;
        ssize_t i;

        ca->heap.used = 0;

        for_each_bucket(b, ca) {
                if (!bch_can_invalidate_bucket(ca, b))
                        continue;

                if (!heap_full(&ca->heap))
                        heap_add(&ca->heap, b, bucket_max_cmp);
                else if (bucket_max_cmp(b, heap_peek(&ca->heap))) {
                        ca->heap.data[0] = b;
                        heap_sift(&ca->heap, 0, bucket_max_cmp);
                }
        }

        for (i = ca->heap.used / 2 - 1; i >= 0; --i)
                heap_sift(&ca->heap, i, bucket_min_cmp);

        while (!fifo_full(&ca->free_inc)) {
                if (!heap_pop(&ca->heap, b, bucket_min_cmp)) {
                        /*
                         * We don't want to be calling invalidate_buckets()
                         * multiple times when it can't do anything
                         */
                        ca->invalidate_needs_gc = 1;
                        wake_up_gc(ca->set);
                        return;
                }

                bch_invalidate_one_bucket(ca, b);
        }
}
static void invalidate_buckets_fifo(struct cache *ca)
{
        struct bucket *b;
        size_t checked = 0;

        while (!fifo_full(&ca->free_inc)) {
                if (ca->fifo_last_bucket <  ca->sb.first_bucket ||
                    ca->fifo_last_bucket >= ca->sb.nbuckets)
                        ca->fifo_last_bucket = ca->sb.first_bucket;

                b = ca->buckets + ca->fifo_last_bucket++;

                if (bch_can_invalidate_bucket(ca, b))
                        bch_invalidate_one_bucket(ca, b);

                if (++checked >= ca->sb.nbuckets) {
                        ca->invalidate_needs_gc = 1;
                        wake_up_gc(ca->set);
                        return;
                }
        }
}
static void invalidate_buckets_random(struct cache *ca)
{
        struct bucket *b;
        size_t checked = 0;

        while (!fifo_full(&ca->free_inc)) {
                size_t n;

                get_random_bytes(&n, sizeof(n));

                n %= (size_t) (ca->sb.nbuckets - ca->sb.first_bucket);
                n += ca->sb.first_bucket;

                b = ca->buckets + n;

                if (bch_can_invalidate_bucket(ca, b))
                        bch_invalidate_one_bucket(ca, b);

                if (++checked >= ca->sb.nbuckets / 2) {
                        ca->invalidate_needs_gc = 1;
                        wake_up_gc(ca->set);
                        return;
                }
        }
}
static void invalidate_buckets(struct cache *ca)
{
        BUG_ON(ca->invalidate_needs_gc);

        switch (CACHE_REPLACEMENT(&ca->sb)) {
        case CACHE_REPLACEMENT_LRU:
                invalidate_buckets_lru(ca);
                break;
        case CACHE_REPLACEMENT_FIFO:
                invalidate_buckets_fifo(ca);
                break;
        case CACHE_REPLACEMENT_RANDOM:
                invalidate_buckets_random(ca);
                break;
        }
}
#define allocator_wait(ca, cond)                                        \
do {                                                                    \
        while (1) {                                                     \
                set_current_state(TASK_INTERRUPTIBLE);                  \
                if (cond)                                               \
                        break;                                          \
                mutex_unlock(&(ca)->set->bucket_lock);                  \
                if (kthread_should_stop() ||                            \
                    test_bit(CACHE_SET_IO_DISABLE, &ca->set->flags)) {  \
                        set_current_state(TASK_RUNNING);                \
                        goto out;                                       \
                }                                                       \
                schedule();                                             \
                mutex_lock(&(ca)->set->bucket_lock);                    \
        }                                                               \
        __set_current_state(TASK_RUNNING);                              \
} while (0)
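
/*
 * allocator_wait() is written for use inside bch_allocator_thread() only: it
 * drops bucket_lock while sleeping, and when the thread should stop (or the
 * cache set has CACHE_SET_IO_DISABLE set) it jumps to the out: label at the
 * bottom of that function rather than returning through the caller.
 */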
static int bch_allocator_push(struct cache *ca, long bucket)
{
        unsigned i;

        /* Prios/gens are actually the most important reserve */
        if (fifo_push(&ca->free[RESERVE_PRIO], bucket))
                return true;

        for (i = 0; i < RESERVE_NR; i++)
                if (fifo_push(&ca->free[i], bucket))
                        return true;

        return false;
}
static int bch_allocator_thread(void *arg)
{
        struct cache *ca = arg;

        mutex_lock(&ca->set->bucket_lock);

        while (1) {
                /*
                 * First, we pull buckets off of the unused and free_inc lists,
                 * possibly issue discards to them, then we add the bucket to
                 * the free list:
                 */
                while (!fifo_empty(&ca->free_inc)) {
                        long bucket;

                        fifo_pop(&ca->free_inc, bucket);

                        if (ca->discard) {
                                mutex_unlock(&ca->set->bucket_lock);
                                blkdev_issue_discard(ca->bdev,
                                        bucket_to_sector(ca->set, bucket),
                                        ca->sb.bucket_size, GFP_KERNEL, 0);
                                mutex_lock(&ca->set->bucket_lock);
                        }

                        allocator_wait(ca, bch_allocator_push(ca, bucket));
                        wake_up(&ca->set->btree_cache_wait);
                        wake_up(&ca->set->bucket_wait);
                }

                /*
                 * We've run out of free buckets, we need to find some buckets
                 * we can invalidate. First, invalidate them in memory and add
                 * them to the free_inc list:
                 */
retry_invalidate:
                allocator_wait(ca, ca->set->gc_mark_valid &&
                               !ca->invalidate_needs_gc);
                invalidate_buckets(ca);

                /*
                 * Now, we write their new gens to disk so we can start writing
                 * new stuff to them:
                 */
                allocator_wait(ca, !atomic_read(&ca->set->prio_blocked));
                if (CACHE_SYNC(&ca->set->sb)) {
                        /*
                         * This could deadlock if an allocation with a btree
                         * node locked ever blocked - having the btree node
                         * locked would block garbage collection, but here we're
                         * waiting on garbage collection before we invalidate
                         * and free anything.
                         *
                         * But this should be safe since the btree code always
                         * uses btree_check_reserve() before allocating now, and
                         * if it fails it blocks without btree nodes locked.
                         */
                        if (!fifo_full(&ca->free_inc))
                                goto retry_invalidate;

                        bch_prio_write(ca);
                }
        }
out:
        wait_for_kthread_stop();
        return 0;
}
long bch_bucket_alloc(struct cache *ca, unsigned reserve, bool wait)
{
        DEFINE_WAIT(w);
        struct bucket *b;
        long r;

        /* fastpath */
        if (fifo_pop(&ca->free[RESERVE_NONE], r) ||
            fifo_pop(&ca->free[reserve], r))
                goto out;

        if (!wait) {
                trace_bcache_alloc_fail(ca, reserve);
                return -1;
        }

        do {
                prepare_to_wait(&ca->set->bucket_wait, &w,
                                TASK_UNINTERRUPTIBLE);

                mutex_unlock(&ca->set->bucket_lock);
                schedule();
                mutex_lock(&ca->set->bucket_lock);
        } while (!fifo_pop(&ca->free[RESERVE_NONE], r) &&
                 !fifo_pop(&ca->free[reserve], r));

        finish_wait(&ca->set->bucket_wait, &w);
out:
        if (ca->alloc_thread)
                wake_up_process(ca->alloc_thread);

        trace_bcache_alloc(ca, reserve);

        if (expensive_debug_checks(ca->set)) {
                size_t iter;
                long i;
                unsigned j;

                for (iter = 0; iter < prio_buckets(ca) * 2; iter++)
                        BUG_ON(ca->prio_buckets[iter] == (uint64_t) r);

                for (j = 0; j < RESERVE_NR; j++)
                        fifo_for_each(i, &ca->free[j], iter)
                                BUG_ON(i == r);
                fifo_for_each(i, &ca->free_inc, iter)
                        BUG_ON(i == r);
        }

        b = ca->buckets + r;

        BUG_ON(atomic_read(&b->pin) != 1);

        SET_GC_SECTORS_USED(b, ca->sb.bucket_size);

        if (reserve <= RESERVE_PRIO) {
                SET_GC_MARK(b, GC_MARK_METADATA);
                SET_GC_MOVE(b, 0);
                b->prio = BTREE_PRIO;
        } else {
                SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
                SET_GC_MOVE(b, 0);
                b->prio = INITIAL_PRIO;
        }

        if (ca->set->avail_nbuckets > 0) {
                ca->set->avail_nbuckets--;
                bch_update_bucket_in_use(ca->set, &ca->set->gc_stats);
        }

        return r;
}
void __bch_bucket_free(struct cache *ca, struct bucket *b)
{
        SET_GC_MARK(b, 0);
        SET_GC_SECTORS_USED(b, 0);

        if (ca->set->avail_nbuckets < ca->set->nbuckets) {
                ca->set->avail_nbuckets++;
                bch_update_bucket_in_use(ca->set, &ca->set->gc_stats);
        }
}
void bch_bucket_free(struct cache_set *c, struct bkey *k)
{
        unsigned i;

        for (i = 0; i < KEY_PTRS(k); i++)
                __bch_bucket_free(PTR_CACHE(c, k, i),
                                  PTR_BUCKET(c, k, i));
}
int __bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
                           struct bkey *k, int n, bool wait)
{
        int i;

        lockdep_assert_held(&c->bucket_lock);
        BUG_ON(!n || n > c->caches_loaded || n > 8);

        bkey_init(k);

        /* sort by free space/prio of oldest data in caches */

        for (i = 0; i < n; i++) {
                struct cache *ca = c->cache_by_alloc[i];
                long b = bch_bucket_alloc(ca, reserve, wait);

                if (b == -1)
                        goto err;

                k->ptr[i] = MAKE_PTR(ca->buckets[b].gen,
                                     bucket_to_sector(c, b),
                                     ca->sb.nr_this_dev);

                SET_KEY_PTRS(k, i + 1);
        }

        return 0;
err:
        bch_bucket_free(c, k);
        bkey_put(c, k);
        return -1;
}
int bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
                         struct bkey *k, int n, bool wait)
{
        int ret;

        mutex_lock(&c->bucket_lock);
        ret = __bch_bucket_alloc_set(c, reserve, k, n, wait);
        mutex_unlock(&c->bucket_lock);
        return ret;
}
/* Sector allocator */

struct open_bucket {
        struct list_head        list;
        unsigned                last_write_point;
        unsigned                sectors_free;
        BKEY_PADDED(key);
};
/*
 * We keep multiple buckets open for writes, and try to segregate different
 * write streams for better cache utilization: first we try to segregate flash
 * only volume write streams from cached devices, secondly we look for a bucket
 * where the last write to it was sequential with the current write, and
 * failing that we look for a bucket that was last used by the same task.
 *
 * The idea is that if you've got multiple tasks pulling data into the cache at
 * the same time, you'll get better cache utilization if you try to segregate
 * their data and preserve locality.
 *
 * For example, dirty sectors of a flash only volume are not reclaimable; if
 * they are mixed into buckets with dirty sectors of a cached device, those
 * buckets stay marked dirty and won't be reclaimed, even after the cached
 * device's dirty data has been written back to the backing device.
 *
 * And say you've started Firefox at the same time you're copying a
 * bunch of files. Firefox will likely end up being fairly hot and stay in the
 * cache a while, but the data you copied might not be; if you wrote all that
 * data to the same buckets it'd get invalidated at the same time.
 *
 * Both of those tasks will be doing fairly random IO so we can't rely on
 * detecting sequential IO to segregate their data, but going off of the task
 * should be a sane heuristic.
 */
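
/*
 * Hypothetical illustration of the heuristic above: a writeback thread
 * flushing a cached device and a task writing to a flash only volume present
 * different KEY_INODE()s (and different write_points), so pick_data_bucket()
 * below keeps their data in separate open buckets - the flash only volume's
 * unreclaimable dirty sectors then never pin buckets that otherwise hold only
 * reclaimable cached-device data.
 */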
static struct open_bucket *pick_data_bucket(struct cache_set *c,
                                            const struct bkey *search,
                                            unsigned write_point,
                                            struct bkey *alloc)
{
        struct open_bucket *ret, *ret_task = NULL;

        list_for_each_entry_reverse(ret, &c->data_buckets, list)
                if (UUID_FLASH_ONLY(&c->uuids[KEY_INODE(&ret->key)]) !=
                    UUID_FLASH_ONLY(&c->uuids[KEY_INODE(search)]))
                        continue;
                else if (!bkey_cmp(&ret->key, search))
                        goto found;
                else if (ret->last_write_point == write_point)
                        ret_task = ret;

        ret = ret_task ?: list_first_entry(&c->data_buckets,
                                           struct open_bucket, list);
found:
        if (!ret->sectors_free && KEY_PTRS(alloc)) {
                ret->sectors_free = c->sb.bucket_size;
                bkey_copy(&ret->key, alloc);
                bkey_init(alloc);
        }

        if (!ret->sectors_free)
                ret = NULL;

        return ret;
}
/*
 * Allocates some space in the cache to write to, sets k to point to the newly
 * allocated space, and updates KEY_SIZE(k) and KEY_OFFSET(k) (to point to the
 * end of the newly allocated space).
 *
 * May allocate fewer sectors than @sectors; KEY_SIZE(k) indicates how many
 * sectors were actually allocated.
 *
 * If s->writeback is true, will not fail.
 */
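
/*
 * Illustrative caller sketch (hypothetical, loosely modelled on the data
 * insert path): allocate space one chunk at a time until the whole write has
 * a home, splitting the write whenever fewer sectors than requested come
 * back. submit_one_chunk(), next_sector and sectors_remaining are made-up
 * names standing in for whatever the caller does with each finished key.
 *
 *	while (sectors_remaining) {
 *		*k = KEY(inode, next_sector, 0);
 *		if (!bch_alloc_sectors(c, k, sectors_remaining,
 *				       write_point, write_prio, wait))
 *			break;			// only possible when !wait
 *		next_sector       += KEY_SIZE(k);
 *		sectors_remaining -= KEY_SIZE(k);
 *		submit_one_chunk(k);
 *	}
 */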
bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned sectors,
                       unsigned write_point, unsigned write_prio, bool wait)
{
        struct open_bucket *b;
        BKEY_PADDED(key) alloc;
        unsigned i;

        /*
         * We might have to allocate a new bucket, which we can't do with a
         * spinlock held. So if we have to allocate, we drop the lock, allocate
         * and then retry. KEY_PTRS() indicates whether alloc points to
         * allocated bucket(s).
         */

        bkey_init(&alloc.key);
        spin_lock(&c->data_bucket_lock);

        while (!(b = pick_data_bucket(c, k, write_point, &alloc.key))) {
                unsigned watermark = write_prio
                        ? RESERVE_MOVINGGC
                        : RESERVE_NONE;

                spin_unlock(&c->data_bucket_lock);

                if (bch_bucket_alloc_set(c, watermark, &alloc.key, 1, wait))
                        return false;

                spin_lock(&c->data_bucket_lock);
        }

        /*
         * If we had to allocate, we might race and not need to allocate the
         * second time we call pick_data_bucket(). If we allocated a bucket but
         * didn't use it, drop the refcount bch_bucket_alloc_set() took:
         */
        if (KEY_PTRS(&alloc.key))
                bkey_put(c, &alloc.key);

        for (i = 0; i < KEY_PTRS(&b->key); i++)
                EBUG_ON(ptr_stale(c, &b->key, i));

        /* Set up the pointer to the space we're allocating: */

        for (i = 0; i < KEY_PTRS(&b->key); i++)
                k->ptr[i] = b->key.ptr[i];

        sectors = min(sectors, b->sectors_free);

        SET_KEY_OFFSET(k, KEY_OFFSET(k) + sectors);
        SET_KEY_SIZE(k, sectors);
        SET_KEY_PTRS(k, KEY_PTRS(&b->key));

        /*
         * Move b to the end of the lru, and keep track of what this bucket was
         * last used for:
         */
        list_move_tail(&b->list, &c->data_buckets);
        bkey_copy_key(&b->key, k);
        b->last_write_point = write_point;

        b->sectors_free -= sectors;

        for (i = 0; i < KEY_PTRS(&b->key); i++) {
                SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors);

                atomic_long_add(sectors,
                                &PTR_CACHE(c, &b->key, i)->sectors_written);
        }

        if (b->sectors_free < c->sb.block_size)
                b->sectors_free = 0;

        /*
         * k takes refcounts on the buckets it points to until it's inserted
         * into the btree, but if we're done with this bucket we just transfer
         * get_data_bucket()'s refcount.
         */
        if (b->sectors_free)
                for (i = 0; i < KEY_PTRS(&b->key); i++)
                        atomic_inc(&PTR_BUCKET(c, &b->key, i)->pin);

        spin_unlock(&c->data_bucket_lock);
        return true;
}
void bch_open_buckets_free(struct cache_set *c)
{
        struct open_bucket *b;

        while (!list_empty(&c->data_buckets)) {
                b = list_first_entry(&c->data_buckets,
                                     struct open_bucket, list);
                list_del(&b->list);
                kfree(b);
        }
}
int bch_open_buckets_alloc(struct cache_set *c)
{
        int i;

        spin_lock_init(&c->data_bucket_lock);

        for (i = 0; i < MAX_OPEN_BUCKETS; i++) {
                struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL);

                if (!b)
                        return -ENOMEM;

                list_add(&b->list, &c->data_buckets);
        }

        return 0;
}
int bch_cache_allocator_start(struct cache *ca)
{
        struct task_struct *k = kthread_run(bch_allocator_thread,
                                            ca, "bcache_allocator");
        if (IS_ERR(k))
                return PTR_ERR(k);

        ca->alloc_thread = k;
        return 0;
}