/*
 * Primary bucket allocation code
 *
 * Copyright 2012 Google, Inc.
 *
 * Allocation in bcache is done in terms of buckets:
 *
 * Each bucket has an associated 8 bit gen; this gen corresponds to the gen in
 * btree pointers - they must match for the pointer to be considered valid.
 *
 * Thus (assuming a bucket has no dirty data or metadata in it) we can reuse a
 * bucket simply by incrementing its gen.
 *
 * The gens (along with the priorities; it's really the gens that are
 * important, but the code is named as if it's the priorities) are written in
 * an arbitrary list of buckets on disk, with a pointer to them in the journal
 * header.
 *
 * When we invalidate a bucket, we have to write its new gen to disk and wait
 * for that write to complete before we use it - otherwise after a crash we
 * could have pointers that appeared to be good but pointed to data that had
 * been overwritten.
 *
 * Since the gens and priorities are all stored contiguously on disk, we can
 * batch this up: We fill up the free_inc list with freshly invalidated buckets,
 * call prio_write(), and when prio_write() finishes we pull buckets off the
 * free_inc list and optionally discard them.
 *
 * free_inc isn't the only freelist - if it was, we'd often have to sleep while
 * priorities and gens were being written before we could allocate. c->free is
 * a smaller freelist, and buckets on that list are always ready to be used.
 *
 * If we've got discards enabled, that happens when a bucket moves from the
 * free_inc list to the free list.
 *
 * There is another freelist, because sometimes we have buckets that we know
 * have nothing pointing into them - these we can reuse without waiting for
 * priorities to be rewritten. These come from freed btree nodes and buckets
 * that garbage collection discovered no longer had valid keys pointing into
 * them (because they were overwritten). That's the unused list - buckets on
 * the unused list move to the free list, optionally being discarded in the
 * process.
 *
 * It's also important to ensure that gens don't wrap around - with respect to
 * either the oldest gen in the btree or the gen on disk. This is quite
 * difficult to do in practice, but we explicitly guard against it anyways - if
 * a bucket is in danger of wrapping around we simply skip invalidating it that
 * time around, and we garbage collect or rewrite the priorities sooner than we
 * would have otherwise.
 *
 * bch_bucket_alloc() allocates a single bucket from a specific cache.
 *
 * bch_bucket_alloc_set() allocates one or more buckets from different caches
 * out of a cache set.
 *
 * free_some_buckets() drives all the processes described above. It's called
 * from bch_bucket_alloc() and a few other places that need to make sure free
 * buckets are ready.
 *
 * invalidate_buckets_(lru|fifo)() find buckets that are available to be
 * invalidated, invalidate them, and stick them on the free_inc list - in
 * either lru or fifo order.
 */
#include "bcache.h"
#include "btree.h"

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/random.h>
#include <trace/events/bcache.h>

#define MAX_IN_FLIGHT_DISCARDS		8U
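
/*
 * Added summary (not part of the original file): the bucket lifecycle
 * described in the comment above, as a flow through the three freelists:
 *
 *   invalidate_buckets()   -> free_inc -> prio_write() -> (discard) -> free
 *   freed btree nodes / GC -> unused   -----------------> (discard) -> free
 *
 * Allocation then always pops from ca->free.
 */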
/* Bucket heap / gen */

uint8_t bch_inc_gen(struct cache *ca, struct bucket *b)
{
	uint8_t ret = ++b->gen;

	ca->set->need_gc = max(ca->set->need_gc, bucket_gc_gen(b));
	WARN_ON_ONCE(ca->set->need_gc > BUCKET_GC_GEN_MAX);

	if (CACHE_SYNC(&ca->set->sb)) {
		ca->need_save_prio = max(ca->need_save_prio,
					 bucket_disk_gen(b));
		WARN_ON_ONCE(ca->need_save_prio > BUCKET_DISK_GEN_MAX);
	}

	return ret;
}
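
/*
 * Sketch (added for illustration, not in the original file): a btree
 * pointer carries the gen it was created with and is only valid while
 * that gen matches the bucket's current gen, so bch_inc_gen() above is
 * what invalidates stale pointers.  A hypothetical check:
 */
static inline bool example_ptr_gen_valid(struct bucket *b, uint8_t ptr_gen)
{
	/* stale once the bucket has been reused (gen incremented) */
	return b->gen == ptr_gen;
}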
void bch_rescale_priorities(struct cache_set *c, int sectors)
{
	struct cache *ca;
	struct bucket *b;
	unsigned next = c->nbuckets * c->sb.bucket_size / 1024;
	unsigned i;
	int r;

	atomic_sub(sectors, &c->rescale);

	do {
		r = atomic_read(&c->rescale);

		if (r >= 0)
			return;
	} while (atomic_cmpxchg(&c->rescale, r, r + next) != r);

	mutex_lock(&c->bucket_lock);

	c->min_prio = USHRT_MAX;

	for_each_cache(ca, c, i)
		for_each_bucket(b, ca)
			if (b->prio &&
			    b->prio != BTREE_PRIO &&
			    !atomic_read(&b->pin)) {
				b->prio--;
				c->min_prio = min(c->min_prio, b->prio);
			}

	mutex_unlock(&c->bucket_lock);
}
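
/*
 * Worked example (added commentary, not in the original): "next" above
 * is nbuckets * bucket_size / 1024 sectors, i.e. priorities age by one
 * for every ~0.1% of the cache's capacity written.  With nbuckets ==
 * 2^20 and bucket_size == 1024 sectors, next == 2^20 sectors, so each
 * 512MB of IO against the cache decrements every unpinned data
 * bucket's prio once.
 */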
/* Discard/TRIM */

struct discard {
	struct list_head	list;
	struct work_struct	work;
	struct cache		*ca;
	long			bucket;

	struct bio		bio;
	struct bio_vec		bv;
};
static void discard_finish(struct work_struct *w)
{
	struct discard *d = container_of(w, struct discard, work);
	struct cache *ca = d->ca;
	char buf[BDEVNAME_SIZE];

	if (!test_bit(BIO_UPTODATE, &d->bio.bi_flags)) {
		pr_notice("discard error on %s, disabling",
			 bdevname(ca->bdev, buf));
		d->ca->discard = 0;
	}

	mutex_lock(&ca->set->bucket_lock);

	fifo_push(&ca->free, d->bucket);
	list_add(&d->list, &ca->discards);
	atomic_dec(&ca->discards_in_flight);

	mutex_unlock(&ca->set->bucket_lock);

	closure_wake_up(&ca->set->bucket_wait);
	wake_up_process(ca->alloc_thread);

	closure_put(&ca->set->cl);
}
static void discard_endio(struct bio *bio, int error)
{
	struct discard *d = container_of(bio, struct discard, bio);
	schedule_work(&d->work);
}
static void do_discard(struct cache *ca, long bucket)
{
	struct discard *d = list_first_entry(&ca->discards,
					     struct discard, list);

	list_del(&d->list);
	d->bucket = bucket;

	atomic_inc(&ca->discards_in_flight);
	closure_get(&ca->set->cl);

	bio_init(&d->bio);

	d->bio.bi_sector	= bucket_to_sector(ca->set, d->bucket);
	d->bio.bi_bdev		= ca->bdev;
	d->bio.bi_rw		= REQ_WRITE|REQ_DISCARD;
	d->bio.bi_max_vecs	= 1;
	d->bio.bi_io_vec	= d->bio.bi_inline_vecs;
	d->bio.bi_size		= bucket_bytes(ca);
	d->bio.bi_end_io	= discard_endio;
	bio_set_prio(&d->bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

	submit_bio(0, &d->bio);
}
/* Allocation */

static inline bool can_inc_bucket_gen(struct bucket *b)
{
	return bucket_gc_gen(b) < BUCKET_GC_GEN_MAX &&
		bucket_disk_gen(b) < BUCKET_DISK_GEN_MAX;
}
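
/*
 * Sketch (added, not in the original file): bucket_gc_gen() and
 * bucket_disk_gen() are deltas between the bucket's current gen and its
 * gen as of the last garbage collection / last write to disk.  Because
 * the delta is computed in 8 bit arithmetic it stays correct across
 * uint8_t wraparound, which is why the guard above only has to compare
 * against the *_MAX thresholds.  A hypothetical illustration:
 */
static inline uint8_t example_gen_delta(uint8_t gen, uint8_t old_gen)
{
	/* e.g. gen == 2, old_gen == 250: (uint8_t) (2 - 250) == 8 */
	return gen - old_gen;
}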
bool bch_bucket_add_unused(struct cache *ca, struct bucket *b)
{
	BUG_ON(GC_MARK(b) || GC_SECTORS_USED(b));

	if (fifo_used(&ca->free) > ca->watermark[WATERMARK_MOVINGGC] &&
	    CACHE_REPLACEMENT(&ca->sb) == CACHE_REPLACEMENT_FIFO)
		return false;

	b->prio = 0;

	if (can_inc_bucket_gen(b) &&
	    fifo_push(&ca->unused, b - ca->buckets)) {
		atomic_inc(&b->pin);
		return true;
	}

	return false;
}
static bool can_invalidate_bucket(struct cache *ca, struct bucket *b)
{
	return GC_MARK(b) == GC_MARK_RECLAIMABLE &&
		!atomic_read(&b->pin) &&
		can_inc_bucket_gen(b);
}
static void invalidate_one_bucket(struct cache *ca, struct bucket *b)
{
	bch_inc_gen(ca, b);
	b->prio = INITIAL_PRIO;
	atomic_inc(&b->pin);
	fifo_push(&ca->free_inc, b - ca->buckets);
}
#define bucket_prio(b)				\
	(((unsigned) (b->prio - ca->set->min_prio)) * GC_SECTORS_USED(b))

#define bucket_max_cmp(l, r)	(bucket_prio(l) < bucket_prio(r))
#define bucket_min_cmp(l, r)	(bucket_prio(l) > bucket_prio(r))
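
/*
 * Added commentary (not in the original): invalidate_buckets_lru()
 * below uses these two comparators in sequence.  Filling the heap with
 * bucket_max_cmp keeps the highest-prio bucket at the root, so a new
 * bucket only replaces the root when it is cheaper to evict - the heap
 * thus retains the heap-size cheapest buckets seen.  Re-sifting with
 * bucket_min_cmp then turns it into a min-heap so heap_pop() yields
 * those buckets cheapest-first.
 */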
static void invalidate_buckets_lru(struct cache *ca)
{
	struct bucket *b;
	ssize_t i;

	ca->heap.used = 0;

	for_each_bucket(b, ca) {
		/*
		 * If we fill up the unused list, if we then return before
		 * adding anything to the free_inc list we'll skip writing
		 * prios/gens and just go back to allocating from the unused
		 * list:
		 */
		if (fifo_full(&ca->unused))
			return;

		if (!can_invalidate_bucket(ca, b))
			continue;

		if (!GC_SECTORS_USED(b) &&
		    bch_bucket_add_unused(ca, b))
			continue;

		if (!heap_full(&ca->heap))
			heap_add(&ca->heap, b, bucket_max_cmp);
		else if (bucket_max_cmp(b, heap_peek(&ca->heap))) {
			ca->heap.data[0] = b;
			heap_sift(&ca->heap, 0, bucket_max_cmp);
		}
	}

	for (i = ca->heap.used / 2 - 1; i >= 0; --i)
		heap_sift(&ca->heap, i, bucket_min_cmp);

	while (!fifo_full(&ca->free_inc)) {
		if (!heap_pop(&ca->heap, b, bucket_min_cmp)) {
			/*
			 * We don't want to be calling invalidate_buckets()
			 * multiple times when it can't do anything
			 */
			ca->invalidate_needs_gc = 1;
			bch_queue_gc(ca->set);
			return;
		}

		invalidate_one_bucket(ca, b);
	}
}
static void invalidate_buckets_fifo(struct cache *ca)
{
	struct bucket *b;
	size_t checked = 0;

	while (!fifo_full(&ca->free_inc)) {
		if (ca->fifo_last_bucket <  ca->sb.first_bucket ||
		    ca->fifo_last_bucket >= ca->sb.nbuckets)
			ca->fifo_last_bucket = ca->sb.first_bucket;

		b = ca->buckets + ca->fifo_last_bucket++;

		if (can_invalidate_bucket(ca, b))
			invalidate_one_bucket(ca, b);

		if (++checked >= ca->sb.nbuckets) {
			ca->invalidate_needs_gc = 1;
			bch_queue_gc(ca->set);
			return;
		}
	}
}
static void invalidate_buckets_random(struct cache *ca)
{
	struct bucket *b;
	size_t checked = 0;

	while (!fifo_full(&ca->free_inc)) {
		size_t n;
		get_random_bytes(&n, sizeof(n));

		n %= (size_t) (ca->sb.nbuckets - ca->sb.first_bucket);
		n += ca->sb.first_bucket;

		b = ca->buckets + n;

		if (can_invalidate_bucket(ca, b))
			invalidate_one_bucket(ca, b);

		if (++checked >= ca->sb.nbuckets / 2) {
			ca->invalidate_needs_gc = 1;
			bch_queue_gc(ca->set);
			return;
		}
	}
}
static void invalidate_buckets(struct cache *ca)
{
	if (ca->invalidate_needs_gc)
		return;

	switch (CACHE_REPLACEMENT(&ca->sb)) {
	case CACHE_REPLACEMENT_LRU:
		invalidate_buckets_lru(ca);
		break;
	case CACHE_REPLACEMENT_FIFO:
		invalidate_buckets_fifo(ca);
		break;
	case CACHE_REPLACEMENT_RANDOM:
		invalidate_buckets_random(ca);
		break;
	}

	trace_bcache_alloc_invalidate(ca);
}
#define allocator_wait(ca, cond)					\
do {									\
	while (1) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (cond)						\
			break;						\
									\
		mutex_unlock(&(ca)->set->bucket_lock);			\
		if (kthread_should_stop())				\
			return 0;					\
									\
		try_to_freeze();					\
		schedule();						\
		mutex_lock(&(ca)->set->bucket_lock);			\
	}								\
	__set_current_state(TASK_RUNNING);				\
} while (0)
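
/*
 * Note (added commentary, not in the original): allocator_wait() must
 * be a macro rather than a function because the kthread_should_stop()
 * path executes "return 0" in the context of the caller - unwinding
 * bch_allocator_thread() itself - and because "cond" has to be
 * re-evaluated on every iteration with bucket_lock held.
 */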
static int bch_allocator_thread(void *arg)
{
	struct cache *ca = arg;

	mutex_lock(&ca->set->bucket_lock);

	while (1) {
		/*
		 * First, we pull buckets off of the unused and free_inc lists,
		 * possibly issue discards to them, then we add the bucket to
		 * the free list:
		 */
		while (1) {
			long bucket;

			if ((!atomic_read(&ca->set->prio_blocked) ||
			     !CACHE_SYNC(&ca->set->sb)) &&
			    !fifo_empty(&ca->unused))
				fifo_pop(&ca->unused, bucket);
			else if (!fifo_empty(&ca->free_inc))
				fifo_pop(&ca->free_inc, bucket);
			else
				break;

			allocator_wait(ca, (int) fifo_free(&ca->free) >
				       atomic_read(&ca->discards_in_flight));

			if (ca->discard) {
				allocator_wait(ca, !list_empty(&ca->discards));
				do_discard(ca, bucket);
			} else {
				fifo_push(&ca->free, bucket);
				closure_wake_up(&ca->set->bucket_wait);
			}
		}

		/*
		 * We've run out of free buckets, we need to find some buckets
		 * we can invalidate. First, invalidate them in memory and add
		 * them to the free_inc list:
		 */

		allocator_wait(ca, ca->set->gc_mark_valid &&
			       (ca->need_save_prio > 64 ||
				!ca->invalidate_needs_gc));
		invalidate_buckets(ca);

		/*
		 * Now, we write their new gens to disk so we can start writing
		 * new stuff to them:
		 */
		allocator_wait(ca, !atomic_read(&ca->set->prio_blocked));
		if (CACHE_SYNC(&ca->set->sb) &&
		    (!fifo_empty(&ca->free_inc) ||
		     ca->need_save_prio > 64))
			bch_prio_write(ca);
	}
}
long bch_bucket_alloc(struct cache *ca, unsigned watermark, struct closure *cl)
{
	long r = -1;
again:
	wake_up_process(ca->alloc_thread);

	if (fifo_used(&ca->free) > ca->watermark[watermark] &&
	    fifo_pop(&ca->free, r)) {
		struct bucket *b = ca->buckets + r;
#ifdef CONFIG_BCACHE_EDEBUG
		size_t iter;
		long i;

		for (iter = 0; iter < prio_buckets(ca) * 2; iter++)
			BUG_ON(ca->prio_buckets[iter] == (uint64_t) r);

		fifo_for_each(i, &ca->free, iter)
			BUG_ON(i == r);
		fifo_for_each(i, &ca->free_inc, iter)
			BUG_ON(i == r);
		fifo_for_each(i, &ca->unused, iter)
			BUG_ON(i == r);
#endif
		BUG_ON(atomic_read(&b->pin) != 1);

		SET_GC_SECTORS_USED(b, ca->sb.bucket_size);

		if (watermark <= WATERMARK_METADATA) {
			SET_GC_MARK(b, GC_MARK_METADATA);
			b->prio = BTREE_PRIO;
		} else {
			SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
			b->prio = INITIAL_PRIO;
		}

		return r;
	}

	trace_bcache_alloc_fail(ca);

	if (cl) {
		closure_wait(&ca->set->bucket_wait, cl);

		if (closure_blocking(cl)) {
			mutex_unlock(&ca->set->bucket_lock);
			closure_sync(cl);
			mutex_lock(&ca->set->bucket_lock);
			goto again;
		}
	}

	return -1;
}
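
/*
 * Hypothetical usage sketch (added, not in the original file): callers
 * invoke bch_bucket_alloc() with bucket_lock held and may pass a
 * closure to wait on when no bucket is available.  The helper name and
 * the choice of WATERMARK_NONE are illustrative only.
 */
static inline long example_alloc_data_bucket(struct cache *ca,
					     struct closure *cl)
{
	long r;

	mutex_lock(&ca->set->bucket_lock);
	/* returns a bucket index, or -1 if nothing could be popped */
	r = bch_bucket_alloc(ca, WATERMARK_NONE, cl);
	mutex_unlock(&ca->set->bucket_lock);

	return r;
}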
void bch_bucket_free(struct cache_set *c, struct bkey *k)
{
	unsigned i;

	for (i = 0; i < KEY_PTRS(k); i++) {
		struct bucket *b = PTR_BUCKET(c, k, i);

		SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
		SET_GC_SECTORS_USED(b, 0);
		bch_bucket_add_unused(PTR_CACHE(c, k, i), b);
	}
}
int __bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
			   struct bkey *k, int n, struct closure *cl)
{
	int i;

	lockdep_assert_held(&c->bucket_lock);
	BUG_ON(!n || n > c->caches_loaded || n > 8);

	bkey_init(k);

	/* sort by free space/prio of oldest data in caches */

	for (i = 0; i < n; i++) {
		struct cache *ca = c->cache_by_alloc[i];
		long b = bch_bucket_alloc(ca, watermark, cl);

		if (b == -1)
			goto err;

		k->ptr[i] = PTR(ca->buckets[b].gen,
				bucket_to_sector(c, b),
				ca->sb.nr_this_dev);

		SET_KEY_PTRS(k, i + 1);
	}

	return 0;
err:
	bch_bucket_free(c, k);
	__bkey_put(c, k);
	return -1;
}
int bch_bucket_alloc_set(struct cache_set *c, unsigned watermark,
			 struct bkey *k, int n, struct closure *cl)
{
	int ret;

	mutex_lock(&c->bucket_lock);
	ret = __bch_bucket_alloc_set(c, watermark, k, n, cl);
	mutex_unlock(&c->bucket_lock);
	return ret;
}
/* Init */

int bch_cache_allocator_start(struct cache *ca)
{
	struct task_struct *k = kthread_run(bch_allocator_thread,
					    ca, "bcache_allocator");
	if (IS_ERR(k))
		return PTR_ERR(k);

	ca->alloc_thread = k;
	return 0;
}
void bch_cache_allocator_exit(struct cache *ca)
{
	struct discard *d;

	while (!list_empty(&ca->discards)) {
		d = list_first_entry(&ca->discards, struct discard, list);
		cancel_work_sync(&d->work);
		list_del(&d->list);
		kfree(d);
	}
}
int bch_cache_allocator_init(struct cache *ca)
{
	unsigned i;

	/*
	 * Reserve:
	 * Prio/gen writes first
	 * Then 8 for btree allocations
	 * Then half for the moving garbage collector
	 */

	ca->watermark[WATERMARK_PRIO] = 0;

	ca->watermark[WATERMARK_METADATA] = prio_buckets(ca);

	ca->watermark[WATERMARK_MOVINGGC] = 8 +
		ca->watermark[WATERMARK_METADATA];

	ca->watermark[WATERMARK_NONE] = ca->free.size / 2 +
		ca->watermark[WATERMARK_MOVINGGC];
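
	/*
	 * Worked example (added commentary, not in the original): with
	 * prio_buckets(ca) == 4 and ca->free.size == 512, the reserves
	 * nest as PRIO = 0 < METADATA = 4 < MOVINGGC = 12 < NONE = 268.
	 * bch_bucket_alloc() only succeeds at a given watermark while
	 * fifo_used(&ca->free) exceeds that reserve, so prio/gen writes
	 * can always allocate, and metadata allocations outlive moving
	 * gc, which outlives normal data allocations.
	 */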
	for (i = 0; i < MAX_IN_FLIGHT_DISCARDS; i++) {
		struct discard *d = kzalloc(sizeof(*d), GFP_KERNEL);
		if (!d)
			return -ENOMEM;

		d->ca = ca;
		INIT_WORK(&d->work, discard_finish);
		list_add(&d->list, &ca->discards);
	}

	return 0;
}