/*
 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
 *
 * Uses a block device as cache for other block devices; optimized for SSDs.
 * All allocation is done in buckets, which should match the erase block size
 * of the SSD.
 *
 * Buckets containing cached data are kept on a heap sorted by priority;
 * bucket priority is increased on cache hit, and periodically all the buckets
 * on the heap have their priority scaled down. This currently is just used as
 * an LRU but in the future should allow for more intelligent heuristics.
 *
 * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
 * counter. Garbage collection is used to remove stale pointers.
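 *
 * For example, a pointer becomes stale once the bucket's generation counter
 * has been incremented past the generation recorded in the key; conceptually
 * the check is just (an illustrative sketch - the real helpers live in
 * bcache.h):
 *
 *	stale = gen_after(PTR_BUCKET(c, k, i)->gen, PTR_GEN(k, i));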
 *
 * Indexing is done via a btree; nodes are not necessarily fully sorted, rather
 * as keys are inserted we only sort the pages that have not yet been written.
 * When garbage collection is run, we resort the entire node.
 *
 * All configuration is done via sysfs; see Documentation/bcache.txt.
 */
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/hash.h>
#include <linux/kthread.h>
#include <linux/prefetch.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/sched/clock.h>
#include <linux/rculist.h>

#include <trace/events/bcache.h>
/*
 * Todo:
 * register_bcache: Return errors out to userspace correctly
 *
 * Writeback: don't undirty key until after a cache flush
 *
 * Create an iterator for key pointers
 *
 * On btree write error, mark bucket such that it won't be freed from the cache
 *
 * Check for bad keys in replay
 *
 * Refcount journal entries in journal_replay
 *
 * Finish incremental gc
 *
 * Gc should free old UUIDs, data for invalid UUIDs
 *
 * Provide a way to list backing device UUIDs we have data cached for, and
 * probably how long it's been since we've seen them, and a way to invalidate
 * dirty data for devices that will never be attached again
 *
 * Keep 1 min/5 min/15 min statistics of how busy a block device has been, so
 * that based on that and how much dirty data we have we can keep writeback
 * from being starved
 *
 * Add a tracepoint or somesuch to watch for writeback starvation
 *
 * When btree depth > 1 and splitting an interior node, we have to make sure
 * alloc_bucket() cannot fail. This should be true but is not completely
 * obvious.
 *
 * If data write is less than hard sector size of ssd, round up offset in open
 * bucket to the next whole sector
 *
 * Superblock needs to be fleshed out for multiple cache devices
 *
 * Add a sysfs tunable for the number of writeback IOs in flight
 *
 * Add a sysfs tunable for the number of open data buckets
 *
 * IO tracking: Can we track when one process is doing io on behalf of another?
 * IO tracking: Don't use just an average, weigh more recent stuff higher
 *
 * Test module load/unload
 */
#define MAX_NEED_GC		64
#define MAX_SAVE_PRIO		72

#define PTR_DIRTY_BIT		(((uint64_t) 1 << 36))

#define PTR_HASH(c, k)							\
	(((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))

#define insert_lock(s, b)	((b)->level <= (s)->lock)
/*
 * These macros are for recursing down the btree - they handle the details of
 * locking and looking up nodes in the cache for you. They're best treated as
 * mere syntax when reading code that uses them.
 *
 * op->lock determines whether we take a read or a write lock at a given depth.
 * If you've got a read lock and find that you need a write lock (i.e. you're
 * going to have to split), set op->lock and return -EINTR; btree_root() will
 * call you again and you'll have the correct lock.
 */
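/*
 * For example, when a leaf insert discovers it has to split and therefore
 * needs write locks held higher up, it bumps op->lock and bails out so the
 * traversal is retried with the right locks (this is the pattern used by
 * bch_btree_insert_node() further down):
 *
 *	op->lock = b->c->root->level + 1;
 *	return -EINTR;
 */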
/**
 * btree - recurse down the btree on a specified key
 * @fn:		function to call, which will be passed the child node
 * @key:	key to recurse on
 * @b:		parent btree node
 * @op:		pointer to struct btree_op
 */
#define btree(fn, key, b, op, ...)					\
({									\
	int _r, l = (b)->level - 1;					\
	bool _w = l <= (op)->lock;					\
	struct btree *_child = bch_btree_node_get((b)->c, op, key, l,	\
						  _w, b);		\
	if (!IS_ERR(_child)) {						\
		_r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__);	\
		rw_unlock(_w, _child);					\
	} else								\
		_r = PTR_ERR(_child);					\
	_r;								\
})
/**
 * btree_root - call a function on the root of the btree
 * @fn:		function to call, which will be passed the child node
 * @c:		cache set
 * @op:		pointer to struct btree_op
 */
#define btree_root(fn, c, op, ...)					\
({									\
	int _r = -EINTR;						\
	do {								\
		struct btree *_b = (c)->root;				\
		bool _w = insert_lock(op, _b);				\
		rw_lock(_w, _b, _b->level);				\
		if (_b == (c)->root &&					\
		    _w == insert_lock(op, _b)) {			\
			_r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__);	\
		}							\
		rw_unlock(_w, _b);					\
		bch_cannibalize_unlock(c);				\
		if (_r == -EINTR)					\
			schedule();					\
	} while (_r == -EINTR);						\
									\
	finish_wait(&(c)->btree_cache_wait, &(op)->wait);		\
	_r;								\
})
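/*
 * For example, garbage collection and the initial consistency check walk the
 * tree through these macros - see bch_btree_gc() and bch_btree_check_recurse()
 * below:
 *
 *	ret = btree_root(gc_root, c, &op, &writes, &stats);
 *	ret = btree(check_recurse, p, b, op);
 */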
static inline struct bset *write_block(struct btree *b)
{
	return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c);
}

static void bch_btree_init_next(struct btree *b)
{
	/* If not a leaf node, always sort */
	if (b->level && b->keys.nsets)
		bch_btree_sort(&b->keys, &b->c->sort);
	else
		bch_btree_sort_lazy(&b->keys, &b->c->sort);

	if (b->written < btree_blocks(b))
		bch_bset_init_next(&b->keys, write_block(b),
				   bset_magic(&b->c->sb));
}
/* Btree key manipulation */

void bkey_put(struct cache_set *c, struct bkey *k)
{
	unsigned i;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(c, k, i))
			atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
}
static uint64_t btree_csum_set(struct btree *b, struct bset *i)
{
	uint64_t crc = b->key.ptr[0];
	/* checksum everything after the 64 bit csum field at the start */
	void *data = (void *) i + 8, *end = bset_bkey_last(i);

	crc = bch_crc64_update(crc, data, end - data);
	return crc ^ 0xffffffffffffffffULL;
}
void bch_btree_node_read_done(struct btree *b)
{
	const char *err = "bad btree header";
	struct bset *i = btree_bset_first(b);
	struct btree_iter *iter;

	iter = mempool_alloc(b->c->fill_iter, GFP_NOIO);
	iter->size = b->c->sb.bucket_size / b->c->sb.block_size;
	iter->used = 0;

#ifdef CONFIG_BCACHE_DEBUG
	iter->b = &b->keys;
#endif

	for (;
	     b->written < btree_blocks(b) && i->seq == b->keys.set[0].data->seq;
	     i = write_block(b)) {
		err = "unsupported bset version";
		if (i->version > BCACHE_BSET_VERSION)
			goto err;

		err = "bad btree header";
		if (b->written + set_blocks(i, block_bytes(b->c)) >
		    btree_blocks(b))
			goto err;

		err = "bad magic";
		if (i->magic != bset_magic(&b->c->sb))
			goto err;

		err = "bad checksum";
		switch (i->version) {
		case 0:
			if (i->csum != csum_set(i))
				goto err;
			break;
		case BCACHE_BSET_VERSION:
			if (i->csum != btree_csum_set(b, i))
				goto err;
			break;
		}

		err = "empty set";
		if (i != b->keys.set[0].data && !i->keys)
			goto err;

		bch_btree_iter_push(iter, i->start, bset_bkey_last(i));

		b->written += set_blocks(i, block_bytes(b->c));
	}

	err = "corrupted btree";
	for (i = write_block(b);
	     bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key);
	     i = ((void *) i) + block_bytes(b->c))
		if (i->seq == b->keys.set[0].data->seq)
			goto err;

	bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort);

	i = b->keys.set[0].data;
	err = "short btree key";
	if (b->keys.set[0].size &&
	    bkey_cmp(&b->key, &b->keys.set[0].end) < 0)
		goto err;

	if (b->written < btree_blocks(b))
		bch_bset_init_next(&b->keys, write_block(b),
				   bset_magic(&b->c->sb));
out:
	mempool_free(iter, b->c->fill_iter);
	return;
err:
	set_btree_node_io_error(b);
	bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys",
			    err, PTR_BUCKET_NR(b->c, &b->key, 0),
			    bset_block_offset(b, i), i->keys);
	goto out;
}
static void btree_node_read_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;

	closure_put(cl);
}

static void bch_btree_node_read(struct btree *b)
{
	uint64_t start_time = local_clock();
	struct closure cl;
	struct bio *bio;

	trace_bcache_btree_read(b);

	closure_init_stack(&cl);

	bio = bch_bbio_alloc(b->c);
	bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
	bio->bi_end_io	= btree_node_read_endio;
	bio->bi_private	= &cl;
	bio->bi_opf = REQ_OP_READ | REQ_META;

	bch_bio_map(bio, b->keys.set[0].data);

	bch_submit_bbio(bio, b->c, &b->key, 0);
	closure_sync(&cl);

	if (bio->bi_status)
		set_btree_node_io_error(b);

	bch_bbio_free(bio, b->c);

	if (btree_node_io_error(b))
		goto err;

	bch_btree_node_read_done(b);
	bch_time_stats_update(&b->c->btree_read_time, start_time);

	return;
err:
	bch_cache_set_error(b->c, "io error reading bucket %zu",
			    PTR_BUCKET_NR(b->c, &b->key, 0));
}
static void btree_complete_write(struct btree *b, struct btree_write *w)
{
	if (w->prio_blocked &&
	    !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked))
		wake_up_allocators(b->c);

	if (w->journal) {
		atomic_dec_bug(w->journal);
		__closure_wake_up(&b->c->journal.wait);
	}

	w->prio_blocked	= 0;
	w->journal	= NULL;
}
static void btree_node_write_unlock(struct closure *cl)
{
	struct btree *b = container_of(cl, struct btree, io);

	up(&b->io_mutex);
}

static void __btree_node_write_done(struct closure *cl)
{
	struct btree *b = container_of(cl, struct btree, io);
	struct btree_write *w = btree_prev_write(b);

	bch_bbio_free(b->bio, b->c);
	b->bio = NULL;
	btree_complete_write(b, w);

	if (btree_node_dirty(b))
		schedule_delayed_work(&b->work, 30 * HZ);

	closure_return_with_destructor(cl, btree_node_write_unlock);
}

static void btree_node_write_done(struct closure *cl)
{
	struct btree *b = container_of(cl, struct btree, io);

	bio_free_pages(b->bio);
	__btree_node_write_done(cl);
}

static void btree_node_write_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;
	struct btree *b = container_of(cl, struct btree, io);

	if (bio->bi_status)
		set_btree_node_io_error(b);

	bch_bbio_count_io_errors(b->c, bio, bio->bi_status, "writing btree");
	closure_put(cl);
}
static void do_btree_node_write(struct btree *b)
{
	struct closure *cl = &b->io;
	struct bset *i = btree_bset_last(b);
	BKEY_PADDED(key) k;

	i->version	= BCACHE_BSET_VERSION;
	i->csum		= btree_csum_set(b, i);

	BUG_ON(b->bio);
	b->bio = bch_bbio_alloc(b->c);

	b->bio->bi_end_io	= btree_node_write_endio;
	b->bio->bi_private	= cl;
	b->bio->bi_iter.bi_size	= roundup(set_bytes(i), block_bytes(b->c));
	b->bio->bi_opf		= REQ_OP_WRITE | REQ_META | REQ_FUA;
	bch_bio_map(b->bio, i);

	/*
	 * If we're appending to a leaf node, we don't technically need FUA -
	 * this write just needs to be persisted before the next journal write,
	 * which will be marked FLUSH|FUA.
	 *
	 * Similarly if we're writing a new btree root - the pointer is going to
	 * be in the next journal entry.
	 *
	 * But if we're writing a new btree node (that isn't a root) or
	 * appending to a non leaf btree node, we need either FUA or a flush
	 * when we write the parent with the new pointer. FUA is cheaper than a
	 * flush, and writes appending to leaf nodes aren't blocking anything so
	 * just make all btree node writes FUA to keep things sane.
	 */

	bkey_copy(&k.key, &b->key);
	SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) +
		       bset_sector_offset(&b->keys, i));

	if (!bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) {
		int j;
		struct bio_vec *bv;
		void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));

		bio_for_each_segment_all(bv, b->bio, j)
			memcpy(page_address(bv->bv_page),
			       base + j * PAGE_SIZE, PAGE_SIZE);

		bch_submit_bbio(b->bio, b->c, &k.key, 0);

		continue_at(cl, btree_node_write_done, NULL);
	} else {
		bch_bio_map(b->bio, i);

		bch_submit_bbio(b->bio, b->c, &k.key, 0);

		closure_sync(cl);
		continue_at_nobarrier(cl, __btree_node_write_done, NULL);
	}
}
void __bch_btree_node_write(struct btree *b, struct closure *parent)
{
	struct bset *i = btree_bset_last(b);

	lockdep_assert_held(&b->write_lock);

	trace_bcache_btree_write(b);

	BUG_ON(current->bio_list);
	BUG_ON(b->written >= btree_blocks(b));
	BUG_ON(b->written && !i->keys);
	BUG_ON(btree_bset_first(b)->seq != i->seq);
	bch_check_keys(&b->keys, "writing");

	cancel_delayed_work(&b->work);

	/* If caller isn't waiting for write, parent refcount is cache set */
	down(&b->io_mutex);
	closure_init(&b->io, parent ?: &b->c->cl);

	clear_bit(BTREE_NODE_dirty,	 &b->flags);
	change_bit(BTREE_NODE_write_idx, &b->flags);

	do_btree_node_write(b);

	atomic_long_add(set_blocks(i, block_bytes(b->c)) * b->c->sb.block_size,
			&PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);

	b->written += set_blocks(i, block_bytes(b->c));
}

void bch_btree_node_write(struct btree *b, struct closure *parent)
{
	unsigned nsets = b->keys.nsets;

	lockdep_assert_held(&b->lock);

	__bch_btree_node_write(b, parent);

	/*
	 * do verify if there was more than one set initially (i.e. we did a
	 * sort) and we sorted down to a single set:
	 */
	if (nsets && !b->keys.nsets)
		bch_btree_verify(b);

	bch_btree_init_next(b);
}
static void bch_btree_node_write_sync(struct btree *b)
{
	struct closure cl;

	closure_init_stack(&cl);

	mutex_lock(&b->write_lock);
	bch_btree_node_write(b, &cl);
	mutex_unlock(&b->write_lock);

	closure_sync(&cl);
}

static void btree_node_write_work(struct work_struct *w)
{
	struct btree *b = container_of(to_delayed_work(w), struct btree, work);

	mutex_lock(&b->write_lock);
	if (btree_node_dirty(b))
		__bch_btree_node_write(b, NULL);
	mutex_unlock(&b->write_lock);
}
static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
{
	struct bset *i = btree_bset_last(b);
	struct btree_write *w = btree_current_write(b);

	lockdep_assert_held(&b->write_lock);

	BUG_ON(!b->written);
	BUG_ON(!i->keys);

	if (!btree_node_dirty(b))
		schedule_delayed_work(&b->work, 30 * HZ);

	set_btree_node_dirty(b);

	if (journal_ref) {
		if (w->journal &&
		    journal_pin_cmp(b->c, w->journal, journal_ref)) {
			atomic_dec_bug(w->journal);
			w->journal = NULL;
		}

		if (!w->journal) {
			w->journal = journal_ref;
			atomic_inc(w->journal);
		}
	}

	/* Force write if set is too big */
	if (set_bytes(i) > PAGE_SIZE - 48 &&
	    !current->bio_list)
		bch_btree_node_write(b, NULL);
}
/*
 * Btree in memory cache - allocation/freeing
 * mca -> memory cache
 */

#define mca_reserve(c)	(((c->root && c->root->level)		\
			  ? c->root->level : 1) * 8 + 16)
#define mca_can_free(c)						\
	max_t(int, 0, c->btree_cache_used - mca_reserve(c))
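/*
 * For example, with the root at level 2 (a three level tree) the reserve
 * works out to 2 * 8 + 16 = 32 cached nodes, and with a leaf-only root it is
 * 1 * 8 + 16 = 24; mca_can_free() is whatever the cache holds beyond that.
 */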
static void mca_data_free(struct btree *b)
{
	BUG_ON(b->io_mutex.count != 1);

	bch_btree_keys_free(&b->keys);

	b->c->btree_cache_used--;
	list_move(&b->list, &b->c->btree_cache_freed);
}

static void mca_bucket_free(struct btree *b)
{
	BUG_ON(btree_node_dirty(b));

	b->key.ptr[0] = 0;
	hlist_del_init_rcu(&b->hash);
	list_move(&b->list, &b->c->btree_cache_freeable);
}

static unsigned btree_order(struct bkey *k)
{
	return ilog2(KEY_SIZE(k) / PAGE_SECTORS ?: 1);
}

static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
{
	if (!bch_btree_keys_alloc(&b->keys,
				  max_t(unsigned,
					ilog2(b->c->btree_pages),
					btree_order(k)),
				  gfp)) {
		b->c->btree_cache_used++;
		list_move(&b->list, &b->c->btree_cache);
	} else
		list_move(&b->list, &b->c->btree_cache_freed);
}
static struct btree *mca_bucket_alloc(struct cache_set *c,
				      struct bkey *k, gfp_t gfp)
{
	struct btree *b = kzalloc(sizeof(struct btree), gfp);
	if (!b)
		return NULL;

	init_rwsem(&b->lock);
	lockdep_set_novalidate_class(&b->lock);
	mutex_init(&b->write_lock);
	lockdep_set_novalidate_class(&b->write_lock);
	INIT_LIST_HEAD(&b->list);
	INIT_DELAYED_WORK(&b->work, btree_node_write_work);
	b->c = c;
	sema_init(&b->io_mutex, 1);

	mca_data_alloc(b, k, gfp);
	return b;
}
static int mca_reap(struct btree *b, unsigned min_order, bool flush)
{
	struct closure cl;

	closure_init_stack(&cl);
	lockdep_assert_held(&b->c->bucket_lock);

	if (!down_write_trylock(&b->lock))
		return -ENOMEM;

	BUG_ON(btree_node_dirty(b) && !b->keys.set[0].data);

	if (b->keys.page_order < min_order)
		goto out_unlock;

	if (!flush) {
		if (btree_node_dirty(b))
			goto out_unlock;

		if (down_trylock(&b->io_mutex))
			goto out_unlock;
		up(&b->io_mutex);
	}

	mutex_lock(&b->write_lock);
	if (btree_node_dirty(b))
		__bch_btree_node_write(b, &cl);
	mutex_unlock(&b->write_lock);

	closure_sync(&cl);

	/* wait for any in flight btree write */
	down(&b->io_mutex);
	up(&b->io_mutex);

	return 0;
out_unlock:
	rw_unlock(true, b);
	return -ENOMEM;
}
659 static unsigned long bch_mca_scan(struct shrinker
*shrink
,
660 struct shrink_control
*sc
)
662 struct cache_set
*c
= container_of(shrink
, struct cache_set
, shrink
);
664 unsigned long i
, nr
= sc
->nr_to_scan
;
665 unsigned long freed
= 0;
667 if (c
->shrinker_disabled
)
670 if (c
->btree_cache_alloc_lock
)
673 /* Return -1 if we can't do anything right now */
674 if (sc
->gfp_mask
& __GFP_IO
)
675 mutex_lock(&c
->bucket_lock
);
676 else if (!mutex_trylock(&c
->bucket_lock
))
	/*
	 * It's _really_ critical that we don't free too many btree nodes - we
	 * have to always leave ourselves a reserve. The reserve is how we
	 * guarantee that allocating memory for a new btree node can always
	 * succeed, so that inserting keys into the btree can always succeed and
	 * IO can always make forward progress:
	 */
686 nr
/= c
->btree_pages
;
687 nr
= min_t(unsigned long, nr
, mca_can_free(c
));
690 list_for_each_entry_safe(b
, t
, &c
->btree_cache_freeable
, list
) {
695 !mca_reap(b
, 0, false)) {
702 for (i
= 0; (nr
--) && i
< c
->btree_cache_used
; i
++) {
703 if (list_empty(&c
->btree_cache
))
706 b
= list_first_entry(&c
->btree_cache
, struct btree
, list
);
707 list_rotate_left(&c
->btree_cache
);
710 !mca_reap(b
, 0, false)) {
719 mutex_unlock(&c
->bucket_lock
);
static unsigned long bch_mca_count(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	struct cache_set *c = container_of(shrink, struct cache_set, shrink);

	if (c->shrinker_disabled)
		return 0;

	if (c->btree_cache_alloc_lock)
		return 0;

	return mca_can_free(c) * c->btree_pages;
}
737 void bch_btree_cache_free(struct cache_set
*c
)
741 closure_init_stack(&cl
);
743 if (c
->shrink
.list
.next
)
744 unregister_shrinker(&c
->shrink
);
746 mutex_lock(&c
->bucket_lock
);
748 #ifdef CONFIG_BCACHE_DEBUG
750 list_move(&c
->verify_data
->list
, &c
->btree_cache
);
752 free_pages((unsigned long) c
->verify_ondisk
, ilog2(bucket_pages(c
)));
755 list_splice(&c
->btree_cache_freeable
,
758 while (!list_empty(&c
->btree_cache
)) {
759 b
= list_first_entry(&c
->btree_cache
, struct btree
, list
);
761 if (btree_node_dirty(b
))
762 btree_complete_write(b
, btree_current_write(b
));
763 clear_bit(BTREE_NODE_dirty
, &b
->flags
);
768 while (!list_empty(&c
->btree_cache_freed
)) {
769 b
= list_first_entry(&c
->btree_cache_freed
,
772 cancel_delayed_work_sync(&b
->work
);
776 mutex_unlock(&c
->bucket_lock
);
779 int bch_btree_cache_alloc(struct cache_set
*c
)
783 for (i
= 0; i
< mca_reserve(c
); i
++)
784 if (!mca_bucket_alloc(c
, &ZERO_KEY
, GFP_KERNEL
))
787 list_splice_init(&c
->btree_cache
,
788 &c
->btree_cache_freeable
);
790 #ifdef CONFIG_BCACHE_DEBUG
791 mutex_init(&c
->verify_lock
);
793 c
->verify_ondisk
= (void *)
794 __get_free_pages(GFP_KERNEL
, ilog2(bucket_pages(c
)));
796 c
->verify_data
= mca_bucket_alloc(c
, &ZERO_KEY
, GFP_KERNEL
);
798 if (c
->verify_data
&&
799 c
->verify_data
->keys
.set
->data
)
800 list_del_init(&c
->verify_data
->list
);
802 c
->verify_data
= NULL
;
805 c
->shrink
.count_objects
= bch_mca_count
;
806 c
->shrink
.scan_objects
= bch_mca_scan
;
808 c
->shrink
.batch
= c
->btree_pages
* 2;
809 register_shrinker(&c
->shrink
);
/* Btree in memory cache - hash table */

static struct hlist_head *mca_hash(struct cache_set *c, struct bkey *k)
{
	return &c->bucket_hash[hash_32(PTR_HASH(c, k), BUCKET_HASH_BITS)];
}

static struct btree *mca_find(struct cache_set *c, struct bkey *k)
{
	struct btree *b;

	rcu_read_lock();
	hlist_for_each_entry_rcu(b, mca_hash(c, k), hash)
		if (PTR_HASH(c, &b->key) == PTR_HASH(c, k))
			goto out;
	b = NULL;
out:
	rcu_read_unlock();
	return b;
}
static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op)
{
	struct task_struct *old;

	old = cmpxchg(&c->btree_cache_alloc_lock, NULL, current);
	if (old && old != current) {
		if (op)
			prepare_to_wait(&c->btree_cache_wait, &op->wait,
					TASK_UNINTERRUPTIBLE);
		return -EINTR;
	}

	return 0;
}
static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op,
				     struct bkey *k)
{
	struct btree *b;

	trace_bcache_btree_cache_cannibalize(c);

	if (mca_cannibalize_lock(c, op))
		return ERR_PTR(-EINTR);

	list_for_each_entry_reverse(b, &c->btree_cache, list)
		if (!mca_reap(b, btree_order(k), false))
			return b;

	list_for_each_entry_reverse(b, &c->btree_cache, list)
		if (!mca_reap(b, btree_order(k), true))
			return b;

	WARN(1, "btree cache cannibalize failed\n");
	return ERR_PTR(-ENOMEM);
}
/*
 * We can only have one thread cannibalizing other cached btree nodes at a time,
 * or we'll deadlock. We use an open coded mutex to ensure that, which a
 * cannibalize_bucket() will take. This means every time we unlock the root of
 * the btree, we need to release this lock if we have it held.
 */
static void bch_cannibalize_unlock(struct cache_set *c)
{
	if (c->btree_cache_alloc_lock == current) {
		c->btree_cache_alloc_lock = NULL;
		wake_up(&c->btree_cache_wait);
	}
}
886 static struct btree
*mca_alloc(struct cache_set
*c
, struct btree_op
*op
,
887 struct bkey
*k
, int level
)
891 BUG_ON(current
->bio_list
);
893 lockdep_assert_held(&c
->bucket_lock
);
898 /* btree_free() doesn't free memory; it sticks the node on the end of
899 * the list. Check if there's any freed nodes there:
901 list_for_each_entry(b
, &c
->btree_cache_freeable
, list
)
902 if (!mca_reap(b
, btree_order(k
), false))
905 /* We never free struct btree itself, just the memory that holds the on
906 * disk node. Check the freed list before allocating a new one:
908 list_for_each_entry(b
, &c
->btree_cache_freed
, list
)
909 if (!mca_reap(b
, 0, false)) {
910 mca_data_alloc(b
, k
, __GFP_NOWARN
|GFP_NOIO
);
911 if (!b
->keys
.set
[0].data
)
917 b
= mca_bucket_alloc(c
, k
, __GFP_NOWARN
|GFP_NOIO
);
921 BUG_ON(!down_write_trylock(&b
->lock
));
922 if (!b
->keys
.set
->data
)
925 BUG_ON(b
->io_mutex
.count
!= 1);
927 bkey_copy(&b
->key
, k
);
928 list_move(&b
->list
, &c
->btree_cache
);
929 hlist_del_init_rcu(&b
->hash
);
930 hlist_add_head_rcu(&b
->hash
, mca_hash(c
, k
));
932 lock_set_subclass(&b
->lock
.dep_map
, level
+ 1, _THIS_IP_
);
933 b
->parent
= (void *) ~0UL;
939 bch_btree_keys_init(&b
->keys
, &bch_extent_keys_ops
,
940 &b
->c
->expensive_debug_checks
);
942 bch_btree_keys_init(&b
->keys
, &bch_btree_keys_ops
,
943 &b
->c
->expensive_debug_checks
);
950 b
= mca_cannibalize(c
, op
, k
);
/**
 * bch_btree_node_get - find a btree node in the cache and lock it, reading it
 * in from disk if necessary.
 *
 * If IO is necessary and running under generic_make_request, returns -EAGAIN.
 *
 * The btree node will have either a read or a write lock held, depending on
 * level and op->lock.
 */
966 struct btree
*bch_btree_node_get(struct cache_set
*c
, struct btree_op
*op
,
967 struct bkey
*k
, int level
, bool write
,
968 struct btree
*parent
)
978 if (current
->bio_list
)
979 return ERR_PTR(-EAGAIN
);
981 mutex_lock(&c
->bucket_lock
);
982 b
= mca_alloc(c
, op
, k
, level
);
983 mutex_unlock(&c
->bucket_lock
);
990 bch_btree_node_read(b
);
993 downgrade_write(&b
->lock
);
995 rw_lock(write
, b
, level
);
996 if (PTR_HASH(c
, &b
->key
) != PTR_HASH(c
, k
)) {
1000 BUG_ON(b
->level
!= level
);
1006 for (; i
<= b
->keys
.nsets
&& b
->keys
.set
[i
].size
; i
++) {
1007 prefetch(b
->keys
.set
[i
].tree
);
1008 prefetch(b
->keys
.set
[i
].data
);
1011 for (; i
<= b
->keys
.nsets
; i
++)
1012 prefetch(b
->keys
.set
[i
].data
);
1014 if (btree_node_io_error(b
)) {
1015 rw_unlock(write
, b
);
1016 return ERR_PTR(-EIO
);
1019 BUG_ON(!b
->written
);
1024 static void btree_node_prefetch(struct btree
*parent
, struct bkey
*k
)
1028 mutex_lock(&parent
->c
->bucket_lock
);
1029 b
= mca_alloc(parent
->c
, NULL
, k
, parent
->level
- 1);
1030 mutex_unlock(&parent
->c
->bucket_lock
);
1032 if (!IS_ERR_OR_NULL(b
)) {
1034 bch_btree_node_read(b
);
1041 static void btree_node_free(struct btree
*b
)
1043 trace_bcache_btree_node_free(b
);
1045 BUG_ON(b
== b
->c
->root
);
1047 mutex_lock(&b
->write_lock
);
1049 if (btree_node_dirty(b
))
1050 btree_complete_write(b
, btree_current_write(b
));
1051 clear_bit(BTREE_NODE_dirty
, &b
->flags
);
1053 mutex_unlock(&b
->write_lock
);
1055 cancel_delayed_work(&b
->work
);
1057 mutex_lock(&b
->c
->bucket_lock
);
1058 bch_bucket_free(b
->c
, &b
->key
);
1060 mutex_unlock(&b
->c
->bucket_lock
);
1063 struct btree
*__bch_btree_node_alloc(struct cache_set
*c
, struct btree_op
*op
,
1064 int level
, bool wait
,
1065 struct btree
*parent
)
1068 struct btree
*b
= ERR_PTR(-EAGAIN
);
1070 mutex_lock(&c
->bucket_lock
);
1072 if (__bch_bucket_alloc_set(c
, RESERVE_BTREE
, &k
.key
, 1, wait
))
1075 bkey_put(c
, &k
.key
);
1076 SET_KEY_SIZE(&k
.key
, c
->btree_pages
* PAGE_SECTORS
);
1078 b
= mca_alloc(c
, op
, &k
.key
, level
);
1084 "Tried to allocate bucket that was in btree cache");
1090 bch_bset_init_next(&b
->keys
, b
->keys
.set
->data
, bset_magic(&b
->c
->sb
));
1092 mutex_unlock(&c
->bucket_lock
);
1094 trace_bcache_btree_node_alloc(b
);
1097 bch_bucket_free(c
, &k
.key
);
1099 mutex_unlock(&c
->bucket_lock
);
1101 trace_bcache_btree_node_alloc_fail(c
);
1105 static struct btree
*bch_btree_node_alloc(struct cache_set
*c
,
1106 struct btree_op
*op
, int level
,
1107 struct btree
*parent
)
1109 return __bch_btree_node_alloc(c
, op
, level
, op
!= NULL
, parent
);
1112 static struct btree
*btree_node_alloc_replacement(struct btree
*b
,
1113 struct btree_op
*op
)
1115 struct btree
*n
= bch_btree_node_alloc(b
->c
, op
, b
->level
, b
->parent
);
1116 if (!IS_ERR_OR_NULL(n
)) {
1117 mutex_lock(&n
->write_lock
);
1118 bch_btree_sort_into(&b
->keys
, &n
->keys
, &b
->c
->sort
);
1119 bkey_copy_key(&n
->key
, &b
->key
);
1120 mutex_unlock(&n
->write_lock
);
1126 static void make_btree_freeing_key(struct btree
*b
, struct bkey
*k
)
1130 mutex_lock(&b
->c
->bucket_lock
);
1132 atomic_inc(&b
->c
->prio_blocked
);
1134 bkey_copy(k
, &b
->key
);
1135 bkey_copy_key(k
, &ZERO_KEY
);
1137 for (i
= 0; i
< KEY_PTRS(k
); i
++)
1139 bch_inc_gen(PTR_CACHE(b
->c
, &b
->key
, i
),
1140 PTR_BUCKET(b
->c
, &b
->key
, i
)));
1142 mutex_unlock(&b
->c
->bucket_lock
);
1145 static int btree_check_reserve(struct btree
*b
, struct btree_op
*op
)
1147 struct cache_set
*c
= b
->c
;
1149 unsigned i
, reserve
= (c
->root
->level
- b
->level
) * 2 + 1;
1151 mutex_lock(&c
->bucket_lock
);
1153 for_each_cache(ca
, c
, i
)
1154 if (fifo_used(&ca
->free
[RESERVE_BTREE
]) < reserve
) {
1156 prepare_to_wait(&c
->btree_cache_wait
, &op
->wait
,
1157 TASK_UNINTERRUPTIBLE
);
1158 mutex_unlock(&c
->bucket_lock
);
1162 mutex_unlock(&c
->bucket_lock
);
1164 return mca_cannibalize_lock(b
->c
, op
);
/* Garbage collection */
1169 static uint8_t __bch_btree_mark_key(struct cache_set
*c
, int level
,
1177 * ptr_invalid() can't return true for the keys that mark btree nodes as
1178 * freed, but since ptr_bad() returns true we'll never actually use them
1179 * for anything and thus we don't want mark their pointers here
1181 if (!bkey_cmp(k
, &ZERO_KEY
))
1184 for (i
= 0; i
< KEY_PTRS(k
); i
++) {
1185 if (!ptr_available(c
, k
, i
))
1188 g
= PTR_BUCKET(c
, k
, i
);
1190 if (gen_after(g
->last_gc
, PTR_GEN(k
, i
)))
1191 g
->last_gc
= PTR_GEN(k
, i
);
1193 if (ptr_stale(c
, k
, i
)) {
1194 stale
= max(stale
, ptr_stale(c
, k
, i
));
1198 cache_bug_on(GC_MARK(g
) &&
1199 (GC_MARK(g
) == GC_MARK_METADATA
) != (level
!= 0),
1200 c
, "inconsistent ptrs: mark = %llu, level = %i",
1204 SET_GC_MARK(g
, GC_MARK_METADATA
);
1205 else if (KEY_DIRTY(k
))
1206 SET_GC_MARK(g
, GC_MARK_DIRTY
);
1207 else if (!GC_MARK(g
))
1208 SET_GC_MARK(g
, GC_MARK_RECLAIMABLE
);
1210 /* guard against overflow */
1211 SET_GC_SECTORS_USED(g
, min_t(unsigned,
1212 GC_SECTORS_USED(g
) + KEY_SIZE(k
),
1213 MAX_GC_SECTORS_USED
));
1215 BUG_ON(!GC_SECTORS_USED(g
));
1221 #define btree_mark_key(b, k) __bch_btree_mark_key(b->c, b->level, k)
1223 void bch_initial_mark_key(struct cache_set
*c
, int level
, struct bkey
*k
)
1227 for (i
= 0; i
< KEY_PTRS(k
); i
++)
1228 if (ptr_available(c
, k
, i
) &&
1229 !ptr_stale(c
, k
, i
)) {
1230 struct bucket
*b
= PTR_BUCKET(c
, k
, i
);
1232 b
->gen
= PTR_GEN(k
, i
);
1234 if (level
&& bkey_cmp(k
, &ZERO_KEY
))
1235 b
->prio
= BTREE_PRIO
;
1236 else if (!level
&& b
->prio
== BTREE_PRIO
)
1237 b
->prio
= INITIAL_PRIO
;
1240 __bch_btree_mark_key(c
, level
, k
);
1243 static bool btree_gc_mark_node(struct btree
*b
, struct gc_stat
*gc
)
1246 unsigned keys
= 0, good_keys
= 0;
1248 struct btree_iter iter
;
1249 struct bset_tree
*t
;
1253 for_each_key_filter(&b
->keys
, k
, &iter
, bch_ptr_invalid
) {
1254 stale
= max(stale
, btree_mark_key(b
, k
));
1257 if (bch_ptr_bad(&b
->keys
, k
))
1260 gc
->key_bytes
+= bkey_u64s(k
);
1264 gc
->data
+= KEY_SIZE(k
);
1267 for (t
= b
->keys
.set
; t
<= &b
->keys
.set
[b
->keys
.nsets
]; t
++)
1268 btree_bug_on(t
->size
&&
1269 bset_written(&b
->keys
, t
) &&
1270 bkey_cmp(&b
->key
, &t
->end
) < 0,
1271 b
, "found short btree key in gc");
1273 if (b
->c
->gc_always_rewrite
)
1279 if ((keys
- good_keys
) * 2 > keys
)
1285 #define GC_MERGE_NODES 4U
1287 struct gc_merge_info
{
1292 static int bch_btree_insert_node(struct btree
*, struct btree_op
*,
1293 struct keylist
*, atomic_t
*, struct bkey
*);
1295 static int btree_gc_coalesce(struct btree
*b
, struct btree_op
*op
,
1296 struct gc_stat
*gc
, struct gc_merge_info
*r
)
1298 unsigned i
, nodes
= 0, keys
= 0, blocks
;
1299 struct btree
*new_nodes
[GC_MERGE_NODES
];
1300 struct keylist keylist
;
1304 bch_keylist_init(&keylist
);
1306 if (btree_check_reserve(b
, NULL
))
1309 memset(new_nodes
, 0, sizeof(new_nodes
));
1310 closure_init_stack(&cl
);
1312 while (nodes
< GC_MERGE_NODES
&& !IS_ERR_OR_NULL(r
[nodes
].b
))
1313 keys
+= r
[nodes
++].keys
;
1315 blocks
= btree_default_blocks(b
->c
) * 2 / 3;
1318 __set_blocks(b
->keys
.set
[0].data
, keys
,
1319 block_bytes(b
->c
)) > blocks
* (nodes
- 1))
1322 for (i
= 0; i
< nodes
; i
++) {
1323 new_nodes
[i
] = btree_node_alloc_replacement(r
[i
].b
, NULL
);
1324 if (IS_ERR_OR_NULL(new_nodes
[i
]))
1325 goto out_nocoalesce
;
1329 * We have to check the reserve here, after we've allocated our new
1330 * nodes, to make sure the insert below will succeed - we also check
1331 * before as an optimization to potentially avoid a bunch of expensive
1334 if (btree_check_reserve(b
, NULL
))
1335 goto out_nocoalesce
;
1337 for (i
= 0; i
< nodes
; i
++)
1338 mutex_lock(&new_nodes
[i
]->write_lock
);
1340 for (i
= nodes
- 1; i
> 0; --i
) {
1341 struct bset
*n1
= btree_bset_first(new_nodes
[i
]);
1342 struct bset
*n2
= btree_bset_first(new_nodes
[i
- 1]);
1343 struct bkey
*k
, *last
= NULL
;
1349 k
< bset_bkey_last(n2
);
1351 if (__set_blocks(n1
, n1
->keys
+ keys
+
1353 block_bytes(b
->c
)) > blocks
)
1357 keys
+= bkey_u64s(k
);
1361 * Last node we're not getting rid of - we're getting
1362 * rid of the node at r[0]. Have to try and fit all of
1363 * the remaining keys into this node; we can't ensure
1364 * they will always fit due to rounding and variable
1365 * length keys (shouldn't be possible in practice,
1368 if (__set_blocks(n1
, n1
->keys
+ n2
->keys
,
1369 block_bytes(b
->c
)) >
1370 btree_blocks(new_nodes
[i
]))
1371 goto out_nocoalesce
;
1374 /* Take the key of the node we're getting rid of */
1378 BUG_ON(__set_blocks(n1
, n1
->keys
+ keys
, block_bytes(b
->c
)) >
1379 btree_blocks(new_nodes
[i
]));
1382 bkey_copy_key(&new_nodes
[i
]->key
, last
);
1384 memcpy(bset_bkey_last(n1
),
1386 (void *) bset_bkey_idx(n2
, keys
) - (void *) n2
->start
);
1389 r
[i
].keys
= n1
->keys
;
1392 bset_bkey_idx(n2
, keys
),
1393 (void *) bset_bkey_last(n2
) -
1394 (void *) bset_bkey_idx(n2
, keys
));
1398 if (__bch_keylist_realloc(&keylist
,
1399 bkey_u64s(&new_nodes
[i
]->key
)))
1400 goto out_nocoalesce
;
1402 bch_btree_node_write(new_nodes
[i
], &cl
);
1403 bch_keylist_add(&keylist
, &new_nodes
[i
]->key
);
1406 for (i
= 0; i
< nodes
; i
++)
1407 mutex_unlock(&new_nodes
[i
]->write_lock
);
1411 /* We emptied out this node */
1412 BUG_ON(btree_bset_first(new_nodes
[0])->keys
);
1413 btree_node_free(new_nodes
[0]);
1414 rw_unlock(true, new_nodes
[0]);
1415 new_nodes
[0] = NULL
;
1417 for (i
= 0; i
< nodes
; i
++) {
1418 if (__bch_keylist_realloc(&keylist
, bkey_u64s(&r
[i
].b
->key
)))
1419 goto out_nocoalesce
;
1421 make_btree_freeing_key(r
[i
].b
, keylist
.top
);
1422 bch_keylist_push(&keylist
);
1425 bch_btree_insert_node(b
, op
, &keylist
, NULL
, NULL
);
1426 BUG_ON(!bch_keylist_empty(&keylist
));
1428 for (i
= 0; i
< nodes
; i
++) {
1429 btree_node_free(r
[i
].b
);
1430 rw_unlock(true, r
[i
].b
);
1432 r
[i
].b
= new_nodes
[i
];
1435 memmove(r
, r
+ 1, sizeof(r
[0]) * (nodes
- 1));
1436 r
[nodes
- 1].b
= ERR_PTR(-EINTR
);
1438 trace_bcache_btree_gc_coalesce(nodes
);
1441 bch_keylist_free(&keylist
);
1443 /* Invalidated our iterator */
1448 bch_keylist_free(&keylist
);
1450 while ((k
= bch_keylist_pop(&keylist
)))
1451 if (!bkey_cmp(k
, &ZERO_KEY
))
1452 atomic_dec(&b
->c
->prio_blocked
);
1454 for (i
= 0; i
< nodes
; i
++)
1455 if (!IS_ERR_OR_NULL(new_nodes
[i
])) {
1456 btree_node_free(new_nodes
[i
]);
1457 rw_unlock(true, new_nodes
[i
]);
1462 static int btree_gc_rewrite_node(struct btree
*b
, struct btree_op
*op
,
1463 struct btree
*replace
)
1465 struct keylist keys
;
1468 if (btree_check_reserve(b
, NULL
))
1471 n
= btree_node_alloc_replacement(replace
, NULL
);
1473 /* recheck reserve after allocating replacement node */
1474 if (btree_check_reserve(b
, NULL
)) {
1480 bch_btree_node_write_sync(n
);
1482 bch_keylist_init(&keys
);
1483 bch_keylist_add(&keys
, &n
->key
);
1485 make_btree_freeing_key(replace
, keys
.top
);
1486 bch_keylist_push(&keys
);
1488 bch_btree_insert_node(b
, op
, &keys
, NULL
, NULL
);
1489 BUG_ON(!bch_keylist_empty(&keys
));
1491 btree_node_free(replace
);
1494 /* Invalidated our iterator */
1498 static unsigned btree_gc_count_keys(struct btree
*b
)
1501 struct btree_iter iter
;
1504 for_each_key_filter(&b
->keys
, k
, &iter
, bch_ptr_bad
)
1505 ret
+= bkey_u64s(k
);
1510 static int btree_gc_recurse(struct btree
*b
, struct btree_op
*op
,
1511 struct closure
*writes
, struct gc_stat
*gc
)
1514 bool should_rewrite
;
1516 struct btree_iter iter
;
1517 struct gc_merge_info r
[GC_MERGE_NODES
];
1518 struct gc_merge_info
*i
, *last
= r
+ ARRAY_SIZE(r
) - 1;
1520 bch_btree_iter_init(&b
->keys
, &iter
, &b
->c
->gc_done
);
1522 for (i
= r
; i
< r
+ ARRAY_SIZE(r
); i
++)
1523 i
->b
= ERR_PTR(-EINTR
);
1526 k
= bch_btree_iter_next_filter(&iter
, &b
->keys
, bch_ptr_bad
);
1528 r
->b
= bch_btree_node_get(b
->c
, op
, k
, b
->level
- 1,
1531 ret
= PTR_ERR(r
->b
);
1535 r
->keys
= btree_gc_count_keys(r
->b
);
1537 ret
= btree_gc_coalesce(b
, op
, gc
, r
);
1545 if (!IS_ERR(last
->b
)) {
1546 should_rewrite
= btree_gc_mark_node(last
->b
, gc
);
1547 if (should_rewrite
) {
1548 ret
= btree_gc_rewrite_node(b
, op
, last
->b
);
1553 if (last
->b
->level
) {
1554 ret
= btree_gc_recurse(last
->b
, op
, writes
, gc
);
1559 bkey_copy_key(&b
->c
->gc_done
, &last
->b
->key
);
1562 * Must flush leaf nodes before gc ends, since replace
1563 * operations aren't journalled
1565 mutex_lock(&last
->b
->write_lock
);
1566 if (btree_node_dirty(last
->b
))
1567 bch_btree_node_write(last
->b
, writes
);
1568 mutex_unlock(&last
->b
->write_lock
);
1569 rw_unlock(true, last
->b
);
1572 memmove(r
+ 1, r
, sizeof(r
[0]) * (GC_MERGE_NODES
- 1));
1575 if (need_resched()) {
1581 for (i
= r
; i
< r
+ ARRAY_SIZE(r
); i
++)
1582 if (!IS_ERR_OR_NULL(i
->b
)) {
1583 mutex_lock(&i
->b
->write_lock
);
1584 if (btree_node_dirty(i
->b
))
1585 bch_btree_node_write(i
->b
, writes
);
1586 mutex_unlock(&i
->b
->write_lock
);
1587 rw_unlock(true, i
->b
);
1593 static int bch_btree_gc_root(struct btree
*b
, struct btree_op
*op
,
1594 struct closure
*writes
, struct gc_stat
*gc
)
1596 struct btree
*n
= NULL
;
1598 bool should_rewrite
;
1600 should_rewrite
= btree_gc_mark_node(b
, gc
);
1601 if (should_rewrite
) {
1602 n
= btree_node_alloc_replacement(b
, NULL
);
1604 if (!IS_ERR_OR_NULL(n
)) {
1605 bch_btree_node_write_sync(n
);
1607 bch_btree_set_root(n
);
1615 __bch_btree_mark_key(b
->c
, b
->level
+ 1, &b
->key
);
1618 ret
= btree_gc_recurse(b
, op
, writes
, gc
);
1623 bkey_copy_key(&b
->c
->gc_done
, &b
->key
);
1628 static void btree_gc_start(struct cache_set
*c
)
1634 if (!c
->gc_mark_valid
)
1637 mutex_lock(&c
->bucket_lock
);
1639 c
->gc_mark_valid
= 0;
1640 c
->gc_done
= ZERO_KEY
;
1642 for_each_cache(ca
, c
, i
)
1643 for_each_bucket(b
, ca
) {
1644 b
->last_gc
= b
->gen
;
1645 if (!atomic_read(&b
->pin
)) {
1647 SET_GC_SECTORS_USED(b
, 0);
1651 mutex_unlock(&c
->bucket_lock
);
1654 static size_t bch_btree_gc_finish(struct cache_set
*c
)
1656 size_t available
= 0;
1661 mutex_lock(&c
->bucket_lock
);
1664 c
->gc_mark_valid
= 1;
1667 for (i
= 0; i
< KEY_PTRS(&c
->uuid_bucket
); i
++)
1668 SET_GC_MARK(PTR_BUCKET(c
, &c
->uuid_bucket
, i
),
1671 /* don't reclaim buckets to which writeback keys point */
1673 for (i
= 0; i
< c
->nr_uuids
; i
++) {
1674 struct bcache_device
*d
= c
->devices
[i
];
1675 struct cached_dev
*dc
;
1676 struct keybuf_key
*w
, *n
;
1679 if (!d
|| UUID_FLASH_ONLY(&c
->uuids
[i
]))
1681 dc
= container_of(d
, struct cached_dev
, disk
);
1683 spin_lock(&dc
->writeback_keys
.lock
);
1684 rbtree_postorder_for_each_entry_safe(w
, n
,
1685 &dc
->writeback_keys
.keys
, node
)
1686 for (j
= 0; j
< KEY_PTRS(&w
->key
); j
++)
1687 SET_GC_MARK(PTR_BUCKET(c
, &w
->key
, j
),
1689 spin_unlock(&dc
->writeback_keys
.lock
);
1693 for_each_cache(ca
, c
, i
) {
1696 ca
->invalidate_needs_gc
= 0;
1698 for (i
= ca
->sb
.d
; i
< ca
->sb
.d
+ ca
->sb
.keys
; i
++)
1699 SET_GC_MARK(ca
->buckets
+ *i
, GC_MARK_METADATA
);
1701 for (i
= ca
->prio_buckets
;
1702 i
< ca
->prio_buckets
+ prio_buckets(ca
) * 2; i
++)
1703 SET_GC_MARK(ca
->buckets
+ *i
, GC_MARK_METADATA
);
1705 for_each_bucket(b
, ca
) {
1706 c
->need_gc
= max(c
->need_gc
, bucket_gc_gen(b
));
1708 if (atomic_read(&b
->pin
))
1711 BUG_ON(!GC_MARK(b
) && GC_SECTORS_USED(b
));
1713 if (!GC_MARK(b
) || GC_MARK(b
) == GC_MARK_RECLAIMABLE
)
1718 mutex_unlock(&c
->bucket_lock
);
1722 static void bch_btree_gc(struct cache_set
*c
)
1725 unsigned long available
;
1726 struct gc_stat stats
;
1727 struct closure writes
;
1729 uint64_t start_time
= local_clock();
1731 trace_bcache_gc_start(c
);
1733 memset(&stats
, 0, sizeof(struct gc_stat
));
1734 closure_init_stack(&writes
);
1735 bch_btree_op_init(&op
, SHRT_MAX
);
1740 ret
= btree_root(gc_root
, c
, &op
, &writes
, &stats
);
1741 closure_sync(&writes
);
1744 if (ret
&& ret
!= -EAGAIN
)
1745 pr_warn("gc failed!");
1748 available
= bch_btree_gc_finish(c
);
1749 wake_up_allocators(c
);
1751 bch_time_stats_update(&c
->btree_gc_time
, start_time
);
1753 stats
.key_bytes
*= sizeof(uint64_t);
1755 stats
.in_use
= (c
->nbuckets
- available
) * 100 / c
->nbuckets
;
1756 memcpy(&c
->gc_stats
, &stats
, sizeof(struct gc_stat
));
1758 trace_bcache_gc_end(c
);
static bool gc_should_run(struct cache_set *c)
{
	struct cache *ca;
	unsigned i;

	for_each_cache(ca, c, i)
		if (ca->invalidate_needs_gc)
			return true;

	if (atomic_read(&c->sectors_to_gc) < 0)
		return true;

	return false;
}
1778 static int bch_gc_thread(void *arg
)
1780 struct cache_set
*c
= arg
;
1783 wait_event_interruptible(c
->gc_wait
,
1784 kthread_should_stop() || gc_should_run(c
));
1786 if (kthread_should_stop())
1796 int bch_gc_thread_start(struct cache_set
*c
)
1798 c
->gc_thread
= kthread_run(bch_gc_thread
, c
, "bcache_gc");
1799 if (IS_ERR(c
->gc_thread
))
1800 return PTR_ERR(c
->gc_thread
);
/* Initial partial gc */
1807 static int bch_btree_check_recurse(struct btree
*b
, struct btree_op
*op
)
1810 struct bkey
*k
, *p
= NULL
;
1811 struct btree_iter iter
;
1813 for_each_key_filter(&b
->keys
, k
, &iter
, bch_ptr_invalid
)
1814 bch_initial_mark_key(b
->c
, b
->level
, k
);
1816 bch_initial_mark_key(b
->c
, b
->level
+ 1, &b
->key
);
1819 bch_btree_iter_init(&b
->keys
, &iter
, NULL
);
1822 k
= bch_btree_iter_next_filter(&iter
, &b
->keys
,
1825 btree_node_prefetch(b
, k
);
1828 ret
= btree(check_recurse
, p
, b
, op
);
1831 } while (p
&& !ret
);
1837 int bch_btree_check(struct cache_set
*c
)
1841 bch_btree_op_init(&op
, SHRT_MAX
);
1843 return btree_root(check_recurse
, c
, &op
);
1846 void bch_initial_gc_finish(struct cache_set
*c
)
1852 bch_btree_gc_finish(c
);
1854 mutex_lock(&c
->bucket_lock
);
	/*
	 * We need to put some unused buckets directly on the prio freelist in
	 * order to get the allocator thread started - it needs freed buckets in
	 * order to rewrite the prios and gens, and it needs to rewrite prios
	 * and gens in order to free buckets.
	 *
	 * This is only safe for buckets that have no live data in them, which
	 * there should always be some of.
	 */
1865 for_each_cache(ca
, c
, i
) {
1866 for_each_bucket(b
, ca
) {
1867 if (fifo_full(&ca
->free
[RESERVE_PRIO
]))
1870 if (bch_can_invalidate_bucket(ca
, b
) &&
1872 __bch_invalidate_one_bucket(ca
, b
);
1873 fifo_push(&ca
->free
[RESERVE_PRIO
],
1879 mutex_unlock(&c
->bucket_lock
);
/* Btree insertion */
1884 static bool btree_insert_key(struct btree
*b
, struct bkey
*k
,
1885 struct bkey
*replace_key
)
1889 BUG_ON(bkey_cmp(k
, &b
->key
) > 0);
1891 status
= bch_btree_insert_key(&b
->keys
, k
, replace_key
);
1892 if (status
!= BTREE_INSERT_STATUS_NO_INSERT
) {
1893 bch_check_keys(&b
->keys
, "%u for %s", status
,
1894 replace_key
? "replace" : "insert");
1896 trace_bcache_btree_insert_key(b
, k
, replace_key
!= NULL
,
1903 static size_t insert_u64s_remaining(struct btree
*b
)
1905 long ret
= bch_btree_keys_u64s_remaining(&b
->keys
);
1908 * Might land in the middle of an existing extent and have to split it
1910 if (b
->keys
.ops
->is_extents
)
1911 ret
-= KEY_MAX_U64S
;
1913 return max(ret
, 0L);
1916 static bool bch_btree_insert_keys(struct btree
*b
, struct btree_op
*op
,
1917 struct keylist
*insert_keys
,
1918 struct bkey
*replace_key
)
1921 int oldsize
= bch_count_data(&b
->keys
);
1923 while (!bch_keylist_empty(insert_keys
)) {
1924 struct bkey
*k
= insert_keys
->keys
;
1926 if (bkey_u64s(k
) > insert_u64s_remaining(b
))
1929 if (bkey_cmp(k
, &b
->key
) <= 0) {
1933 ret
|= btree_insert_key(b
, k
, replace_key
);
1934 bch_keylist_pop_front(insert_keys
);
1935 } else if (bkey_cmp(&START_KEY(k
), &b
->key
) < 0) {
1936 BKEY_PADDED(key
) temp
;
1937 bkey_copy(&temp
.key
, insert_keys
->keys
);
1939 bch_cut_back(&b
->key
, &temp
.key
);
1940 bch_cut_front(&b
->key
, insert_keys
->keys
);
1942 ret
|= btree_insert_key(b
, &temp
.key
, replace_key
);
1950 op
->insert_collision
= true;
1952 BUG_ON(!bch_keylist_empty(insert_keys
) && b
->level
);
1954 BUG_ON(bch_count_data(&b
->keys
) < oldsize
);
1958 static int btree_split(struct btree
*b
, struct btree_op
*op
,
1959 struct keylist
*insert_keys
,
1960 struct bkey
*replace_key
)
1963 struct btree
*n1
, *n2
= NULL
, *n3
= NULL
;
1964 uint64_t start_time
= local_clock();
1966 struct keylist parent_keys
;
1968 closure_init_stack(&cl
);
1969 bch_keylist_init(&parent_keys
);
1971 if (btree_check_reserve(b
, op
)) {
1975 WARN(1, "insufficient reserve for split\n");
1978 n1
= btree_node_alloc_replacement(b
, op
);
1982 split
= set_blocks(btree_bset_first(n1
),
1983 block_bytes(n1
->c
)) > (btree_blocks(b
) * 4) / 5;
1988 trace_bcache_btree_node_split(b
, btree_bset_first(n1
)->keys
);
1990 n2
= bch_btree_node_alloc(b
->c
, op
, b
->level
, b
->parent
);
1995 n3
= bch_btree_node_alloc(b
->c
, op
, b
->level
+ 1, NULL
);
2000 mutex_lock(&n1
->write_lock
);
2001 mutex_lock(&n2
->write_lock
);
2003 bch_btree_insert_keys(n1
, op
, insert_keys
, replace_key
);
2006 * Has to be a linear search because we don't have an auxiliary
2010 while (keys
< (btree_bset_first(n1
)->keys
* 3) / 5)
2011 keys
+= bkey_u64s(bset_bkey_idx(btree_bset_first(n1
),
2014 bkey_copy_key(&n1
->key
,
2015 bset_bkey_idx(btree_bset_first(n1
), keys
));
2016 keys
+= bkey_u64s(bset_bkey_idx(btree_bset_first(n1
), keys
));
2018 btree_bset_first(n2
)->keys
= btree_bset_first(n1
)->keys
- keys
;
2019 btree_bset_first(n1
)->keys
= keys
;
2021 memcpy(btree_bset_first(n2
)->start
,
2022 bset_bkey_last(btree_bset_first(n1
)),
2023 btree_bset_first(n2
)->keys
* sizeof(uint64_t));
2025 bkey_copy_key(&n2
->key
, &b
->key
);
2027 bch_keylist_add(&parent_keys
, &n2
->key
);
2028 bch_btree_node_write(n2
, &cl
);
2029 mutex_unlock(&n2
->write_lock
);
2030 rw_unlock(true, n2
);
2032 trace_bcache_btree_node_compact(b
, btree_bset_first(n1
)->keys
);
2034 mutex_lock(&n1
->write_lock
);
2035 bch_btree_insert_keys(n1
, op
, insert_keys
, replace_key
);
2038 bch_keylist_add(&parent_keys
, &n1
->key
);
2039 bch_btree_node_write(n1
, &cl
);
2040 mutex_unlock(&n1
->write_lock
);
2043 /* Depth increases, make a new root */
2044 mutex_lock(&n3
->write_lock
);
2045 bkey_copy_key(&n3
->key
, &MAX_KEY
);
2046 bch_btree_insert_keys(n3
, op
, &parent_keys
, NULL
);
2047 bch_btree_node_write(n3
, &cl
);
2048 mutex_unlock(&n3
->write_lock
);
2051 bch_btree_set_root(n3
);
2052 rw_unlock(true, n3
);
2053 } else if (!b
->parent
) {
2054 /* Root filled up but didn't need to be split */
2056 bch_btree_set_root(n1
);
2058 /* Split a non root node */
2060 make_btree_freeing_key(b
, parent_keys
.top
);
2061 bch_keylist_push(&parent_keys
);
2063 bch_btree_insert_node(b
->parent
, op
, &parent_keys
, NULL
, NULL
);
2064 BUG_ON(!bch_keylist_empty(&parent_keys
));
2068 rw_unlock(true, n1
);
2070 bch_time_stats_update(&b
->c
->btree_split_time
, start_time
);
2074 bkey_put(b
->c
, &n2
->key
);
2075 btree_node_free(n2
);
2076 rw_unlock(true, n2
);
2078 bkey_put(b
->c
, &n1
->key
);
2079 btree_node_free(n1
);
2080 rw_unlock(true, n1
);
2082 WARN(1, "bcache: btree split failed (level %u)", b
->level
);
2084 if (n3
== ERR_PTR(-EAGAIN
) ||
2085 n2
== ERR_PTR(-EAGAIN
) ||
2086 n1
== ERR_PTR(-EAGAIN
))
2092 static int bch_btree_insert_node(struct btree
*b
, struct btree_op
*op
,
2093 struct keylist
*insert_keys
,
2094 atomic_t
*journal_ref
,
2095 struct bkey
*replace_key
)
2099 BUG_ON(b
->level
&& replace_key
);
2101 closure_init_stack(&cl
);
2103 mutex_lock(&b
->write_lock
);
2105 if (write_block(b
) != btree_bset_last(b
) &&
2106 b
->keys
.last_set_unwritten
)
2107 bch_btree_init_next(b
); /* just wrote a set */
2109 if (bch_keylist_nkeys(insert_keys
) > insert_u64s_remaining(b
)) {
2110 mutex_unlock(&b
->write_lock
);
2114 BUG_ON(write_block(b
) != btree_bset_last(b
));
2116 if (bch_btree_insert_keys(b
, op
, insert_keys
, replace_key
)) {
2118 bch_btree_leaf_dirty(b
, journal_ref
);
2120 bch_btree_node_write(b
, &cl
);
2123 mutex_unlock(&b
->write_lock
);
2125 /* wait for btree node write if necessary, after unlock */
2130 if (current
->bio_list
) {
2131 op
->lock
= b
->c
->root
->level
+ 1;
2133 } else if (op
->lock
<= b
->c
->root
->level
) {
2134 op
->lock
= b
->c
->root
->level
+ 1;
2137 /* Invalidated all iterators */
2138 int ret
= btree_split(b
, op
, insert_keys
, replace_key
);
2140 if (bch_keylist_empty(insert_keys
))
2148 int bch_btree_insert_check_key(struct btree
*b
, struct btree_op
*op
,
2149 struct bkey
*check_key
)
2152 uint64_t btree_ptr
= b
->key
.ptr
[0];
2153 unsigned long seq
= b
->seq
;
2154 struct keylist insert
;
2155 bool upgrade
= op
->lock
== -1;
2157 bch_keylist_init(&insert
);
2160 rw_unlock(false, b
);
2161 rw_lock(true, b
, b
->level
);
2163 if (b
->key
.ptr
[0] != btree_ptr
||
2164 b
->seq
!= seq
+ 1) {
2165 op
->lock
= b
->level
;
2170 SET_KEY_PTRS(check_key
, 1);
2171 get_random_bytes(&check_key
->ptr
[0], sizeof(uint64_t));
2173 SET_PTR_DEV(check_key
, 0, PTR_CHECK_DEV
);
2175 bch_keylist_add(&insert
, check_key
);
2177 ret
= bch_btree_insert_node(b
, op
, &insert
, NULL
, NULL
);
2179 BUG_ON(!ret
&& !bch_keylist_empty(&insert
));
2182 downgrade_write(&b
->lock
);
2186 struct btree_insert_op
{
2188 struct keylist
*keys
;
2189 atomic_t
*journal_ref
;
2190 struct bkey
*replace_key
;
2193 static int btree_insert_fn(struct btree_op
*b_op
, struct btree
*b
)
2195 struct btree_insert_op
*op
= container_of(b_op
,
2196 struct btree_insert_op
, op
);
2198 int ret
= bch_btree_insert_node(b
, &op
->op
, op
->keys
,
2199 op
->journal_ref
, op
->replace_key
);
2200 if (ret
&& !bch_keylist_empty(op
->keys
))
2206 int bch_btree_insert(struct cache_set
*c
, struct keylist
*keys
,
2207 atomic_t
*journal_ref
, struct bkey
*replace_key
)
2209 struct btree_insert_op op
;
2212 BUG_ON(current
->bio_list
);
2213 BUG_ON(bch_keylist_empty(keys
));
2215 bch_btree_op_init(&op
.op
, 0);
2217 op
.journal_ref
= journal_ref
;
2218 op
.replace_key
= replace_key
;
2220 while (!ret
&& !bch_keylist_empty(keys
)) {
2222 ret
= bch_btree_map_leaf_nodes(&op
.op
, c
,
2223 &START_KEY(keys
->keys
),
2230 pr_err("error %i", ret
);
2232 while ((k
= bch_keylist_pop(keys
)))
2234 } else if (op
.op
.insert_collision
)
2240 void bch_btree_set_root(struct btree
*b
)
2245 closure_init_stack(&cl
);
2247 trace_bcache_btree_set_root(b
);
2249 BUG_ON(!b
->written
);
2251 for (i
= 0; i
< KEY_PTRS(&b
->key
); i
++)
2252 BUG_ON(PTR_BUCKET(b
->c
, &b
->key
, i
)->prio
!= BTREE_PRIO
);
2254 mutex_lock(&b
->c
->bucket_lock
);
2255 list_del_init(&b
->list
);
2256 mutex_unlock(&b
->c
->bucket_lock
);
2260 bch_journal_meta(b
->c
, &cl
);
/* Map across nodes or keys */
2266 static int bch_btree_map_nodes_recurse(struct btree
*b
, struct btree_op
*op
,
2268 btree_map_nodes_fn
*fn
, int flags
)
2270 int ret
= MAP_CONTINUE
;
2274 struct btree_iter iter
;
2276 bch_btree_iter_init(&b
->keys
, &iter
, from
);
2278 while ((k
= bch_btree_iter_next_filter(&iter
, &b
->keys
,
2280 ret
= btree(map_nodes_recurse
, k
, b
,
2281 op
, from
, fn
, flags
);
2284 if (ret
!= MAP_CONTINUE
)
2289 if (!b
->level
|| flags
== MAP_ALL_NODES
)
2295 int __bch_btree_map_nodes(struct btree_op
*op
, struct cache_set
*c
,
2296 struct bkey
*from
, btree_map_nodes_fn
*fn
, int flags
)
2298 return btree_root(map_nodes_recurse
, c
, op
, from
, fn
, flags
);
2301 static int bch_btree_map_keys_recurse(struct btree
*b
, struct btree_op
*op
,
2302 struct bkey
*from
, btree_map_keys_fn
*fn
,
2305 int ret
= MAP_CONTINUE
;
2307 struct btree_iter iter
;
2309 bch_btree_iter_init(&b
->keys
, &iter
, from
);
2311 while ((k
= bch_btree_iter_next_filter(&iter
, &b
->keys
, bch_ptr_bad
))) {
2314 : btree(map_keys_recurse
, k
, b
, op
, from
, fn
, flags
);
2317 if (ret
!= MAP_CONTINUE
)
2321 if (!b
->level
&& (flags
& MAP_END_KEY
))
2322 ret
= fn(op
, b
, &KEY(KEY_INODE(&b
->key
),
2323 KEY_OFFSET(&b
->key
), 0));
2328 int bch_btree_map_keys(struct btree_op
*op
, struct cache_set
*c
,
2329 struct bkey
*from
, btree_map_keys_fn
*fn
, int flags
)
2331 return btree_root(map_keys_recurse
, c
, op
, from
, fn
, flags
);
2336 static inline int keybuf_cmp(struct keybuf_key
*l
, struct keybuf_key
*r
)
2338 /* Overlapping keys compare equal */
2339 if (bkey_cmp(&l
->key
, &START_KEY(&r
->key
)) <= 0)
2341 if (bkey_cmp(&START_KEY(&l
->key
), &r
->key
) >= 0)
2346 static inline int keybuf_nonoverlapping_cmp(struct keybuf_key
*l
,
2347 struct keybuf_key
*r
)
2349 return clamp_t(int64_t, bkey_cmp(&l
->key
, &r
->key
), -1, 1);
2357 keybuf_pred_fn
*pred
;
2360 static int refill_keybuf_fn(struct btree_op
*op
, struct btree
*b
,
2363 struct refill
*refill
= container_of(op
, struct refill
, op
);
2364 struct keybuf
*buf
= refill
->buf
;
2365 int ret
= MAP_CONTINUE
;
2367 if (bkey_cmp(k
, refill
->end
) >= 0) {
2372 if (!KEY_SIZE(k
)) /* end key */
2375 if (refill
->pred(buf
, k
)) {
2376 struct keybuf_key
*w
;
2378 spin_lock(&buf
->lock
);
2380 w
= array_alloc(&buf
->freelist
);
2382 spin_unlock(&buf
->lock
);
2387 bkey_copy(&w
->key
, k
);
2389 if (RB_INSERT(&buf
->keys
, w
, node
, keybuf_cmp
))
2390 array_free(&buf
->freelist
, w
);
2394 if (array_freelist_empty(&buf
->freelist
))
2397 spin_unlock(&buf
->lock
);
2400 buf
->last_scanned
= *k
;
2404 void bch_refill_keybuf(struct cache_set
*c
, struct keybuf
*buf
,
2405 struct bkey
*end
, keybuf_pred_fn
*pred
)
2407 struct bkey start
= buf
->last_scanned
;
2408 struct refill refill
;
2412 bch_btree_op_init(&refill
.op
, -1);
2413 refill
.nr_found
= 0;
2418 bch_btree_map_keys(&refill
.op
, c
, &buf
->last_scanned
,
2419 refill_keybuf_fn
, MAP_END_KEY
);
2421 trace_bcache_keyscan(refill
.nr_found
,
2422 KEY_INODE(&start
), KEY_OFFSET(&start
),
2423 KEY_INODE(&buf
->last_scanned
),
2424 KEY_OFFSET(&buf
->last_scanned
));
2426 spin_lock(&buf
->lock
);
2428 if (!RB_EMPTY_ROOT(&buf
->keys
)) {
2429 struct keybuf_key
*w
;
2430 w
= RB_FIRST(&buf
->keys
, struct keybuf_key
, node
);
2431 buf
->start
= START_KEY(&w
->key
);
2433 w
= RB_LAST(&buf
->keys
, struct keybuf_key
, node
);
2436 buf
->start
= MAX_KEY
;
2440 spin_unlock(&buf
->lock
);
2443 static void __bch_keybuf_del(struct keybuf
*buf
, struct keybuf_key
*w
)
2445 rb_erase(&w
->node
, &buf
->keys
);
2446 array_free(&buf
->freelist
, w
);
2449 void bch_keybuf_del(struct keybuf
*buf
, struct keybuf_key
*w
)
2451 spin_lock(&buf
->lock
);
2452 __bch_keybuf_del(buf
, w
);
2453 spin_unlock(&buf
->lock
);
2456 bool bch_keybuf_check_overlapping(struct keybuf
*buf
, struct bkey
*start
,
2460 struct keybuf_key
*p
, *w
, s
;
2463 if (bkey_cmp(end
, &buf
->start
) <= 0 ||
2464 bkey_cmp(start
, &buf
->end
) >= 0)
2467 spin_lock(&buf
->lock
);
2468 w
= RB_GREATER(&buf
->keys
, s
, node
, keybuf_nonoverlapping_cmp
);
2470 while (w
&& bkey_cmp(&START_KEY(&w
->key
), end
) < 0) {
2472 w
= RB_NEXT(w
, node
);
2477 __bch_keybuf_del(buf
, p
);
2480 spin_unlock(&buf
->lock
);
2484 struct keybuf_key
*bch_keybuf_next(struct keybuf
*buf
)
2486 struct keybuf_key
*w
;
2487 spin_lock(&buf
->lock
);
2489 w
= RB_FIRST(&buf
->keys
, struct keybuf_key
, node
);
2491 while (w
&& w
->private)
2492 w
= RB_NEXT(w
, node
);
2495 w
->private = ERR_PTR(-EINTR
);
2497 spin_unlock(&buf
->lock
);
2501 struct keybuf_key
*bch_keybuf_next_rescan(struct cache_set
*c
,
2504 keybuf_pred_fn
*pred
)
2506 struct keybuf_key
*ret
;
2509 ret
= bch_keybuf_next(buf
);
2513 if (bkey_cmp(&buf
->last_scanned
, end
) >= 0) {
2514 pr_debug("scan finished");
2518 bch_refill_keybuf(c
, buf
, end
, pred
);
2524 void bch_keybuf_init(struct keybuf
*buf
)
2526 buf
->last_scanned
= MAX_KEY
;
2527 buf
->keys
= RB_ROOT
;
2529 spin_lock_init(&buf
->lock
);
2530 array_allocator_init(&buf
->freelist
);