// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
 *
 * Uses a block device as cache for other block devices; optimized for SSDs.
 * All allocation is done in buckets, which should match the erase block size
 * of the device.
 *
 * Buckets containing cached data are kept on a heap sorted by priority;
 * bucket priority is increased on cache hit, and periodically all the buckets
 * on the heap have their priority scaled down. This currently is just used as
 * an LRU but in the future should allow for more intelligent heuristics.
 *
 * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
 * counter. Garbage collection is used to remove stale pointers.
 *
 * Indexing is done via a btree; nodes are not necessarily fully sorted, rather
 * as keys are inserted we only sort the pages that have not yet been written.
 * When garbage collection is run, we resort the entire node.
 *
 * All configuration is done via sysfs; see Documentation/admin-guide/bcache.rst.
 */
#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"
#include "writeback.h"
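
/*
 * Rough sketch of the key format handled below (field names are the accessor
 * macros used throughout this file): an extent identifies a range of sectors
 * in a backing device by KEY_INODE()/KEY_OFFSET(), where KEY_OFFSET() is the
 * end of the range and KEY_SIZE() its length, so KEY_START() is
 * KEY_OFFSET() - KEY_SIZE(). Each of the KEY_PTRS() pointers names a cache
 * device, an offset into it and a generation number; ptr_stale() compares that
 * generation against the bucket's current gen, which is how freed buckets
 * invalidate old pointers.
 */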
static void sort_key_next(struct btree_iter *iter,
			  struct btree_iter_set *i)
{
	i->k = bkey_next(i->k);

	if (i->k == i->end)
		*i = iter->heap.data[--iter->heap.nr];
}
static bool new_bch_key_sort_cmp(const void *l, const void *r, void *args)
{
	struct btree_iter_set *_l = (struct btree_iter_set *)l;
	struct btree_iter_set *_r = (struct btree_iter_set *)r;
	int64_t c = bkey_cmp(_l->k, _r->k);

	return !(c ? c > 0 : _l->k < _r->k);
}
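
/*
 * Illustrative note on the comparator above: as a min_heap "less" callback it
 * returns true when _l should pop before _r, so keys come out of the iterator
 * in ascending bkey order. When two keys compare equal, the tie is broken on
 * the raw bkey pointer, preferring the key at the higher address, i.e. the one
 * from the later (newer) bset in the node.
 */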
static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
{
	unsigned int i;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(c, k, i)) {
			struct cache *ca = c->cache;
			size_t bucket = PTR_BUCKET_NR(c, k, i);
			size_t r = bucket_remainder(c, PTR_OFFSET(k, i));

			if (KEY_SIZE(k) + r > c->cache->sb.bucket_size ||
			    bucket <  ca->sb.first_bucket ||
			    bucket >= ca->sb.nbuckets)
				return true;
		}

	return false;
}
/* Common among btree and extent ptrs */
static const char *bch_ptr_status(struct cache_set *c, const struct bkey *k)
{
	unsigned int i;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(c, k, i)) {
			struct cache *ca = c->cache;
			size_t bucket = PTR_BUCKET_NR(c, k, i);
			size_t r = bucket_remainder(c, PTR_OFFSET(k, i));

			if (KEY_SIZE(k) + r > c->cache->sb.bucket_size)
				return "bad, length too big";
			if (bucket <  ca->sb.first_bucket)
				return "bad, short offset";
			if (bucket >= ca->sb.nbuckets)
				return "bad, offset past end of device";
			if (ptr_stale(c, k, i))
				return "stale";
		}

	if (!bkey_cmp(k, &ZERO_KEY))
		return "bad, null key";
	if (!KEY_PTRS(k))
		return "bad, no pointers";
	if (!KEY_SIZE(k))
		return "zeroed key";
	return "";
}
void bch_extent_to_text(char *buf, size_t size, const struct bkey *k)
{
	unsigned int i = 0;
	char *out = buf, *end = buf + size;

#define p(...)	(out += scnprintf(out, end - out, __VA_ARGS__))

	p("%llu:%llu len %llu -> [", KEY_INODE(k), KEY_START(k), KEY_SIZE(k));

	for (i = 0; i < KEY_PTRS(k); i++) {
		if (i)
			p(", ");

		if (PTR_DEV(k, i) == PTR_CHECK_DEV)
			p("check dev");
		else
			p("%llu:%llu gen %llu", PTR_DEV(k, i),
			  PTR_OFFSET(k, i), PTR_GEN(k, i));
	}

	p("]");

	if (KEY_DIRTY(k))
		p(" dirty");
	if (KEY_CSUM(k))
		p(" cs%llu %llx", KEY_CSUM(k), k->ptr[1]);
#undef p
}
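
/*
 * Example of the text form produced above (values made up for illustration):
 * "1:4096 len 128 -> [0:20480 gen 3] dirty".
 */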
static void bch_bkey_dump(struct btree_keys *keys, const struct bkey *k)
{
	struct btree *b = container_of(keys, struct btree, keys);
	unsigned int j;
	char buf[80];

	bch_extent_to_text(buf, sizeof(buf), k);
	pr_cont(" %s", buf);

	for (j = 0; j < KEY_PTRS(k); j++) {
		size_t n = PTR_BUCKET_NR(b->c, k, j);

		pr_cont(" bucket %zu", n);
		if (n >= b->c->cache->sb.first_bucket && n < b->c->cache->sb.nbuckets)
			pr_cont(" prio %i",
				PTR_BUCKET(b->c, k, j)->prio);
	}

	pr_cont(" %s\n", bch_ptr_status(b->c, k));
}
/* Btree ptrs */

bool __bch_btree_ptr_invalid(struct cache_set *c, const struct bkey *k)
{
	char buf[80];

	if (!KEY_PTRS(k) || !KEY_SIZE(k) || KEY_DIRTY(k))
		goto bad;

	if (__ptr_invalid(c, k))
		goto bad;

	return false;
bad:
	bch_extent_to_text(buf, sizeof(buf), k);
	cache_bug(c, "spotted btree ptr %s: %s", buf, bch_ptr_status(c, k));
	return true;
}
static bool bch_btree_ptr_invalid(struct btree_keys *bk, const struct bkey *k)
{
	struct btree *b = container_of(bk, struct btree, keys);

	return __bch_btree_ptr_invalid(b->c, k);
}
static bool btree_ptr_bad_expensive(struct btree *b, const struct bkey *k)
{
	unsigned int i;
	char buf[80];
	struct bucket *g;

	if (mutex_trylock(&b->c->bucket_lock)) {
		for (i = 0; i < KEY_PTRS(k); i++)
			if (ptr_available(b->c, k, i)) {
				g = PTR_BUCKET(b->c, k, i);

				if (KEY_DIRTY(k) ||
				    g->prio != BTREE_PRIO ||
				    (b->c->gc_mark_valid &&
				     GC_MARK(g) != GC_MARK_METADATA))
					goto err;
			}

		mutex_unlock(&b->c->bucket_lock);
	}

	return false;
err:
	mutex_unlock(&b->c->bucket_lock);
	bch_extent_to_text(buf, sizeof(buf), k);
	btree_bug(b,
"inconsistent btree pointer %s: bucket %zi pin %i prio %i gen %i last_gc %i mark %llu",
		  buf, PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin),
		  g->prio, g->gen, g->last_gc, GC_MARK(g));
	return true;
}
static bool bch_btree_ptr_bad(struct btree_keys *bk, const struct bkey *k)
{
	struct btree *b = container_of(bk, struct btree, keys);
	unsigned int i;

	if (!bkey_cmp(k, &ZERO_KEY) ||
	    !KEY_PTRS(k) ||
	    bch_ptr_invalid(bk, k))
		return true;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (!ptr_available(b->c, k, i) ||
		    ptr_stale(b->c, k, i))
			return true;

	if (expensive_debug_checks(b->c) &&
	    btree_ptr_bad_expensive(b, k))
		return true;

	return false;
}
static bool bch_btree_ptr_insert_fixup(struct btree_keys *bk,
				       struct bkey *insert,
				       struct btree_iter *iter,
				       struct bkey *replace_key)
{
	struct btree *b = container_of(bk, struct btree, keys);

	if (!KEY_OFFSET(insert))
		btree_current_write(b)->prio_blocked++;

	return false;
}
const struct btree_keys_ops bch_btree_keys_ops = {
	.sort_cmp	= new_bch_key_sort_cmp,
	.insert_fixup	= bch_btree_ptr_insert_fixup,
	.key_invalid	= bch_btree_ptr_invalid,
	.key_bad	= bch_btree_ptr_bad,
	.key_to_text	= bch_extent_to_text,
	.key_dump	= bch_bkey_dump,
};
/* Extents */

/*
 * Returns true if l > r - unless l == r, in which case returns true if l is
 * older than r.
 *
 * Necessary for btree_sort_fixup() - if there are multiple keys that compare
 * equal in different sets, we have to process them newest to oldest.
 */
static bool new_bch_extent_sort_cmp(const void *l, const void *r,
				    void __always_unused *args)
{
	struct btree_iter_set *_l = (struct btree_iter_set *)l;
	struct btree_iter_set *_r = (struct btree_iter_set *)r;
	int64_t c = bkey_cmp(&START_KEY(_l->k), &START_KEY(_r->k));

	return !(c ? c > 0 : _l->k < _r->k);
}
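
/*
 * Worked example for the ordering above (illustrative numbers): extents
 * covering sectors 0-128 and 64-192 of the same inode compare by their start,
 * so the one starting at 0 pops first. If two sets contain an extent with the
 * same start, the copy from the newer set pops first, so the newer version
 * takes precedence during btree_sort_fixup().
 */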
static struct bkey *bch_extent_sort_fixup(struct btree_iter *iter,
					  struct bkey *tmp)
{
	const struct min_heap_callbacks callbacks = {
		.less = new_bch_extent_sort_cmp,
		.swp = NULL,
	};
	while (iter->heap.nr > 1) {
		struct btree_iter_set *top = iter->heap.data, *i = top + 1;

		if (iter->heap.nr > 2 &&
		    !new_bch_extent_sort_cmp(&i[0], &i[1], NULL))
			i++;

		if (bkey_cmp(top->k, &START_KEY(i->k)) <= 0)
			break;

		if (!KEY_SIZE(i->k)) {
			sort_key_next(iter, i);
			min_heap_sift_down(&iter->heap, i - top, &callbacks, NULL);
			continue;
		}

		if (top->k > i->k) {
			if (bkey_cmp(top->k, i->k) >= 0)
				sort_key_next(iter, i);
			else
				bch_cut_front(top->k, i->k);

			min_heap_sift_down(&iter->heap, i - top, &callbacks, NULL);
		} else {
			/* can't happen because of comparison func */
			BUG_ON(!bkey_cmp(&START_KEY(top->k), &START_KEY(i->k)));

			if (bkey_cmp(i->k, top->k) < 0) {
				bkey_copy(tmp, top->k);

				bch_cut_back(&START_KEY(i->k), tmp);
				bch_cut_front(i->k, top->k);
				min_heap_sift_down(&iter->heap, 0, &callbacks, NULL);

				return tmp;
			} else {
				bch_cut_back(&START_KEY(i->k), top->k);
			}
		}
	}

	return NULL;
}
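
/*
 * Sketch of what the fixup above resolves (illustrative extents): if the newer
 * set holds 1:0-1:100 and an older set holds 1:50-1:150, the older key has its
 * front cut so only 1:100-1:150 remains visible, because wherever the two
 * overlap the newer key must win.
 */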
static void bch_subtract_dirty(struct bkey *k,
			       struct cache_set *c,
			       uint64_t offset,
			       int sectors)
{
	if (KEY_DIRTY(k))
		bcache_dev_sectors_dirty_add(c, KEY_INODE(k),
					     offset, -sectors);
}
static bool bch_extent_insert_fixup(struct btree_keys *b,
				    struct bkey *insert,
				    struct btree_iter *iter,
				    struct bkey *replace_key)
{
	struct cache_set *c = container_of(b, struct btree, keys)->c;

	uint64_t old_offset;
	unsigned int old_size, sectors_found = 0;

	BUG_ON(!KEY_OFFSET(insert));
	BUG_ON(!KEY_SIZE(insert));

	while (1) {
		struct bkey *k = bch_btree_iter_next(iter);

		if (!k)
			break;

		if (bkey_cmp(&START_KEY(k), insert) >= 0) {
			if (KEY_SIZE(k))
				break;
			else
				continue;
		}

		if (bkey_cmp(k, &START_KEY(insert)) <= 0)
			continue;

		old_offset = KEY_START(k);
		old_size = KEY_SIZE(k);

		/*
		 * We might overlap with 0 size extents; we can't skip these
		 * because if they're in the set we're inserting to we have to
		 * adjust them so they don't overlap with the key we're
		 * inserting. But we don't want to check them for replace
		 * operations.
		 */

		if (replace_key && KEY_SIZE(k)) {
			/*
			 * k might have been split since we inserted/found the
			 * key we're replacing
			 */
			unsigned int i;
			uint64_t offset = KEY_START(k) -
				KEY_START(replace_key);

			/* But it must be a subset of the replace key */
			if (KEY_START(k) < KEY_START(replace_key) ||
			    KEY_OFFSET(k) > KEY_OFFSET(replace_key))
				goto check_failed;

			/* We didn't find a key that we were supposed to */
			if (KEY_START(k) > KEY_START(insert) + sectors_found)
				goto check_failed;

			if (!bch_bkey_equal_header(k, replace_key))
				goto check_failed;

			/* skip past gen */
			offset <<= 8;

			BUG_ON(!KEY_PTRS(replace_key));

			for (i = 0; i < KEY_PTRS(replace_key); i++)
				if (k->ptr[i] != replace_key->ptr[i] + offset)
					goto check_failed;

			sectors_found = KEY_OFFSET(k) - KEY_START(insert);
		}

		if (bkey_cmp(insert, k) < 0 &&
		    bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0) {
			/*
			 * We overlapped in the middle of an existing key: that
			 * means we have to split the old key. But we have to do
			 * slightly different things depending on whether the
			 * old key has been written out yet.
			 */

			struct bkey *top;

			bch_subtract_dirty(k, c, KEY_START(insert),
					   KEY_SIZE(insert));

			if (bkey_written(b, k)) {
				/*
				 * We insert a new key to cover the top of the
				 * old key, and the old key is modified in place
				 * to represent the bottom split.
				 *
				 * It's completely arbitrary whether the new key
				 * is the top or the bottom, but it has to match
				 * up with what btree_sort_fixup() does - it
				 * doesn't check for this kind of overlap, it
				 * depends on us inserting a new key for the top
				 * here.
				 */
				top = bch_bset_search(b, bset_tree_last(b),
						      insert);
				bch_bset_insert(b, top, k);
			} else {
				BKEY_PADDED(key) temp;
				bkey_copy(&temp.key, k);
				bch_bset_insert(b, k, &temp.key);
				top = bkey_next(k);
			}

			bch_cut_front(insert, top);
			bch_cut_back(&START_KEY(insert), k);
			bch_bset_fix_invalidated_key(b, k);
			goto out;
		}

		if (bkey_cmp(insert, k) < 0) {
			bch_cut_front(insert, k);
		} else {
			if (bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0)
				old_offset = KEY_START(insert);

			if (bkey_written(b, k) &&
			    bkey_cmp(&START_KEY(insert), &START_KEY(k)) <= 0) {
				/*
				 * Completely overwrote, so we don't have to
				 * invalidate the binary search tree
				 */
				bch_cut_front(k, k);
			} else {
				__bch_cut_back(&START_KEY(insert), k);
				bch_bset_fix_invalidated_key(b, k);
			}
		}

		bch_subtract_dirty(k, c, old_offset, old_size - KEY_SIZE(k));
	}

check_failed:
	if (replace_key) {
		if (!sectors_found) {
			return true;
		} else if (sectors_found < KEY_SIZE(insert)) {
			SET_KEY_OFFSET(insert, KEY_OFFSET(insert) -
				       (KEY_SIZE(insert) - sectors_found));
			SET_KEY_SIZE(insert, sectors_found);
		}
	}
out:
	if (KEY_DIRTY(insert))
		bcache_dev_sectors_dirty_add(c, KEY_INODE(insert),
					     KEY_START(insert),
					     KEY_SIZE(insert));

	return false;
}
bool __bch_extent_invalid(struct cache_set *c, const struct bkey *k)
{
	char buf[80];

	if (!KEY_SIZE(k))
		return true;

	if (KEY_SIZE(k) > KEY_OFFSET(k))
		goto bad;

	if (__ptr_invalid(c, k))
		goto bad;

	return false;
bad:
	bch_extent_to_text(buf, sizeof(buf), k);
	cache_bug(c, "spotted extent %s: %s", buf, bch_ptr_status(c, k));
	return true;
}
static bool bch_extent_invalid(struct btree_keys *bk, const struct bkey *k)
{
	struct btree *b = container_of(bk, struct btree, keys);

	return __bch_extent_invalid(b->c, k);
}
static bool bch_extent_bad_expensive(struct btree *b, const struct bkey *k,
				     unsigned int ptr)
{
	struct bucket *g = PTR_BUCKET(b->c, k, ptr);
	char buf[80];

	if (mutex_trylock(&b->c->bucket_lock)) {
		if (b->c->gc_mark_valid &&
		    (!GC_MARK(g) ||
		     GC_MARK(g) == GC_MARK_METADATA ||
		     (GC_MARK(g) != GC_MARK_DIRTY && KEY_DIRTY(k))))
			goto err;

		if (g->prio == BTREE_PRIO)
			goto err;

		mutex_unlock(&b->c->bucket_lock);
	}

	return false;
err:
	mutex_unlock(&b->c->bucket_lock);
	bch_extent_to_text(buf, sizeof(buf), k);
	btree_bug(b,
"inconsistent extent pointer %s:\nbucket %zu pin %i prio %i gen %i last_gc %i mark %llu",
		  buf, PTR_BUCKET_NR(b->c, k, ptr), atomic_read(&g->pin),
		  g->prio, g->gen, g->last_gc, GC_MARK(g));
	return true;
}
static bool bch_extent_bad(struct btree_keys *bk, const struct bkey *k)
{
	struct btree *b = container_of(bk, struct btree, keys);
	unsigned int i, stale;
	char buf[80];

	if (!KEY_PTRS(k) ||
	    bch_extent_invalid(bk, k))
		return true;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (!ptr_available(b->c, k, i))
			return true;

	for (i = 0; i < KEY_PTRS(k); i++) {
		stale = ptr_stale(b->c, k, i);

		if (stale && KEY_DIRTY(k)) {
			bch_extent_to_text(buf, sizeof(buf), k);
			pr_info("stale dirty pointer, stale %u, key: %s\n",
				stale, buf);
		}

		btree_bug_on(stale > BUCKET_GC_GEN_MAX, b,
			     "key too stale: %i, need_gc %u",
			     stale, b->c->need_gc);

		if (stale)
			return true;

		if (expensive_debug_checks(b->c) &&
		    bch_extent_bad_expensive(b, k, i))
			return true;
	}

	return false;
}
static uint64_t merge_chksums(struct bkey *l, struct bkey *r)
{
	return (l->ptr[KEY_PTRS(l)] + r->ptr[KEY_PTRS(r)]) &
		~((uint64_t)1 << 63);
}
static bool bch_extent_merge(struct btree_keys *bk,
			     struct bkey *l,
			     struct bkey *r)
{
	struct btree *b = container_of(bk, struct btree, keys);
	unsigned int i;

	if (key_merging_disabled(b->c))
		return false;

	for (i = 0; i < KEY_PTRS(l); i++)
		if (l->ptr[i] + MAKE_PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
		    PTR_BUCKET_NR(b->c, l, i) != PTR_BUCKET_NR(b->c, r, i))
			return false;

	/* Keys with no pointers aren't restricted to one bucket and could
	 * overflow KEY_SIZE
	 */
	if (KEY_SIZE(l) + KEY_SIZE(r) > USHRT_MAX) {
		SET_KEY_OFFSET(l, KEY_OFFSET(l) + USHRT_MAX - KEY_SIZE(l));
		SET_KEY_SIZE(l, USHRT_MAX);

		bch_cut_front(l, r);
		return false;
	}

	if (KEY_CSUM(l)) {
		if (KEY_CSUM(r))
			l->ptr[KEY_PTRS(l)] = merge_chksums(l, r);
		else
			SET_KEY_CSUM(l, 0);
	}

	SET_KEY_OFFSET(l, KEY_OFFSET(l) + KEY_SIZE(r));
	SET_KEY_SIZE(l, KEY_SIZE(l) + KEY_SIZE(r));

	return true;
}
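
/*
 * Merging, by example (illustrative numbers): if l covers sectors 0-64 and r
 * covers 64-128 of the same inode, and r's pointers sit exactly KEY_SIZE(l)
 * sectors after l's within the same bucket, the two become a single key
 * covering 0-128. When the combined size would exceed USHRT_MAX, l is grown
 * to the maximum and r keeps the remainder instead of merging.
 */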
const struct btree_keys_ops bch_extent_keys_ops = {
	.sort_cmp	= new_bch_extent_sort_cmp,
	.sort_fixup	= bch_extent_sort_fixup,
	.insert_fixup	= bch_extent_insert_fixup,
	.key_invalid	= bch_extent_invalid,
	.key_bad	= bch_extent_bad,
	.key_merge	= bch_extent_merge,
	.key_to_text	= bch_extent_to_text,
	.key_dump	= bch_bkey_dump,
};