// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
 *
 * Uses a block device as cache for other block devices; optimized for SSDs.
 * All allocation is done in buckets, which should match the erase block size
 * of the device.
 *
 * Buckets containing cached data are kept on a heap sorted by priority;
 * bucket priority is increased on cache hit, and periodically all the buckets
 * on the heap have their priority scaled down. This currently is just used as
 * an LRU but in the future should allow for more intelligent heuristics.
 *
 * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
 * counter. Garbage collection is used to remove stale pointers.
 *
 * Indexing is done via a btree; nodes are not necessarily fully sorted, rather
 * as keys are inserted we only sort the pages that have not yet been written.
 * When garbage collection is run, we resort the entire node.
 *
 * All configuration is done via sysfs; see Documentation/bcache.txt.
 */
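/*
 * A sketch of the generation scheme described above (a gloss, not part of
 * the original header): each pointer records the bucket's 8 bit gen at
 * allocation time. Invalidating a bucket increments bucket->gen, so any
 * pointer whose PTR_GEN() lags behind the bucket's gen is stale, and
 * garbage collection can drop it without per-pointer bookkeeping.
 */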
#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"
#include "writeback.h"
static void sort_key_next(struct btree_iter *iter,
			  struct btree_iter_set *i)
{
	i->k = bkey_next(i->k);

	if (i->k == i->end)
		*i = iter->data[--iter->used];
}
static bool bch_key_sort_cmp(struct btree_iter_set l,
			     struct btree_iter_set r)
{
	int64_t c = bkey_cmp(l.k, r.k);

	return c ? c > 0 : l.k < r.k;
}
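/*
 * A reading of the tie-break above (inferred, not stated in the source):
 * when keys from different sets compare equal, falling back to comparing
 * the key pointers themselves gives the duplicates a deterministic order
 * in the merge, since each set occupies a distinct address range within
 * the node.
 */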
static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
{
	unsigned i;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(c, k, i)) {
			struct cache *ca = PTR_CACHE(c, k, i);
			size_t bucket = PTR_BUCKET_NR(c, k, i);
			size_t r = bucket_remainder(c, PTR_OFFSET(k, i));

			if (KEY_SIZE(k) + r > c->sb.bucket_size ||
			    bucket <  ca->sb.first_bucket ||
			    bucket >= ca->sb.nbuckets)
				return true;
		}

	return false;
}
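/*
 * Put differently: a pointer is invalid if the extent would spill past the
 * end of its bucket (KEY_SIZE plus the offset within the bucket exceeds
 * bucket_size), or if the bucket index falls outside the device's valid
 * range [first_bucket, nbuckets).
 */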
/* Common among btree and extent ptrs */
static const char *bch_ptr_status(struct cache_set *c, const struct bkey *k)
{
	unsigned i;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(c, k, i)) {
			struct cache *ca = PTR_CACHE(c, k, i);
			size_t bucket = PTR_BUCKET_NR(c, k, i);
			size_t r = bucket_remainder(c, PTR_OFFSET(k, i));

			if (KEY_SIZE(k) + r > c->sb.bucket_size)
				return "bad, length too big";
			if (bucket <  ca->sb.first_bucket)
				return "bad, short offset";
			if (bucket >= ca->sb.nbuckets)
				return "bad, offset past end of device";
			if (ptr_stale(c, k, i))
				return "stale";
		}

	if (!bkey_cmp(k, &ZERO_KEY))
		return "bad, null key";
	if (!KEY_PTRS(k))
		return "bad, no pointers";
	if (!KEY_SIZE(k))
		return "zeroed key";

	return "";
}
void bch_extent_to_text(char *buf, size_t size, const struct bkey *k)
{
	unsigned i = 0;
	char *out = buf, *end = buf + size;

#define p(...)	(out += scnprintf(out, end - out, __VA_ARGS__))

	p("%llu:%llu len %llu -> [", KEY_INODE(k), KEY_START(k), KEY_SIZE(k));

	for (i = 0; i < KEY_PTRS(k); i++) {
		if (i)
			p(", ");

		if (PTR_DEV(k, i) == PTR_CHECK_DEV)
			p("check dev");
		else
			p("%llu:%llu gen %llu", PTR_DEV(k, i),
			  PTR_OFFSET(k, i), PTR_GEN(k, i));
	}

	p("]");

	if (KEY_DIRTY(k))
		p(" dirty");
	if (KEY_CSUM(k))
		p(" cs%llu %llx", KEY_CSUM(k), k->ptr[1]);
#undef p
}
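/*
 * For example, a dirty single-pointer extent renders as (hypothetical
 * values, derived from the format strings above):
 *
 *	5:1024 len 128 -> [0:8192 gen 3] dirty
 */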
static void bch_bkey_dump(struct btree_keys *keys, const struct bkey *k)
{
	struct btree *b = container_of(keys, struct btree, keys);
	unsigned j;
	char buf[80];

	bch_extent_to_text(buf, sizeof(buf), k);
	printk(" %s", buf);

	for (j = 0; j < KEY_PTRS(k); j++) {
		size_t n = PTR_BUCKET_NR(b->c, k, j);
		printk(" bucket %zu", n);

		if (n >= b->c->sb.first_bucket && n < b->c->sb.nbuckets)
			printk(" prio %i",
			       PTR_BUCKET(b->c, k, j)->prio);
	}

	printk(" %s\n", bch_ptr_status(b->c, k));
}
/* Btree ptrs */

bool __bch_btree_ptr_invalid(struct cache_set *c, const struct bkey *k)
{
	char buf[80];

	if (!KEY_PTRS(k) || !KEY_SIZE(k) || KEY_DIRTY(k))
		goto bad;

	if (__ptr_invalid(c, k))
		goto bad;

	return false;
bad:
	bch_extent_to_text(buf, sizeof(buf), k);
	cache_bug(c, "spotted btree ptr %s: %s", buf, bch_ptr_status(c, k));
	return true;
}
static bool bch_btree_ptr_invalid(struct btree_keys *bk, const struct bkey *k)
{
	struct btree *b = container_of(bk, struct btree, keys);

	return __bch_btree_ptr_invalid(b->c, k);
}
static bool btree_ptr_bad_expensive(struct btree *b, const struct bkey *k)
{
	unsigned i;
	char buf[80];
	struct bucket *g;

	if (mutex_trylock(&b->c->bucket_lock)) {
		for (i = 0; i < KEY_PTRS(k); i++)
			if (ptr_available(b->c, k, i)) {
				g = PTR_BUCKET(b->c, k, i);

				if (KEY_DIRTY(k) ||
				    g->prio != BTREE_PRIO ||
				    (b->c->gc_mark_valid &&
				     GC_MARK(g) != GC_MARK_METADATA))
					goto err;
			}

		mutex_unlock(&b->c->bucket_lock);
	}

	return false;
err:
	mutex_unlock(&b->c->bucket_lock);
	bch_extent_to_text(buf, sizeof(buf), k);
	btree_bug(b,
"inconsistent btree pointer %s: bucket %zi pin %i prio %i gen %i last_gc %i mark %llu",
		  buf, PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin),
		  g->prio, g->gen, g->last_gc, GC_MARK(g));
	return true;
}
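/*
 * Note on the trylock above (rationale inferred, not stated here): this is
 * only a debugging aid, so if bucket_lock is contended the check is skipped
 * entirely rather than blocking in a path where waiting could deadlock.
 */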
static bool bch_btree_ptr_bad(struct btree_keys *bk, const struct bkey *k)
{
	struct btree *b = container_of(bk, struct btree, keys);
	unsigned i;

	if (!bkey_cmp(k, &ZERO_KEY) ||
	    !KEY_PTRS(k) ||
	    bch_ptr_invalid(bk, k))
		return true;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (!ptr_available(b->c, k, i) ||
		    ptr_stale(b->c, k, i))
			return true;

	if (expensive_debug_checks(b->c) &&
	    btree_ptr_bad_expensive(b, k))
		return true;

	return false;
}
static bool bch_btree_ptr_insert_fixup(struct btree_keys *bk,
				       struct bkey *insert,
				       struct btree_iter *iter,
				       struct bkey *replace_key)
{
	struct btree *b = container_of(bk, struct btree, keys);

	if (!KEY_OFFSET(insert))
		btree_current_write(b)->prio_blocked++;

	return false;
}
const struct btree_keys_ops bch_btree_keys_ops = {
	.sort_cmp	= bch_key_sort_cmp,
	.insert_fixup	= bch_btree_ptr_insert_fixup,
	.key_invalid	= bch_btree_ptr_invalid,
	.key_bad	= bch_btree_ptr_bad,
	.key_to_text	= bch_extent_to_text,
	.key_dump	= bch_bkey_dump,
};
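/*
 * Btree node pointers and data extents share the generic bset machinery;
 * each key type supplies its own comparison, validation and fixup hooks
 * through a btree_keys_ops vtable like the one above, so the common sort
 * and insert paths (in bset.c) never special-case the key type.
 */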
/* Extents */

/*
 * Returns true if l > r - unless l == r, in which case returns true if l is
 * older than r.
 *
 * Necessary for btree_sort_fixup() - if there are multiple keys that compare
 * equal in different sets, we have to process them newest to oldest.
 */
static bool bch_extent_sort_cmp(struct btree_iter_set l,
				struct btree_iter_set r)
{
	int64_t c = bkey_cmp(&START_KEY(l.k), &START_KEY(r.k));

	return c ? c > 0 : l.k < r.k;
}
static struct bkey *bch_extent_sort_fixup(struct btree_iter *iter,
					  struct bkey *tmp)
{
	while (iter->used > 1) {
		struct btree_iter_set *top = iter->data, *i = top + 1;

		if (iter->used > 2 &&
		    bch_extent_sort_cmp(i[0], i[1]))
			i++;

		if (bkey_cmp(top->k, &START_KEY(i->k)) <= 0)
			break;

		if (!KEY_SIZE(i->k)) {
			sort_key_next(iter, i);
			heap_sift(iter, i - top, bch_extent_sort_cmp);
			continue;
		}

		if (top->k > i->k) {
			if (bkey_cmp(top->k, i->k) >= 0)
				sort_key_next(iter, i);
			else
				bch_cut_front(top->k, i->k);

			heap_sift(iter, i - top, bch_extent_sort_cmp);
		} else {
			/* can't happen because of comparison func */
			BUG_ON(!bkey_cmp(&START_KEY(top->k), &START_KEY(i->k)));

			if (bkey_cmp(i->k, top->k) < 0) {
				bkey_copy(tmp, top->k);

				bch_cut_back(&START_KEY(i->k), tmp);
				bch_cut_front(i->k, top->k);
				heap_sift(iter, 0, bch_extent_sort_cmp);

				return tmp;
			} else {
				bch_cut_back(&START_KEY(i->k), top->k);
			}
		}
	}

	return NULL;
}
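/*
 * A worked example of the fixup above (hypothetical extents): given two
 * overlapping keys from different sets, the one at the higher address (the
 * newer set) takes precedence. The older key is dropped when fully covered,
 * trimmed with bch_cut_front()/bch_cut_back() on partial overlap, or, when
 * the newer key lands in its middle, split - the front half being returned
 * via *tmp so the caller emits it ahead of the newer key.
 */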
static void bch_subtract_dirty(struct bkey *k,
			       struct cache_set *c,
			       uint64_t offset,
			       int sectors)
{
	if (KEY_DIRTY(k))
		bcache_dev_sectors_dirty_add(c, KEY_INODE(k),
					     offset, -sectors);
}
static bool bch_extent_insert_fixup(struct btree_keys *b,
				    struct bkey *insert,
				    struct btree_iter *iter,
				    struct bkey *replace_key)
{
	struct cache_set *c = container_of(b, struct btree, keys)->c;

	uint64_t old_offset;
	unsigned old_size, sectors_found = 0;

	BUG_ON(!KEY_OFFSET(insert));
	BUG_ON(!KEY_SIZE(insert));

	while (1) {
		struct bkey *k = bch_btree_iter_next(iter);

		if (!k)
			break;

		if (bkey_cmp(&START_KEY(k), insert) >= 0) {
			if (KEY_SIZE(k))
				break;
			else
				continue;
		}

		if (bkey_cmp(k, &START_KEY(insert)) <= 0)
			continue;

		old_offset = KEY_START(k);
		old_size = KEY_SIZE(k);

		/*
		 * We might overlap with 0 size extents; we can't skip these
		 * because if they're in the set we're inserting to we have to
		 * adjust them so they don't overlap with the key we're
		 * inserting. But we don't want to check them for replace
		 * operations.
		 */

		if (replace_key && KEY_SIZE(k)) {
			/*
			 * k might have been split since we inserted/found the
			 * key we're replacing
			 */
			unsigned i;
			uint64_t offset = KEY_START(k) -
				KEY_START(replace_key);

			/* But it must be a subset of the replace key */
			if (KEY_START(k) < KEY_START(replace_key) ||
			    KEY_OFFSET(k) > KEY_OFFSET(replace_key))
				goto check_failed;

			/* We didn't find a key that we were supposed to */
			if (KEY_START(k) > KEY_START(insert) + sectors_found)
				goto check_failed;

			if (!bch_bkey_equal_header(k, replace_key))
				goto check_failed;

			/* skip past gen */
			offset <<= 8;

			BUG_ON(!KEY_PTRS(replace_key));

			for (i = 0; i < KEY_PTRS(replace_key); i++)
				if (k->ptr[i] != replace_key->ptr[i] + offset)
					goto check_failed;

			sectors_found = KEY_OFFSET(k) - KEY_START(insert);
		}

		if (bkey_cmp(insert, k) < 0 &&
		    bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0) {
			/*
			 * We overlapped in the middle of an existing key: that
			 * means we have to split the old key. But we have to do
			 * slightly different things depending on whether the
			 * old key has been written out yet.
			 */

			struct bkey *top;

			bch_subtract_dirty(k, c, KEY_START(insert),
					   KEY_SIZE(insert));

			if (bkey_written(b, k)) {
				/*
				 * We insert a new key to cover the top of the
				 * old key, and the old key is modified in place
				 * to represent the bottom split.
				 *
				 * It's completely arbitrary whether the new key
				 * is the top or the bottom, but it has to match
				 * up with what btree_sort_fixup() does - it
				 * doesn't check for this kind of overlap, it
				 * depends on us inserting a new key for the top
				 * here.
				 */
				top = bch_bset_search(b, bset_tree_last(b),
						      insert);
				bch_bset_insert(b, top, k);
			} else {
				BKEY_PADDED(key) temp;
				bkey_copy(&temp.key, k);
				bch_bset_insert(b, k, &temp.key);
				top = bkey_next(k);
			}

			bch_cut_front(insert, top);
			bch_cut_back(&START_KEY(insert), k);
			bch_bset_fix_invalidated_key(b, k);
			goto out;
		}

		if (bkey_cmp(insert, k) < 0) {
			bch_cut_front(insert, k);
		} else {
			if (bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0)
				old_offset = KEY_START(insert);

			if (bkey_written(b, k) &&
			    bkey_cmp(&START_KEY(insert), &START_KEY(k)) <= 0) {
				/*
				 * Completely overwrote, so we don't have to
				 * invalidate the binary search tree
				 */
				bch_cut_front(k, k);
			} else {
				__bch_cut_back(&START_KEY(insert), k);
				bch_bset_fix_invalidated_key(b, k);
			}
		}

		bch_subtract_dirty(k, c, old_offset, old_size - KEY_SIZE(k));
	}

check_failed:
	if (replace_key) {
		if (!sectors_found) {
			return true;
		} else if (sectors_found < KEY_SIZE(insert)) {
			SET_KEY_OFFSET(insert, KEY_OFFSET(insert) -
				       (KEY_SIZE(insert) - sectors_found));
			SET_KEY_SIZE(insert, sectors_found);
		}
	}
out:
	if (KEY_DIRTY(insert))
		bcache_dev_sectors_dirty_add(c, KEY_INODE(insert),
					     KEY_START(insert),
					     KEY_SIZE(insert));

	return false;
}
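/*
 * A worked example of the middle-overlap split above (hypothetical sectors):
 * inserting 8..16 on top of an existing key covering 0..24 leaves the old
 * key cut back to 0..8 in place, plus a "top" key whose front is cut so it
 * covers 16..24; the mechanics of creating the top key differ depending on
 * whether the old key was already written out, as the comments describe.
 */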
bool __bch_extent_invalid(struct cache_set *c, const struct bkey *k)
{
	char buf[80];

	if (!KEY_SIZE(k))
		return true;

	if (KEY_SIZE(k) > KEY_OFFSET(k))
		goto bad;

	if (__ptr_invalid(c, k))
		goto bad;

	return false;
bad:
	bch_extent_to_text(buf, sizeof(buf), k);
	cache_bug(c, "spotted extent %s: %s", buf, bch_ptr_status(c, k));
	return true;
}
static bool bch_extent_invalid(struct btree_keys *bk, const struct bkey *k)
{
	struct btree *b = container_of(bk, struct btree, keys);

	return __bch_extent_invalid(b->c, k);
}
static bool bch_extent_bad_expensive(struct btree *b, const struct bkey *k,
				     unsigned ptr)
{
	struct bucket *g = PTR_BUCKET(b->c, k, ptr);
	char buf[80];

	if (mutex_trylock(&b->c->bucket_lock)) {
		if (b->c->gc_mark_valid &&
		    (!GC_MARK(g) ||
		     GC_MARK(g) == GC_MARK_METADATA ||
		     (GC_MARK(g) != GC_MARK_DIRTY && KEY_DIRTY(k))))
			goto err;

		if (g->prio == BTREE_PRIO)
			goto err;

		mutex_unlock(&b->c->bucket_lock);
	}

	return false;
err:
	mutex_unlock(&b->c->bucket_lock);
	bch_extent_to_text(buf, sizeof(buf), k);
	btree_bug(b,
"inconsistent extent pointer %s:\nbucket %zu pin %i prio %i gen %i last_gc %i mark %llu",
		  buf, PTR_BUCKET_NR(b->c, k, ptr), atomic_read(&g->pin),
		  g->prio, g->gen, g->last_gc, GC_MARK(g));
	return true;
}
static bool bch_extent_bad(struct btree_keys *bk, const struct bkey *k)
{
	struct btree *b = container_of(bk, struct btree, keys);
	struct bucket *g;
	unsigned i, stale;

	if (!KEY_PTRS(k) ||
	    bch_extent_invalid(bk, k))
		return true;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (!ptr_available(b->c, k, i))
			return true;

	if (!expensive_debug_checks(b->c) && KEY_DIRTY(k))
		return false;

	for (i = 0; i < KEY_PTRS(k); i++) {
		g = PTR_BUCKET(b->c, k, i);
		stale = ptr_stale(b->c, k, i);

		btree_bug_on(stale > 96, b,
			     "key too stale: %i, need_gc %u",
			     stale, b->c->need_gc);

		btree_bug_on(stale && KEY_DIRTY(k) && KEY_SIZE(k),
			     b, "stale dirty pointer");

		if (stale)
			return true;

		if (expensive_debug_checks(b->c) &&
		    bch_extent_bad_expensive(b, k, i))
			return true;
	}

	return false;
}
static uint64_t merge_chksums(struct bkey *l, struct bkey *r)
{
	return (l->ptr[KEY_PTRS(l)] + r->ptr[KEY_PTRS(r)]) &
		~((uint64_t)1 << 63);
}
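/*
 * A gloss on the indexing above: the checksum lives in the u64 slot just
 * past the last pointer (hence ptr[KEY_PTRS()]), and the merged sum is
 * masked down to 63 bits, presumably so the stored value never sets the
 * top bit of that slot.
 */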
static bool bch_extent_merge(struct btree_keys *bk, struct bkey *l, struct bkey *r)
{
	struct btree *b = container_of(bk, struct btree, keys);
	unsigned i;

	if (key_merging_disabled(b->c))
		return false;

	for (i = 0; i < KEY_PTRS(l); i++)
		if (l->ptr[i] + MAKE_PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
		    PTR_BUCKET_NR(b->c, l, i) != PTR_BUCKET_NR(b->c, r, i))
			return false;

	/* Keys with no pointers aren't restricted to one bucket and could
	 * overflow KEY_SIZE
	 */
	if (KEY_SIZE(l) + KEY_SIZE(r) > USHRT_MAX) {
		SET_KEY_OFFSET(l, KEY_OFFSET(l) + USHRT_MAX - KEY_SIZE(l));
		SET_KEY_SIZE(l, USHRT_MAX);

		bch_cut_front(l, r);
		return false;
	}

	if (KEY_CSUM(l)) {
		if (KEY_CSUM(r))
			l->ptr[KEY_PTRS(l)] = merge_chksums(l, r);
		else
			SET_KEY_CSUM(l, 0);
	}

	SET_KEY_OFFSET(l, KEY_OFFSET(l) + KEY_SIZE(r));
	SET_KEY_SIZE(l, KEY_SIZE(l) + KEY_SIZE(r));

	return true;
}
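/*
 * For example (hypothetical keys): l covering sectors 0..8 merges with r
 * covering 8..16 only if r's pointer begins exactly KEY_SIZE(l) sectors
 * further into the same bucket; l is then simply extended to cover 0..16
 * by the SET_KEY_OFFSET()/SET_KEY_SIZE() calls above.
 */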
const struct btree_keys_ops bch_extent_keys_ops = {
	.sort_cmp	= bch_extent_sort_cmp,
	.sort_fixup	= bch_extent_sort_fixup,
	.insert_fixup	= bch_extent_insert_fixup,
	.key_invalid	= bch_extent_invalid,
	.key_bad	= bch_extent_bad,
	.key_merge	= bch_extent_merge,
	.key_to_text	= bch_extent_to_text,
	.key_dump	= bch_bkey_dump,
};