drivers/md/bcache/extents.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
 *
 * Uses a block device as cache for other block devices; optimized for SSDs.
 * All allocation is done in buckets, which should match the erase block size
 * of the device.
 *
 * Buckets containing cached data are kept on a heap sorted by priority;
 * bucket priority is increased on cache hit, and periodically all the buckets
 * on the heap have their priority scaled down. This currently is just used as
 * an LRU but in the future should allow for more intelligent heuristics.
 *
 * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
 * counter. Garbage collection is used to remove stale pointers.
 *
 * Indexing is done via a btree; nodes are not necessarily fully sorted, rather
 * as keys are inserted we only sort the pages that have not yet been written.
 * When garbage collection is run, we resort the entire node.
 *
 * All configuration is done via sysfs; see Documentation/admin-guide/bcache.rst.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"
#include "writeback.h"
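
/*
 * Advance iterator set @i to its next key; if the set is exhausted, drop it
 * from the iterator by overwriting it with the last set on the heap.
 */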
static void sort_key_next(struct btree_iter *iter,
			  struct btree_iter_set *i)
{
	i->k = bkey_next(i->k);

	if (i->k == i->end)
		*i = iter->heap.data[--iter->heap.nr];
}
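
/*
 * min_heap "less" callback for plain btree keys: orders iterator sets by
 * bkey_cmp() of their current key, breaking ties by key address so that
 * duplicate keys are visited in a well-defined order.
 */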
static bool new_bch_key_sort_cmp(const void *l, const void *r, void *args)
{
	struct btree_iter_set *_l = (struct btree_iter_set *)l;
	struct btree_iter_set *_r = (struct btree_iter_set *)r;
	int64_t c = bkey_cmp(_l->k, _r->k);

	return !(c ? c > 0 : _l->k < _r->k);
}
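
/*
 * Returns true if any available pointer in @k is out of range: either the key
 * spills past the end of the bucket it points into, or the bucket number is
 * outside the device's usable buckets.
 */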
static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
{
	unsigned int i;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(c, k, i)) {
			struct cache *ca = c->cache;
			size_t bucket = PTR_BUCKET_NR(c, k, i);
			size_t r = bucket_remainder(c, PTR_OFFSET(k, i));

			if (KEY_SIZE(k) + r > c->cache->sb.bucket_size ||
			    bucket < ca->sb.first_bucket ||
			    bucket >= ca->sb.nbuckets)
				return true;
		}

	return false;
}

/* Common among btree and extent ptrs */

static const char *bch_ptr_status(struct cache_set *c, const struct bkey *k)
{
	unsigned int i;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(c, k, i)) {
			struct cache *ca = c->cache;
			size_t bucket = PTR_BUCKET_NR(c, k, i);
			size_t r = bucket_remainder(c, PTR_OFFSET(k, i));

			if (KEY_SIZE(k) + r > c->cache->sb.bucket_size)
				return "bad, length too big";
			if (bucket < ca->sb.first_bucket)
				return "bad, short offset";
			if (bucket >= ca->sb.nbuckets)
				return "bad, offset past end of device";
			if (ptr_stale(c, k, i))
				return "stale";
		}

	if (!bkey_cmp(k, &ZERO_KEY))
		return "bad, null key";
	if (!KEY_PTRS(k))
		return "bad, no pointers";
	if (!KEY_SIZE(k))
		return "zeroed key";
	return "";
}
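
/*
 * Format @k for debug output as "inode:start len N -> [dev:offset gen G, ...]"
 * followed by dirty/checksum annotations.
 */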
void bch_extent_to_text(char *buf, size_t size, const struct bkey *k)
{
	unsigned int i = 0;
	char *out = buf, *end = buf + size;

#define p(...)	(out += scnprintf(out, end - out, __VA_ARGS__))

	p("%llu:%llu len %llu -> [", KEY_INODE(k), KEY_START(k), KEY_SIZE(k));

	for (i = 0; i < KEY_PTRS(k); i++) {
		if (i)
			p(", ");

		if (PTR_DEV(k, i) == PTR_CHECK_DEV)
			p("check dev");
		else
			p("%llu:%llu gen %llu", PTR_DEV(k, i),
			  PTR_OFFSET(k, i), PTR_GEN(k, i));
	}

	p("]");

	if (KEY_DIRTY(k))
		p(" dirty");
	if (KEY_CSUM(k))
		p(" cs%llu %llx", KEY_CSUM(k), k->ptr[1]);
#undef p
}
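
/*
 * key_dump hook: print a key, the buckets its pointers reference (with their
 * priorities when in range), and the key's status string.
 */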
static void bch_bkey_dump(struct btree_keys *keys, const struct bkey *k)
{
	struct btree *b = container_of(keys, struct btree, keys);
	unsigned int j;
	char buf[80];

	bch_extent_to_text(buf, sizeof(buf), k);
	pr_cont(" %s", buf);

	for (j = 0; j < KEY_PTRS(k); j++) {
		size_t n = PTR_BUCKET_NR(b->c, k, j);

		pr_cont(" bucket %zu", n);
		if (n >= b->c->cache->sb.first_bucket && n < b->c->cache->sb.nbuckets)
			pr_cont(" prio %i",
				PTR_BUCKET(b->c, k, j)->prio);
	}

	pr_cont(" %s\n", bch_ptr_status(b->c, k));
}

/* Btree ptrs */

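/*
 * A btree node pointer must have at least one pointer and a nonzero size, must
 * not be marked dirty, and must pass __ptr_invalid(); anything else is
 * reported via cache_bug().
 */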
bool __bch_btree_ptr_invalid(struct cache_set *c, const struct bkey *k)
{
	char buf[80];

	if (!KEY_PTRS(k) || !KEY_SIZE(k) || KEY_DIRTY(k))
		goto bad;

	if (__ptr_invalid(c, k))
		goto bad;

	return false;
bad:
	bch_extent_to_text(buf, sizeof(buf), k);
	cache_bug(c, "spotted btree ptr %s: %s", buf, bch_ptr_status(c, k));
	return true;
}
static bool bch_btree_ptr_invalid(struct btree_keys *bk, const struct bkey *k)
{
	struct btree *b = container_of(bk, struct btree, keys);

	return __bch_btree_ptr_invalid(b->c, k);
}
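
/*
 * Extra checks done only with expensive_debug_checks() enabled: under
 * bucket_lock (trylock, so the check is silently skipped on contention),
 * verify that each referenced bucket still has btree priority and is marked
 * as metadata by garbage collection.
 */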
static bool btree_ptr_bad_expensive(struct btree *b, const struct bkey *k)
{
	unsigned int i;
	char buf[80];
	struct bucket *g;

	if (mutex_trylock(&b->c->bucket_lock)) {
		for (i = 0; i < KEY_PTRS(k); i++)
			if (ptr_available(b->c, k, i)) {
				g = PTR_BUCKET(b->c, k, i);

				if (KEY_DIRTY(k) ||
				    g->prio != BTREE_PRIO ||
				    (b->c->gc_mark_valid &&
				     GC_MARK(g) != GC_MARK_METADATA))
					goto err;
			}

		mutex_unlock(&b->c->bucket_lock);
	}

	return false;
err:
	mutex_unlock(&b->c->bucket_lock);
	bch_extent_to_text(buf, sizeof(buf), k);
	btree_bug(b,
		  "inconsistent btree pointer %s: bucket %zi pin %i prio %i gen %i last_gc %i mark %llu",
		  buf, PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin),
		  g->prio, g->gen, g->last_gc, GC_MARK(g));
	return true;
}
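
/*
 * key_bad hook for btree node pointers: skip null keys, keys without pointers,
 * invalid keys, and keys with unavailable or stale pointers.
 */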
static bool bch_btree_ptr_bad(struct btree_keys *bk, const struct bkey *k)
{
	struct btree *b = container_of(bk, struct btree, keys);
	unsigned int i;

	if (!bkey_cmp(k, &ZERO_KEY) ||
	    !KEY_PTRS(k) ||
	    bch_ptr_invalid(bk, k))
		return true;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (!ptr_available(b->c, k, i) ||
		    ptr_stale(b->c, k, i))
			return true;

	if (expensive_debug_checks(b->c) &&
	    btree_ptr_bad_expensive(b, k))
		return true;

	return false;
}
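
/*
 * insert_fixup hook for btree node pointers: there are no overlaps to fix up.
 * A zero-offset insert bumps prio_blocked on the current btree write, which
 * holds back writing out bucket priorities until that write completes.
 */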
static bool bch_btree_ptr_insert_fixup(struct btree_keys *bk,
				       struct bkey *insert,
				       struct btree_iter *iter,
				       struct bkey *replace_key)
{
	struct btree *b = container_of(bk, struct btree, keys);

	if (!KEY_OFFSET(insert))
		btree_current_write(b)->prio_blocked++;

	return false;
}
const struct btree_keys_ops bch_btree_keys_ops = {
	.sort_cmp	= new_bch_key_sort_cmp,
	.insert_fixup	= bch_btree_ptr_insert_fixup,
	.key_invalid	= bch_btree_ptr_invalid,
	.key_bad	= bch_btree_ptr_bad,
	.key_to_text	= bch_extent_to_text,
	.key_dump	= bch_bkey_dump,
};

/* Extents */

/*
 * min_heap comparison for extents: returns true if l should sort before r,
 * i.e. if l's start key is smaller - or, when the start keys are equal, if l
 * is newer than r.
 *
 * Necessary for btree_sort_fixup() - if there are multiple keys that compare
 * equal in different sets, we have to process them newest to oldest.
 */
static bool new_bch_extent_sort_cmp(const void *l, const void *r, void __always_unused *args)
{
	struct btree_iter_set *_l = (struct btree_iter_set *)l;
	struct btree_iter_set *_r = (struct btree_iter_set *)r;
	int64_t c = bkey_cmp(&START_KEY(_l->k), &START_KEY(_r->k));

	return !(c ? c > 0 : _l->k < _r->k);
}
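
/*
 * sort_fixup hook for extents, called while merging sorted sets: as long as
 * the top two iterator sets overlap, trim the older key so it no longer
 * overlaps the newer one (dropping zero-size keys along the way). If the newer
 * key splits the older one in the middle, the older key's front fragment is
 * copied into @tmp and returned so the caller can emit it immediately.
 */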
static struct bkey *bch_extent_sort_fixup(struct btree_iter *iter,
					  struct bkey *tmp)
{
	const struct min_heap_callbacks callbacks = {
		.less = new_bch_extent_sort_cmp,
		.swp = NULL,
	};

	while (iter->heap.nr > 1) {
		struct btree_iter_set *top = iter->heap.data, *i = top + 1;

		if (iter->heap.nr > 2 &&
		    !new_bch_extent_sort_cmp(&i[0], &i[1], NULL))
			i++;

		if (bkey_cmp(top->k, &START_KEY(i->k)) <= 0)
			break;

		if (!KEY_SIZE(i->k)) {
			sort_key_next(iter, i);
			min_heap_sift_down(&iter->heap, i - top, &callbacks, NULL);
			continue;
		}

		if (top->k > i->k) {
			if (bkey_cmp(top->k, i->k) >= 0)
				sort_key_next(iter, i);
			else
				bch_cut_front(top->k, i->k);

			min_heap_sift_down(&iter->heap, i - top, &callbacks, NULL);
		} else {
			/* can't happen because of comparison func */
			BUG_ON(!bkey_cmp(&START_KEY(top->k), &START_KEY(i->k)));

			if (bkey_cmp(i->k, top->k) < 0) {
				bkey_copy(tmp, top->k);

				bch_cut_back(&START_KEY(i->k), tmp);
				bch_cut_front(i->k, top->k);
				min_heap_sift_down(&iter->heap, 0, &callbacks, NULL);

				return tmp;
			} else {
				bch_cut_back(&START_KEY(i->k), top->k);
			}
		}
	}

	return NULL;
}
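
/*
 * When part of a dirty extent is overwritten or trimmed, subtract the removed
 * sectors from the backing device's dirty counters.
 */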
static void bch_subtract_dirty(struct bkey *k,
			       struct cache_set *c,
			       uint64_t offset,
			       int sectors)
{
	if (KEY_DIRTY(k))
		bcache_dev_sectors_dirty_add(c, KEY_INODE(k),
					     offset, -sectors);
}
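
/*
 * insert_fixup hook for extents: walk the existing keys that overlap @insert
 * and cut or split them so the new extent can go in without overlap, keeping
 * dirty sector counts in sync. When @replace_key is given (a cmpxchg-style
 * insert), also verify that the data being replaced is still present; if only
 * part of it is found, @insert is shrunk to the part that matched, and
 * returning true tells the caller to skip the insert entirely.
 */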
static bool bch_extent_insert_fixup(struct btree_keys *b,
				    struct bkey *insert,
				    struct btree_iter *iter,
				    struct bkey *replace_key)
{
	struct cache_set *c = container_of(b, struct btree, keys)->c;

	uint64_t old_offset;
	unsigned int old_size, sectors_found = 0;

	BUG_ON(!KEY_OFFSET(insert));
	BUG_ON(!KEY_SIZE(insert));

	while (1) {
		struct bkey *k = bch_btree_iter_next(iter);

		if (!k)
			break;

		if (bkey_cmp(&START_KEY(k), insert) >= 0) {
			if (KEY_SIZE(k))
				break;
			else
				continue;
		}

		if (bkey_cmp(k, &START_KEY(insert)) <= 0)
			continue;

		old_offset = KEY_START(k);
		old_size = KEY_SIZE(k);

		/*
		 * We might overlap with 0 size extents; we can't skip these
		 * because if they're in the set we're inserting to we have to
		 * adjust them so they don't overlap with the key we're
		 * inserting. But we don't want to check them for replace
		 * operations.
		 */

		if (replace_key && KEY_SIZE(k)) {
			/*
			 * k might have been split since we inserted/found the
			 * key we're replacing
			 */
			unsigned int i;
			uint64_t offset = KEY_START(k) -
				KEY_START(replace_key);

			/* But it must be a subset of the replace key */
			if (KEY_START(k) < KEY_START(replace_key) ||
			    KEY_OFFSET(k) > KEY_OFFSET(replace_key))
				goto check_failed;

			/* We didn't find a key that we were supposed to */
			if (KEY_START(k) > KEY_START(insert) + sectors_found)
				goto check_failed;

			if (!bch_bkey_equal_header(k, replace_key))
				goto check_failed;

			/* skip past gen */
			offset <<= 8;

			BUG_ON(!KEY_PTRS(replace_key));

			for (i = 0; i < KEY_PTRS(replace_key); i++)
				if (k->ptr[i] != replace_key->ptr[i] + offset)
					goto check_failed;

			sectors_found = KEY_OFFSET(k) - KEY_START(insert);
		}

		if (bkey_cmp(insert, k) < 0 &&
		    bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0) {
			/*
			 * We overlapped in the middle of an existing key: that
			 * means we have to split the old key. But we have to do
			 * slightly different things depending on whether the
			 * old key has been written out yet.
			 */
			struct bkey *top;

			bch_subtract_dirty(k, c, KEY_START(insert),
					   KEY_SIZE(insert));

			if (bkey_written(b, k)) {
				/*
				 * We insert a new key to cover the top of the
				 * old key, and the old key is modified in place
				 * to represent the bottom split.
				 *
				 * It's completely arbitrary whether the new key
				 * is the top or the bottom, but it has to match
				 * up with what btree_sort_fixup() does - it
				 * doesn't check for this kind of overlap, it
				 * depends on us inserting a new key for the top
				 * here.
				 */
				top = bch_bset_search(b, bset_tree_last(b),
						      insert);
				bch_bset_insert(b, top, k);
			} else {
				BKEY_PADDED(key) temp;
				bkey_copy(&temp.key, k);
				bch_bset_insert(b, k, &temp.key);
				top = bkey_next(k);
			}

			bch_cut_front(insert, top);
			bch_cut_back(&START_KEY(insert), k);
			bch_bset_fix_invalidated_key(b, k);
			goto out;
		}

		if (bkey_cmp(insert, k) < 0) {
			bch_cut_front(insert, k);
		} else {
			if (bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0)
				old_offset = KEY_START(insert);

			if (bkey_written(b, k) &&
			    bkey_cmp(&START_KEY(insert), &START_KEY(k)) <= 0) {
				/*
				 * Completely overwrote, so we don't have to
				 * invalidate the binary search tree
				 */
				bch_cut_front(k, k);
			} else {
				__bch_cut_back(&START_KEY(insert), k);
				bch_bset_fix_invalidated_key(b, k);
			}
		}

		bch_subtract_dirty(k, c, old_offset, old_size - KEY_SIZE(k));
	}

check_failed:
	if (replace_key) {
		if (!sectors_found) {
			return true;
		} else if (sectors_found < KEY_SIZE(insert)) {
			SET_KEY_OFFSET(insert, KEY_OFFSET(insert) -
				       (KEY_SIZE(insert) - sectors_found));
			SET_KEY_SIZE(insert, sectors_found);
		}
	}
out:
	if (KEY_DIRTY(insert))
		bcache_dev_sectors_dirty_add(c, KEY_INODE(insert),
					     KEY_START(insert),
					     KEY_SIZE(insert));

	return false;
}
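
/*
 * An extent is invalid if it has zero size, if its size is larger than its
 * offset (it would start before sector 0), or if any pointer is out of range;
 * all but the zero-size case are reported via cache_bug().
 */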
bool __bch_extent_invalid(struct cache_set *c, const struct bkey *k)
{
	char buf[80];

	if (!KEY_SIZE(k))
		return true;

	if (KEY_SIZE(k) > KEY_OFFSET(k))
		goto bad;

	if (__ptr_invalid(c, k))
		goto bad;

	return false;
bad:
	bch_extent_to_text(buf, sizeof(buf), k);
	cache_bug(c, "spotted extent %s: %s", buf, bch_ptr_status(c, k));
	return true;
}
static bool bch_extent_invalid(struct btree_keys *bk, const struct bkey *k)
{
	struct btree *b = container_of(bk, struct btree, keys);

	return __bch_extent_invalid(b->c, k);
}
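
/*
 * GC cross-check for one extent pointer, done only with
 * expensive_debug_checks(): under bucket_lock (trylock), the referenced bucket
 * must be marked by GC, must not be metadata or have btree priority, and must
 * be marked dirty if the key is dirty.
 */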
static bool bch_extent_bad_expensive(struct btree *b, const struct bkey *k,
				     unsigned int ptr)
{
	struct bucket *g = PTR_BUCKET(b->c, k, ptr);
	char buf[80];

	if (mutex_trylock(&b->c->bucket_lock)) {
		if (b->c->gc_mark_valid &&
		    (!GC_MARK(g) ||
		     GC_MARK(g) == GC_MARK_METADATA ||
		     (GC_MARK(g) != GC_MARK_DIRTY && KEY_DIRTY(k))))
			goto err;

		if (g->prio == BTREE_PRIO)
			goto err;

		mutex_unlock(&b->c->bucket_lock);
	}

	return false;
err:
	mutex_unlock(&b->c->bucket_lock);
	bch_extent_to_text(buf, sizeof(buf), k);
	btree_bug(b,
		  "inconsistent extent pointer %s:\nbucket %zu pin %i prio %i gen %i last_gc %i mark %llu",
		  buf, PTR_BUCKET_NR(b->c, k, ptr), atomic_read(&g->pin),
		  g->prio, g->gen, g->last_gc, GC_MARK(g));
	return true;
}
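
/*
 * key_bad hook for extents: skip keys with no pointers, invalid keys, and keys
 * with unavailable or stale pointers; stale dirty pointers are also logged,
 * and excessively stale generations trip a btree bug.
 */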
static bool bch_extent_bad(struct btree_keys *bk, const struct bkey *k)
{
	struct btree *b = container_of(bk, struct btree, keys);
	unsigned int i, stale;
	char buf[80];

	if (!KEY_PTRS(k) ||
	    bch_extent_invalid(bk, k))
		return true;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (!ptr_available(b->c, k, i))
			return true;

	for (i = 0; i < KEY_PTRS(k); i++) {
		stale = ptr_stale(b->c, k, i);

		if (stale && KEY_DIRTY(k)) {
			bch_extent_to_text(buf, sizeof(buf), k);
			pr_info("stale dirty pointer, stale %u, key: %s\n",
				stale, buf);
		}

		btree_bug_on(stale > BUCKET_GC_GEN_MAX, b,
			     "key too stale: %i, need_gc %u",
			     stale, b->c->need_gc);

		if (stale)
			return true;

		if (expensive_debug_checks(b->c) &&
		    bch_extent_bad_expensive(b, k, i))
			return true;
	}

	return false;
}
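
/*
 * Sum the checksums stored in the word after each key's pointer list, masking
 * off the top bit.
 */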
static uint64_t merge_chksums(struct bkey *l, struct bkey *r)
{
	return (l->ptr[KEY_PTRS(l)] + r->ptr[KEY_PTRS(r)]) &
		~((uint64_t)1 << 63);
}
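
/*
 * key_merge hook: try to merge @r into @l. This only works when each pointer
 * in @r picks up exactly where the corresponding pointer in @l ends, within
 * the same bucket. If the combined size would overflow KEY_SIZE, @l is grown
 * to the maximum and @r trimmed, but the keys stay separate (returns false).
 */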
static bool bch_extent_merge(struct btree_keys *bk,
			     struct bkey *l,
			     struct bkey *r)
{
	struct btree *b = container_of(bk, struct btree, keys);
	unsigned int i;

	if (key_merging_disabled(b->c))
		return false;

	for (i = 0; i < KEY_PTRS(l); i++)
		if (l->ptr[i] + MAKE_PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
		    PTR_BUCKET_NR(b->c, l, i) != PTR_BUCKET_NR(b->c, r, i))
			return false;

	/* Keys with no pointers aren't restricted to one bucket and could
	 * overflow KEY_SIZE
	 */
	if (KEY_SIZE(l) + KEY_SIZE(r) > USHRT_MAX) {
		SET_KEY_OFFSET(l, KEY_OFFSET(l) + USHRT_MAX - KEY_SIZE(l));
		SET_KEY_SIZE(l, USHRT_MAX);

		bch_cut_front(l, r);
		return false;
	}

	if (KEY_CSUM(l)) {
		if (KEY_CSUM(r))
			l->ptr[KEY_PTRS(l)] = merge_chksums(l, r);
		else
			SET_KEY_CSUM(l, 0);
	}

	SET_KEY_OFFSET(l, KEY_OFFSET(l) + KEY_SIZE(r));
	SET_KEY_SIZE(l, KEY_SIZE(l) + KEY_SIZE(r));

	return true;
}
const struct btree_keys_ops bch_extent_keys_ops = {
	.sort_cmp	= new_bch_extent_sort_cmp,
	.sort_fixup	= bch_extent_sort_fixup,
	.insert_fixup	= bch_extent_insert_fixup,
	.key_invalid	= bch_extent_invalid,
	.key_bad	= bch_extent_bad,
	.key_merge	= bch_extent_merge,
	.key_to_text	= bch_extent_to_text,
	.key_dump	= bch_bkey_dump,
	.is_extents	= true,
};