/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHE_BSET_H
#define _BCACHE_BSET_H

#include <linux/bcache.h>
#include <linux/kernel.h>
#include <linux/types.h>

#include "util.h" /* for time_stats */
/*
 * BKEYS:
 *
 * A bkey contains a key, a size field, a variable number of pointers, and some
 * ancillary flag bits.
 *
 * We use two different functions for validating bkeys, bch_ptr_invalid() and
 * bch_ptr_bad().
 *
 * bch_ptr_invalid() primarily filters out keys and pointers that would be
 * invalid due to some sort of bug, whereas bch_ptr_bad() filters out keys and
 * pointers that occur in normal practice but don't point to real data.
 *
 * The one exception to the rule that ptr_invalid() filters out invalid keys is
 * that it also filters out keys of size 0 - these are keys that have been
 * completely overwritten. It'd be safe to delete these in memory while leaving
 * them on disk, just unnecessary work - so we filter them out when resorting
 * instead.
 *
 * We can't filter out stale keys when we're resorting, because garbage
 * collection needs to find them to ensure bucket gens don't wrap around -
 * unless we're rewriting the btree node, those stale keys still exist on disk.
 *
 * We also implement functions here for removing some number of sectors from
 * the front or the back of a bkey - this is mainly used for fixing overlapping
 * extents, by removing the overlapping sectors from the older key.
 */
/*
 * BSETS (& auxiliary search trees):
 *
 * A bset is an array of bkeys laid out contiguously in memory in sorted order,
 * along with a header. A btree node is made up of a number of these, written
 * at different times.
 *
 * There could be many of them on disk, but we never allow there to be more
 * than 4 in memory - we lazily resort as needed.
 *
 * We implement code here for creating and maintaining auxiliary search trees
 * (described below) for searching an individual bset, and on top of that we
 * implement a btree iterator.
 *
 * BTREE ITERATOR:
 *
 * Most of the code in bcache doesn't care about an individual bset - it needs
 * to search entire btree nodes and iterate over them in sorted order.
 *
 * The btree iterator code serves both functions; it iterates through the keys
 * in a btree node in sorted order, starting either from the keys after a
 * specific point (if you pass it a search key) or from the start of the btree
 * node.
 */
/*
 * AUXILIARY SEARCH TREES:
 *
 * Since keys are variable length, we can't use a binary search on a bset - we
 * wouldn't be able to find the start of the next key. But binary searches are
 * slow anyway, due to terrible cache behaviour; bcache originally used binary
 * searches and that code topped out at under 50k lookups/second.
 *
 * So we need to construct some sort of lookup table. Since we only insert keys
 * into the last (unwritten) set, most of the keys within a given btree node are
 * usually in sets that are mostly constant. We use two different types of
 * lookup tables to take advantage of this.
 *
 * Both lookup tables have in common that they don't index every key in the
 * set; they index one key every BSET_CACHELINE bytes, and then a linear search
 * is used for the rest.
 *
 * For sets that have been written to disk and are no longer being inserted
 * into, we construct a binary search tree in an array - traversing a binary
 * search tree in an array gives excellent locality of reference and is very
 * fast, since both children of any node are adjacent to each other in memory
 * (and their grandchildren, and great grandchildren...) - this means
 * prefetching can be used to great effect.
 *
 * It's quite useful performance-wise to keep these nodes small - not just
 * because they're more likely to be in L2, but also because we can prefetch
 * more nodes on a single cacheline and thus prefetch more iterations in
 * advance when traversing this tree.
 */
/*
 * Nodes in the auxiliary search tree must contain both a key to compare
 * against (we don't want to fetch the key from the set, that would defeat the
 * purpose), and a pointer to the key. We use a few tricks to compress both of
 * these.
 *
 * To compress the pointer, we take advantage of the fact that one node in the
 * search tree corresponds to precisely BSET_CACHELINE bytes in the set. We
 * have a function (to_inorder()) that takes the index of a node in a binary
 * tree and returns what its index would be in an inorder traversal, so we only
 * have to store the low bits of the offset.
 */
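/*
 * For example (illustrative only; the real helpers in bset.c also handle the
 * trailing partial cacheline and the +1 array offset): a complete tree of
 * size 7 stored in array order
 *
 *	1 2 3 4 5 6 7
 *
 * is visited by an inorder walk as
 *
 *	4 2 5 1 6 3 7
 *
 * so to_inorder() maps array index 1 (the root) to inorder position 4, index
 * 2 to position 2, index 3 to position 6, and so on. Multiplying the inorder
 * position by BSET_CACHELINE recovers most of the key's byte offset in the
 * set, leaving only the low bits to be stored in the node.
 */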
/*
 * The key is 84 bits (KEY_DEV + key->key, the offset on the device). To
 * compress that, we take advantage of the fact that when we're traversing the
 * search tree at every iteration we know that both our search key and the key
 * we're looking for lie within some range - bounded by our previous
 * comparisons. (We special case the start of a search so that this is true
 * even at the root of the tree.)
 *
 * So if we know the key we're looking for is between a and b, and a and b
 * don't differ above bit 50, we don't need to check anything above bit 50.
 *
 * We don't usually need the rest of the bits, either; we only need enough bits
 * to partition the key range we're currently checking. Consider key n - the
 * key our auxiliary search tree node corresponds to, and key p, the key
 * immediately preceding n. The lowest bit we need to store in the auxiliary
 * search tree is the highest bit that differs between n and p.
 */
/*
 * Note that this could be bit 0 - we might sometimes need all 84 bits to do
 * the comparison. But we'd really like our nodes in the auxiliary search tree
 * to be small.
 *
 * The solution is to make them fixed size, and when we're constructing a node
 * check whether p and n differed in the bits we needed them to. If they
 * don't, we flag that node, and when doing lookups we fall back to comparing
 * against the real key. As long as this doesn't happen too often (and it
 * seems to reliably happen a bit less than 1% of the time), we win - even on
 * failures, that key is then more likely to be in cache than if we were doing
 * binary searches all the way, since we're touching so much less memory.
 */
/*
 * The keys in the auxiliary search tree are stored in (software) floating
 * point, with an exponent and a mantissa. The exponent needs to be big enough
 * to address all the bits in the original key, but the number of bits in the
 * mantissa is somewhat arbitrary; more bits just gets us fewer failures.
 *
 * We need 7 bits for the exponent and 3 bits for the key's offset (since keys
 * are 8 byte aligned); using 22 bits for the mantissa means a node is 4 bytes.
 * We need one node per 128 bytes in the btree node, which means the auxiliary
 * search trees take up 3% as much memory as the btree itself.
 */
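/*
 * A sketch of how those 7 + 3 + 22 bits could pack into a 4 byte node (the
 * authoritative definition of struct bkey_float lives in bset.c, not here):
 *
 *	struct bkey_float {
 *		unsigned int	exponent:7;	// position of the mantissa in the key
 *		unsigned int	m:3;		// low bits of the key's offset in the set
 *		unsigned int	mantissa:22;	// bits of the key below the exponent
 *	} __packed;
 */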
/*
 * Constructing these auxiliary search trees is moderately expensive, and we
 * don't want to be constantly rebuilding the search tree for the last set
 * whenever we insert another key into it. For the unwritten set, we use a much
 * simpler lookup table - it's just a flat array, so index i in the lookup
 * table corresponds to the i-th range of BSET_CACHELINE bytes in the set.
 * Indexing within each byte range works the same as with the auxiliary search
 * trees.
 *
 * These are much easier to keep up to date when we insert a key - we do it
 * somewhat lazily; when we shift a key up we usually just increment the
 * pointer to it, and only when it would overflow do we go to the trouble of
 * finding the first key in that range of bytes again.
 */
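/*
 * A rough sketch of that lazy update (hypothetical helper names; the real
 * logic lives in bset.c). Each table entry stores a key's offset within its
 * cacheline, so shifting keys up by the inserted key's size only touches
 * entries at or after the insertion point:
 *
 *	for (j = first_entry_at_or_after(t, where); j < t->size; j++) {
 *		table[j] += bkey_bytes(insert);
 *		if (table[j] >= BSET_CACHELINE)
 *			table[j] = find_first_key_in_cacheline(t, j);
 *	}
 */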
struct btree_keys;
struct btree_iter;
struct btree_iter_set;
struct bkey_float;

#define MAX_BSETS		4U
struct bset_tree {
	/*
	 * We construct a binary tree in an array as if the array
	 * started at 1, so that things line up on the same cachelines
	 * better: see comments in bset.c at cacheline_to_bkey() for
	 * details
	 */

	/* size of the binary tree and prev array */
	unsigned int		size;

	/* function of size - precalculated for to_inorder() */
	unsigned int		extra;

	/* copy of the last key in the set */
	struct bkey		end;
	struct bkey_float	*tree;

	/*
	 * The nodes in the bset tree point to specific keys - this
	 * array holds the sizes of the previous key.
	 *
	 * Conceptually it's a member of struct bkey_float, but we want
	 * to keep bkey_float to 4 bytes and prev isn't used in the fast
	 * path (bkey_float_mantissa() is)
	 */
	uint8_t			*prev;

	/* The actual btree node, with pointers to each sorted set */
	struct bset		*data;
};
struct btree_keys_ops {
	bool		(*sort_cmp)(struct btree_iter_set l,
				    struct btree_iter_set r);
	struct bkey	*(*sort_fixup)(struct btree_iter *iter,
				       struct bkey *tmp);
	bool		(*insert_fixup)(struct btree_keys *b,
					struct bkey *insert,
					struct btree_iter *iter,
					struct bkey *replace_key);
	bool		(*key_invalid)(struct btree_keys *bk,
				       const struct bkey *k);
	bool		(*key_bad)(struct btree_keys *bk,
				   const struct bkey *k);
	bool		(*key_merge)(struct btree_keys *bk,
				     struct bkey *l, struct bkey *r);
	void		(*key_to_text)(char *buf,
				       size_t size,
				       const struct bkey *k);
	void		(*key_dump)(struct btree_keys *keys,
				    const struct bkey *k);

	/*
	 * Only used for deciding whether to use START_KEY(k) or just the key
	 * itself in a couple places
	 */
	bool		is_extents;
};
struct btree_keys {
	const struct btree_keys_ops	*ops;
	uint8_t				page_order;
	uint8_t				nsets;
	unsigned int			last_set_unwritten:1;
	bool				*expensive_debug_checks;

	/*
	 * Sets of sorted keys - the real btree node - plus a binary search
	 * tree
	 *
	 * set[0] is special; set[0]->tree, set[0]->prev and set[0]->data point
	 * to the memory we have allocated for this btree node. Additionally,
	 * set[0]->data points to the entire btree node as it exists on disk.
	 */
	struct bset_tree		set[MAX_BSETS];
};
static inline struct bset_tree *bset_tree_last(struct btree_keys *b)
{
	return b->set + b->nsets;
}
static inline bool bset_written(struct btree_keys *b, struct bset_tree *t)
{
	return t <= b->set + b->nsets - b->last_set_unwritten;
}
static inline bool bkey_written(struct btree_keys *b, struct bkey *k)
{
	return !b->last_set_unwritten || k < b->set[b->nsets].data->start;
}
static inline unsigned int bset_byte_offset(struct btree_keys *b,
					    struct bset *i)
{
	return ((size_t) i) - ((size_t) b->set->data);
}
static inline unsigned int bset_sector_offset(struct btree_keys *b,
					      struct bset *i)
{
	/* in 512-byte sectors */
	return bset_byte_offset(b, i) >> 9;
}
#define __set_bytes(i, k)	(sizeof(*(i)) + (k) * sizeof(uint64_t))
#define set_bytes(i)		__set_bytes(i, i->keys)

#define __set_blocks(i, k, block_bytes)				\
	DIV_ROUND_UP(__set_bytes(i, k), block_bytes)
#define set_blocks(i, block_bytes)				\
	__set_blocks(i, (i)->keys, block_bytes)
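/*
 * E.g. a bset holding 500 u64s of key data occupies
 * __set_bytes(i, 500) = sizeof(struct bset) + 4000 bytes; with 4096-byte
 * blocks, set_blocks() rounds that up to one block (assuming the bset header
 * fits within the remaining 96 bytes).
 */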
static inline size_t bch_btree_keys_u64s_remaining(struct btree_keys *b)
{
	struct bset_tree *t = bset_tree_last(b);

	BUG_ON((PAGE_SIZE << b->page_order) <
	       (bset_byte_offset(b, t->data) + set_bytes(t->data)));

	if (!b->last_set_unwritten)
		return 0;

	return ((PAGE_SIZE << b->page_order) -
		(bset_byte_offset(b, t->data) + set_bytes(t->data))) /
		sizeof(u64);
}
static inline struct bset *bset_next_set(struct btree_keys *b,
					 unsigned int block_bytes)
{
	struct bset *i = bset_tree_last(b)->data;

	return ((void *) i) + roundup(set_bytes(i), block_bytes);
}
void bch_btree_keys_free(struct btree_keys *b);
int bch_btree_keys_alloc(struct btree_keys *b, unsigned int page_order,
			 gfp_t gfp);
void bch_btree_keys_init(struct btree_keys *b, const struct btree_keys_ops *ops,
			 bool *expensive_debug_checks);

void bch_bset_init_next(struct btree_keys *b, struct bset *i, uint64_t magic);
void bch_bset_build_written_tree(struct btree_keys *b);
void bch_bset_fix_invalidated_key(struct btree_keys *b, struct bkey *k);
bool bch_bkey_try_merge(struct btree_keys *b, struct bkey *l, struct bkey *r);
void bch_bset_insert(struct btree_keys *b, struct bkey *where,
		     struct bkey *insert);
unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
				  struct bkey *replace_key);
enum {
	BTREE_INSERT_STATUS_NO_INSERT = 0,
	BTREE_INSERT_STATUS_INSERT,
	BTREE_INSERT_STATUS_BACK_MERGE,
	BTREE_INSERT_STATUS_OVERWROTE,
	BTREE_INSERT_STATUS_FRONT_MERGE,
};
/* Btree key iteration */

struct btree_iter {
	size_t size, used;
#ifdef CONFIG_BCACHE_DEBUG
	struct btree_keys *b;
#endif
	struct btree_iter_set {
		struct bkey *k, *end;
	} data[MAX_BSETS];
};
typedef bool (*ptr_filter_fn)(struct btree_keys *b, const struct bkey *k);
struct bkey *bch_btree_iter_next(struct btree_iter *iter);
struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
					struct btree_keys *b,
					ptr_filter_fn fn);

void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
			 struct bkey *end);
struct bkey *bch_btree_iter_init(struct btree_keys *b,
				 struct btree_iter *iter,
				 struct bkey *search);
struct bkey *__bch_bset_search(struct btree_keys *b, struct bset_tree *t,
			       const struct bkey *search);
/*
 * Returns the first key that is strictly greater than search
 */
static inline struct bkey *bch_bset_search(struct btree_keys *b,
					   struct bset_tree *t,
					   const struct bkey *search)
{
	return search ? __bch_bset_search(b, t, search) : t->data->start;
}
#define for_each_key_filter(b, k, iter, filter)				\
	for (bch_btree_iter_init((b), (iter), NULL);			\
	     ((k) = bch_btree_iter_next_filter((iter), (b), filter));)

#define for_each_key(b, k, iter)					\
	for (bch_btree_iter_init((b), (iter), NULL);			\
	     ((k) = bch_btree_iter_next(iter));)
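/*
 * Typical usage, as a sketch: the filter skips keys for which it returns
 * true, so iterating with bch_ptr_bad yields only keys that point to real
 * data (process_key() is a hypothetical callback):
 *
 *	struct btree_iter iter;
 *	struct bkey *k;
 *
 *	for_each_key_filter(b, k, &iter, bch_ptr_bad)
 *		process_key(k);
 */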
/* Sorting */

struct bset_sort_state {
	mempool_t		pool;

	unsigned int		page_order;
	unsigned int		crit_factor;

	struct time_stats	time;
};
void bch_bset_sort_state_free(struct bset_sort_state *state);
int bch_bset_sort_state_init(struct bset_sort_state *state,
			     unsigned int page_order);
void bch_btree_sort_lazy(struct btree_keys *b, struct bset_sort_state *state);
void bch_btree_sort_into(struct btree_keys *b, struct btree_keys *new,
			 struct bset_sort_state *state);
void bch_btree_sort_and_fix_extents(struct btree_keys *b,
				    struct btree_iter *iter,
				    struct bset_sort_state *state);
void bch_btree_sort_partial(struct btree_keys *b, unsigned int start,
			    struct bset_sort_state *state);

static inline void bch_btree_sort(struct btree_keys *b,
				  struct bset_sort_state *state)
{
	/* Sorting from set 0 resorts the entire node */
	bch_btree_sort_partial(b, 0, state);
}
struct bset_stats {
	size_t sets_written, sets_unwritten;
	size_t bytes_written, bytes_unwritten;
	size_t floats, failed;
};

void bch_btree_keys_stats(struct btree_keys *b, struct bset_stats *state);
/* Bkey utility code */

#define bset_bkey_last(i)	bkey_idx((struct bkey *) (i)->d,	\
					 (unsigned int)(i)->keys)

static inline struct bkey *bset_bkey_idx(struct bset *i, unsigned int idx)
{
	return bkey_idx(i->start, idx);
}
static inline void bkey_init(struct bkey *k)
{
	*k = ZERO_KEY;
}
static __always_inline int64_t bkey_cmp(const struct bkey *l,
					const struct bkey *r)
{
	return unlikely(KEY_INODE(l) != KEY_INODE(r))
		? (int64_t) KEY_INODE(l) - (int64_t) KEY_INODE(r)
		: (int64_t) KEY_OFFSET(l) - (int64_t) KEY_OFFSET(r);
}
void bch_bkey_copy_single_ptr(struct bkey *dest, const struct bkey *src,
			      unsigned int i);
bool __bch_cut_front(const struct bkey *where, struct bkey *k);
bool __bch_cut_back(const struct bkey *where, struct bkey *k);
static inline bool bch_cut_front(const struct bkey *where, struct bkey *k)
{
	BUG_ON(bkey_cmp(where, k) > 0);
	return __bch_cut_front(where, k);
}
static inline bool bch_cut_back(const struct bkey *where, struct bkey *k)
{
	BUG_ON(bkey_cmp(where, &START_KEY(k)) < 0);
	return __bch_cut_back(where, k);
}
/*
 * Pointer '*preceding_key_p' points to a memory object used to store the
 * preceding key of k. If the preceding key does not exist, '*preceding_key_p'
 * is set to NULL, so the caller of preceding_key() needs to take care of the
 * memory that '*preceding_key_p' points to before calling preceding_key().
 * Currently the only caller of preceding_key() is bch_btree_insert_key(), and
 * it points to an on-stack variable, so the memory release is handled by the
 * stack frame itself.
 */
static inline void preceding_key(struct bkey *k, struct bkey **preceding_key_p)
{
	if (KEY_INODE(k) || KEY_OFFSET(k)) {
		(**preceding_key_p) = KEY(KEY_INODE(k), KEY_OFFSET(k), 0);
		if (!(*preceding_key_p)->low)
			(*preceding_key_p)->high--;
		(*preceding_key_p)->low--;
	} else {
		(*preceding_key_p) = NULL;
	}
}
static inline bool bch_ptr_invalid(struct btree_keys *b, const struct bkey *k)
{
	return b->ops->key_invalid(b, k);
}

static inline bool bch_ptr_bad(struct btree_keys *b, const struct bkey *k)
{
	return b->ops->key_bad(b, k);
}
static inline void bch_bkey_to_text(struct btree_keys *b, char *buf,
				    size_t size, const struct bkey *k)
{
	return b->ops->key_to_text(buf, size, k);
}
static inline bool bch_bkey_equal_header(const struct bkey *l,
					 const struct bkey *r)
{
	return (KEY_DIRTY(l) == KEY_DIRTY(r) &&
		KEY_PTRS(l) == KEY_PTRS(r) &&
		KEY_CSUM(l) == KEY_CSUM(r));
}
/* Keylists */

struct keylist {
	union {
		struct bkey		*keys;
		uint64_t		*keys_p;
	};
	union {
		struct bkey		*top;
		uint64_t		*top_p;
	};

	/* Enough room for btree_split's keys without realloc */
#define KEYLIST_INLINE		16
	uint64_t		inline_keys[KEYLIST_INLINE];
};
static inline void bch_keylist_init(struct keylist *l)
{
	l->top_p = l->keys_p = l->inline_keys;
}
static inline void bch_keylist_init_single(struct keylist *l, struct bkey *k)
{
	l->keys = k;
	l->top = bkey_next(k);
}
static inline void bch_keylist_push(struct keylist *l)
{
	l->top = bkey_next(l->top);
}
static inline void bch_keylist_add(struct keylist *l, struct bkey *k)
{
	bkey_copy(l->top, k);
	bch_keylist_push(l);
}
static inline bool bch_keylist_empty(struct keylist *l)
{
	return l->top == l->keys;
}
static inline void bch_keylist_reset(struct keylist *l)
{
	l->top = l->keys;
}
static inline void bch_keylist_free(struct keylist *l)
{
	if (l->keys_p != l->inline_keys)
		kfree(l->keys_p);
}
static inline size_t bch_keylist_nkeys(struct keylist *l)
{
	return l->top_p - l->keys_p;
}
static inline size_t bch_keylist_bytes(struct keylist *l)
{
	return bch_keylist_nkeys(l) * sizeof(uint64_t);
}
struct bkey *bch_keylist_pop(struct keylist *l);
void bch_keylist_pop_front(struct keylist *l);
int __bch_keylist_realloc(struct keylist *l, unsigned int u64s);
/* Debug */

#ifdef CONFIG_BCACHE_DEBUG

int __bch_count_data(struct btree_keys *b);
void __printf(2, 3) __bch_check_keys(struct btree_keys *b,
				     const char *fmt,
				     ...);
void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned int set);
void bch_dump_bucket(struct btree_keys *b);

#else

static inline int __bch_count_data(struct btree_keys *b) { return -1; }
static inline void __printf(2, 3)
	__bch_check_keys(struct btree_keys *b, const char *fmt, ...) {}
static inline void bch_dump_bucket(struct btree_keys *b) {}
void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned int set);

#endif
static inline bool btree_keys_expensive_checks(struct btree_keys *b)
{
#ifdef CONFIG_BCACHE_DEBUG
	return *b->expensive_debug_checks;
#else
	return false;
#endif
}

static inline int bch_count_data(struct btree_keys *b)
{
	return btree_keys_expensive_checks(b) ? __bch_count_data(b) : -1;
}
#define bch_check_keys(b, ...)						\
do {									\
	if (btree_keys_expensive_checks(b))				\
		__bch_check_keys(b, __VA_ARGS__);			\
} while (0)

#endif /* _BCACHE_BSET_H */