#ifndef _BCACHE_BTREE_H
#define _BCACHE_BTREE_H

/*
 * THE BTREE:
 *
 * At a high level, bcache's btree is a relatively standard b+ tree. All keys
 * and pointers are in the leaves; interior nodes only have pointers to the
 * child nodes.
 *
 * In the interior nodes, a struct bkey always points to a child btree node, and
 * the key is the highest key in the child node - except that the highest key in
 * an interior node is always MAX_KEY. The size field refers to the size on disk
 * of the child node - this would allow us to have variable sized btree nodes
 * (handy for keeping the depth of the btree 1 by expanding just the root).
 *
 * Btree nodes are themselves log structured, but this is hidden fairly
 * thoroughly. Btree nodes on disk will in practice have extents that overlap
 * (because they were written at different times), but in memory we never have
 * overlapping extents - when we read in a btree node from disk, the first thing
 * we do is resort all the sets of keys with a mergesort, and in the same pass
 * we check for overlapping extents and adjust them appropriately.
 *
 * struct btree_op is a central interface to the btree code. It's used for
 * specifying read vs. write locking, and the embedded closure is used for
 * waiting on IO or on reserve memory.
 *
 * BTREE CACHE:
 *
 * Btree nodes are cached in memory; traversing the btree might require reading
 * in btree nodes, which is handled mostly transparently.
 *
 * bch_btree_node_get() looks up a btree node in the cache and reads it in from
 * disk if necessary. This function is almost never called directly though - the
 * btree() macro is used to get a btree node, call some function on it, and
 * unlock the node after the function returns.
 *
 * The root is special cased - it's taken out of the cache's lru (thus pinning
 * it in memory), so we can find the root of the btree by just dereferencing a
 * pointer instead of looking it up in the cache. This makes locking a bit
 * tricky, since the root pointer is protected by the lock in the btree node it
 * points to - the btree_root() macro handles this.
 *
 * In various places we must be able to allocate memory for multiple btree nodes
 * in order to make forward progress. To do this we use the btree cache itself
 * as a reserve; if __get_free_pages() fails, we'll find a node in the btree
 * cache we can reuse. We can't allow more than one thread to be doing this at a
 * time, so there's a lock, implemented by a pointer to the btree_op closure -
 * this allows the btree_root() macro to implicitly release this lock.
 *
 * BTREE IO:
 *
 * Btree nodes never have to be explicitly read in; bch_btree_node_get() handles
 * this.
 *
 * For writing, we have two btree_write structs embedded in struct btree - one
 * write in flight, and one being set up, and we toggle between them.
 *
 * Writing is done with a single function - bch_btree_write() really serves two
 * different purposes and should be broken up into two different functions. When
 * passing now = false, it merely indicates that the node is now dirty - calling
 * it ensures that the dirty keys will be written at some point in the future.
 *
 * When passing now = true, bch_btree_write() causes a write to happen
 * "immediately" (if there was already a write in flight, it'll cause the write
 * to happen as soon as the previous write completes). It still returns
 * immediately, but it takes a refcount on the closure in struct btree_op you
 * passed to it, so a closure_sync() later can be used to wait for the write to
 * complete.
 *
 * This is handy because btree_split() and garbage collection can issue writes
 * in parallel, reducing the amount of time they have to hold write locks.
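 *
 * For example, a minimal sketch (not code from this file) of a caller that
 * needs a node's keys on disk before it proceeds, using the prototypes
 * declared later in this header:
 *
 *	struct closure cl;
 *
 *	closure_init_stack(&cl);
 *	bch_btree_node_write(b, &cl);	(start the write)
 *	closure_sync(&cl);		(wait for it to complete)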
 *
 * LOCKING:
 *
 * When traversing the btree, we may need write locks starting at some level -
 * inserting a key into the btree will typically only require a write lock on
 * the leaf node.
 *
 * This is specified with the lock field in struct btree_op; lock = 0 means we
 * take write locks at level <= 0, i.e. only leaf nodes. bch_btree_node_get()
 * checks this field and returns the node with the appropriate lock held.
 *
 * If, after traversing the btree, the insertion code discovers it has to split,
 * then it must restart from the root and take new locks - to do this it changes
 * the lock field and returns -EINTR, which causes the btree_root() macro to
 * loop and retry the traversal with the new locks.
 *
 * Handling cache misses requires a different mechanism for upgrading to a write
 * lock. We do cache lookups with only a read lock held, but if we get a cache
 * miss and we wish to insert this data into the cache, we have to insert a
 * placeholder key to detect races - otherwise, we could race with a write and
 * overwrite the data that was just written to the cache with stale data from
 * the backing device.
 *
 * For this we use a sequence number that write locks and unlocks increment - to
 * insert the check key it unlocks the btree node and then takes a write lock,
 * and fails if the sequence number doesn't match.
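 *
 * As a rough sketch (not code from this file): sample b->seq under the read
 * lock, drop the read lock, retake the node with a write lock (which itself
 * bumps the sequence number once), and give up if any other writer got in
 * between:
 *
 *	unsigned long seq = b->seq;
 *
 *	rw_unlock(false, b);
 *	rw_lock(true, b, b->level);
 *	if (b->seq != seq + 1)
 *		return false;	(lost the race - don't insert the check key)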
 */

#include "bset.h"
#include "debug.h"

struct btree_write {
	atomic_t		*journal;

	/* If btree_split() frees a btree node, it writes a new pointer to that
	 * btree node indicating it was freed; it takes a refcount on
	 * c->prio_blocked because we can't write the gens until the new
	 * pointer is on disk. This allows btree_write_endio() to release the
	 * refcount that btree_split() took.
	 */
	int			prio_blocked;
};

struct btree {
	/* Hottest entries first */
	struct hlist_node	hash;

	/* Key/pointer for this btree node */
	BKEY_PADDED(key);

	/* Single bit - set when accessed, cleared by shrinker */
	unsigned long		accessed;
	unsigned long		seq;
	struct rw_semaphore	lock;
	struct cache_set	*c;

	unsigned long		flags;
	uint16_t		written;	/* would be nice to kill */
	uint8_t			level;
	uint8_t			nsets;
	uint8_t			page_order;

	/*
	 * Set of sorted keys - the real btree node - plus a binary search tree
	 *
	 * sets[0] is special; set[0]->tree, set[0]->prev and set[0]->data point
	 * to the memory we have allocated for this btree node. Additionally,
	 * set[0]->data points to the entire btree node as it exists on disk.
	 */
	struct bset_tree	sets[MAX_BSETS];

	/* For outstanding btree writes, used as a lock - protects write_idx */
	struct closure_with_waitlist	io;

	struct list_head	list;
	struct delayed_work	work;

	struct btree_write	writes[2];
	struct bio		*bio;
};

#define BTREE_FLAG(flag)						\
static inline bool btree_node_ ## flag(struct btree *b)		\
{	return test_bit(BTREE_NODE_ ## flag, &b->flags); }		\
									\
static inline void set_btree_node_ ## flag(struct btree *b)		\
{	set_bit(BTREE_NODE_ ## flag, &b->flags); }			\

enum btree_flags {
	BTREE_NODE_io_error,
	BTREE_NODE_dirty,
	BTREE_NODE_write_idx,
};

BTREE_FLAG(io_error);
BTREE_FLAG(dirty);
BTREE_FLAG(write_idx);
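
/*
 * Example (an illustrative sketch, not from this file): BTREE_FLAG(io_error)
 * above generates the pair btree_node_io_error()/set_btree_node_io_error(),
 * used along the lines of:
 *
 *	if (btree_node_io_error(b))
 *		return -EIO;
 */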

static inline struct btree_write *btree_current_write(struct btree *b)
{
	return b->writes + btree_node_write_idx(b);
}

static inline struct btree_write *btree_prev_write(struct btree *b)
{
	return b->writes + (btree_node_write_idx(b) ^ 1);
}

static inline unsigned bset_offset(struct btree *b, struct bset *i)
{
	return (((size_t) i) - ((size_t) b->sets->data)) >> 9;
}

static inline struct bset *write_block(struct btree *b)
{
	return ((void *) b->sets[0].data) + b->written * block_bytes(b->c);
}

static inline bool bset_written(struct btree *b, struct bset_tree *t)
{
	return t->data < write_block(b);
}

static inline bool bkey_written(struct btree *b, struct bkey *k)
{
	return k < write_block(b)->start;
}

static inline void set_gc_sectors(struct cache_set *c)
{
	atomic_set(&c->sectors_to_gc, c->sb.bucket_size * c->nbuckets / 8);
}

static inline bool bch_ptr_invalid(struct btree *b, const struct bkey *k)
{
	return __bch_ptr_invalid(b->c, b->level, k);
}

static inline struct bkey *bch_btree_iter_init(struct btree *b,
					       struct btree_iter *iter,
					       struct bkey *search)
{
	return __bch_btree_iter_init(b, iter, search, b->sets);
}

/* Looping macros */

#define for_each_cached_btree(b, c, iter)				\
	for (iter = 0;							\
	     iter < ARRAY_SIZE((c)->bucket_hash);			\
	     iter++)							\
		hlist_for_each_entry_rcu((b), (c)->bucket_hash + iter, hash)

#define for_each_key_filter(b, k, iter, filter)				\
	for (bch_btree_iter_init((b), (iter), NULL);			\
	     ((k) = bch_btree_iter_next_filter((iter), b, filter));)

#define for_each_key(b, k, iter)					\
	for (bch_btree_iter_init((b), (iter), NULL);			\
	     ((k) = bch_btree_iter_next(iter));)
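
/*
 * Example (an illustrative sketch, not from this file): walking every key in
 * a node with for_each_key(); process_key() is a hypothetical helper.
 *
 *	struct btree_iter iter;
 *	struct bkey *k;
 *
 *	for_each_key(b, k, &iter)
 *		if (!bch_ptr_invalid(b, k))
 *			process_key(b, k);
 */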

/* Recursing down the btree */

struct btree_op {
	struct closure		cl;
	struct cache_set	*c;

	/* Journal entry we have a refcount on */
	atomic_t		*journal;

	/* Bio to be inserted into the cache */
	struct bio		*cache_bio;

	unsigned		inode;

	uint16_t		write_prio;

	/* Btree level at which we start taking write locks */
	short			lock;

	/* Btree insertion type */
	enum {
		BTREE_INSERT,
		BTREE_REPLACE,
	} type:8;

	unsigned		csum:1;
	unsigned		skip:1;
	unsigned		flush_journal:1;

	unsigned		insert_data_done:1;
	unsigned		lookup_done:1;
	unsigned		insert_collision:1;

	/* Anything after this point won't get zeroed in do_bio_hook() */

	/* Keys to be inserted */
	struct keylist		keys;
	BKEY_PADDED(replace);
};

enum {
	BTREE_INSERT_STATUS_INSERT,
	BTREE_INSERT_STATUS_BACK_MERGE,
	BTREE_INSERT_STATUS_OVERWROTE,
	BTREE_INSERT_STATUS_FRONT_MERGE,
};

void bch_btree_op_init_stack(struct btree_op *);
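
/*
 * Example (an illustrative sketch, not from this file): a btree_op normally
 * lives on the caller's stack; op.lock selects the level at which write locks
 * are taken (0 = leaf nodes only, see the LOCKING comment above).
 *
 *	struct btree_op op;
 *
 *	bch_btree_op_init_stack(&op);
 *	op.lock = 0;
 */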

static inline void rw_lock(bool w, struct btree *b, int level)
{
	w ? down_write_nested(&b->lock, level + 1)
	  : down_read_nested(&b->lock, level + 1);
	if (w)
		b->seq++;
}

static inline void rw_unlock(bool w, struct btree *b)
{
#ifdef CONFIG_BCACHE_EDEBUG
	unsigned i;

	if (w && b->key.ptr[0])
		for (i = 0; i <= b->nsets; i++)
			bch_check_key_order(b, b->sets[i].data);
#endif

	if (w)
		b->seq++;
	(w ? up_write : up_read)(&b->lock);
}

#define insert_lock(s, b)	((b)->level <= (s)->lock)
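
/*
 * Example (an illustrative sketch, not from this file): rw_lock()/rw_unlock()
 * are normally invoked for you by the btree()/btree_root() macros below; done
 * by hand, the pairing looks like:
 *
 *	bool w = insert_lock(op, b);
 *
 *	rw_lock(w, b, b->level);
 *	(look at or modify the node)
 *	rw_unlock(w, b);
 */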

/*
 * These macros are for recursing down the btree - they handle the details of
 * locking and looking up nodes in the cache for you. They're best treated as
 * mere syntax when reading code that uses them.
 *
 * op->lock determines whether we take a read or a write lock at a given depth.
 * If you've got a read lock and find that you need a write lock (i.e. you're
 * going to have to split), set op->lock and return -EINTR; btree_root() will
 * call you again and you'll have the correct lock.
 */

/**
 * btree - recurse down the btree on a specified key
 * @fn:		function to call, which will be passed the child node
 * @key:	key to recurse on
 * @b:		parent btree node
 * @op:		pointer to struct btree_op
 */
#define btree(fn, key, b, op, ...)					\
({									\
	int _r, l = (b)->level - 1;					\
	bool _w = l <= (op)->lock;					\
	struct btree *_b = bch_btree_node_get((b)->c, key, l, op);	\
	if (!IS_ERR(_b)) {						\
		_r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__);		\
		rw_unlock(_w, _b);					\
	} else								\
		_r = PTR_ERR(_b);					\
	_r;								\
})

/**
 * btree_root - call a function on the root of the btree
 * @fn:		function to call, which will be passed the child node
 * @c:		cache set
 * @op:		pointer to struct btree_op
 */
#define btree_root(fn, c, op, ...)					\
({									\
	int _r = -EINTR;						\
	do {								\
		struct btree *_b = (c)->root;				\
		bool _w = insert_lock(op, _b);				\
		rw_lock(_w, _b, _b->level);				\
		if (_b == (c)->root &&					\
		    _w == insert_lock(op, _b))				\
			_r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__);	\
		rw_unlock(_w, _b);					\
		bch_cannibalize_unlock(c, &(op)->cl);			\
	} while (_r == -EINTR);						\
									\
	_r;								\
})
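
/*
 * Example (an illustrative sketch, not from this file): a traversal is written
 * as a bch_btree_ ## fn style function that recurses via btree(), and callers
 * start it at the root with btree_root(). bch_btree_walk_recurse() and the
 * child key k here are hypothetical.
 *
 *	static int bch_btree_walk_recurse(struct btree *b, struct btree_op *op)
 *	{
 *		...
 *		return b->level ? btree(walk_recurse, k, b, op) : 0;
 *	}
 *
 *	ret = btree_root(walk_recurse, c, op);
 */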

static inline bool should_split(struct btree *b)
{
	struct bset *i = write_block(b);
	return b->written >= btree_blocks(b) ||
	       (i->seq == b->sets[0].data->seq &&
		b->written + __set_blocks(i, i->keys + 15, b->c)
		> btree_blocks(b));
}

void bch_btree_node_read(struct btree *);
void bch_btree_node_write(struct btree *, struct closure *);

void bch_cannibalize_unlock(struct cache_set *, struct closure *);
void bch_btree_set_root(struct btree *);
struct btree *bch_btree_node_alloc(struct cache_set *, int, struct closure *);
struct btree *bch_btree_node_get(struct cache_set *, struct bkey *,
				 int, struct btree_op *);

bool bch_btree_insert_check_key(struct btree *, struct btree_op *,
				struct bio *);
int bch_btree_insert(struct btree_op *, struct cache_set *);

int bch_btree_search_recurse(struct btree *, struct btree_op *);

void bch_queue_gc(struct cache_set *);
size_t bch_btree_gc_finish(struct cache_set *);
void bch_moving_gc(struct closure *);
int bch_btree_check(struct cache_set *, struct btree_op *);
uint8_t __bch_btree_mark_key(struct cache_set *, int, struct bkey *);

void bch_keybuf_init(struct keybuf *);
void bch_refill_keybuf(struct cache_set *, struct keybuf *, struct bkey *,
		       keybuf_pred_fn *);
bool bch_keybuf_check_overlapping(struct keybuf *, struct bkey *,
				  struct bkey *);
void bch_keybuf_del(struct keybuf *, struct keybuf_key *);
struct keybuf_key *bch_keybuf_next(struct keybuf *);
struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *, struct keybuf *,
					  struct bkey *, keybuf_pred_fn *);

#endif