// SPDX-License-Identifier: GPL-2.0
/*
 * Code for working with individual keys, and sorted sets of keys within a
 * btree node
 *
 * Copyright 2012 Google, Inc.
 */

#include "btree_cache.h"
#include "eytzinger.h"

#include <linux/unaligned.h>
#include <linux/console.h>
#include <linux/random.h>
#include <linux/prefetch.h>
static inline void __bch2_btree_node_iter_advance(struct btree_node_iter *,
						  struct btree *);

static inline unsigned __btree_node_iter_used(struct btree_node_iter *iter)
{
	unsigned n = ARRAY_SIZE(iter->data);

	while (n && __btree_node_iter_set_end(iter, n - 1))
		--n;

	return n;
}
struct bset_tree *bch2_bkey_to_bset(struct btree *b, struct bkey_packed *k)
{
	return bch2_bkey_to_bset_inlined(b, k);
}
/*
 * There are never duplicate live keys in the btree - but including keys that
 * have been flagged as deleted (and will be cleaned up later) we _will_ see
 * duplicates.
 *
 * Thus the sort order is: usual key comparison first, but for keys that compare
 * equal the deleted key(s) come first, and the (at most one) live version comes
 * last.
 *
 * The main reason for this is insertion: to handle overwrites, we first iterate
 * over keys that compare equal to our insert key, and then insert immediately
 * prior to the first key greater than the key we're inserting - our insert
 * position will be after all keys that compare equal to our insert key, which
 * by the time we actually do the insert will all be deleted.
 */
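/*
 * A minimal sketch of the insert-position rule above, assuming a plain array
 * of unpacked keys; this helper is hypothetical and only illustrates where an
 * insert lands relative to a run of keys that compare equal (deleted versions
 * first, live version last):
 */
#if 0
static struct bkey *sketch_insert_pos(struct bkey *k, struct bkey *end,
				      struct bpos insert_pos)
{
	/*
	 * Skip every key whose position is <= the insert position: with
	 * deleted duplicates sorting before the live key, this lands us just
	 * before the first strictly greater key, i.e. after the whole run of
	 * keys that compare equal to the one being inserted.
	 */
	while (k != end && bpos_le(k->p, insert_pos))
		k++;
	return k;
}
#endif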
void bch2_dump_bset(struct bch_fs *c, struct btree *b,
		    struct bset *i, unsigned set)
{
	struct bkey_packed *_k, *_n;
	struct bkey uk, n;
	struct bkey_s_c k;
	struct printbuf buf = PRINTBUF;

	for (_k = i->start;
	     _k < vstruct_last(i);
	     _k = _n) {
		_n = bkey_p_next(_k);

		if (!_k->u64s) {
			printk(KERN_ERR "block %u key %5zu - u64s 0? aieee!\n", set,
			       _k->_data - i->_data);
			break;
		}

		k = bkey_disassemble(b, _k, &uk);

		printbuf_reset(&buf);
		if (c)
			bch2_bkey_val_to_text(&buf, c, k);
		else
			bch2_bkey_to_text(&buf, k.k);
		printk(KERN_ERR "block %u key %5zu: %s\n", set,
		       _k->_data - i->_data, buf.buf);

		if (_n == vstruct_last(i))
			continue;

		n = bkey_unpack_key(b, _n);

		if (bpos_lt(n.p, k.k->p)) {
			printk(KERN_ERR "Key skipped backwards\n");
			continue;
		}

		if (!bkey_deleted(k.k) && bpos_eq(n.p, k.k->p))
			printk(KERN_ERR "Duplicate keys\n");
	}

	printbuf_exit(&buf);
}
void bch2_dump_btree_node(struct bch_fs *c, struct btree *b)
{
	console_lock();
	for_each_bset(b, t)
		bch2_dump_bset(c, b, bset(b, t), t - b->set);
	console_unlock();
}
void bch2_dump_btree_node_iter(struct btree *b,
			       struct btree_node_iter *iter)
{
	struct btree_node_iter_set *set;
	struct printbuf buf = PRINTBUF;

	printk(KERN_ERR "btree node iter with %u/%u sets:\n",
	       __btree_node_iter_used(iter), b->nsets);

	btree_node_iter_for_each(iter, set) {
		struct bkey_packed *k = __btree_node_offset_to_key(b, set->k);
		struct bset_tree *t = bch2_bkey_to_bset(b, k);
		struct bkey uk = bkey_unpack_key(b, k);

		printbuf_reset(&buf);
		bch2_bkey_to_text(&buf, &uk);
		printk(KERN_ERR "set %zu key %u: %s\n",
		       t - b->set, set->k, buf.buf);
	}

	printbuf_exit(&buf);
}
struct btree_nr_keys bch2_btree_node_count_keys(struct btree *b)
{
	struct bkey_packed *k;
	struct btree_nr_keys nr = {};

	for_each_bset(b, t)
		bset_tree_for_each_key(b, t, k)
			if (!bkey_deleted(k))
				btree_keys_account_key_add(&nr, t - b->set, k);
	return nr;
}
#ifdef CONFIG_BCACHEFS_DEBUG

void __bch2_verify_btree_nr_keys(struct btree *b)
{
	struct btree_nr_keys nr = bch2_btree_node_count_keys(b);

	BUG_ON(memcmp(&nr, &b->nr, sizeof(nr)));
}
static void bch2_btree_node_iter_next_check(struct btree_node_iter *_iter,
					    struct btree *b)
{
	struct btree_node_iter iter = *_iter;
	const struct bkey_packed *k, *n;

	k = bch2_btree_node_iter_peek_all(&iter, b);
	__bch2_btree_node_iter_advance(&iter, b);
	n = bch2_btree_node_iter_peek_all(&iter, b);

	bkey_unpack_key(b, k);

	if (n &&
	    bkey_iter_cmp(b, k, n) > 0) {
		struct btree_node_iter_set *set;
		struct bkey ku = bkey_unpack_key(b, k);
		struct bkey nu = bkey_unpack_key(b, n);
		struct printbuf buf1 = PRINTBUF;
		struct printbuf buf2 = PRINTBUF;

		bch2_dump_btree_node(NULL, b);
		bch2_bkey_to_text(&buf1, &ku);
		bch2_bkey_to_text(&buf2, &nu);
		printk(KERN_ERR "out of order/overlapping:\n%s\n%s\n",
		       buf1.buf, buf2.buf);
		printk(KERN_ERR "iter was:");

		btree_node_iter_for_each(_iter, set) {
			struct bkey_packed *k2 = __btree_node_offset_to_key(b, set->k);
			struct bset_tree *t = bch2_bkey_to_bset(b, k2);

			printk(" [%zi %zi]", t - b->set,
			       k2->_data - bset(b, t)->_data);
		}
		panic("\n");
	}
}
void bch2_btree_node_iter_verify(struct btree_node_iter *iter,
				 struct btree *b)
{
	struct btree_node_iter_set *set, *s2;
	struct bkey_packed *k, *p;

	if (bch2_btree_node_iter_end(iter))
		return;

	/* Verify no duplicates: */
	btree_node_iter_for_each(iter, set) {
		BUG_ON(set->k > set->end);
		btree_node_iter_for_each(iter, s2)
			BUG_ON(set != s2 && set->end == s2->end);
	}

	/* Verify that set->end is correct: */
	btree_node_iter_for_each(iter, set) {
		for_each_bset(b, t)
			if (set->end == t->end_offset) {
				BUG_ON(set->k < btree_bkey_first_offset(t) ||
				       set->k >= t->end_offset);
			}
	}

	/* Verify iterator is sorted: */
	btree_node_iter_for_each(iter, set)
		BUG_ON(set != iter->data &&
		       btree_node_iter_cmp(b, set[-1], set[0]) > 0);

	k = bch2_btree_node_iter_peek_all(iter, b);

	for_each_bset(b, t) {
		if (iter->data[0].end == t->end_offset)
			continue;

		p = bch2_bkey_prev_all(b, t,
				       bch2_btree_node_iter_bset_pos(iter, b, t));

		BUG_ON(p && bkey_iter_cmp(b, k, p) < 0);
	}
}
void bch2_verify_insert_pos(struct btree *b, struct bkey_packed *where,
			    struct bkey_packed *insert, unsigned clobber_u64s)
{
	struct bset_tree *t = bch2_bkey_to_bset(b, where);
	struct bkey_packed *prev = bch2_bkey_prev_all(b, t, where);
	struct bkey_packed *next = (void *) ((u64 *) where->_data + clobber_u64s);
	struct printbuf buf1 = PRINTBUF;
	struct printbuf buf2 = PRINTBUF;
#if 0
	BUG_ON(prev &&
	       bkey_iter_cmp(b, prev, insert) > 0);
#else
	if (prev &&
	    bkey_iter_cmp(b, prev, insert) > 0) {
		struct bkey k1 = bkey_unpack_key(b, prev);
		struct bkey k2 = bkey_unpack_key(b, insert);

		bch2_dump_btree_node(NULL, b);
		bch2_bkey_to_text(&buf1, &k1);
		bch2_bkey_to_text(&buf2, &k2);

		panic("prev > insert:\n"
		      "prev    key %s\n"
		      "insert  key %s\n",
		      buf1.buf, buf2.buf);
	}
#endif
#if 0
	BUG_ON(next != btree_bkey_last(b, t) &&
	       bkey_iter_cmp(b, insert, next) > 0);
#else
	if (next != btree_bkey_last(b, t) &&
	    bkey_iter_cmp(b, insert, next) > 0) {
		struct bkey k1 = bkey_unpack_key(b, insert);
		struct bkey k2 = bkey_unpack_key(b, next);

		bch2_dump_btree_node(NULL, b);
		bch2_bkey_to_text(&buf1, &k1);
		bch2_bkey_to_text(&buf2, &k2);

		panic("insert > next:\n"
		      "insert  key %s\n"
		      "next    key %s\n",
		      buf1.buf, buf2.buf);
	}
#endif
}
#else

static inline void bch2_btree_node_iter_next_check(struct btree_node_iter *iter,
						   struct btree *b) {}

#endif
/* Auxiliary search trees */

#define BFLOAT_FAILED_UNPACKED	U8_MAX
#define BFLOAT_FAILED		U8_MAX

struct bkey_float {
	u8		exponent;
	u8		key_offset;
	u16		mantissa;
};
#define BKEY_MANTISSA_BITS	16

struct ro_aux_tree {
	u8			nothing[0];
	struct bkey_float	f[];
};

struct rw_aux_tree {
	u16		offset;
	struct bpos	k;
};
static unsigned bset_aux_tree_buf_end(const struct bset_tree *t)
{
	BUG_ON(t->aux_data_offset == U16_MAX);

	switch (bset_aux_tree_type(t)) {
	case BSET_NO_AUX_TREE:
		return t->aux_data_offset;
	case BSET_RO_AUX_TREE:
		return t->aux_data_offset +
			DIV_ROUND_UP(t->size * sizeof(struct bkey_float), 8);
	case BSET_RW_AUX_TREE:
		return t->aux_data_offset +
			DIV_ROUND_UP(sizeof(struct rw_aux_tree) * t->size, 8);
	default:
		BUG();
	}
}
static unsigned bset_aux_tree_buf_start(const struct btree *b,
					const struct bset_tree *t)
{
	return t == b->set
		? DIV_ROUND_UP(b->unpack_fn_len, 8)
		: bset_aux_tree_buf_end(t - 1);
}
static void *__aux_tree_base(const struct btree *b,
			     const struct bset_tree *t)
{
	return b->aux_data + t->aux_data_offset * 8;
}
static struct ro_aux_tree *ro_aux_tree_base(const struct btree *b,
					    const struct bset_tree *t)
{
	EBUG_ON(bset_aux_tree_type(t) != BSET_RO_AUX_TREE);

	return __aux_tree_base(b, t);
}
static struct bkey_float *bkey_float(const struct btree *b,
				     const struct bset_tree *t,
				     unsigned idx)
{
	return ro_aux_tree_base(b, t)->f + idx;
}
static void bset_aux_tree_verify(struct btree *b)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	for_each_bset(b, t) {
		if (t->aux_data_offset == U16_MAX)
			continue;

		BUG_ON(t != b->set &&
		       t[-1].aux_data_offset == U16_MAX);

		BUG_ON(t->aux_data_offset < bset_aux_tree_buf_start(b, t));
		BUG_ON(t->aux_data_offset > btree_aux_data_u64s(b));
		BUG_ON(bset_aux_tree_buf_end(t) > btree_aux_data_u64s(b));
	}
#endif
}
void bch2_btree_keys_init(struct btree *b)
{
	unsigned i;

	memset(&b->nr, 0, sizeof(b->nr));

	for (i = 0; i < MAX_BSETS; i++)
		b->set[i].data_offset = U16_MAX;

	bch2_bset_set_no_aux_tree(b, b->set);
}
/* Binary tree stuff for auxiliary search trees */

/*
 * Cacheline/offset <-> bkey pointer arithmetic:
 *
 * t->tree is a binary search tree in an array; each node corresponds to a key
 * in one cacheline in t->set (BSET_CACHELINE bytes).
 *
 * This means we don't have to store the full index of the key that a node in
 * the binary tree points to; eytzinger1_to_inorder() gives us the cacheline, and
 * then bkey_float->key_offset gives us the offset within that cacheline, in
 * units of 8 bytes.
 *
 * cacheline_to_bkey() and friends abstract out all the pointer arithmetic to
 * make that work.
 *
 * To construct the bfloat for an arbitrary key we need to know what the key
 * immediately preceding it is: we have to check if the two keys differ in the
 * bits we're going to store in bkey_float->mantissa. t->prev[j] stores the size
 * of the previous key so we can walk backwards to it from t->tree[j]'s key.
 */
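/*
 * Illustrative worked example (not from the original file): suppose
 * BSET_CACHELINE is 256 bytes. If eytzinger1_to_inorder() says a tree node
 * covers cacheline 3 of this bset and its bkey_float->key_offset is 5, the key
 * it points at lives at
 *
 *	bset_cacheline(b, t, 3) + 5 * 8
 *
 * i.e. 5 u64s past the start of the third BSET_CACHELINE-sized chunk, counting
 * from the (rounded down) address of the first key in the bset - which is
 * exactly what cacheline_to_bkey() below computes.
 */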
static inline void *bset_cacheline(const struct btree *b,
				   const struct bset_tree *t,
				   unsigned cacheline)
{
	return (void *) round_down((unsigned long) btree_bkey_first(b, t),
				   BSET_CACHELINE) +
		cacheline * BSET_CACHELINE;
}
static struct bkey_packed *cacheline_to_bkey(const struct btree *b,
					     const struct bset_tree *t,
					     unsigned cacheline,
					     unsigned offset)
{
	return bset_cacheline(b, t, cacheline) + offset * 8;
}
static unsigned bkey_to_cacheline(const struct btree *b,
				  const struct bset_tree *t,
				  const struct bkey_packed *k)
{
	return ((void *) k - bset_cacheline(b, t, 0)) / BSET_CACHELINE;
}
static ssize_t __bkey_to_cacheline_offset(const struct btree *b,
					  const struct bset_tree *t,
					  unsigned cacheline,
					  const struct bkey_packed *k)
{
	return (u64 *) k - (u64 *) bset_cacheline(b, t, cacheline);
}
static unsigned bkey_to_cacheline_offset(const struct btree *b,
					 const struct bset_tree *t,
					 unsigned cacheline,
					 const struct bkey_packed *k)
{
	size_t m = __bkey_to_cacheline_offset(b, t, cacheline, k);

	EBUG_ON(m > U8_MAX);
	return m;
}
static inline struct bkey_packed *tree_to_bkey(const struct btree *b,
					       const struct bset_tree *t,
					       unsigned j)
{
	return cacheline_to_bkey(b, t,
			__eytzinger1_to_inorder(j, t->size - 1, t->extra),
			bkey_float(b, t, j)->key_offset);
}
static struct rw_aux_tree *rw_aux_tree(const struct btree *b,
				       const struct bset_tree *t)
{
	EBUG_ON(bset_aux_tree_type(t) != BSET_RW_AUX_TREE);

	return __aux_tree_base(b, t);
}
/*
 * For the write set - the one we're currently inserting keys into - we don't
 * maintain a full search tree, we just keep a simple lookup table of offsets
 * and unpacked positions (the rw aux tree).
 */
static struct bkey_packed *rw_aux_to_bkey(const struct btree *b,
					  struct bset_tree *t,
					  unsigned j)
{
	return __btree_node_offset_to_key(b, rw_aux_tree(b, t)[j].offset);
}
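/*
 * Hypothetical illustration (numbers invented, not from the original source):
 * for a write set whose keys start at node offset 512, the table built by
 * __build_rw_aux_tree() might look like
 *
 *	rw_aux_tree(b, t)[0] = { .offset = 512, .k = POS(1, 0)   }
 *	rw_aux_tree(b, t)[1] = { .offset = 530, .k = POS(1, 128) }
 *	rw_aux_tree(b, t)[2] = { .offset = 549, .k = POS(2, 64)  }
 *
 * with consecutive entries at least a cacheline of keys apart, so a lookup can
 * binary search the positions and then only scan roughly one cacheline of keys
 * linearly.
 */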
static void rw_aux_tree_set(const struct btree *b, struct bset_tree *t,
			    unsigned j, struct bkey_packed *k)
{
	EBUG_ON(k >= btree_bkey_last(b, t));

	rw_aux_tree(b, t)[j] = (struct rw_aux_tree) {
		.offset	= __btree_node_key_to_offset(b, k),
		.k	= bkey_unpack_pos(b, k),
	};
}
static void bch2_bset_verify_rw_aux_tree(struct btree *b,
					 struct bset_tree *t)
{
	struct bkey_packed *k = btree_bkey_first(b, t);
	unsigned j = 0;

	if (!bch2_expensive_debug_checks)
		return;

	BUG_ON(bset_has_ro_aux_tree(t));

	if (!bset_has_rw_aux_tree(t))
		return;

	BUG_ON(t->size < 1);
	BUG_ON(rw_aux_to_bkey(b, t, j) != k);

	goto start;
	while (1) {
		if (rw_aux_to_bkey(b, t, j) == k) {
			BUG_ON(!bpos_eq(rw_aux_tree(b, t)[j].k,
					bkey_unpack_pos(b, k)));
start:
			if (++j == t->size)
				break;

			BUG_ON(rw_aux_tree(b, t)[j].offset <=
			       rw_aux_tree(b, t)[j - 1].offset);
		}

		k = bkey_p_next(k);
		BUG_ON(k >= btree_bkey_last(b, t));
	}
}
/* returns idx of first entry >= offset: */
static unsigned rw_aux_tree_bsearch(struct btree *b,
				    struct bset_tree *t,
				    unsigned offset)
{
	unsigned bset_offs = offset - btree_bkey_first_offset(t);
	unsigned bset_u64s = t->end_offset - btree_bkey_first_offset(t);
	unsigned idx = bset_u64s ? bset_offs * t->size / bset_u64s : 0;

	EBUG_ON(bset_aux_tree_type(t) != BSET_RW_AUX_TREE);
	EBUG_ON(!t->size);
	EBUG_ON(idx > t->size);

	while (idx < t->size &&
	       rw_aux_tree(b, t)[idx].offset < offset)
		idx++;

	while (idx &&
	       rw_aux_tree(b, t)[idx - 1].offset >= offset)
		idx--;

	EBUG_ON(idx < t->size &&
		rw_aux_tree(b, t)[idx].offset < offset);
	EBUG_ON(idx && rw_aux_tree(b, t)[idx - 1].offset >= offset);
	EBUG_ON(idx + 1 < t->size &&
		rw_aux_tree(b, t)[idx].offset ==
		rw_aux_tree(b, t)[idx + 1].offset);

	return idx;
}
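/*
 * Worked example of the interpolated starting guess above, for illustration
 * only: with btree_bkey_first_offset(t) == 512, t->end_offset == 1024 and
 * t->size == 8 table entries, searching for offset 768 gives
 *
 *	bset_offs = 768 - 512  = 256
 *	bset_u64s = 1024 - 512 = 512
 *	idx       = 256 * 8 / 512 = 4
 *
 * i.e. we guess the entry roughly proportional to how far into the bset the
 * offset lies, and the two while loops then nudge the guess to the exact first
 * entry >= offset.
 */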
static inline unsigned bkey_mantissa(const struct bkey_packed *k,
				     const struct bkey_float *f)
{
	u64 v;

	EBUG_ON(!bkey_packed(k));

	v = get_unaligned((u64 *) (((u8 *) k->_data) + (f->exponent >> 3)));

	/*
	 * In little endian, we're shifting off low bits (and then the bits we
	 * want are at the low end), in big endian we're shifting off high bits
	 * (and then the bits we want are at the high end, so we shift them
	 * back down):
	 */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	v >>= f->exponent & 7;
#else
	v >>= 64 - (f->exponent & 7) - BKEY_MANTISSA_BITS;
#endif

	return (u16) v;
}
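/*
 * Illustration of the shift above (not part of the original file): say
 * f->exponent == 21. We read the u64 starting at byte 21 >> 3 == 2 of the key,
 * so the bit we want sits 21 & 7 == 5 bits into that word. On little endian we
 * shift right by 5 and the 16 mantissa bits end up in the low bits of v; on
 * big endian the same bits sit at the high end of the word, so we shift right
 * by 64 - 5 - 16 == 43 to bring them down to the low 16 bits.
 */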
static __always_inline void make_bfloat(struct btree *b, struct bset_tree *t,
					unsigned j,
					struct bkey_packed *min_key,
					struct bkey_packed *max_key)
{
	struct bkey_float *f = bkey_float(b, t, j);
	struct bkey_packed *m = tree_to_bkey(b, t, j);
	struct bkey_packed *l = is_power_of_2(j)
		? min_key
		: tree_to_bkey(b, t, j >> ffs(j));
	struct bkey_packed *r = is_power_of_2(j + 1)
		? max_key
		: tree_to_bkey(b, t, j >> (ffz(j) + 1));
	unsigned mantissa;
	int shift, exponent, high_bit;

	/*
	 * for failed bfloats, the lookup code falls back to comparing against
	 * the original key.
	 */
	if (!bkey_packed(l) || !bkey_packed(r) || !bkey_packed(m) ||
	    !b->nr_key_bits) {
		f->exponent = BFLOAT_FAILED_UNPACKED;
		return;
	}

	/*
	 * The greatest differing bit of l and r is the first bit we must
	 * include in the bfloat mantissa we're creating in order to do
	 * comparisons - that bit always becomes the high bit of
	 * bfloat->mantissa, and thus the exponent we're calculating here is
	 * the position of what will become the low bit in bfloat->mantissa:
	 *
	 * Note that this may be negative - we may be running off the low end
	 * of the key: we handle this later:
	 */
	high_bit = max(bch2_bkey_greatest_differing_bit(b, l, r),
		       min_t(unsigned, BKEY_MANTISSA_BITS, b->nr_key_bits) - 1);
	exponent = high_bit - (BKEY_MANTISSA_BITS - 1);

	/*
	 * Then we calculate the actual shift value, from the start of the key
	 * (k->_data), to get the key bits starting at exponent:
	 */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	shift = (int) (b->format.key_u64s * 64 - b->nr_key_bits) + exponent;

	EBUG_ON(shift + BKEY_MANTISSA_BITS > b->format.key_u64s * 64);
#else
	shift = high_bit_offset +
		b->nr_key_bits -
		exponent -
		BKEY_MANTISSA_BITS;

	EBUG_ON(shift < KEY_PACKED_BITS_START);
#endif
	EBUG_ON(shift < 0 || shift >= BFLOAT_FAILED);

	f->exponent = shift;
	mantissa = bkey_mantissa(m, f);

	/*
	 * If we've got garbage bits, set them to all 1s - it's legal for the
	 * bfloat to compare larger than the original key, but not smaller:
	 */
	if (exponent < 0)
		mantissa |= ~(~0U << -exponent);

	f->mantissa = mantissa;
}
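/*
 * A worked example, for illustration only (numbers invented): suppose keys l
 * and r first differ at bit 40 and the key format has plenty of key bits, so
 *
 *	high_bit = 40
 *	exponent = 40 - (BKEY_MANTISSA_BITS - 1) = 40 - 15 = 25
 *
 * The mantissa stored for this node is then the 16 key bits [25..40], and the
 * lookup can compare 16-bit mantissas instead of full keys. If instead the
 * format only had 10 key bits, high_bit would be at most 9, giving
 * exponent = 9 - 15 = -6; the 6 low garbage bits of the mantissa then get
 * forced to 1s above, so the bfloat may compare greater than the real key but
 * never less.
 */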
/* bytes remaining - only valid for last bset: */
static unsigned __bset_tree_capacity(struct btree *b, const struct bset_tree *t)
{
	bset_aux_tree_verify(b);

	return btree_aux_data_bytes(b) - t->aux_data_offset * sizeof(u64);
}

static unsigned bset_ro_tree_capacity(struct btree *b, const struct bset_tree *t)
{
	return __bset_tree_capacity(b, t) / sizeof(struct bkey_float);
}

static unsigned bset_rw_tree_capacity(struct btree *b, const struct bset_tree *t)
{
	return __bset_tree_capacity(b, t) / sizeof(struct rw_aux_tree);
}
static noinline void __build_rw_aux_tree(struct btree *b, struct bset_tree *t)
{
	struct bkey_packed *k;

	t->size = 1;
	t->extra = BSET_RW_AUX_TREE_VAL;
	rw_aux_tree(b, t)[0].offset =
		__btree_node_key_to_offset(b, btree_bkey_first(b, t));

	bset_tree_for_each_key(b, t, k) {
		if (t->size == bset_rw_tree_capacity(b, t))
			break;

		if ((void *) k - (void *) rw_aux_to_bkey(b, t, t->size - 1) >
		    L1_CACHE_BYTES)
			rw_aux_tree_set(b, t, t->size++, k);
	}
}
static noinline void __build_ro_aux_tree(struct btree *b, struct bset_tree *t)
{
	struct bkey_packed *k = btree_bkey_first(b, t);
	struct bkey_i min_key, max_key;
	unsigned cacheline = 1;

	t->size = min(bkey_to_cacheline(b, t, btree_bkey_last(b, t)),
		      bset_ro_tree_capacity(b, t));
retry:
	if (t->size < 2) {
		t->size = 0;
		t->extra = BSET_NO_AUX_TREE_VAL;
		return;
	}

	t->extra = eytzinger1_extra(t->size - 1);

	/* First we figure out where the first key in each cacheline is */
	eytzinger1_for_each(j, t->size - 1) {
		while (bkey_to_cacheline(b, t, k) < cacheline)
			k = bkey_p_next(k);

		if (k >= btree_bkey_last(b, t)) {
			/* XXX: this path sucks */
			t->size -= j / 2;
			goto retry;
		}

		bkey_float(b, t, j)->key_offset =
			bkey_to_cacheline_offset(b, t, cacheline++, k);

		EBUG_ON(tree_to_bkey(b, t, j) != k);
	}

	if (!bkey_pack_pos(bkey_to_packed(&min_key), b->data->min_key, b)) {
		bkey_init(&min_key.k);
		min_key.k.p = b->data->min_key;
	}

	if (!bkey_pack_pos(bkey_to_packed(&max_key), b->data->max_key, b)) {
		bkey_init(&max_key.k);
		max_key.k.p = b->data->max_key;
	}

	/* Then we build the tree */
	eytzinger1_for_each(j, t->size - 1)
		make_bfloat(b, t, j,
			    bkey_to_packed(&min_key),
			    bkey_to_packed(&max_key));
}
static void bset_alloc_tree(struct btree *b, struct bset_tree *t)
{
	struct bset_tree *i;

	for (i = b->set; i != t; i++)
		BUG_ON(bset_has_rw_aux_tree(i));

	bch2_bset_set_no_aux_tree(b, t);

	/* round up to next cacheline: */
	t->aux_data_offset = round_up(bset_aux_tree_buf_start(b, t),
				      SMP_CACHE_BYTES / sizeof(u64));

	bset_aux_tree_verify(b);
}
void bch2_bset_build_aux_tree(struct btree *b, struct bset_tree *t,
			      bool writeable)
{
	if (writeable
	    ? bset_has_rw_aux_tree(t)
	    : bset_has_ro_aux_tree(t))
		return;

	bset_alloc_tree(b, t);

	if (!__bset_tree_capacity(b, t))
		return;

	if (writeable)
		__build_rw_aux_tree(b, t);
	else
		__build_ro_aux_tree(b, t);

	bset_aux_tree_verify(b);
}
void bch2_bset_init_first(struct btree *b, struct bset *i)
{
	struct bset_tree *t;

	BUG_ON(b->nsets);

	memset(i, 0, sizeof(*i));
	get_random_bytes(&i->seq, sizeof(i->seq));
	SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);

	t = &b->set[b->nsets++];
	set_btree_bset(b, t, i);
}
void bch2_bset_init_next(struct btree *b, struct btree_node_entry *bne)
{
	struct bset *i = &bne->keys;
	struct bset_tree *t;

	BUG_ON(bset_byte_offset(b, bne) >= btree_buf_bytes(b));
	BUG_ON((void *) bne < (void *) btree_bkey_last(b, bset_tree_last(b)));
	BUG_ON(b->nsets >= MAX_BSETS);

	memset(i, 0, sizeof(*i));
	i->seq = btree_bset_first(b)->seq;
	SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);

	t = &b->set[b->nsets++];
	set_btree_bset(b, t, i);
}
/*
 * find _some_ key in the same bset as @k that precedes @k - not necessarily the
 * immediate predecessor:
 */
static struct bkey_packed *__bkey_prev(struct btree *b, struct bset_tree *t,
				       struct bkey_packed *k)
{
	struct bkey_packed *p;
	unsigned offset;
	int j;

	EBUG_ON(k < btree_bkey_first(b, t) ||
		k > btree_bkey_last(b, t));

	if (k == btree_bkey_first(b, t))
		return NULL;

	switch (bset_aux_tree_type(t)) {
	case BSET_NO_AUX_TREE:
		p = btree_bkey_first(b, t);
		break;
	case BSET_RO_AUX_TREE:
		j = min_t(unsigned, t->size - 1, bkey_to_cacheline(b, t, k));

		do {
			p = j ? tree_to_bkey(b, t,
					__inorder_to_eytzinger1(j--,
							t->size - 1, t->extra))
			      : btree_bkey_first(b, t);
		} while (p >= k);
		break;
	case BSET_RW_AUX_TREE:
		offset = __btree_node_key_to_offset(b, k);
		j = rw_aux_tree_bsearch(b, t, offset);
		p = j ? rw_aux_to_bkey(b, t, j - 1)
		      : btree_bkey_first(b, t);
		break;
	}

	return p;
}
struct bkey_packed *bch2_bkey_prev_filter(struct btree *b,
					  struct bset_tree *t,
					  struct bkey_packed *k,
					  unsigned min_key_type)
{
	struct bkey_packed *p, *i, *ret = NULL, *orig_k = k;

	while ((p = __bkey_prev(b, t, k)) && !ret) {
		for (i = p; i != k; i = bkey_p_next(i))
			if (i->type >= min_key_type)
				ret = i;

		k = p;
	}

	if (bch2_expensive_debug_checks) {
		BUG_ON(ret >= orig_k);

		for (i = ret
			? bkey_p_next(ret)
			: btree_bkey_first(b, t);
		     i != orig_k;
		     i = bkey_p_next(i))
			BUG_ON(i->type >= min_key_type);
	}

	return ret;
}
static void rw_aux_tree_insert_entry(struct btree *b,
				     struct bset_tree *t,
				     unsigned idx)
{
	EBUG_ON(!idx || idx > t->size);
	struct bkey_packed *start = rw_aux_to_bkey(b, t, idx - 1);
	struct bkey_packed *end = idx < t->size
		? rw_aux_to_bkey(b, t, idx)
		: btree_bkey_last(b, t);

	if (t->size < bset_rw_tree_capacity(b, t) &&
	    (void *) end - (void *) start > L1_CACHE_BYTES) {
		struct bkey_packed *k = start;

		while (1) {
			k = bkey_p_next(k);
			if (k == end)
				break;

			if ((void *) k - (void *) start >= L1_CACHE_BYTES) {
				memmove(&rw_aux_tree(b, t)[idx + 1],
					&rw_aux_tree(b, t)[idx],
					(void *) &rw_aux_tree(b, t)[t->size] -
					(void *) &rw_aux_tree(b, t)[idx]);
				t->size++;
				rw_aux_tree_set(b, t, idx, k);
				break;
			}
		}
	}
}
static void bch2_bset_fix_lookup_table(struct btree *b,
				       struct bset_tree *t,
				       struct bkey_packed *_where,
				       unsigned clobber_u64s,
				       unsigned new_u64s)
{
	int shift = new_u64s - clobber_u64s;
	unsigned idx, j, where = __btree_node_key_to_offset(b, _where);

	EBUG_ON(bset_has_ro_aux_tree(t));

	if (!bset_has_rw_aux_tree(t))
		return;

	if (where > rw_aux_tree(b, t)[t->size - 1].offset) {
		rw_aux_tree_insert_entry(b, t, t->size);
		goto verify;
	}

	/* returns first entry >= where */
	idx = rw_aux_tree_bsearch(b, t, where);

	if (rw_aux_tree(b, t)[idx].offset == where) {
		if (!idx) { /* never delete first entry */
			idx++;
		} else if (where < t->end_offset) {
			rw_aux_tree_set(b, t, idx++, _where);
		} else {
			EBUG_ON(where != t->end_offset);
			rw_aux_tree_insert_entry(b, t, --t->size);
			goto verify;
		}
	}

	EBUG_ON(idx < t->size && rw_aux_tree(b, t)[idx].offset <= where);
	if (idx < t->size &&
	    rw_aux_tree(b, t)[idx].offset + shift ==
	    rw_aux_tree(b, t)[idx - 1].offset) {
		memmove(&rw_aux_tree(b, t)[idx],
			&rw_aux_tree(b, t)[idx + 1],
			(void *) &rw_aux_tree(b, t)[t->size] -
			(void *) &rw_aux_tree(b, t)[idx + 1]);
		t->size -= 1;
	}

	for (j = idx; j < t->size; j++)
		rw_aux_tree(b, t)[j].offset += shift;

	EBUG_ON(idx < t->size &&
		rw_aux_tree(b, t)[idx].offset ==
		rw_aux_tree(b, t)[idx - 1].offset);

	rw_aux_tree_insert_entry(b, t, idx);

verify:
	bch2_bset_verify_rw_aux_tree(b, t);
	bset_aux_tree_verify(b);
}
void bch2_bset_insert(struct btree *b,
		      struct bkey_packed *where,
		      struct bkey_i *insert,
		      unsigned clobber_u64s)
{
	struct bkey_format *f = &b->format;
	struct bset_tree *t = bset_tree_last(b);
	struct bkey_packed packed, *src = bkey_to_packed(insert);

	bch2_bset_verify_rw_aux_tree(b, t);
	bch2_verify_insert_pos(b, where, bkey_to_packed(insert), clobber_u64s);

	if (bch2_bkey_pack_key(&packed, &insert->k, f))
		src = &packed;

	if (!bkey_deleted(&insert->k))
		btree_keys_account_key_add(&b->nr, t - b->set, src);

	if (src->u64s != clobber_u64s) {
		u64 *src_p = (u64 *) where->_data + clobber_u64s;
		u64 *dst_p = (u64 *) where->_data + src->u64s;

		EBUG_ON((int) le16_to_cpu(bset(b, t)->u64s) <
			(int) clobber_u64s - src->u64s);

		memmove_u64s(dst_p, src_p, btree_bkey_last(b, t)->_data - src_p);
		le16_add_cpu(&bset(b, t)->u64s, src->u64s - clobber_u64s);
		set_btree_bset_end(b, t);
	}

	memcpy_u64s_small(where, src,
			  bkeyp_key_u64s(f, src));
	memcpy_u64s(bkeyp_val(f, where), &insert->v,
		    bkeyp_val_u64s(f, src));

	if (src->u64s != clobber_u64s)
		bch2_bset_fix_lookup_table(b, t, where, clobber_u64s, src->u64s);

	bch2_verify_btree_nr_keys(b);
}
void bch2_bset_delete(struct btree *b,
		      struct bkey_packed *where,
		      unsigned clobber_u64s)
{
	struct bset_tree *t = bset_tree_last(b);
	u64 *src_p = (u64 *) where->_data + clobber_u64s;
	u64 *dst_p = where->_data;

	bch2_bset_verify_rw_aux_tree(b, t);

	EBUG_ON(le16_to_cpu(bset(b, t)->u64s) < clobber_u64s);

	memmove_u64s_down(dst_p, src_p, btree_bkey_last(b, t)->_data - src_p);
	le16_add_cpu(&bset(b, t)->u64s, -clobber_u64s);
	set_btree_bset_end(b, t);

	bch2_bset_fix_lookup_table(b, t, where, clobber_u64s, 0);
}
static struct bkey_packed *bset_search_write_set(const struct btree *b,
						 struct bset_tree *t,
						 struct bpos *search)
{
	unsigned l = 0, r = t->size;

	while (l + 1 != r) {
		unsigned m = (l + r) >> 1;

		if (bpos_lt(rw_aux_tree(b, t)[m].k, *search))
			l = m;
		else
			r = m;
	}

	return rw_aux_to_bkey(b, t, l);
}
static inline void prefetch_four_cachelines(void *p)
{
#ifdef CONFIG_X86_64
	asm("prefetcht0 (-127 + 64 * 0)(%0);"
	    "prefetcht0 (-127 + 64 * 1)(%0);"
	    "prefetcht0 (-127 + 64 * 2)(%0);"
	    "prefetcht0 (-127 + 64 * 3)(%0);"
	    :
	    : "r" (p + 127));
#else
	prefetch(p + L1_CACHE_BYTES * 0);
	prefetch(p + L1_CACHE_BYTES * 1);
	prefetch(p + L1_CACHE_BYTES * 2);
	prefetch(p + L1_CACHE_BYTES * 3);
#endif
}
static inline bool bkey_mantissa_bits_dropped(const struct btree *b,
					      const struct bkey_float *f)
{
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	unsigned key_bits_start = b->format.key_u64s * 64 - b->nr_key_bits;

	return f->exponent > key_bits_start;
#else
	unsigned key_bits_end = high_bit_offset + b->nr_key_bits;

	return f->exponent + BKEY_MANTISSA_BITS < key_bits_end;
#endif
}
static struct bkey_packed *bset_search_tree(const struct btree *b,
					    const struct bset_tree *t,
					    const struct bpos *search,
					    const struct bkey_packed *packed_search)
{
	struct ro_aux_tree *base = ro_aux_tree_base(b, t);
	struct bkey_float *f;
	struct bkey_packed *k;
	unsigned inorder, n = 1, l, r;
	int cmp;

	do {
		if (likely(n << 4 < t->size))
			prefetch(&base->f[n << 4]);

		f = &base->f[n];
		if (unlikely(f->exponent >= BFLOAT_FAILED))
			goto slowpath;

		l = f->mantissa;
		r = bkey_mantissa(packed_search, f);

		if (unlikely(l == r) && bkey_mantissa_bits_dropped(b, f))
			goto slowpath;

		n = n * 2 + (l < r);
		continue;
slowpath:
		k = tree_to_bkey(b, t, n);
		cmp = bkey_cmp_p_or_unp(b, k, packed_search, search);
		if (!cmp)
			return k;

		n = n * 2 + (cmp < 0);
	} while (n < t->size);

	inorder = __eytzinger1_to_inorder(n >> 1, t->size - 1, t->extra);

	/*
	 * n would have been the node we recursed to - the low bit tells us if
	 * we recursed left or recursed right.
	 */
	if (likely(!(n & 1))) {
		--inorder;
		if (unlikely(!inorder))
			return btree_bkey_first(b, t);

		f = &base->f[eytzinger1_prev(n >> 1, t->size - 1)];
	}

	return cacheline_to_bkey(b, t, inorder, f->key_offset);
}
static __always_inline __flatten
struct bkey_packed *__bch2_bset_search(struct btree *b,
				       struct bset_tree *t,
				       struct bpos *search,
				       const struct bkey_packed *lossy_packed_search)
{
	/*
	 * First, we search for a cacheline, then lastly we do a linear search
	 * within that cacheline.
	 *
	 * To search for the cacheline, there are three different possibilities:
	 *  * The set is too small to have a search tree, so we just do a linear
	 *    search over the whole set.
	 *  * The set is the one we're currently inserting into; keeping a full
	 *    auxiliary search tree up to date would be too expensive, so we
	 *    use a much simpler lookup table to do a binary search -
	 *    bset_search_write_set().
	 *  * Or we use the auxiliary search tree we constructed earlier -
	 *    bset_search_tree()
	 */
	switch (bset_aux_tree_type(t)) {
	case BSET_NO_AUX_TREE:
		return btree_bkey_first(b, t);
	case BSET_RW_AUX_TREE:
		return bset_search_write_set(b, t, search);
	case BSET_RO_AUX_TREE:
		return bset_search_tree(b, t, search, lossy_packed_search);
	default:
		BUG();
	}
}
static __always_inline __flatten
struct bkey_packed *bch2_bset_search_linear(struct btree *b,
					    struct bset_tree *t,
					    struct bpos *search,
					    struct bkey_packed *packed_search,
					    const struct bkey_packed *lossy_packed_search,
					    struct bkey_packed *m)
{
	if (lossy_packed_search)
		while (m != btree_bkey_last(b, t) &&
		       bkey_iter_cmp_p_or_unp(b, m,
					      lossy_packed_search, search) < 0)
			m = bkey_p_next(m);

	if (!packed_search)
		while (m != btree_bkey_last(b, t) &&
		       bkey_iter_pos_cmp(b, m, search) < 0)
			m = bkey_p_next(m);

	if (bch2_expensive_debug_checks) {
		struct bkey_packed *prev = bch2_bkey_prev_all(b, t, m);

		BUG_ON(prev &&
		       bkey_iter_cmp_p_or_unp(b, prev,
					      packed_search, search) >= 0);
	}

	return m;
}
/* Btree node iterator */

static inline void __bch2_btree_node_iter_push(struct btree_node_iter *iter,
					       struct btree *b,
					       const struct bkey_packed *k,
					       const struct bkey_packed *end)
{
	if (k != end) {
		struct btree_node_iter_set *pos;

		btree_node_iter_for_each(iter, pos)
			;

		BUG_ON(pos >= iter->data + ARRAY_SIZE(iter->data));
		*pos = (struct btree_node_iter_set) {
			__btree_node_key_to_offset(b, k),
			__btree_node_key_to_offset(b, end)
		};
	}
}

void bch2_btree_node_iter_push(struct btree_node_iter *iter,
			       struct btree *b,
			       const struct bkey_packed *k,
			       const struct bkey_packed *end)
{
	__bch2_btree_node_iter_push(iter, b, k, end);
	bch2_btree_node_iter_sort(iter, b);
}
noinline __flatten __cold
static void btree_node_iter_init_pack_failed(struct btree_node_iter *iter,
					     struct btree *b, struct bpos *search)
{
	struct bkey_packed *k;

	trace_bkey_pack_pos_fail(search);

	bch2_btree_node_iter_init_from_start(iter, b);

	while ((k = bch2_btree_node_iter_peek(iter, b)) &&
	       bkey_iter_pos_cmp(b, k, search) < 0)
		bch2_btree_node_iter_advance(iter, b);
}
/**
 * bch2_btree_node_iter_init - initialize a btree node iterator, starting from a
 * given position
 * @iter:	iterator to initialize
 * @b:		btree node to search
 * @search:	search key
 *
 * Main entry point to the lookup code for individual btree nodes:
 *
 * When you don't filter out deleted keys, btree nodes _do_ contain duplicate
 * keys. This doesn't matter for most code, but it does matter for lookups.
 *
 * Some adjacent keys with a string of equal keys:
 *	j k k k k l
 *
 * If you search for k, the lookup code isn't guaranteed to return you any
 * specific k. The lookup code is conceptually doing a binary search and
 * iterating backwards is very expensive so if the pivot happens to land at the
 * last k that's what you'll get.
 *
 * This works out ok, but it's something to be aware of:
 *
 *  - For non extents, we guarantee that the live key comes last - see
 *    btree_node_iter_cmp(), keys_out_of_order(). So the duplicates you don't
 *    see will only be deleted keys you don't care about.
 *
 *  - For extents, deleted keys sort last (see the comment at the top of this
 *    file). But when you're searching for extents, you actually want the first
 *    key strictly greater than your search key - an extent that compares equal
 *    to the search key is going to have 0 sectors after the search key.
 *
 *    But this does mean that we can't just search for
 *    bpos_successor(start_of_range) to get the first extent that overlaps with
 *    the range we want - if we're unlucky and there's an extent that ends
 *    exactly where we searched, then there could be a deleted key at the same
 *    position and we'd get that when we search instead of the preceding extent
 *    we wanted.
 *
 *    So we've got to search for start_of_range, then after the lookup iterate
 *    past any extents that compare equal to the position we searched for.
 */
void bch2_btree_node_iter_init(struct btree_node_iter *iter,
			       struct btree *b, struct bpos *search)
{
	struct bkey_packed p, *packed_search = NULL;
	struct btree_node_iter_set *pos = iter->data;
	struct bkey_packed *k[MAX_BSETS];
	unsigned i;

	EBUG_ON(bpos_lt(*search, b->data->min_key));
	EBUG_ON(bpos_gt(*search, b->data->max_key));
	bset_aux_tree_verify(b);

	memset(iter, 0, sizeof(*iter));

	switch (bch2_bkey_pack_pos_lossy(&p, *search, b)) {
	case BKEY_PACK_POS_EXACT:
		packed_search = &p;
		break;
	case BKEY_PACK_POS_SMALLER:
		packed_search = NULL;
		break;
	case BKEY_PACK_POS_FAIL:
		btree_node_iter_init_pack_failed(iter, b, search);
		return;
	}

	for (i = 0; i < b->nsets; i++) {
		k[i] = __bch2_bset_search(b, b->set + i, search, &p);
		prefetch_four_cachelines(k[i]);
	}

	for (i = 0; i < b->nsets; i++) {
		struct bset_tree *t = b->set + i;
		struct bkey_packed *end = btree_bkey_last(b, t);

		k[i] = bch2_bset_search_linear(b, t, search,
					       packed_search, &p, k[i]);
		if (k[i] != end)
			*pos++ = (struct btree_node_iter_set) {
				__btree_node_key_to_offset(b, k[i]),
				__btree_node_key_to_offset(b, end)
			};
	}

	bch2_btree_node_iter_sort(iter, b);
}
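/*
 * A minimal sketch of the "iterate past equal keys" step described in the
 * comment above bch2_btree_node_iter_init(), assuming an iterator that has
 * just been initialized at start_of_range; this helper is illustration only,
 * not something the file provides:
 */
#if 0
static struct bkey_packed *
sketch_first_extent_after(struct btree_node_iter *iter, struct btree *b,
			  struct bpos start_of_range)
{
	struct bkey_packed *k;

	/* skip deleted duplicates and extents that compare equal to our pos */
	while ((k = bch2_btree_node_iter_peek_all(iter, b)) &&
	       bkey_iter_pos_cmp(b, k, &start_of_range) <= 0)
		bch2_btree_node_iter_advance(iter, b);

	return k;
}
#endif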
void bch2_btree_node_iter_init_from_start(struct btree_node_iter *iter,
					  struct btree *b)
{
	memset(iter, 0, sizeof(*iter));

	for_each_bset(b, t)
		__bch2_btree_node_iter_push(iter, b,
					    btree_bkey_first(b, t),
					    btree_bkey_last(b, t));
	bch2_btree_node_iter_sort(iter, b);
}
struct bkey_packed *bch2_btree_node_iter_bset_pos(struct btree_node_iter *iter,
						  struct btree *b,
						  struct bset_tree *t)
{
	struct btree_node_iter_set *set;

	btree_node_iter_for_each(iter, set)
		if (set->end == t->end_offset)
			return __btree_node_offset_to_key(b, set->k);

	return btree_bkey_last(b, t);
}
static inline bool btree_node_iter_sort_two(struct btree_node_iter *iter,
					    struct btree *b,
					    unsigned first)
{
	bool ret;

	if ((ret = (btree_node_iter_cmp(b,
					iter->data[first],
					iter->data[first + 1]) > 0)))
		swap(iter->data[first], iter->data[first + 1]);
	return ret;
}
void bch2_btree_node_iter_sort(struct btree_node_iter *iter,
			       struct btree *b)
{
	/* unrolled bubble sort: */

	if (!__btree_node_iter_set_end(iter, 2)) {
		btree_node_iter_sort_two(iter, b, 0);
		btree_node_iter_sort_two(iter, b, 1);
	}

	if (!__btree_node_iter_set_end(iter, 1))
		btree_node_iter_sort_two(iter, b, 0);
}
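/*
 * Why the three fixed compare-and-swap steps above suffice, for reference (not
 * from the original source): with at most three sets, comparing (0,1) and then
 * (1,2) pushes the largest entry into slot 2, and the final (0,1) compare
 * orders the remaining two - exactly a bubble sort unrolled for three
 * elements.
 */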
void bch2_btree_node_iter_set_drop(struct btree_node_iter *iter,
				   struct btree_node_iter_set *set)
{
	struct btree_node_iter_set *last =
		iter->data + ARRAY_SIZE(iter->data) - 1;

	memmove(&set[0], &set[1], (void *) last - (void *) set);
	*last = (struct btree_node_iter_set) { 0, 0 };
}
static inline void __bch2_btree_node_iter_advance(struct btree_node_iter *iter,
						  struct btree *b)
{
	iter->data->k += __bch2_btree_node_iter_peek_all(iter, b)->u64s;

	EBUG_ON(iter->data->k > iter->data->end);

	if (unlikely(__btree_node_iter_set_end(iter, 0))) {
		/* avoid an expensive memmove call: */
		iter->data[0] = iter->data[1];
		iter->data[1] = iter->data[2];
		iter->data[2] = (struct btree_node_iter_set) { 0, 0 };
		return;
	}

	if (__btree_node_iter_set_end(iter, 1))
		return;

	if (!btree_node_iter_sort_two(iter, b, 0))
		return;

	if (__btree_node_iter_set_end(iter, 2))
		return;

	btree_node_iter_sort_two(iter, b, 1);
}
void bch2_btree_node_iter_advance(struct btree_node_iter *iter,
				  struct btree *b)
{
	if (bch2_expensive_debug_checks) {
		bch2_btree_node_iter_verify(iter, b);
		bch2_btree_node_iter_next_check(iter, b);
	}

	__bch2_btree_node_iter_advance(iter, b);
}
struct bkey_packed *bch2_btree_node_iter_prev_all(struct btree_node_iter *iter,
						  struct btree *b)
{
	struct bkey_packed *k, *prev = NULL;
	struct btree_node_iter_set *set;
	unsigned end = 0;

	if (bch2_expensive_debug_checks)
		bch2_btree_node_iter_verify(iter, b);

	for_each_bset(b, t) {
		k = bch2_bkey_prev_all(b, t,
			bch2_btree_node_iter_bset_pos(iter, b, t));
		if (k &&
		    (!prev || bkey_iter_cmp(b, k, prev) > 0)) {
			prev = k;
			end = t->end_offset;
		}
	}

	if (!prev)
		return NULL;

	/*
	 * We're manually memmoving instead of just calling sort() to ensure the
	 * prev we picked ends up in slot 0 - sort won't necessarily put it
	 * there because of duplicate deleted keys:
	 */
	btree_node_iter_for_each(iter, set)
		if (set->end == end)
			goto found;

	BUG_ON(set != &iter->data[__btree_node_iter_used(iter)]);
found:
	BUG_ON(set >= iter->data + ARRAY_SIZE(iter->data));

	memmove(&iter->data[1],
		&iter->data[0],
		(void *) set - (void *) &iter->data[0]);

	iter->data[0].k = __btree_node_key_to_offset(b, prev);
	iter->data[0].end = end;

	if (bch2_expensive_debug_checks)
		bch2_btree_node_iter_verify(iter, b);
	return prev;
}
struct bkey_packed *bch2_btree_node_iter_prev(struct btree_node_iter *iter,
					      struct btree *b)
{
	struct bkey_packed *prev;

	do {
		prev = bch2_btree_node_iter_prev_all(iter, b);
	} while (prev && bkey_deleted(prev));

	return prev;
}
struct bkey_s_c bch2_btree_node_iter_peek_unpack(struct btree_node_iter *iter,
						 struct btree *b,
						 struct bkey *u)
{
	struct bkey_packed *k = bch2_btree_node_iter_peek(iter, b);

	return k ? bkey_disassemble(b, k, u) : bkey_s_c_null;
}
void bch2_btree_keys_stats(const struct btree *b, struct bset_stats *stats)
{
	for_each_bset_c(b, t) {
		enum bset_aux_tree_type type = bset_aux_tree_type(t);
		size_t j;

		stats->sets[type].nr++;
		stats->sets[type].bytes += le16_to_cpu(bset(b, t)->u64s) *
			sizeof(u64);

		if (bset_has_ro_aux_tree(t)) {
			stats->floats += t->size - 1;

			for (j = 1; j < t->size; j++)
				stats->failed +=
					bkey_float(b, t, j)->exponent ==
					BFLOAT_FAILED;
		}
	}
}
void bch2_bfloat_to_text(struct printbuf *out, struct btree *b,
			 struct bkey_packed *k)
{
	struct bset_tree *t = bch2_bkey_to_bset(b, k);
	struct bkey uk;
	unsigned j, inorder;

	if (!bset_has_ro_aux_tree(t))
		return;

	inorder = bkey_to_cacheline(b, t, k);
	if (!inorder || inorder >= t->size)
		return;

	j = __inorder_to_eytzinger1(inorder, t->size - 1, t->extra);
	if (k != tree_to_bkey(b, t, j))
		return;

	switch (bkey_float(b, t, j)->exponent) {
	case BFLOAT_FAILED:
		uk = bkey_unpack_key(b, k);
		prt_printf(out,
			   "    failed unpacked at depth %u\n"
			   "\t",
			   ilog2(j));
		bch2_bpos_to_text(out, uk.p);
		prt_printf(out, "\n");
		break;
	}
}