/*
 * Code for working with individual keys, and sorted sets of keys within a
 * btree node
 *
 * Copyright 2012 Google, Inc.
 */
#define pr_fmt(fmt) "bcache: %s() " fmt "\n", __func__

#include "util.h"
#include "bset.h"

#include <linux/console.h>
#include <linux/sched/clock.h>
#include <linux/random.h>
#include <linux/prefetch.h>
#ifdef CONFIG_BCACHE_DEBUG
void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned set)
{
        struct bkey *k, *next;

        for (k = i->start; k < bset_bkey_last(i); k = next) {
                next = bkey_next(k);

                printk(KERN_ERR "block %u key %u/%u: ", set,
                       (unsigned) ((u64 *) k - i->d), i->keys);

                if (b->ops->key_dump)
                        b->ops->key_dump(b, k);
                else
                        printk("%llu:%llu\n", KEY_INODE(k), KEY_OFFSET(k));

                if (next < bset_bkey_last(i) &&
                    bkey_cmp(k, b->ops->is_extents ?
                             &START_KEY(next) : next) > 0)
                        printk(KERN_ERR "Key skipped backwards\n");
        }
}
void bch_dump_bucket(struct btree_keys *b)
{
        unsigned i;

        console_lock();
        for (i = 0; i <= b->nsets; i++)
                bch_dump_bset(b, b->set[i].data,
                              bset_sector_offset(b, b->set[i].data));
        console_unlock();
}
int __bch_count_data(struct btree_keys *b)
{
        unsigned ret = 0;
        struct btree_iter iter;
        struct bkey *k;

        if (b->ops->is_extents)
                for_each_key(b, k, &iter)
                        ret += KEY_SIZE(k);
        return ret;
}
void __bch_check_keys(struct btree_keys *b, const char *fmt, ...)
{
        va_list args;
        struct bkey *k, *p = NULL;
        struct btree_iter iter;
        const char *err;

        for_each_key(b, k, &iter) {
                if (b->ops->is_extents) {
                        err = "Keys out of order";
                        if (p && bkey_cmp(&START_KEY(p), &START_KEY(k)) > 0)
                                goto bug;

                        if (bch_ptr_invalid(b, k))
                                continue;

                        err = "Overlapping keys";
                        if (p && bkey_cmp(p, &START_KEY(k)) > 0)
                                goto bug;
                } else {
                        if (bch_ptr_bad(b, k))
                                continue;

                        err = "Duplicate keys";
                        if (p && !bkey_cmp(p, k))
                                goto bug;
                }
                p = k;
        }
#if 0
        err = "Key larger than btree node key";
        if (p && bkey_cmp(p, &b->key) > 0)
                goto bug;
#endif
        return;
bug:
        bch_dump_bucket(b);

        va_start(args, fmt);
        vprintk(fmt, args);
        va_end(args);

        panic("bch_check_keys error: %s:\n", err);
}
static void bch_btree_iter_next_check(struct btree_iter *iter)
{
        struct bkey *k = iter->data->k, *next = bkey_next(k);

        if (next < iter->data->end &&
            bkey_cmp(k, iter->b->ops->is_extents ?
                     &START_KEY(next) : next) > 0) {
                bch_dump_bucket(iter->b);
                panic("Key skipped backwards\n");
        }
}

#else

static inline void bch_btree_iter_next_check(struct btree_iter *iter) {}

#endif
/* Keylists */

int __bch_keylist_realloc(struct keylist *l, unsigned u64s)
{
        size_t oldsize = bch_keylist_nkeys(l);
        size_t newsize = oldsize + u64s;
        uint64_t *old_keys = l->keys_p == l->inline_keys ? NULL : l->keys_p;
        uint64_t *new_keys;

        newsize = roundup_pow_of_two(newsize);

        if (newsize <= KEYLIST_INLINE ||
            roundup_pow_of_two(oldsize) == newsize)
                return 0;

        new_keys = krealloc(old_keys, sizeof(uint64_t) * newsize, GFP_NOIO);

        if (!new_keys)
                return -ENOMEM;

        if (!old_keys)
                memcpy(new_keys, l->inline_keys, sizeof(uint64_t) * oldsize);

        l->keys_p = new_keys;
        l->top_p = new_keys + oldsize;

        return 0;
}
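/*
 * Usage sketch (illustrative, not from the original source): a caller that
 * wants room for one more key carrying nr_ptrs pointers would reserve
 * BKEY_U64s + nr_ptrs u64s before building the key at l->top:
 *
 *	if (__bch_keylist_realloc(l, BKEY_U64s + nr_ptrs))
 *		return -ENOMEM;
 */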
struct bkey *bch_keylist_pop(struct keylist *l)
{
        struct bkey *k = l->keys;

        if (k == l->top)
                return NULL;

        while (bkey_next(k) != l->top)
                k = bkey_next(k);

        return l->top = k;
}

void bch_keylist_pop_front(struct keylist *l)
{
        l->top_p -= bkey_u64s(l->keys);

        memmove(l->keys,
                bkey_next(l->keys),
                bch_keylist_bytes(l));
}
/* Key/pointer manipulation */
void bch_bkey_copy_single_ptr(struct bkey *dest, const struct bkey *src,
                              unsigned i)
{
        BUG_ON(i > KEY_PTRS(src));

        /* Only copy the header, key, and one pointer. */
        memcpy(dest, src, 2 * sizeof(uint64_t));
        dest->ptr[0] = src->ptr[i];
        SET_KEY_PTRS(dest, 1);
        /* We didn't copy the checksum so clear that bit. */
        SET_KEY_CSUM(dest, 0);
}
bool __bch_cut_front(const struct bkey *where, struct bkey *k)
{
        unsigned i, len = 0;

        if (bkey_cmp(where, &START_KEY(k)) <= 0)
                return false;

        if (bkey_cmp(where, k) < 0)
                len = KEY_OFFSET(k) - KEY_OFFSET(where);
        else
                bkey_copy_key(k, where);

        for (i = 0; i < KEY_PTRS(k); i++)
                SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + KEY_SIZE(k) - len);

        BUG_ON(len > KEY_SIZE(k));
        SET_KEY_SIZE(k, len);
        return true;
}
bool __bch_cut_back(const struct bkey *where, struct bkey *k)
{
        unsigned len = 0;

        if (bkey_cmp(where, k) >= 0)
                return false;

        BUG_ON(KEY_INODE(where) != KEY_INODE(k));

        if (bkey_cmp(where, &START_KEY(k)) > 0)
                len = KEY_OFFSET(where) - KEY_START(k);

        bkey_copy_key(k, where);

        BUG_ON(len > KEY_SIZE(k));
        SET_KEY_SIZE(k, len);
        return true;
}
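/*
 * Worked example (illustrative): extent keys record their *end* offset,
 * with KEY_START(k) == KEY_OFFSET(k) - KEY_SIZE(k). Take an extent over
 * sectors [8, 16), i.e. KEY_OFFSET == 16, KEY_SIZE == 8:
 *
 * __bch_cut_front(where = 12, k): len = 16 - 12 = 4; the key keeps end
 * offset 16 but shrinks to size 4 (now [12, 16)), and each PTR_OFFSET is
 * advanced by 8 - 4 = 4 sectors to skip past the cut-off data.
 *
 * __bch_cut_back(where = 12, k): len = 12 - 8 = 4; the key's offset
 * becomes 12 and its size 4 (now [8, 12)); the pointers are untouched
 * because the extent still starts at the same sector.
 */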
/* Auxiliary search trees */
#define BKEY_MID_BITS		3
#define BKEY_EXPONENT_BITS	7
#define BKEY_MANTISSA_BITS	(32 - BKEY_MID_BITS - BKEY_EXPONENT_BITS)
#define BKEY_MANTISSA_MASK	((1 << BKEY_MANTISSA_BITS) - 1)
struct bkey_float {
        unsigned	exponent:BKEY_EXPONENT_BITS;
        unsigned	m:BKEY_MID_BITS;
        unsigned	mantissa:BKEY_MANTISSA_BITS;
} __packed;
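/*
 * Worked out (illustrative): with the constants above a bkey_float packs
 * into 32 bits - 7 exponent bits, 3 "m" bits and 32 - 3 - 7 = 22 mantissa
 * bits, so BKEY_MANTISSA_MASK == (1 << 22) - 1 == 0x3fffff.
 */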
/*
 * BSET_CACHELINE was originally intended to match the hardware cacheline size -
 * it used to be 64, but I realized the lookup code would touch slightly less
 * memory if it was 128.
 *
 * It defines the number of bytes (in struct bset) per struct bkey_float in
 * the auxiliary search tree - when we're done searching the bset_float tree we
 * have this many bytes left that we do a linear search over.
 *
 * Since (after level 5) every level of the bset_tree is on a new cacheline,
 * we're touching one fewer cacheline in the bset tree in exchange for one more
 * cacheline in the linear search - but the linear search might stop before it
 * gets to the second cacheline.
 */

#define BSET_CACHELINE		128
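/*
 * Worked example (illustrative, assuming 4 KiB pages): a btree node with
 * page_order 0 occupies PAGE_SIZE << 0 == 4096 bytes, giving
 * 4096 / BSET_CACHELINE == 32 cachelines - so at most 32 bkey_float
 * entries (128 bytes) and 32 prev bytes of auxiliary data for that node.
 */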
/* Space required for the btree node keys */
static inline size_t btree_keys_bytes(struct btree_keys *b)
{
        return PAGE_SIZE << b->page_order;
}

static inline size_t btree_keys_cachelines(struct btree_keys *b)
{
        return btree_keys_bytes(b) / BSET_CACHELINE;
}

/* Space required for the auxiliary search trees */
static inline size_t bset_tree_bytes(struct btree_keys *b)
{
        return btree_keys_cachelines(b) * sizeof(struct bkey_float);
}

/* Space required for the prev pointers */
static inline size_t bset_prev_bytes(struct btree_keys *b)
{
        return btree_keys_cachelines(b) * sizeof(uint8_t);
}
/* Memory allocation */
void bch_btree_keys_free(struct btree_keys *b)
{
        struct bset_tree *t = b->set;

        if (bset_prev_bytes(b) < PAGE_SIZE)
                kfree(t->prev);
        else
                free_pages((unsigned long) t->prev,
                           get_order(bset_prev_bytes(b)));

        if (bset_tree_bytes(b) < PAGE_SIZE)
                kfree(t->tree);
        else
                free_pages((unsigned long) t->tree,
                           get_order(bset_tree_bytes(b)));

        free_pages((unsigned long) t->data, b->page_order);
}
EXPORT_SYMBOL(bch_btree_keys_free);
int bch_btree_keys_alloc(struct btree_keys *b, unsigned page_order, gfp_t gfp)
{
        struct bset_tree *t = b->set;

        BUG_ON(t->data);

        b->page_order = page_order;

        t->data = (void *) __get_free_pages(gfp, b->page_order);
        if (!t->data)
                goto err;

        t->tree = bset_tree_bytes(b) < PAGE_SIZE
                ? kmalloc(bset_tree_bytes(b), gfp)
                : (void *) __get_free_pages(gfp, get_order(bset_tree_bytes(b)));
        if (!t->tree)
                goto err;

        t->prev = bset_prev_bytes(b) < PAGE_SIZE
                ? kmalloc(bset_prev_bytes(b), gfp)
                : (void *) __get_free_pages(gfp, get_order(bset_prev_bytes(b)));
        if (!t->prev)
                goto err;

        return 0;
err:
        bch_btree_keys_free(b);
        return -ENOMEM;
}
EXPORT_SYMBOL(bch_btree_keys_alloc);
void bch_btree_keys_init(struct btree_keys *b, const struct btree_keys_ops *ops,
                         bool *expensive_debug_checks)
{
        unsigned i;

        b->ops = ops;
        b->expensive_debug_checks = expensive_debug_checks;
        b->nsets = 0;
        b->last_set_unwritten = 0;

        /* XXX: shouldn't be needed */
        for (i = 0; i < MAX_BSETS; i++)
                b->set[i].size = 0;
        /*
         * Second loop starts at 1 because b->keys[0]->data is the memory we
         * allocated
         */
        for (i = 1; i < MAX_BSETS; i++)
                b->set[i].data = NULL;
}
EXPORT_SYMBOL(bch_btree_keys_init);
/* Binary tree stuff for auxiliary search trees */
static unsigned inorder_next(unsigned j, unsigned size)
{
        if (j * 2 + 1 < size) {
                j = j * 2 + 1;

                while (j * 2 < size)
                        j *= 2;
        } else
                j >>= ffz(j) + 1;

        return j;
}

static unsigned inorder_prev(unsigned j, unsigned size)
{
        if (j * 2 < size) {
                j = j * 2;

                while (j * 2 + 1 < size)
                        j = j * 2 + 1;
        } else
                j >>= ffs(j);

        return j;
}
/* I have no idea why this code works... and I'm the one who wrote it
 *
 * However, I do know what it does:
 * Given a binary tree constructed in an array (i.e. how you normally implement
 * a heap), it converts a node in the tree - referenced by array index - to the
 * index it would have if you did an inorder traversal.
 *
 * Also tested for every j, for all sizes up to somewhere around 6 million.
 *
 * The binary tree starts at array index 1, not 0
 * extra is a function of size:
 *	extra = (size - rounddown_pow_of_two(size - 1)) << 1;
 */
static unsigned __to_inorder(unsigned j, unsigned size, unsigned extra)
{
        unsigned b = fls(j);
        unsigned shift = fls(size - 1) - b;

        j ^= 1 << (b - 1);
        j <<= 1;
        j |= 1;
        j <<= shift;

        if (j > extra)
                j -= (j - extra) >> 1;

        return j;
}
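/*
 * Worked example (illustrative): for size == 7 (tree nodes 1..6),
 * extra = (7 - rounddown_pow_of_two(6)) << 1 == 6, and __to_inorder()
 * maps j = 1..6 to 4, 2, 6, 1, 3, 5 - exactly the positions those array
 * indices occupy in an inorder walk of the 6-node heap-shaped tree.
 */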
static unsigned to_inorder(unsigned j, struct bset_tree *t)
{
        return __to_inorder(j, t->size, t->extra);
}
static unsigned __inorder_to_tree(unsigned j, unsigned size, unsigned extra)
{
        unsigned shift;

        if (j > extra)
                j += j - extra;

        shift = ffs(j);

        j >>= shift;
        j |= roundup_pow_of_two(size) >> shift;

        return j;
}
static unsigned inorder_to_tree(unsigned j, struct bset_tree *t)
{
        return __inorder_to_tree(j, t->size, t->extra);
}
#if 0
void inorder_test(void)
{
        unsigned long done = 0;
        ktime_t start = ktime_get();

        for (unsigned size = 2;
             size < 65536000;
             size++) {
                unsigned extra = (size - rounddown_pow_of_two(size - 1)) << 1;
                unsigned i = 1, j = rounddown_pow_of_two(size - 1);

                if (!(size % 4096))
                        printk(KERN_NOTICE "loop %u, %llu per us\n", size,
                               done / ktime_us_delta(ktime_get(), start));

                while (1) {
                        if (__inorder_to_tree(i, size, extra) != j)
                                panic("size %10u j %10u i %10u", size, j, i);

                        if (__to_inorder(j, size, extra) != i)
                                panic("size %10u j %10u i %10u", size, j, i);

                        if (j == rounddown_pow_of_two(size) - 1)
                                break;

                        BUG_ON(inorder_prev(inorder_next(j, size), size) != j);

                        j = inorder_next(j, size);
                        i++;
                }

                done += size - 1;
        }
}
#endif
/*
 * Cacheline/offset <-> bkey pointer arithmetic:
 *
 * t->tree is a binary search tree in an array; each node corresponds to a key
 * in one cacheline in t->set (BSET_CACHELINE bytes).
 *
 * This means we don't have to store the full index of the key that a node in
 * the binary tree points to; to_inorder() gives us the cacheline, and then
 * bkey_float->m gives us the offset within that cacheline, in units of 8 bytes.
 *
 * cacheline_to_bkey() and friends abstract out all the pointer arithmetic to
 * make this work.
 *
 * To construct the bfloat for an arbitrary key we need to know what the key
 * immediately preceding it is: we have to check if the two keys differ in the
 * bits we're going to store in bkey_float->mantissa. t->prev[j] stores the size
 * of the previous key so we can walk backwards to it from t->tree[j]'s key.
 */
static struct bkey *cacheline_to_bkey(struct bset_tree *t, unsigned cacheline,
                                      unsigned offset)
{
        return ((void *) t->data) + cacheline * BSET_CACHELINE + offset * 8;
}

static unsigned bkey_to_cacheline(struct bset_tree *t, struct bkey *k)
{
        return ((void *) k - (void *) t->data) / BSET_CACHELINE;
}

static unsigned bkey_to_cacheline_offset(struct bset_tree *t,
                                         unsigned cacheline,
                                         struct bkey *k)
{
        return (u64 *) k - (u64 *) cacheline_to_bkey(t, cacheline, 0);
}

static struct bkey *tree_to_bkey(struct bset_tree *t, unsigned j)
{
        return cacheline_to_bkey(t, to_inorder(j, t), t->tree[j].m);
}

static struct bkey *tree_to_prev_bkey(struct bset_tree *t, unsigned j)
{
        return (void *) (((uint64_t *) tree_to_bkey(t, j)) - t->prev[j]);
}
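/*
 * Worked example (illustrative): if to_inorder(j, t) == 3 and
 * t->tree[j].m == 5, tree_to_bkey(t, j) resolves to
 * ((void *) t->data) + 3 * BSET_CACHELINE + 5 * 8, and with
 * t->prev[j] == 2 the preceding key starts two u64s before that.
 */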
/*
 * For the write set - the one we're currently inserting keys into - we don't
 * maintain a full search tree, we just keep a simple lookup table in t->prev.
 */
static struct bkey *table_to_bkey(struct bset_tree *t, unsigned cacheline)
{
        return cacheline_to_bkey(t, cacheline, t->prev[cacheline]);
}
static inline uint64_t shrd128(uint64_t high, uint64_t low, uint8_t shift)
{
        low >>= shift;
        low |= (high << 1) << (63U - shift);
        return low;
}
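/*
 * shrd128() behaves like the x86 SHRD instruction: it returns the low 64
 * bits of the 128-bit quantity (high:low) shifted right by shift, for
 * 0 <= shift <= 63. The (high << 1) << (63U - shift) split sidesteps the
 * undefined 64-bit shift that high << (64 - shift) would hit at shift == 0.
 */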
static inline unsigned bfloat_mantissa(const struct bkey *k,
                                       struct bkey_float *f)
{
        const uint64_t *p = &k->low - (f->exponent >> 6);
        return shrd128(p[-1], p[0], f->exponent & 63) & BKEY_MANTISSA_MASK;
}
static void make_bfloat(struct bset_tree *t, unsigned j)
{
        struct bkey_float *f = &t->tree[j];
        struct bkey *m = tree_to_bkey(t, j);
        struct bkey *p = tree_to_prev_bkey(t, j);

        struct bkey *l = is_power_of_2(j)
                ? t->data->start
                : tree_to_prev_bkey(t, j >> ffs(j));

        struct bkey *r = is_power_of_2(j + 1)
                ? bset_bkey_idx(t->data, t->data->keys - bkey_u64s(&t->end))
                : tree_to_bkey(t, j >> (ffz(j) + 1));

        BUG_ON(m < l || m > r);
        BUG_ON(bkey_next(p) != m);

        if (KEY_INODE(l) != KEY_INODE(r))
                f->exponent = fls64(KEY_INODE(r) ^ KEY_INODE(l)) + 64;
        else
                f->exponent = fls64(r->low ^ l->low);

        f->exponent = max_t(int, f->exponent - BKEY_MANTISSA_BITS, 0);

        /*
         * Setting f->exponent = 127 flags this node as failed, and causes the
         * lookup code to fall back to comparing against the original key.
         */

        if (bfloat_mantissa(m, f) != bfloat_mantissa(p, f))
                f->mantissa = bfloat_mantissa(m, f) - 1;
        else
                f->exponent = 127;
}
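/*
 * Illustrative summary (not from the original source): f->exponent is the
 * bit position where keys in this node's subtree start to differ, and
 * f->mantissa caches BKEY_MANTISSA_BITS bits of the node's key starting at
 * that position, minus 1. Lookups can then compare a search key's mantissa
 * against f->mantissa without dereferencing the full key; exponent == 127
 * marks nodes whose mantissa couldn't distinguish m from its predecessor.
 */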
static void bset_alloc_tree(struct btree_keys *b, struct bset_tree *t)
{
        if (t != b->set) {
                unsigned j = roundup(t[-1].size,
                                     64 / sizeof(struct bkey_float));

                t->tree = t[-1].tree + j;
                t->prev = t[-1].prev + j;
        }

        while (t < b->set + MAX_BSETS)
                t++->size = 0;
}
static void bch_bset_build_unwritten_tree(struct btree_keys *b)
{
        struct bset_tree *t = bset_tree_last(b);

        BUG_ON(b->last_set_unwritten);
        b->last_set_unwritten = 1;

        bset_alloc_tree(b, t);

        if (t->tree != b->set->tree + btree_keys_cachelines(b)) {
                t->prev[0] = bkey_to_cacheline_offset(t, 0, t->data->start);
                t->size = 1;
        }
}
void bch_bset_init_next(struct btree_keys *b, struct bset *i, uint64_t magic)
{
        if (i != b->set->data) {
                b->set[++b->nsets].data = i;
                i->seq = b->set->data->seq;
        } else
                get_random_bytes(&i->seq, sizeof(uint64_t));

        i->magic	= magic;
        i->version	= 0;
        i->keys		= 0;

        bch_bset_build_unwritten_tree(b);
}
EXPORT_SYMBOL(bch_bset_init_next);
void bch_bset_build_written_tree(struct btree_keys *b)
{
        struct bset_tree *t = bset_tree_last(b);
        struct bkey *prev = NULL, *k = t->data->start;
        unsigned j, cacheline = 1;

        b->last_set_unwritten = 0;

        bset_alloc_tree(b, t);

        t->size = min_t(unsigned,
                        bkey_to_cacheline(t, bset_bkey_last(t->data)),
                        b->set->tree + btree_keys_cachelines(b) - t->tree);

        if (t->size < 2) {
                t->size = 0;
                return;
        }

        t->extra = (t->size - rounddown_pow_of_two(t->size - 1)) << 1;

        /* First we figure out where the first key in each cacheline is */
        for (j = inorder_next(0, t->size);
             j;
             j = inorder_next(j, t->size)) {
                while (bkey_to_cacheline(t, k) < cacheline)
                        prev = k, k = bkey_next(k);

                t->prev[j] = bkey_u64s(prev);
                t->tree[j].m = bkey_to_cacheline_offset(t, cacheline++, k);
        }

        while (bkey_next(k) != bset_bkey_last(t->data))
                k = bkey_next(k);

        t->end = *k;

        /* Then we build the tree */
        for (j = inorder_next(0, t->size);
             j;
             j = inorder_next(j, t->size))
                make_bfloat(t, j);
}
EXPORT_SYMBOL(bch_bset_build_written_tree);
void bch_bset_fix_invalidated_key(struct btree_keys *b, struct bkey *k)
{
        struct bset_tree *t;
        unsigned inorder, j = 1;

        for (t = b->set; t <= bset_tree_last(b); t++)
                if (k < bset_bkey_last(t->data))
                        goto found_set;

        BUG();
found_set:
        if (!t->size || !bset_written(b, t))
                return;

        inorder = bkey_to_cacheline(t, k);

        if (k == t->data->start)
                goto fix_left;

        if (bkey_next(k) == bset_bkey_last(t->data)) {
                t->end = *k;
                goto fix_right;
        }

        j = inorder_to_tree(inorder, t);

        if (j &&
            j < t->size &&
            k == tree_to_bkey(t, j))
fix_left:	do {
                        make_bfloat(t, j);
                        j = j * 2;
                } while (j < t->size);

        j = inorder_to_tree(inorder + 1, t);

        if (j &&
            j < t->size &&
            k == tree_to_prev_bkey(t, j))
fix_right:	do {
                        make_bfloat(t, j);
                        j = j * 2 + 1;
                } while (j < t->size);
}
EXPORT_SYMBOL(bch_bset_fix_invalidated_key);
static void bch_bset_fix_lookup_table(struct btree_keys *b,
                                      struct bset_tree *t,
                                      struct bkey *k)
{
        unsigned shift = bkey_u64s(k);
        unsigned j = bkey_to_cacheline(t, k);

        /* We're getting called from btree_split() or btree_gc, just bail out */
        if (!t->size)
                return;

        /* k is the key we just inserted; we need to find the entry in the
         * lookup table for the first key that is strictly greater than k:
         * it's either k's cacheline or the next one
         */
        while (j < t->size &&
               table_to_bkey(t, j) <= k)
                j++;

        /* Adjust all the lookup table entries, and find a new key for any that
         * have gotten too big
         */
        for (; j < t->size; j++) {
                t->prev[j] += shift;

                if (t->prev[j] > 7) {
                        k = table_to_bkey(t, j - 1);

                        while (k < cacheline_to_bkey(t, j, 0))
                                k = bkey_next(k);

                        t->prev[j] = bkey_to_cacheline_offset(t, j, k);
                }
        }

        if (t->size == b->set->tree + btree_keys_cachelines(b) - t->tree)
                return;

        /* Possibly add a new entry to the end of the lookup table */

        for (k = table_to_bkey(t, t->size - 1);
             k != bset_bkey_last(t->data);
             k = bkey_next(k))
                if (t->size == bkey_to_cacheline(t, k)) {
                        t->prev[t->size] =
                                bkey_to_cacheline_offset(t, t->size, k);
                        t->size++;
                }
}
/*
 * Tries to merge l and r: l should be lower than r
 * Returns true if we were able to merge. If we did merge, l will be the merged
 * key, r will be untouched.
 */
bool bch_bkey_try_merge(struct btree_keys *b, struct bkey *l, struct bkey *r)
{
        if (!b->ops->key_merge)
                return false;

        /*
         * Generic header checks
         * Assumes left and right are in order
         * Left and right must be exactly aligned
         */
        if (!bch_bkey_equal_header(l, r) ||
             bkey_cmp(l, &START_KEY(r)))
                return false;

        return b->ops->key_merge(b, l, r);
}
EXPORT_SYMBOL(bch_bkey_try_merge);
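/*
 * Usage sketch (illustrative only, mirroring bch_btree_insert_key() below):
 * merging a new key into its in-tree predecessor during an insert.
 *
 *	if (prev && bch_bkey_try_merge(b, prev, k))
 *		return BTREE_INSERT_STATUS_BACK_MERGE;
 *
 * On success k has been absorbed into prev and must not be inserted.
 */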
void bch_bset_insert(struct btree_keys *b, struct bkey *where,
                     struct bkey *insert)
{
        struct bset_tree *t = bset_tree_last(b);

        BUG_ON(!b->last_set_unwritten);
        BUG_ON(bset_byte_offset(b, t->data) +
               __set_bytes(t->data, t->data->keys + bkey_u64s(insert)) >
               PAGE_SIZE << b->page_order);

        memmove((uint64_t *) where + bkey_u64s(insert),
                where,
                (void *) bset_bkey_last(t->data) - (void *) where);

        t->data->keys += bkey_u64s(insert);
        bkey_copy(where, insert);
        bch_bset_fix_lookup_table(b, t, where);
}
EXPORT_SYMBOL(bch_bset_insert);
unsigned bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
                              struct bkey *replace_key)
{
        unsigned status = BTREE_INSERT_STATUS_NO_INSERT;
        struct bset *i = bset_tree_last(b)->data;
        struct bkey *m, *prev = NULL;
        struct btree_iter iter;

        BUG_ON(b->ops->is_extents && !KEY_SIZE(k));

        m = bch_btree_iter_init(b, &iter, b->ops->is_extents
                                ? PRECEDING_KEY(&START_KEY(k))
                                : PRECEDING_KEY(k));

        if (b->ops->insert_fixup(b, k, &iter, replace_key))
                return status;

        status = BTREE_INSERT_STATUS_INSERT;

        while (m != bset_bkey_last(i) &&
               bkey_cmp(k, b->ops->is_extents ? &START_KEY(m) : m) > 0)
                prev = m, m = bkey_next(m);

        /* prev is in the tree, if we merge we're done */
        status = BTREE_INSERT_STATUS_BACK_MERGE;
        if (prev &&
            bch_bkey_try_merge(b, prev, k))
                goto merged;
#if 0
        status = BTREE_INSERT_STATUS_OVERWROTE;
        if (m != bset_bkey_last(i) &&
            KEY_PTRS(m) == KEY_PTRS(k) && !KEY_SIZE(m))
                goto copy;
#endif
        status = BTREE_INSERT_STATUS_FRONT_MERGE;
        if (m != bset_bkey_last(i) &&
            bch_bkey_try_merge(b, k, m))
                goto copy;

        bch_bset_insert(b, m, k);
copy:	bkey_copy(m, k);
merged:
        return status;
}
EXPORT_SYMBOL(bch_btree_insert_key);
struct bset_search_iter {
        struct bkey *l, *r;
};
static struct bset_search_iter bset_search_write_set(struct bset_tree *t,
                                                     const struct bkey *search)
{
        unsigned li = 0, ri = t->size;

        while (li + 1 != ri) {
                unsigned m = (li + ri) >> 1;

                if (bkey_cmp(table_to_bkey(t, m), search) > 0)
                        ri = m;
                else
                        li = m;
        }

        return (struct bset_search_iter) {
                table_to_bkey(t, li),
                ri < t->size ? table_to_bkey(t, ri) : bset_bkey_last(t->data)
        };
}
bset_search_tree(struct bset_tree
*t
,
893 const struct bkey
*search
)
896 struct bkey_float
*f
;
897 unsigned inorder
, j
, n
= 1;
901 p
&= ((int) (p
- t
->size
)) >> 31;
903 prefetch(&t
->tree
[p
]);
909 * n = (f->mantissa > bfloat_mantissa())
913 * We need to subtract 1 from f->mantissa for the sign bit trick
914 * to work - that's done in make_bfloat()
916 if (likely(f
->exponent
!= 127))
917 n
= j
* 2 + (((unsigned)
919 bfloat_mantissa(search
, f
))) >> 31);
921 n
= (bkey_cmp(tree_to_bkey(t
, j
), search
) > 0)
924 } while (n
< t
->size
);
926 inorder
= to_inorder(j
, t
);
929 * n would have been the node we recursed to - the low bit tells us if
930 * we recursed left or recursed right.
933 l
= cacheline_to_bkey(t
, inorder
, f
->m
);
935 if (++inorder
!= t
->size
) {
936 f
= &t
->tree
[inorder_next(j
, t
->size
)];
937 r
= cacheline_to_bkey(t
, inorder
, f
->m
);
939 r
= bset_bkey_last(t
->data
);
941 r
= cacheline_to_bkey(t
, inorder
, f
->m
);
944 f
= &t
->tree
[inorder_prev(j
, t
->size
)];
945 l
= cacheline_to_bkey(t
, inorder
, f
->m
);
950 return (struct bset_search_iter
) {l
, r
};
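/*
 * Illustrative note (not from the original source): the common case above
 * is branch-free. Because f->mantissa stores the node key's mantissa minus
 * 1, the unsigned subtraction (f->mantissa - bfloat_mantissa(search, f))
 * wraps exactly when the search key's mantissa is greater, and ">> 31"
 * extracts that borrow bit: descend right (j * 2 + 1) when the node key
 * compares <= search, left (j * 2) otherwise - matching the bkey_cmp()
 * fallback taken when exponent == 127.
 */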
struct bkey *__bch_bset_search(struct btree_keys *b, struct bset_tree *t,
                               const struct bkey *search)
{
        struct bset_search_iter i;

        /*
         * First, we search for a cacheline, then lastly we do a linear search
         * within that cacheline.
         *
         * To search for the cacheline, there are three different possibilities:
         *  * The set is too small to have a search tree, so we just do a linear
         *    search over the whole set.
         *  * The set is the one we're currently inserting into; keeping a full
         *    auxiliary search tree up to date would be too expensive, so we
         *    use a much simpler lookup table to do a binary search -
         *    bset_search_write_set().
         *  * Or we use the auxiliary search tree we constructed earlier -
         *    bset_search_tree()
         */

        if (unlikely(!t->size)) {
                i.l = t->data->start;
                i.r = bset_bkey_last(t->data);
        } else if (bset_written(b, t)) {
                /*
                 * Each node in the auxiliary search tree covers a certain range
                 * of bits, and keys above and below the set it covers might
                 * differ outside those bits - so we have to special case the
                 * start and end - handle that here:
                 */

                if (unlikely(bkey_cmp(search, &t->end) >= 0))
                        return bset_bkey_last(t->data);

                if (unlikely(bkey_cmp(search, t->data->start) < 0))
                        return t->data->start;

                i = bset_search_tree(t, search);
        } else {
                BUG_ON(!b->nsets &&
                       t->size < bkey_to_cacheline(t, bset_bkey_last(t->data)));

                i = bset_search_write_set(t, search);
        }

        if (btree_keys_expensive_checks(b)) {
                BUG_ON(bset_written(b, t) &&
                       i.l != t->data->start &&
                       bkey_cmp(tree_to_prev_bkey(t,
                          inorder_to_tree(bkey_to_cacheline(t, i.l), t)),
                                search) > 0);

                BUG_ON(i.r != bset_bkey_last(t->data) &&
                       bkey_cmp(i.r, search) <= 0);
        }

        while (likely(i.l != i.r) &&
               bkey_cmp(i.l, search) <= 0)
                i.l = bkey_next(i.l);

        return i.l;
}
EXPORT_SYMBOL(__bch_bset_search);
/* Btree iterator */
typedef bool (btree_iter_cmp_fn)(struct btree_iter_set,
                                 struct btree_iter_set);

static inline bool btree_iter_cmp(struct btree_iter_set l,
                                  struct btree_iter_set r)
{
        return bkey_cmp(l.k, r.k) > 0;
}
static inline bool btree_iter_end(struct btree_iter *iter)
{
        return !iter->used;
}
void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
                         struct bkey *end)
{
        if (k != end)
                BUG_ON(!heap_add(iter,
                                 ((struct btree_iter_set) { k, end }),
                                 btree_iter_cmp));
}
*__bch_btree_iter_init(struct btree_keys
*b
,
1043 struct btree_iter
*iter
,
1044 struct bkey
*search
,
1045 struct bset_tree
*start
)
1047 struct bkey
*ret
= NULL
;
1048 iter
->size
= ARRAY_SIZE(iter
->data
);
1051 #ifdef CONFIG_BCACHE_DEBUG
1055 for (; start
<= bset_tree_last(b
); start
++) {
1056 ret
= bch_bset_search(b
, start
, search
);
1057 bch_btree_iter_push(iter
, ret
, bset_bkey_last(start
->data
));
1063 struct bkey
*bch_btree_iter_init(struct btree_keys
*b
,
1064 struct btree_iter
*iter
,
1065 struct bkey
*search
)
1067 return __bch_btree_iter_init(b
, iter
, search
, b
->set
);
1069 EXPORT_SYMBOL(bch_btree_iter_init
);
static inline struct bkey *__bch_btree_iter_next(struct btree_iter *iter,
                                                 btree_iter_cmp_fn *cmp)
{
        struct btree_iter_set unused;
        struct bkey *ret = NULL;

        if (!btree_iter_end(iter)) {
                bch_btree_iter_next_check(iter);

                ret = iter->data->k;
                iter->data->k = bkey_next(iter->data->k);

                if (iter->data->k > iter->data->end) {
                        WARN_ONCE(1, "bset was corrupt!\n");
                        iter->data->k = iter->data->end;
                }

                if (iter->data->k == iter->data->end)
                        heap_pop(iter, unused, cmp);
                else
                        heap_sift(iter, 0, cmp);
        }

        return ret;
}

struct bkey *bch_btree_iter_next(struct btree_iter *iter)
{
        return __bch_btree_iter_next(iter, btree_iter_cmp);
}
EXPORT_SYMBOL(bch_btree_iter_next);
struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
                                        struct btree_keys *b, ptr_filter_fn fn)
{
        struct bkey *ret;

        do {
                ret = bch_btree_iter_next(iter);
        } while (ret && fn(b, ret));

        return ret;
}
/* Mergesort */

void bch_bset_sort_state_free(struct bset_sort_state *state)
{
        if (state->pool)
                mempool_destroy(state->pool);
}

int bch_bset_sort_state_init(struct bset_sort_state *state, unsigned page_order)
{
        spin_lock_init(&state->time.lock);

        state->page_order = page_order;
        state->crit_factor = int_sqrt(1 << page_order);

        state->pool = mempool_create_page_pool(1, page_order);
        if (!state->pool)
                return -ENOMEM;

        return 0;
}
EXPORT_SYMBOL(bch_bset_sort_state_init);
static void btree_mergesort(struct btree_keys *b, struct bset *out,
                            struct btree_iter *iter,
                            bool fixup, bool remove_stale)
{
        int i;
        struct bkey *k, *last = NULL;
        BKEY_PADDED(k) tmp;
        bool (*bad)(struct btree_keys *, const struct bkey *) = remove_stale
                ? bch_ptr_bad
                : bch_ptr_invalid;

        /* Heapify the iterator, using our comparison function */
        for (i = iter->used / 2 - 1; i >= 0; --i)
                heap_sift(iter, i, b->ops->sort_cmp);

        while (!btree_iter_end(iter)) {
                if (b->ops->sort_fixup && fixup)
                        k = b->ops->sort_fixup(iter, &tmp.k);
                else
                        k = NULL;

                if (!k)
                        k = __bch_btree_iter_next(iter, b->ops->sort_cmp);

                if (bad(b, k))
                        continue;

                if (!last) {
                        last = out->start;
                        bkey_copy(last, k);
                } else if (!bch_bkey_try_merge(b, last, k)) {
                        last = bkey_next(last);
                        bkey_copy(last, k);
                }
        }

        out->keys = last ? (uint64_t *) bkey_next(last) - out->d : 0;

        pr_debug("sorted %i keys", out->keys);
}
static void __btree_sort(struct btree_keys *b, struct btree_iter *iter,
                         unsigned start, unsigned order, bool fixup,
                         struct bset_sort_state *state)
{
        uint64_t start_time;
        bool used_mempool = false;
        struct bset *out = (void *) __get_free_pages(__GFP_NOWARN|GFP_NOWAIT,
                                                     order);
        if (!out) {
                struct page *outp;

                BUG_ON(order > state->page_order);

                outp = mempool_alloc(state->pool, GFP_NOIO);
                out = page_address(outp);
                used_mempool = true;
                order = state->page_order;
        }

        start_time = local_clock();

        btree_mergesort(b, out, iter, fixup, false);
        b->nsets = start;

        if (!start && order == b->page_order) {
                /*
                 * Our temporary buffer is the same size as the btree node's
                 * buffer, we can just swap buffers instead of doing a big
                 * memcpy()
                 */
                out->magic	= b->set->data->magic;
                out->seq	= b->set->data->seq;
                out->version	= b->set->data->version;
                swap(out, b->set->data);
        } else {
                b->set[start].data->keys = out->keys;
                memcpy(b->set[start].data->start, out->start,
                       (void *) bset_bkey_last(out) - (void *) out->start);
        }

        if (used_mempool)
                mempool_free(virt_to_page(out), state->pool);
        else
                free_pages((unsigned long) out, order);

        bch_bset_build_written_tree(b);

        if (!start)
                bch_time_stats_update(&state->time, start_time);
}
void bch_btree_sort_partial(struct btree_keys *b, unsigned start,
                            struct bset_sort_state *state)
{
        size_t order = b->page_order, keys = 0;
        struct btree_iter iter;
        int oldsize = bch_count_data(b);

        __bch_btree_iter_init(b, &iter, NULL, &b->set[start]);

        if (start) {
                unsigned i;

                for (i = start; i <= b->nsets; i++)
                        keys += b->set[i].data->keys;

                order = get_order(__set_bytes(b->set->data, keys));
        }

        __btree_sort(b, &iter, start, order, false, state);

        EBUG_ON(oldsize >= 0 && bch_count_data(b) != oldsize);
}
EXPORT_SYMBOL(bch_btree_sort_partial);
void bch_btree_sort_and_fix_extents(struct btree_keys *b,
                                    struct btree_iter *iter,
                                    struct bset_sort_state *state)
{
        __btree_sort(b, iter, 0, b->page_order, true, state);
}
void bch_btree_sort_into(struct btree_keys *b, struct btree_keys *new,
                         struct bset_sort_state *state)
{
        uint64_t start_time = local_clock();

        struct btree_iter iter;
        bch_btree_iter_init(b, &iter, NULL);

        btree_mergesort(b, new->set->data, &iter, false, true);

        bch_time_stats_update(&state->time, start_time);

        new->set->size = 0; // XXX: why?
}
#define SORT_CRIT	(4096 / sizeof(uint64_t))
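/*
 * Worked example (illustrative): SORT_CRIT is 4096 / 8 == 512 keys. With
 * page_order 3, crit_factor == int_sqrt(1 << 3) == 2, so the loop below
 * resorts sets i..nsets together as soon as set i holds fewer than
 * 512 * 2^(b->nsets - i) keys - older (lower-numbered) sets must be
 * proportionally larger to be left alone.
 */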
void bch_btree_sort_lazy(struct btree_keys *b, struct bset_sort_state *state)
{
        unsigned crit = SORT_CRIT;
        int i;

        /* Don't sort if nothing to do */
        if (!b->nsets)
                goto out;

        for (i = b->nsets - 1; i >= 0; --i) {
                crit *= state->crit_factor;

                if (b->set[i].data->keys < crit) {
                        bch_btree_sort_partial(b, i, state);
                        return;
                }
        }

        /* Sort if we'd overflow */
        if (b->nsets + 1 == MAX_BSETS) {
                bch_btree_sort(b, state);
                return;
        }

out:
        bch_bset_build_written_tree(b);
}
EXPORT_SYMBOL(bch_btree_sort_lazy);
void bch_btree_keys_stats(struct btree_keys *b, struct bset_stats *stats)
{
        unsigned i;

        for (i = 0; i <= b->nsets; i++) {
                struct bset_tree *t = &b->set[i];
                size_t bytes = t->data->keys * sizeof(uint64_t);
                size_t j;

                if (bset_written(b, t)) {
                        stats->sets_written++;
                        stats->bytes_written += bytes;

                        stats->floats += t->size - 1;

                        for (j = 1; j < t->size; j++)
                                if (t->tree[j].exponent == 127)
                                        stats->failed++;
                } else {
                        stats->sets_unwritten++;
                        stats->bytes_unwritten += bytes;
                }
        }
}