/*
 * Code for working with individual keys, and sorted sets of keys within a
 * btree node
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"

#include <linux/random.h>
#include <linux/prefetch.h>
void bch_keylist_copy(struct keylist *dest, struct keylist *src)
{
	*dest = *src;

	if (src->list == src->d) {
		size_t n = (uint64_t *) src->top - src->d;

		dest->top = (struct bkey *) &dest->d[n];
		dest->list = dest->d;
	}
}
int bch_keylist_realloc(struct keylist *l, int nptrs, struct cache_set *c)
{
	unsigned oldsize = (uint64_t *) l->top - l->list;
	unsigned newsize = oldsize + 2 + nptrs;
	uint64_t *new;

	/* The journalling code doesn't handle the case where the keys to
	 * insert are bigger than an empty write: if we just return -ENOMEM
	 * here, bio_insert() and bio_invalidate() will insert the keys created
	 * so far and finish the rest when the keylist is empty.
	 */
	if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
		return -ENOMEM;

	newsize = roundup_pow_of_two(newsize);

	if (newsize <= KEYLIST_INLINE ||
	    roundup_pow_of_two(oldsize) == newsize)
		return 0;

	new = krealloc(l->list == l->d ? NULL : l->list,
		       sizeof(uint64_t) * newsize, GFP_NOIO);

	if (!new)
		return -ENOMEM;

	if (l->list == l->d)
		memcpy(new, l->list, sizeof(uint64_t) * KEYLIST_INLINE);

	l->list = new;
	l->top = (struct bkey *) (&l->list[oldsize]);

	return 0;
}
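/*
 * Worked example (editorial illustration, not from the original source): a
 * bkey header occupies two uint64_ts, so appending a key with nptrs = 1
 * pointer needs oldsize + 2 + 1 u64s of space; once that no longer fits in
 * the KEYLIST_INLINE inline array, the list is krealloc()ed to the next
 * power of two.
 */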
struct bkey *bch_keylist_pop(struct keylist *l)
{
	struct bkey *k = l->bottom;

	if (k == l->top)
		return NULL;

	while (bkey_next(k) != l->top)
		k = bkey_next(k);

	return l->top = k;
}
/* Pointer validation */

bool __bch_ptr_invalid(struct cache_set *c, int level, const struct bkey *k)
{
	unsigned i;
	char buf[80];

	if (level && (!KEY_PTRS(k) || !KEY_SIZE(k) || KEY_DIRTY(k)))
		goto bad;

	if (!level && KEY_SIZE(k) > KEY_OFFSET(k))
		goto bad;

	if (!KEY_SIZE(k))
		return true;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(c, k, i)) {
			struct cache *ca = PTR_CACHE(c, k, i);
			size_t bucket = PTR_BUCKET_NR(c, k, i);
			size_t r = bucket_remainder(c, PTR_OFFSET(k, i));

			if (KEY_SIZE(k) + r > c->sb.bucket_size ||
			    bucket <  ca->sb.first_bucket ||
			    bucket >= ca->sb.nbuckets)
				goto bad;
		}

	return false;
bad:
	bch_bkey_to_text(buf, sizeof(buf), k);
	cache_bug(c, "spotted bad key %s: %s", buf, bch_ptr_status(c, k));
	return true;
}
bool bch_ptr_bad(struct btree *b, const struct bkey *k)
{
	struct bucket *g;
	unsigned i, stale;

	if (!bkey_cmp(k, &ZERO_KEY) ||
	    !KEY_PTRS(k) ||
	    bch_ptr_invalid(b, k))
		return true;

	if (KEY_PTRS(k) && PTR_DEV(k, 0) == PTR_CHECK_DEV)
		return true;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(b->c, k, i)) {
			g = PTR_BUCKET(b->c, k, i);
			stale = ptr_stale(b->c, k, i);

			btree_bug_on(stale > 96, b,
				     "key too stale: %i, need_gc %u",
				     stale, b->c->need_gc);

			btree_bug_on(stale && KEY_DIRTY(k) && KEY_SIZE(k),
				     b, "stale dirty pointer");

			if (stale)
				return true;

#ifdef CONFIG_BCACHE_EDEBUG
			if (!mutex_trylock(&b->c->bucket_lock))
				continue;

			if (b->level) {
				if (KEY_DIRTY(k) ||
				    g->prio != BTREE_PRIO ||
				    (b->c->gc_mark_valid &&
				     GC_MARK(g) != GC_MARK_METADATA))
					goto bug;
			} else {
				if (g->prio == BTREE_PRIO)
					goto bug;

				if (KEY_DIRTY(k) &&
				    b->c->gc_mark_valid &&
				    GC_MARK(g) != GC_MARK_DIRTY)
					goto bug;
			}
			mutex_unlock(&b->c->bucket_lock);
#endif
		}

	return false;
#ifdef CONFIG_BCACHE_EDEBUG
bug:
	mutex_unlock(&b->c->bucket_lock);

	{
		char buf[80];

		bch_bkey_to_text(buf, sizeof(buf), k);
		btree_bug(b,
"inconsistent pointer %s: bucket %zu pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i",
			  buf, PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin),
			  g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen);
	}
	return true;
#endif
}
/* Key/pointer manipulation */

void bch_bkey_copy_single_ptr(struct bkey *dest, const struct bkey *src,
			      unsigned i)
{
	BUG_ON(i > KEY_PTRS(src));

	/* Only copy the header, key, and one pointer. */
	memcpy(dest, src, 2 * sizeof(uint64_t));
	dest->ptr[0] = src->ptr[i];
	SET_KEY_PTRS(dest, 1);
	/* We didn't copy the checksum so clear that bit. */
	SET_KEY_CSUM(dest, 0);
}
bool __bch_cut_front(const struct bkey *where, struct bkey *k)
{
	unsigned i, len = 0;

	if (bkey_cmp(where, &START_KEY(k)) <= 0)
		return false;

	if (bkey_cmp(where, k) < 0)
		len = KEY_OFFSET(k) - KEY_OFFSET(where);
	else
		bkey_copy_key(k, where);

	for (i = 0; i < KEY_PTRS(k); i++)
		SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + KEY_SIZE(k) - len);

	BUG_ON(len > KEY_SIZE(k));
	SET_KEY_SIZE(k, len);
	return true;
}
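/*
 * Illustration (editorial, assuming the usual bcache extent convention where
 * KEY_OFFSET is the end of the extent and KEY_SIZE its length): for
 * k = (inode 1, offset 1536, size 1024), covering sectors [512, 1536),
 * cutting the front at where = (1, 1024) leaves len = 1536 - 1024 = 512,
 * bumps each PTR_OFFSET by KEY_SIZE(k) - len = 512, and shrinks k so it
 * covers [1024, 1536).
 */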
bool __bch_cut_back(const struct bkey *where, struct bkey *k)
{
	unsigned len = 0;

	if (bkey_cmp(where, k) >= 0)
		return false;

	BUG_ON(KEY_INODE(where) != KEY_INODE(k));

	if (bkey_cmp(where, &START_KEY(k)) > 0)
		len = KEY_OFFSET(where) - KEY_START(k);

	bkey_copy_key(k, where);

	BUG_ON(len > KEY_SIZE(k));
	SET_KEY_SIZE(k, len);
	return true;
}
static uint64_t merge_chksums(struct bkey *l, struct bkey *r)
{
	return (l->ptr[KEY_PTRS(l)] + r->ptr[KEY_PTRS(r)]) &
		~((uint64_t)1 << 63);
}
/* Tries to merge l and r: l should be lower than r
 * Returns true if we were able to merge. If we did merge, l will be the merged
 * key, r will be untouched.
 */
bool bch_bkey_try_merge(struct btree *b, struct bkey *l, struct bkey *r)
{
	unsigned i;

	if (key_merging_disabled(b->c))
		return false;

	if (KEY_PTRS(l) != KEY_PTRS(r) ||
	    KEY_DIRTY(l) != KEY_DIRTY(r) ||
	    bkey_cmp(l, &START_KEY(r)))
		return false;

	for (i = 0; i < KEY_PTRS(l); i++)
		if (l->ptr[i] + PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
		    PTR_BUCKET_NR(b->c, l, i) != PTR_BUCKET_NR(b->c, r, i))
			return false;

	/* Keys with no pointers aren't restricted to one bucket and could
	 * overflow KEY_SIZE
	 */
	if (KEY_SIZE(l) + KEY_SIZE(r) > USHRT_MAX) {
		SET_KEY_OFFSET(l, KEY_OFFSET(l) + USHRT_MAX - KEY_SIZE(l));
		SET_KEY_SIZE(l, USHRT_MAX);

		bch_cut_front(l, r);
		return false;
	}

	if (KEY_CSUM(l)) {
		if (KEY_CSUM(r))
			l->ptr[KEY_PTRS(l)] = merge_chksums(l, r);
		else
			SET_KEY_CSUM(l, 0);
	}

	SET_KEY_OFFSET(l, KEY_OFFSET(l) + KEY_SIZE(r));
	SET_KEY_SIZE(l, KEY_SIZE(l) + KEY_SIZE(r));

	return true;
}
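/*
 * Illustration (editorial, same extent convention as above): l = (inode 1,
 * offset 1024, size 512) covers [512, 1024) and r = (1, 1536, 512) covers
 * [1024, 1536); bkey_cmp(l, &START_KEY(r)) == 0, so if the pointers line up
 * the merge leaves l = (1, 1536, 1024) covering [512, 1536), with r untouched.
 */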
/* Binary tree stuff for auxiliary search trees */

static unsigned inorder_next(unsigned j, unsigned size)
{
	if (j * 2 + 1 < size) {
		j = j * 2 + 1;

		while (j * 2 < size)
			j *= 2;
	} else
		j >>= ffz(j) + 1;

	return j;
}

static unsigned inorder_prev(unsigned j, unsigned size)
{
	if (j * 2 < size) {
		j = j * 2;

		while (j * 2 + 1 < size)
			j = j * 2 + 1;
	} else
		j >>= ffs(j);

	return j;
}
/* I have no idea why this code works... and I'm the one who wrote it
 *
 * However, I do know what it does:
 * Given a binary tree constructed in an array (i.e. how you normally implement
 * a heap), it converts a node in the tree - referenced by array index - to the
 * index it would have if you did an inorder traversal.
 *
 * Also tested for every j and size, up to sizes somewhere around 6 million.
 *
 * The binary tree starts at array index 1, not 0
 * extra is a function of size:
 *	extra = (size - rounddown_pow_of_two(size - 1)) << 1;
 */
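/*
 * Editorial illustration: with size = 4 (tree nodes 1..3),
 * extra = (4 - rounddown_pow_of_two(3)) << 1 = (4 - 2) << 1 = 4, and
 * __to_inorder() maps tree indices 1, 2, 3 to inorder positions 2, 1, 3 -
 * i.e. the left child comes first, then the root, then the right child.
 */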
static unsigned __to_inorder(unsigned j, unsigned size, unsigned extra)
{
	unsigned b = fls(j);
	unsigned shift = fls(size - 1) - b;

	j  ^= 1U << (b - 1);
	j <<= 1;
	j  |= 1;
	j <<= shift;

	if (j > extra)
		j -= (j - extra) >> 1;

	return j;
}
static unsigned to_inorder(unsigned j, struct bset_tree *t)
{
	return __to_inorder(j, t->size, t->extra);
}
static unsigned __inorder_to_tree(unsigned j, unsigned size, unsigned extra)
{
	unsigned shift;

	if (j > extra)
		j += j - extra;

	shift = ffs(j);

	j >>= shift;
	j  |= roundup_pow_of_two(size) >> shift;

	return j;
}
static unsigned inorder_to_tree(unsigned j, struct bset_tree *t)
{
	return __inorder_to_tree(j, t->size, t->extra);
}
void inorder_test(void)
{
	unsigned long done = 0;
	ktime_t start = ktime_get();

	for (unsigned size = 2;
	     size < 65536000;
	     size++) {
		unsigned extra = (size - rounddown_pow_of_two(size - 1)) << 1;
		unsigned i = 1, j = rounddown_pow_of_two(size - 1);

		if (!(size % 4096))
			printk(KERN_NOTICE "loop %u, %llu per us\n", size,
			       done / ktime_us_delta(ktime_get(), start));

		while (1) {
			if (__inorder_to_tree(i, size, extra) != j)
				panic("size %10u j %10u i %10u", size, j, i);

			if (__to_inorder(j, size, extra) != i)
				panic("size %10u j %10u i %10u", size, j, i);

			if (j == rounddown_pow_of_two(size) - 1)
				break;

			BUG_ON(inorder_prev(inorder_next(j, size), size) != j);

			j = inorder_next(j, size);
			i++;
		}

		done += size - 1;
	}
}
/*
 * Cacheline/offset <-> bkey pointer arithmetic:
 *
 * t->tree is a binary search tree in an array; each node corresponds to a key
 * in one cacheline in t->set (BSET_CACHELINE bytes).
 *
 * This means we don't have to store the full index of the key that a node in
 * the binary tree points to; to_inorder() gives us the cacheline, and then
 * bkey_float->m gives us the offset within that cacheline, in units of 8 bytes.
 *
 * cacheline_to_bkey() and friends abstract out all the pointer arithmetic to
 * make this work.
 *
 * To construct the bfloat for an arbitrary key we need to know what the key
 * immediately preceding it is: we have to check if the two keys differ in the
 * bits we're going to store in bkey_float->mantissa. t->prev[j] stores the size
 * of the previous key so we can walk backwards to it from t->tree[j]'s key.
 */
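/*
 * Editorial illustration, assuming BSET_CACHELINE is 128 bytes: a tree node
 * whose inorder index is 3 with m = 5 refers to the key starting
 * 3 * 128 + 5 * 8 = 424 bytes past t->data.
 */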
static struct bkey *cacheline_to_bkey(struct bset_tree *t, unsigned cacheline,
				      unsigned offset)
{
	return ((void *) t->data) + cacheline * BSET_CACHELINE + offset * 8;
}
static unsigned bkey_to_cacheline(struct bset_tree *t, struct bkey *k)
{
	return ((void *) k - (void *) t->data) / BSET_CACHELINE;
}
static unsigned bkey_to_cacheline_offset(struct bkey *k)
{
	return ((size_t) k & (BSET_CACHELINE - 1)) / sizeof(uint64_t);
}
static struct bkey *tree_to_bkey(struct bset_tree *t, unsigned j)
{
	return cacheline_to_bkey(t, to_inorder(j, t), t->tree[j].m);
}
static struct bkey *tree_to_prev_bkey(struct bset_tree *t, unsigned j)
{
	return (void *) (((uint64_t *) tree_to_bkey(t, j)) - t->prev[j]);
}
/*
 * For the write set - the one we're currently inserting keys into - we don't
 * maintain a full search tree, we just keep a simple lookup table in t->prev.
 */
static struct bkey *table_to_bkey(struct bset_tree *t, unsigned cacheline)
{
	return cacheline_to_bkey(t, cacheline, t->prev[cacheline]);
}
static inline uint64_t shrd128(uint64_t high, uint64_t low, uint8_t shift)
{
#ifdef CONFIG_X86_64
	asm("shrd %[shift],%[high],%[low]"
	    : [low] "+Rm" (low)
	    : [high] "R" (high),
	      [shift] "ci" (shift)
	    : "cc");
#else
	low >>= shift;
	low  |= (high << 1) << (63U - shift);
#endif
	return low;
}
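/*
 * Editorial illustration: shrd128() shifts the 128-bit value (high:low) right
 * and returns the low 64 bits of the result, e.g. shrd128(1, 0, 4) treats the
 * input as 2^64 and yields 2^60 (0x1000000000000000).
 */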
static inline unsigned bfloat_mantissa(const struct bkey *k,
				       struct bkey_float *f)
{
	const uint64_t *p = &k->low - (f->exponent >> 6);
	return shrd128(p[-1], p[0], f->exponent & 63) & BKEY_MANTISSA_MASK;
}
static void make_bfloat(struct bset_tree *t, unsigned j)
{
	struct bkey_float *f = &t->tree[j];
	struct bkey *m = tree_to_bkey(t, j);
	struct bkey *p = tree_to_prev_bkey(t, j);

	struct bkey *l = is_power_of_2(j)
		? t->data->start
		: tree_to_prev_bkey(t, j >> ffs(j));

	struct bkey *r = is_power_of_2(j + 1)
		? node(t->data, t->data->keys - bkey_u64s(&t->end))
		: tree_to_bkey(t, j >> (ffz(j) + 1));

	BUG_ON(m < l || m > r);
	BUG_ON(bkey_next(p) != m);

	if (KEY_INODE(l) != KEY_INODE(r))
		f->exponent = fls64(KEY_INODE(r) ^ KEY_INODE(l)) + 64;
	else
		f->exponent = fls64(r->low ^ l->low);

	f->exponent = max_t(int, f->exponent - BKEY_MANTISSA_BITS, 0);

	/*
	 * Setting f->exponent = 127 flags this node as failed, and causes the
	 * lookup code to fall back to comparing against the original key.
	 */

	if (bfloat_mantissa(m, f) != bfloat_mantissa(p, f))
		f->mantissa = bfloat_mantissa(m, f) - 1;
	else
		f->exponent = 127;
}
static void bset_alloc_tree(struct btree *b, struct bset_tree *t)
{
	if (t != b->sets) {
		unsigned j = roundup(t[-1].size,
				     64 / sizeof(struct bkey_float));

		t->tree = t[-1].tree + j;
		t->prev = t[-1].prev + j;
	}

	while (t < b->sets + MAX_BSETS)
		t++->size = 0;
}
static void bset_build_unwritten_tree(struct btree *b)
{
	struct bset_tree *t = b->sets + b->nsets;

	bset_alloc_tree(b, t);

	if (t->tree != b->sets->tree + bset_tree_space(b)) {
		t->prev[0] = bkey_to_cacheline_offset(t->data->start);
		t->size = 1;
	}
}
static void bset_build_written_tree(struct btree *b)
{
	struct bset_tree *t = b->sets + b->nsets;
	struct bkey *k = t->data->start;
	unsigned j, cacheline = 1;

	bset_alloc_tree(b, t);

	t->size = min_t(unsigned,
			bkey_to_cacheline(t, end(t->data)),
			b->sets->tree + bset_tree_space(b) - t->tree);

	if (t->size < 2) {
		t->size = 0;
		return;
	}

	t->extra = (t->size - rounddown_pow_of_two(t->size - 1)) << 1;

	/* First we figure out where the first key in each cacheline is */
	for (j = inorder_next(0, t->size);
	     j;
	     j = inorder_next(j, t->size)) {
		while (bkey_to_cacheline(t, k) != cacheline)
			k = bkey_next(k);

		t->prev[j] = bkey_u64s(k);
		k = bkey_next(k);
		cacheline++;
		t->tree[j].m = bkey_to_cacheline_offset(k);
	}

	while (bkey_next(k) != end(t->data))
		k = bkey_next(k);

	t->end = *k;

	/* Then we build the tree */
	for (j = inorder_next(0, t->size);
	     j;
	     j = inorder_next(j, t->size))
		make_bfloat(t, j);
}
void bch_bset_fix_invalidated_key(struct btree *b, struct bkey *k)
{
	struct bset_tree *t;
	unsigned inorder, j = 1;

	for (t = b->sets; t <= &b->sets[b->nsets]; t++)
		if (k < end(t->data))
			goto found_set;

	BUG();
found_set:
	if (!t->size || !bset_written(b, t))
		return;

	inorder = bkey_to_cacheline(t, k);

	if (k == t->data->start)
		goto fix_left;

	if (bkey_next(k) == end(t->data)) {
		t->end = *k;
		goto fix_right;
	}

	j = inorder_to_tree(inorder, t);

	if (j &&
	    j < t->size &&
	    k == tree_to_bkey(t, j))
fix_left:	do {
			make_bfloat(t, j);
			j = j * 2;
		} while (j < t->size);

	j = inorder_to_tree(inorder + 1, t);

	if (j &&
	    j < t->size &&
	    k == tree_to_prev_bkey(t, j))
fix_right:	do {
			make_bfloat(t, j);
			j = j * 2 + 1;
		} while (j < t->size);
}
void bch_bset_fix_lookup_table(struct btree *b, struct bkey *k)
{
	struct bset_tree *t = &b->sets[b->nsets];
	unsigned shift = bkey_u64s(k);
	unsigned j = bkey_to_cacheline(t, k);

	/* We're getting called from btree_split() or btree_gc, just bail out */
	if (!t->size)
		return;

	/* k is the key we just inserted; we need to find the entry in the
	 * lookup table for the first key that is strictly greater than k:
	 * it's either k's cacheline or the next one
	 */
	while (j < t->size &&
	       table_to_bkey(t, j) <= k)
		j++;

	/* Adjust all the lookup table entries, and find a new key for any that
	 * have gotten too big
	 */
	for (; j < t->size; j++) {
		t->prev[j] += shift;

		if (t->prev[j] > 7) {
			k = table_to_bkey(t, j - 1);

			while (k < cacheline_to_bkey(t, j, 0))
				k = bkey_next(k);

			t->prev[j] = bkey_to_cacheline_offset(k);
		}
	}

	if (t->size == b->sets->tree + bset_tree_space(b) - t->tree)
		return;

	/* Possibly add a new entry to the end of the lookup table */

	for (k = table_to_bkey(t, t->size - 1);
	     k != end(t->data);
	     k = bkey_next(k))
		if (t->size == bkey_to_cacheline(t, k)) {
			t->prev[t->size] = bkey_to_cacheline_offset(k);
			t->size++;
		}
}
void bch_bset_init_next(struct btree *b)
{
	struct bset *i = write_block(b);

	if (i != b->sets[0].data) {
		b->sets[++b->nsets].data = i;
		i->seq = b->sets[0].data->seq;
	} else
		get_random_bytes(&i->seq, sizeof(uint64_t));

	i->magic	= bset_magic(b->c);
	i->version	= 0;
	i->keys		= 0;

	bset_build_unwritten_tree(b);
}
struct bset_search_iter {
	struct bkey *l, *r;
};

static struct bset_search_iter bset_search_write_set(struct btree *b,
						     struct bset_tree *t,
						     const struct bkey *search)
{
	unsigned li = 0, ri = t->size;

	BUG_ON(!b->nsets &&
	       t->size < bkey_to_cacheline(t, end(t->data)));

	while (li + 1 != ri) {
		unsigned m = (li + ri) >> 1;

		if (bkey_cmp(table_to_bkey(t, m), search) > 0)
			ri = m;
		else
			li = m;
	}

	return (struct bset_search_iter) {
		table_to_bkey(t, li),
		ri < t->size ? table_to_bkey(t, ri) : end(t->data)
	};
}
static struct bset_search_iter bset_search_tree(struct btree *b,
						struct bset_tree *t,
						const struct bkey *search)
{
	struct bkey *l, *r;
	struct bkey_float *f;
	unsigned inorder, j, n = 1;

	do {
		unsigned p = n << 4;
		p &= ((int) (p - t->size)) >> 31;

		prefetch(&t->tree[p]);

		j = n;
		f = &t->tree[j];

		/*
		 * n = (f->mantissa > bfloat_mantissa())
		 *	? j * 2
		 *	: j * 2 + 1;
		 *
		 * We need to subtract 1 from f->mantissa for the sign bit trick
		 * to work - that's done in make_bfloat()
		 */
		if (likely(f->exponent != 127))
			n = j * 2 + (((unsigned)
				      (f->mantissa -
				       bfloat_mantissa(search, f))) >> 31);
		else
			n = (bkey_cmp(tree_to_bkey(t, j), search) > 0)
				? j * 2
				: j * 2 + 1;
	} while (n < t->size);

	inorder = to_inorder(j, t);

	/*
	 * n would have been the node we recursed to - the low bit tells us if
	 * we recursed left or recursed right.
	 */
	if (n & 1) {
		l = cacheline_to_bkey(t, inorder, f->m);

		if (++inorder != t->size) {
			f = &t->tree[inorder_next(j, t->size)];
			r = cacheline_to_bkey(t, inorder, f->m);
		} else
			r = end(t->data);
	} else {
		r = cacheline_to_bkey(t, inorder, f->m);

		if (--inorder) {
			f = &t->tree[inorder_prev(j, t->size)];
			l = cacheline_to_bkey(t, inorder, f->m);
		} else
			l = t->data->start;
	}

	return (struct bset_search_iter) {l, r};
}
struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,
			       const struct bkey *search)
{
	struct bset_search_iter i;

	/*
	 * First, we search for a cacheline, then lastly we do a linear search
	 * within that cacheline.
	 *
	 * To search for the cacheline, there are three different possibilities:
	 *  * The set is too small to have a search tree, so we just do a linear
	 *    search over the whole set.
	 *  * The set is the one we're currently inserting into; keeping a full
	 *    auxiliary search tree up to date would be too expensive, so we
	 *    use a much simpler lookup table to do a binary search -
	 *    bset_search_write_set().
	 *  * Or we use the auxiliary search tree we constructed earlier -
	 *    bset_search_tree()
	 */

	if (unlikely(!t->size)) {
		i.l = t->data->start;
		i.r = end(t->data);
	} else if (bset_written(b, t)) {
		/*
		 * Each node in the auxiliary search tree covers a certain range
		 * of bits, and keys above and below the set it covers might
		 * differ outside those bits - so we have to special case the
		 * start and end - handle that here:
		 */

		if (unlikely(bkey_cmp(search, &t->end) >= 0))
			return end(t->data);

		if (unlikely(bkey_cmp(search, t->data->start) < 0))
			return t->data->start;

		i = bset_search_tree(b, t, search);
	} else
		i = bset_search_write_set(b, t, search);

#ifdef CONFIG_BCACHE_EDEBUG
	BUG_ON(bset_written(b, t) &&
	       i.l != t->data->start &&
	       bkey_cmp(tree_to_prev_bkey(t,
		  inorder_to_tree(bkey_to_cacheline(t, i.l), t)),
			search) > 0);

	BUG_ON(i.r != end(t->data) &&
	       bkey_cmp(i.r, search) <= 0);
#endif

	while (likely(i.l != i.r) &&
	       bkey_cmp(i.l, search) <= 0)
		i.l = bkey_next(i.l);

	return i.l;
}
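/*
 * Editorial illustration: the final linear walk advances i.l past every key
 * <= search, so the function returns the first key strictly greater than the
 * search key (or end(t->data) if there is none). E.g. if a set holds keys for
 * inode 1 ending at offsets 512, 1024 and 2048, searching for (1, 1024)
 * returns the key ending at 2048.
 */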
static inline bool btree_iter_cmp(struct btree_iter_set l,
				  struct btree_iter_set r)
{
	int64_t c = bkey_cmp(&START_KEY(l.k), &START_KEY(r.k));

	return c ? c > 0 : l.k < r.k;
}
static inline bool btree_iter_end(struct btree_iter *iter)
{
	return !iter->used;
}
void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
			 struct bkey *end)
{
	if (k != end)
		BUG_ON(!heap_add(iter,
				 ((struct btree_iter_set) { k, end }),
				 btree_iter_cmp));
}
struct bkey *__bch_btree_iter_init(struct btree *b, struct btree_iter *iter,
				   struct bkey *search, struct bset_tree *start)
{
	struct bkey *ret = NULL;
	iter->size = ARRAY_SIZE(iter->data);
	iter->used = 0;

	for (; start <= &b->sets[b->nsets]; start++) {
		ret = bch_bset_search(b, start, search);
		bch_btree_iter_push(iter, ret, end(start->data));
	}

	return ret;
}
struct bkey *bch_btree_iter_next(struct btree_iter *iter)
{
	struct btree_iter_set unused;
	struct bkey *ret = NULL;

	if (!btree_iter_end(iter)) {
		ret = iter->data->k;
		iter->data->k = bkey_next(iter->data->k);

		if (iter->data->k > iter->data->end) {
			WARN_ONCE(1, "bset was corrupt!\n");
			iter->data->k = iter->data->end;
		}

		if (iter->data->k == iter->data->end)
			heap_pop(iter, unused, btree_iter_cmp);
		else
			heap_sift(iter, 0, btree_iter_cmp);
	}

	return ret;
}
struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
					struct btree *b, ptr_filter_fn fn)
{
	struct bkey *ret;

	do {
		ret = bch_btree_iter_next(iter);
	} while (ret && fn(b, ret));

	return ret;
}
struct bkey *bch_next_recurse_key(struct btree *b, struct bkey *search)
{
	struct btree_iter iter;

	bch_btree_iter_init(b, &iter, search);
	return bch_btree_iter_next_filter(&iter, b, bch_ptr_bad);
}
static void sort_key_next(struct btree_iter *iter,
			  struct btree_iter_set *i)
{
	i->k = bkey_next(i->k);

	if (i->k == i->end)
		*i = iter->data[--iter->used];
}
static struct bkey *btree_sort_fixup(struct btree_iter *iter, struct bkey *tmp)
{
	while (iter->used > 1) {
		struct btree_iter_set *top = iter->data, *i = top + 1;

		if (iter->used > 2 &&
		    btree_iter_cmp(i[0], i[1]))
			i++;

		if (bkey_cmp(top->k, &START_KEY(i->k)) <= 0)
			break;

		if (!KEY_SIZE(i->k)) {
			sort_key_next(iter, i);
			heap_sift(iter, i - top, btree_iter_cmp);
			continue;
		}

		if (top->k > i->k) {
			if (bkey_cmp(top->k, i->k) >= 0)
				sort_key_next(iter, i);
			else
				bch_cut_front(top->k, i->k);

			heap_sift(iter, i - top, btree_iter_cmp);
		} else {
			/* can't happen because of comparison func */
			BUG_ON(!bkey_cmp(&START_KEY(top->k), &START_KEY(i->k)));

			if (bkey_cmp(i->k, top->k) < 0) {
				bkey_copy(tmp, top->k);

				bch_cut_back(&START_KEY(i->k), tmp);
				bch_cut_front(i->k, top->k);
				heap_sift(iter, 0, btree_iter_cmp);

				return tmp;
			} else {
				bch_cut_back(&START_KEY(i->k), top->k);
			}
		}
	}

	return NULL;
}
static void btree_mergesort(struct btree *b, struct bset *out,
			    struct btree_iter *iter,
			    bool fixup, bool remove_stale)
{
	struct bkey *k, *last = NULL;
	BKEY_PADDED(k) tmp;
	bool (*bad)(struct btree *, const struct bkey *) = remove_stale
		? bch_ptr_bad
		: bch_ptr_invalid;

	while (!btree_iter_end(iter)) {
		if (fixup && !b->level)
			k = btree_sort_fixup(iter, &tmp.k);
		else
			k = NULL;

		if (!k)
			k = bch_btree_iter_next(iter);

		if (bad(b, k))
			continue;

		if (!last) {
			last = out->start;
			bkey_copy(last, k);
		} else if (b->level ||
			   !bch_bkey_try_merge(b, last, k)) {
			last = bkey_next(last);
			bkey_copy(last, k);
		}
	}

	out->keys = last ? (uint64_t *) bkey_next(last) - out->d : 0;

	pr_debug("sorted %i keys", out->keys);
	bch_check_key_order(b, out);
}
static void __btree_sort(struct btree *b, struct btree_iter *iter,
			 unsigned start, unsigned order, bool fixup)
{
	uint64_t start_time;
	bool remove_stale = !b->written;
	struct bset *out = (void *) __get_free_pages(__GFP_NOWARN|GFP_NOIO,
						     order);
	if (!out) {
		mutex_lock(&b->c->sort_lock);
		out = b->c->sort;
		order = ilog2(bucket_pages(b->c));
	}

	start_time = local_clock();

	btree_mergesort(b, out, iter, fixup, remove_stale);
	b->nsets = start;

	if (!fixup && !start && b->written)
		bch_btree_verify(b, out);

	if (!start && order == b->page_order) {
		/*
		 * Our temporary buffer is the same size as the btree node's
		 * buffer, we can just swap buffers instead of doing a big
		 * memcpy()
		 */

		out->magic	= bset_magic(b->c);
		out->seq	= b->sets[0].data->seq;
		out->version	= b->sets[0].data->version;
		swap(out, b->sets[0].data);

		if (b->c->sort == b->sets[0].data)
			b->c->sort = out;
	} else {
		b->sets[start].data->keys = out->keys;
		memcpy(b->sets[start].data->start, out->start,
		       (void *) end(out) - (void *) out->start);
	}

	if (out == b->c->sort)
		mutex_unlock(&b->c->sort_lock);
	else
		free_pages((unsigned long) out, order);

	if (b->written)
		bset_build_written_tree(b);

	if (!start) {
		spin_lock(&b->c->sort_time_lock);
		bch_time_stats_update(&b->c->sort_time, start_time);
		spin_unlock(&b->c->sort_time_lock);
	}
}
void bch_btree_sort_partial(struct btree *b, unsigned start)
{
	size_t oldsize = 0, order = b->page_order, keys = 0;
	struct btree_iter iter;
	__bch_btree_iter_init(b, &iter, NULL, &b->sets[start]);

	BUG_ON(b->sets[b->nsets].data == write_block(b) &&
	       (b->sets[b->nsets].size || b->nsets));

	if (b->written)
		oldsize = bch_count_data(b);

	if (start) {
		unsigned i;

		for (i = start; i <= b->nsets; i++)
			keys += b->sets[i].data->keys;

		order = roundup_pow_of_two(__set_bytes(b->sets->data,
						       keys)) / PAGE_SIZE;
		if (order)
			order = ilog2(order);
	}

	__btree_sort(b, &iter, start, order, false);

	EBUG_ON(b->written && bch_count_data(b) != oldsize);
}
void bch_btree_sort_and_fix_extents(struct btree *b, struct btree_iter *iter)
{
	BUG_ON(!b->written);
	__btree_sort(b, iter, 0, b->page_order, true);
}
void bch_btree_sort_into(struct btree *b, struct btree *new)
{
	uint64_t start_time = local_clock();

	struct btree_iter iter;
	bch_btree_iter_init(b, &iter, NULL);

	btree_mergesort(b, new->sets->data, &iter, false, true);

	spin_lock(&b->c->sort_time_lock);
	bch_time_stats_update(&b->c->sort_time, start_time);
	spin_unlock(&b->c->sort_time_lock);

	bkey_copy_key(&new->key, &b->key);
	new->sets->size = 0;
}
#define SORT_CRIT	(4096 / sizeof(uint64_t))

void bch_btree_sort_lazy(struct btree *b)
{
	unsigned crit = SORT_CRIT;
	int i;

	/* Don't sort if nothing to do */
	if (!b->nsets)
		goto out;

	/* If not a leaf node, always sort */
	if (b->level) {
		bch_btree_sort(b);
		return;
	}

	for (i = b->nsets - 1; i >= 0; --i) {
		crit *= b->c->sort_crit_factor;

		if (b->sets[i].data->keys < crit) {
			bch_btree_sort_partial(b, i);
			return;
		}
	}

	/* Sort if we'd overflow */
	if (b->nsets + 1 == MAX_BSETS) {
		bch_btree_sort(b);
		return;
	}

out:
	bset_build_written_tree(b);
}
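/*
 * Editorial illustration: SORT_CRIT is 4096 / sizeof(uint64_t) = 512. Walking
 * from the newest set towards set 0, crit is multiplied by sort_crit_factor
 * at each step; with a purely hypothetical factor of 3 the thresholds would
 * be 1536, 4608, ... (in the same units as bset->keys), so an older, larger
 * set is only resorted while it is still comparatively small.
 */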
struct bset_stats {
	size_t nodes;
	size_t sets_written, sets_unwritten;
	size_t bytes_written, bytes_unwritten;
	size_t floats, failed;
};
static int bch_btree_bset_stats(struct btree *b, struct btree_op *op,
				struct bset_stats *stats)
{
	struct bkey *k;
	unsigned i;

	stats->nodes++;

	for (i = 0; i <= b->nsets; i++) {
		struct bset_tree *t = &b->sets[i];
		size_t bytes = t->data->keys * sizeof(uint64_t);
		size_t j;

		if (bset_written(b, t)) {
			stats->sets_written++;
			stats->bytes_written += bytes;

			stats->floats += t->size - 1;

			for (j = 1; j < t->size; j++)
				if (t->tree[j].exponent == 127)
					stats->failed++;
		} else {
			stats->sets_unwritten++;
			stats->bytes_unwritten += bytes;
		}
	}

	if (b->level) {
		struct btree_iter iter;

		for_each_key_filter(b, k, &iter, bch_ptr_bad) {
			int ret = btree(bset_stats, k, b, op, stats);
			if (ret)
				return ret;
		}
	}

	return 0;
}
int bch_bset_print_stats(struct cache_set *c, char *buf)
{
	struct btree_op op;
	struct bset_stats t;
	int ret;

	bch_btree_op_init_stack(&op);
	memset(&t, 0, sizeof(struct bset_stats));

	ret = btree_root(bset_stats, c, &op, &t);
	if (ret)
		return ret;

	return snprintf(buf, PAGE_SIZE,
			"btree nodes:		%zu\n"
			"written sets:		%zu\n"
			"unwritten sets:	%zu\n"
			"written key bytes:	%zu\n"
			"unwritten key bytes:	%zu\n"
			"floats:		%zu\n"
			"failed:		%zu\n",
			t.nodes,
			t.sets_written, t.sets_unwritten,
			t.bytes_written, t.bytes_unwritten,
			t.floats, t.failed);
}