/*
 * Copyright (C) 2011 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */
#include "dm-btree.h"
#include "dm-btree-internal.h"
#include "dm-transaction-manager.h"

#include <linux/export.h>
/*
 * Removing an entry from a btree
 * ==============================
 *
 * A very important constraint for our btree is that no node, except the
 * root, may have fewer than a certain number of entries.
 * (MIN_ENTRIES <= nr_entries <= MAX_ENTRIES).
 *
 * Ensuring this is complicated by the way we want to only ever hold the
 * locks on 2 nodes concurrently, and only change nodes in a top to bottom
 * fashion.
 *
 * Each node may have a left or right sibling.  When descending the spine,
 * if a node contains only MIN_ENTRIES then we try and increase this to at
 * least MIN_ENTRIES + 1.  We do this in the following ways:
 *
 * [A] No siblings => this can only happen if the node is the root, in which
 *     case we copy the child's contents over the root.
 *
 * [B] No left sibling
 *     ==> rebalance(node, right sibling)
 *
 * [C] No right sibling
 *     ==> rebalance(left sibling, node)
 *
 * [D] Both siblings, total_entries(left, node, right) <= DEL_THRESHOLD
 *     ==> delete node adding its contents to left and right
 *
 * [E] Both siblings, total_entries(left, node, right) > DEL_THRESHOLD
 *     ==> rebalance(left, node, right)
 *
 * After these operations it's possible that our original node no
 * longer contains the desired sub tree.  For this reason this rebalancing
 * is performed on the children of the current node.  This also avoids
 * having a special case for the root.
 *
 * Once this rebalancing has occurred we can then step into the child node
 * for internal nodes, or delete the entry for leaf nodes.
 */
/*
 * Some little utilities for moving node data around.
 */
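/*
 * node_shift() moves entries within a single node: a negative shift slides
 * entries down towards index 0 (overwriting the first -shift slots), a
 * positive shift slides them up, making room at the front of the node.
 * The caller is responsible for adjusting nr_entries afterwards.
 */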
static void node_shift(struct btree_node *n, int shift)
{
        uint32_t nr_entries = le32_to_cpu(n->header.nr_entries);
        uint32_t value_size = le32_to_cpu(n->header.value_size);

        if (shift < 0) {
                shift = -shift;
                BUG_ON(shift > nr_entries);
                BUG_ON((void *) key_ptr(n, shift) >= value_ptr(n, shift));
                memmove(key_ptr(n, 0),
                        key_ptr(n, shift),
                        (nr_entries - shift) * sizeof(__le64));
                memmove(value_ptr(n, 0),
                        value_ptr(n, shift),
                        (nr_entries - shift) * value_size);
        } else {
                BUG_ON(nr_entries + shift > le32_to_cpu(n->header.max_entries));
                memmove(key_ptr(n, shift),
                        key_ptr(n, 0),
                        nr_entries * sizeof(__le64));
                memmove(value_ptr(n, shift),
                        value_ptr(n, 0),
                        nr_entries * value_size);
        }
}
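/*
 * node_copy() copies entries between two siblings: a negative shift copies
 * the first -shift entries of right onto the end of left, a positive shift
 * copies the last shift entries of left to the front of right.  Again, the
 * caller fixes up nr_entries.
 */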
static void node_copy(struct btree_node *left, struct btree_node *right, int shift)
{
        uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
        uint32_t value_size = le32_to_cpu(left->header.value_size);
        BUG_ON(value_size != le32_to_cpu(right->header.value_size));

        if (shift < 0) {
                shift = -shift;
                BUG_ON(nr_left + shift > le32_to_cpu(left->header.max_entries));
                memcpy(key_ptr(left, nr_left),
                       key_ptr(right, 0),
                       shift * sizeof(__le64));
                memcpy(value_ptr(left, nr_left),
                       value_ptr(right, 0),
                       shift * value_size);
        } else {
                BUG_ON(shift > le32_to_cpu(right->header.max_entries));
                memcpy(key_ptr(right, 0),
                       key_ptr(left, nr_left - shift),
                       shift * sizeof(__le64));
                memcpy(value_ptr(right, 0),
                       value_ptr(left, nr_left - shift),
                       shift * value_size);
        }
}
/*
 * Delete a specific entry from a leaf node.
 */
static void delete_at(struct btree_node *n, unsigned index)
{
        unsigned nr_entries = le32_to_cpu(n->header.nr_entries);
        unsigned nr_to_copy = nr_entries - (index + 1);
        uint32_t value_size = le32_to_cpu(n->header.value_size);
        BUG_ON(index >= nr_entries);

        if (nr_to_copy) {
                memmove(key_ptr(n, index),
                        key_ptr(n, index + 1),
                        nr_to_copy * sizeof(__le64));

                memmove(value_ptr(n, index),
                        value_ptr(n, index + 1),
                        nr_to_copy * value_size);
        }

        n->header.nr_entries = cpu_to_le32(nr_entries - 1);
}
static unsigned merge_threshold(struct btree_node *n)
{
        return le32_to_cpu(n->header.max_entries) / 3;
}
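/*
 * Tracks a child of the node being rebalanced: its index in the parent,
 * the shadowed block and a pointer to its node data.
 */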
struct child {
        unsigned index;
        struct dm_block *block;
        struct btree_node *n;
};
static int init_child(struct dm_btree_info *info, struct dm_btree_value_type *vt,
                      struct btree_node *parent,
                      unsigned index, struct child *result)
{
        int r, inc;
        dm_block_t root;

        result->index = index;
        root = value64(parent, index);

        r = dm_tm_shadow_block(info->tm, root, &btree_node_validator,
                               &result->block, &inc);
        if (r)
                return r;

        result->n = dm_block_data(result->block);

        if (inc)
                inc_children(info->tm, result->n, vt);

        *((__le64 *) value_ptr(parent, index)) =
                cpu_to_le64(dm_block_location(result->block));

        return 0;
}
static int exit_child(struct dm_btree_info *info, struct child *c)
{
        return dm_tm_unlock(info->tm, c->block);
}
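/*
 * shift() moves count entries between two adjacent siblings: a positive
 * count moves entries from left to right, a negative count moves them from
 * right to left.  Both headers' nr_entries fields are updated.
 */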
static void shift(struct btree_node *left, struct btree_node *right, int count)
{
        uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
        uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
        uint32_t max_entries = le32_to_cpu(left->header.max_entries);
        uint32_t r_max_entries = le32_to_cpu(right->header.max_entries);

        BUG_ON(max_entries != r_max_entries);
        BUG_ON(nr_left - count > max_entries);
        BUG_ON(nr_right + count > max_entries);

        if (!count)
                return;

        if (count > 0) {
                node_shift(right, count);
                node_copy(left, right, count);
        } else {
                node_copy(left, right, count);
                node_shift(right, count);
        }

        left->header.nr_entries = cpu_to_le32(nr_left - count);
        right->header.nr_entries = cpu_to_le32(nr_right + count);
}
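/*
 * Rebalances two siblings: if their combined entries fall below the merge
 * threshold they are merged into the left node and the right node's entry
 * is deleted from the parent, otherwise the entries are redistributed
 * evenly between the two.
 */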
static void __rebalance2(struct dm_btree_info *info, struct btree_node *parent,
                         struct child *l, struct child *r)
{
        struct btree_node *left = l->n;
        struct btree_node *right = r->n;
        uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
        uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
        unsigned threshold = 2 * merge_threshold(left) + 1;

        if (nr_left + nr_right < threshold) {
                /*
                 * Merge
                 */
                node_copy(left, right, -nr_right);
                left->header.nr_entries = cpu_to_le32(nr_left + nr_right);
                delete_at(parent, r->index);

                /*
                 * We need to decrement the right block, but not its
                 * children, since they're still referenced by left.
                 */
                dm_tm_dec(info->tm, dm_block_location(r->block));
        } else {
                /*
                 * Rebalance.
                 */
                unsigned target_left = (nr_left + nr_right) / 2;

                shift(left, right, nr_left - target_left);
                *key_ptr(parent, r->index) = right->keys[0];
        }
}
static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info,
                      struct dm_btree_value_type *vt, unsigned left_index)
{
        int r;
        struct btree_node *parent;
        struct child left, right;

        parent = dm_block_data(shadow_current(s));

        r = init_child(info, vt, parent, left_index, &left);
        if (r)
                return r;

        r = init_child(info, vt, parent, left_index + 1, &right);
        if (r) {
                exit_child(info, &left);
                return r;
        }

        __rebalance2(info, parent, &left, &right);

        r = exit_child(info, &left);
        if (r) {
                exit_child(info, &right);
                return r;
        }

        return exit_child(info, &right);
}
/*
 * We dump as many entries from center as possible into left, then the rest
 * in right, then rebalance2.  This wastes some cpu, but I want something
 * simple for now.
 */
static void delete_center_node(struct dm_btree_info *info, struct btree_node *parent,
                               struct child *l, struct child *c, struct child *r,
                               struct btree_node *left, struct btree_node *center, struct btree_node *right,
                               uint32_t nr_left, uint32_t nr_center, uint32_t nr_right)
{
        uint32_t max_entries = le32_to_cpu(left->header.max_entries);
        unsigned shift = min(max_entries - nr_left, nr_center);

        BUG_ON(nr_left + shift > max_entries);
        node_copy(left, center, -shift);
        left->header.nr_entries = cpu_to_le32(nr_left + shift);

        if (shift != nr_center) {
                shift = nr_center - shift;
                BUG_ON((nr_right + shift) > max_entries);
                node_shift(right, shift);
                node_copy(center, right, shift);
                right->header.nr_entries = cpu_to_le32(nr_right + shift);
        }
        *key_ptr(parent, r->index) = right->keys[0];

        delete_at(parent, c->index);
        r->index--;

        dm_tm_dec(info->tm, dm_block_location(c->block));
        __rebalance2(info, parent, l, r);
}
/*
 * Redistributes entries among 3 sibling nodes.
 */
static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
                          struct child *l, struct child *c, struct child *r,
                          struct btree_node *left, struct btree_node *center, struct btree_node *right,
                          uint32_t nr_left, uint32_t nr_center, uint32_t nr_right)
{
        int s;
        uint32_t max_entries = le32_to_cpu(left->header.max_entries);
        unsigned target = (nr_left + nr_center + nr_right) / 3;
        BUG_ON(target > max_entries);

        if (nr_left < nr_right) {
                s = nr_left - target;

                if (s < 0 && nr_center < -s) {
                        /* not enough in central node */
                        shift(left, center, -nr_center);
                        s += nr_center;
                        shift(left, right, s);
                        nr_right += s;
                } else
                        shift(left, center, s);

                shift(center, right, target - nr_right);

        } else {
                s = target - nr_right;
                if (s > 0 && nr_center < s) {
                        /* not enough in central node */
                        shift(center, right, nr_center);
                        s -= nr_center;
                        shift(left, right, s);
                        nr_left -= s;
                } else
                        shift(center, right, s);

                shift(left, center, nr_left - target);
        }

        *key_ptr(parent, c->index) = center->keys[0];
        *key_ptr(parent, r->index) = right->keys[0];
}
static void __rebalance3(struct dm_btree_info *info, struct btree_node *parent,
                         struct child *l, struct child *c, struct child *r)
{
        struct btree_node *left = l->n;
        struct btree_node *center = c->n;
        struct btree_node *right = r->n;

        uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
        uint32_t nr_center = le32_to_cpu(center->header.nr_entries);
        uint32_t nr_right = le32_to_cpu(right->header.nr_entries);

        unsigned threshold = merge_threshold(left) * 4 + 1;

        BUG_ON(left->header.max_entries != center->header.max_entries);
        BUG_ON(center->header.max_entries != right->header.max_entries);

        if ((nr_left + nr_center + nr_right) < threshold)
                delete_center_node(info, parent, l, c, r, left, center, right,
                                   nr_left, nr_center, nr_right);
        else
                redistribute3(info, parent, l, c, r, left, center, right,
                              nr_left, nr_center, nr_right);
}
static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info,
                      struct dm_btree_value_type *vt, unsigned left_index)
{
        int r;
        struct btree_node *parent = dm_block_data(shadow_current(s));
        struct child left, center, right;

        /*
         * FIXME: fill out an array?
         */
        r = init_child(info, vt, parent, left_index, &left);
        if (r)
                return r;

        r = init_child(info, vt, parent, left_index + 1, &center);
        if (r) {
                exit_child(info, &left);
                return r;
        }

        r = init_child(info, vt, parent, left_index + 2, &right);
        if (r) {
                exit_child(info, &left);
                exit_child(info, &center);
                return r;
        }

        __rebalance3(info, parent, &left, &center, &right);

        r = exit_child(info, &left);
        if (r) {
                exit_child(info, &center);
                exit_child(info, &right);
                return r;
        }

        r = exit_child(info, &center);
        if (r) {
                exit_child(info, &right);
                return r;
        }

        r = exit_child(info, &right);
        if (r)
                return r;

        return 0;
}
static int get_nr_entries(struct dm_transaction_manager *tm,
                          dm_block_t b, uint32_t *result)
{
        int r;
        struct dm_block *block;
        struct btree_node *n;

        r = dm_tm_read_lock(tm, b, &btree_node_validator, &block);
        if (r)
                return r;

        n = dm_block_data(block);
        *result = le32_to_cpu(n->header.nr_entries);

        return dm_tm_unlock(tm, block);
}
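/*
 * Ensures the child we are about to descend into has more than the minimum
 * number of entries, rebalancing with one or two siblings as required.  If
 * the current node is an internal node with a single child, the child's
 * contents are copied over it, reducing the height of the tree.
 */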
static int rebalance_children(struct shadow_spine *s,
                              struct dm_btree_info *info,
                              struct dm_btree_value_type *vt, uint64_t key)
{
        int i, r, has_left_sibling, has_right_sibling;
        uint32_t child_entries;
        struct btree_node *n;

        n = dm_block_data(shadow_current(s));

        if (le32_to_cpu(n->header.nr_entries) == 1) {
                struct dm_block *child;
                dm_block_t b = value64(n, 0);

                r = dm_tm_read_lock(info->tm, b, &btree_node_validator, &child);
                if (r)
                        return r;

                memcpy(n, dm_block_data(child),
                       dm_bm_block_size(dm_tm_get_bm(info->tm)));
                r = dm_tm_unlock(info->tm, child);
                if (r)
                        return r;

                dm_tm_dec(info->tm, dm_block_location(child));
                return 0;
        }

        i = lower_bound(n, key);
        if (i < 0)
                return -ENODATA;

        r = get_nr_entries(info->tm, value64(n, i), &child_entries);
        if (r)
                return r;

        has_left_sibling = i > 0;
        has_right_sibling = i < (le32_to_cpu(n->header.nr_entries) - 1);

        if (!has_left_sibling)
                r = rebalance2(s, info, vt, i);

        else if (!has_right_sibling)
                r = rebalance2(s, info, vt, i - 1);

        else
                r = rebalance3(s, info, vt, i - 1);

        return r;
}
static int do_leaf(struct btree_node *n, uint64_t key, unsigned *index)
{
        int i = lower_bound(n, key);

        if ((i < 0) ||
            (i >= le32_to_cpu(n->header.nr_entries)) ||
            (le64_to_cpu(n->keys[i]) != key))
                return -ENODATA;

        *index = i;

        return 0;
}
/*
 * Prepares for removal from one level of the hierarchy.  The caller must
 * call delete_at() to remove the entry at index.
 */
static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
                      struct dm_btree_value_type *vt, dm_block_t root,
                      uint64_t key, unsigned *index)
{
        int i = *index, r;
        struct btree_node *n;

        for (;;) {
                r = shadow_step(s, root, vt);
                if (r < 0)
                        break;

                /*
                 * We have to patch up the parent node, ugly, but I don't
                 * see a way to do this automatically as part of the spine
                 * op.
                 */
                if (shadow_has_parent(s)) {
                        __le64 location = cpu_to_le64(dm_block_location(shadow_current(s)));
                        memcpy(value_ptr(dm_block_data(shadow_parent(s)), i),
                               &location, sizeof(__le64));
                }

                n = dm_block_data(shadow_current(s));

                if (le32_to_cpu(n->header.flags) & LEAF_NODE)
                        return do_leaf(n, key, index);

                r = rebalance_children(s, info, vt, key);
                if (r)
                        break;

                n = dm_block_data(shadow_current(s));
                if (le32_to_cpu(n->header.flags) & LEAF_NODE)
                        return do_leaf(n, key, index);

                i = lower_bound(n, key);

                /*
                 * We know the key is present, or else
                 * rebalance_children would have returned
                 * -ENODATA
                 */
                root = value64(n, i);
        }

        return r;
}
int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
                    uint64_t *keys, dm_block_t *new_root)
{
        unsigned level, last_level = info->levels - 1;
        int index = 0, r = 0;
        struct shadow_spine spine;
        struct btree_node *n;
        struct dm_btree_value_type le64_vt;

        init_le64_type(info->tm, &le64_vt);
        init_shadow_spine(&spine, info);
        for (level = 0; level < info->levels; level++) {
                r = remove_raw(&spine, info,
                               (level == last_level ?
                                &info->value_type : &le64_vt),
                               root, keys[level], (unsigned *)&index);
                if (r < 0)
                        break;

                n = dm_block_data(shadow_current(&spine));
                if (level != last_level) {
                        root = value64(n, index);
                        continue;
                }

                BUG_ON(index < 0 || index >= le32_to_cpu(n->header.nr_entries));

                if (info->value_type.dec)
                        info->value_type.dec(info->value_type.context,
                                             value_ptr(n, index));

                delete_at(n, index);
        }

        *new_root = shadow_root(&spine);
        exit_shadow_spine(&spine);

        return r;
}
EXPORT_SYMBOL_GPL(dm_btree_remove);
/*----------------------------------------------------------------*/
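/*
 * Like remove_raw(), but instead of insisting on an exact key match it
 * descends to the leaf and reports the lower_bound index for the key,
 * which may refer to a neighbouring entry (or be negative).
 */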
static int remove_nearest(struct shadow_spine *s, struct dm_btree_info *info,
                          struct dm_btree_value_type *vt, dm_block_t root,
                          uint64_t key, int *index)
{
        int i = *index, r;
        struct btree_node *n;

        for (;;) {
                r = shadow_step(s, root, vt);
                if (r < 0)
                        break;

                /*
                 * We have to patch up the parent node, ugly, but I don't
                 * see a way to do this automatically as part of the spine
                 * op.
                 */
                if (shadow_has_parent(s)) {
                        __le64 location = cpu_to_le64(dm_block_location(shadow_current(s)));
                        memcpy(value_ptr(dm_block_data(shadow_parent(s)), i),
                               &location, sizeof(__le64));
                }

                n = dm_block_data(shadow_current(s));

                if (le32_to_cpu(n->header.flags) & LEAF_NODE) {
                        *index = lower_bound(n, key);
                        return 0;
                }

                r = rebalance_children(s, info, vt, key);
                if (r)
                        break;

                n = dm_block_data(shadow_current(s));
                if (le32_to_cpu(n->header.flags) & LEAF_NODE) {
                        *index = lower_bound(n, key);
                        return 0;
                }

                i = lower_bound(n, key);

                /*
                 * We know the key is present, or else
                 * rebalance_children would have returned
                 * -ENODATA
                 */
                root = value64(n, i);
        }

        return r;
}
static int remove_one(struct dm_btree_info *info, dm_block_t root,
                      uint64_t *keys, uint64_t end_key,
                      dm_block_t *new_root, unsigned *nr_removed)
{
        unsigned level, last_level = info->levels - 1;
        int index = 0, r = 0;
        struct shadow_spine spine;
        struct btree_node *n;
        struct dm_btree_value_type le64_vt;
        uint64_t k;

        init_le64_type(info->tm, &le64_vt);
        init_shadow_spine(&spine, info);
        for (level = 0; level < last_level; level++) {
                r = remove_raw(&spine, info, &le64_vt,
                               root, keys[level], (unsigned *) &index);
                if (r < 0)
                        goto out;

                n = dm_block_data(shadow_current(&spine));
                root = value64(n, index);
        }

        r = remove_nearest(&spine, info, &info->value_type,
                           root, keys[last_level], &index);
        if (r < 0)
                goto out;

        n = dm_block_data(shadow_current(&spine));

        if (index < 0)
                index = 0;

        if (index >= le32_to_cpu(n->header.nr_entries)) {
                r = -ENODATA;
                goto out;
        }

        k = le64_to_cpu(n->keys[index]);
        if (k >= keys[last_level] && k < end_key) {
                if (info->value_type.dec)
                        info->value_type.dec(info->value_type.context,
                                             value_ptr(n, index));

                delete_at(n, index);
                keys[last_level] = k + 1ull;
        } else
                r = -ENODATA;

out:
        *new_root = shadow_root(&spine);
        exit_shadow_spine(&spine);

        return r;
}
int dm_btree_remove_leaves(struct dm_btree_info *info, dm_block_t root,
                           uint64_t *first_key, uint64_t end_key,
                           dm_block_t *new_root, unsigned *nr_removed)
{
        int r;

        *nr_removed = 0;
        do {
                r = remove_one(info, root, first_key, end_key, &root, nr_removed);
                if (!r)
                        (*nr_removed)++;
        } while (!r);

        *new_root = root;
        return r == -ENODATA ? 0 : r;
}
EXPORT_SYMBOL_GPL(dm_btree_remove_leaves);