/*
 * Copyright (C) 2011 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#include "dm-btree-internal.h"
#include "dm-transaction-manager.h"

#include <linux/export.h>

/*
 * Removing an entry from a btree
 * ==============================
 *
 * A very important constraint for our btree is that no node, except the
 * root, may have fewer than a certain number of entries.
 * (MIN_ENTRIES <= nr_entries <= MAX_ENTRIES).
 *
 * Ensuring this is complicated by the way we want to only ever hold the
 * locks on 2 nodes concurrently, and only change nodes in a top to bottom
 * fashion.
 *
 * Each node may have a left or right sibling.  When descending the spine,
 * if a node contains only MIN_ENTRIES then we try to increase this to at
 * least MIN_ENTRIES + 1.  We do this in the following ways:
 *
 * [A] No siblings => this can only happen if the node is the root, in which
 *     case we copy the child's contents over the root.
 *
 * [B] No left sibling
 *     ==> rebalance(node, right sibling)
 *
 * [C] No right sibling
 *     ==> rebalance(left sibling, node)
 *
 * [D] Both siblings, total_entries(left, node, right) <= DEL_THRESHOLD
 *     ==> delete node adding its contents to left and right
 *
 * [E] Both siblings, total_entries(left, node, right) > DEL_THRESHOLD
 *     ==> rebalance(left, node, right)
 *
 * After these operations it's possible that our original node no
 * longer contains the desired sub tree.  For this reason this rebalancing
 * is performed on the children of the current node.  This also avoids
 * having a special case for the root.
 *
 * Once this rebalancing has occurred we can then step into the child node
 * for internal nodes, or delete the entry for leaf nodes.
 */

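/*
 * In the code below MIN_ENTRIES corresponds to del_threshold(), and the
 * merge decisions in __rebalance2()/__rebalance3() are made against
 * merge_threshold(); both are derived from a node's max_entries.
 */
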
/*
 * Some little utilities for moving node data around.
 */

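/*
 * Shifts the entries within a node.  A negative shift moves entries
 * towards index 0 (overwriting the first -shift entries); a positive
 * shift opens up a gap of 'shift' entries at the front.  The header's
 * nr_entries is left for the caller to adjust.
 */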
static void node_shift(struct node *n, int shift)
{
	uint32_t nr_entries = le32_to_cpu(n->header.nr_entries);
	uint32_t value_size = le32_to_cpu(n->header.value_size);

	if (shift < 0) {
		shift = -shift;
		BUG_ON(shift > nr_entries);
		BUG_ON((void *) key_ptr(n, shift) >= value_ptr(n, shift, value_size));
		memmove(key_ptr(n, 0),
			key_ptr(n, shift),
			(nr_entries - shift) * sizeof(__le64));
		memmove(value_ptr(n, 0, value_size),
			value_ptr(n, shift, value_size),
			(nr_entries - shift) * value_size);
	} else {
		BUG_ON(nr_entries + shift > le32_to_cpu(n->header.max_entries));
		memmove(key_ptr(n, shift),
			key_ptr(n, 0),
			nr_entries * sizeof(__le64));
		memmove(value_ptr(n, shift, value_size),
			value_ptr(n, 0, value_size),
			nr_entries * value_size);
	}
}

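/*
 * Copies 'shift' entries between two sibling nodes.  A negative shift
 * appends the first -shift entries of 'right' to the end of 'left'; a
 * positive shift copies the last 'shift' entries of 'left' to the front
 * of 'right'.  Again, nr_entries is left for the callers to fix up.
 */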
static void node_copy(struct node *left, struct node *right, int shift)
{
	uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
	uint32_t value_size = le32_to_cpu(left->header.value_size);
	BUG_ON(value_size != le32_to_cpu(right->header.value_size));

	if (shift < 0) {
		shift = -shift;
		BUG_ON(nr_left + shift > le32_to_cpu(left->header.max_entries));
		memcpy(key_ptr(left, nr_left),
		       key_ptr(right, 0),
		       shift * sizeof(__le64));
		memcpy(value_ptr(left, nr_left, value_size),
		       value_ptr(right, 0, value_size),
		       shift * value_size);
	} else {
		BUG_ON(shift > le32_to_cpu(right->header.max_entries));
		memcpy(key_ptr(right, 0),
		       key_ptr(left, nr_left - shift),
		       shift * sizeof(__le64));
		memcpy(value_ptr(right, 0, value_size),
		       value_ptr(left, nr_left - shift, value_size),
		       shift * value_size);
	}
}

/*
 * Delete a specific entry from a leaf node.
 */
static void delete_at(struct node *n, unsigned index)
{
	unsigned nr_entries = le32_to_cpu(n->header.nr_entries);
	unsigned nr_to_copy = nr_entries - (index + 1);
	uint32_t value_size = le32_to_cpu(n->header.value_size);
	BUG_ON(index >= nr_entries);

	if (nr_to_copy) {
		memmove(key_ptr(n, index),
			key_ptr(n, index + 1),
			nr_to_copy * sizeof(__le64));

		memmove(value_ptr(n, index, value_size),
			value_ptr(n, index + 1, value_size),
			nr_to_copy * value_size);
	}

	n->header.nr_entries = cpu_to_le32(nr_entries - 1);
}

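/*
 * del_threshold() decides whether a child needs rebalancing at all;
 * merge_threshold() decides whether siblings get merged rather than
 * redistributed.  For example, a node holding at most 126 entries would
 * give a del_threshold() of 42 and a merge_threshold() of 85.
 */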
static unsigned del_threshold(struct node *n)
{
	return le32_to_cpu(n->header.max_entries) / 3;
}

static unsigned merge_threshold(struct node *n)
{
	/*
	 * The extra one is because we know we're potentially going to
	 * delete an entry.
	 */
	return 2 * (le32_to_cpu(n->header.max_entries) / 3) + 1;
}

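/*
 * Bookkeeping for a child node while it is being rebalanced: its index
 * within the parent, the shadowed block and a pointer to that block's
 * node data.
 */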
struct child {
	unsigned index;
	struct dm_block *block;
	struct node *n;
};

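/*
 * Value type describing the little-endian 64-bit values held in internal
 * nodes (block addresses of child nodes).  It has no inc/dec/equal
 * methods; reference counts on those blocks are adjusted explicitly via
 * the transaction manager.
 */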
static struct dm_btree_value_type le64_type = {
	.context = NULL,
	.size = sizeof(__le64),
	.inc = NULL,
	.dec = NULL,
	.equal = NULL
};

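/*
 * init_child() shadows the child at 'index' of 'parent', increments the
 * reference counts of everything that child references if the shadow
 * operation made a copy, and writes the child's new location back into
 * the parent.  exit_child() unlocks the shadowed block again.
 */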
static int init_child(struct dm_btree_info *info, struct node *parent,
		      unsigned index, struct child *result)
{
	int r, inc;
	dm_block_t root;

	result->index = index;
	root = value64(parent, index);

	r = dm_tm_shadow_block(info->tm, root, &btree_node_validator,
			       &result->block, &inc);
	if (r)
		return r;

	result->n = dm_block_data(result->block);

	if (inc)
		inc_children(info->tm, result->n, &le64_type);

	*((__le64 *) value_ptr(parent, index, sizeof(__le64))) =
		cpu_to_le64(dm_block_location(result->block));

	return 0;
}

static int exit_child(struct dm_btree_info *info, struct child *c)
{
	return dm_tm_unlock(info->tm, c->block);
}

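/*
 * Moves 'count' entries between two siblings.  A positive count moves
 * entries from the back of 'left' to the front of 'right'; a negative
 * count moves them the other way.  Both headers' nr_entries are updated.
 */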
static void shift(struct node *left, struct node *right, int count)
{
	if (!count)
		return;

	if (count > 0) {
		node_shift(right, count);
		node_copy(left, right, count);
	} else {
		node_copy(left, right, count);
		node_shift(right, count);
	}

	left->header.nr_entries =
		cpu_to_le32(le32_to_cpu(left->header.nr_entries) - count);
	BUG_ON(le32_to_cpu(left->header.nr_entries) > le32_to_cpu(left->header.max_entries));

	right->header.nr_entries =
		cpu_to_le32(le32_to_cpu(right->header.nr_entries) + count);
	BUG_ON(le32_to_cpu(right->header.nr_entries) > le32_to_cpu(right->header.max_entries));
}

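/*
 * Redistributes entries between two siblings, or merges them if their
 * combined entry count is small enough, updating the separator key in
 * the parent as required.
 */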
static void __rebalance2(struct dm_btree_info *info, struct node *parent,
			 struct child *l, struct child *r)
{
	struct node *left = l->n;
	struct node *right = r->n;
	uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
	uint32_t nr_right = le32_to_cpu(right->header.nr_entries);

	if (nr_left + nr_right <= merge_threshold(left)) {
		/*
		 * Merge
		 */
		node_copy(left, right, -nr_right);
		left->header.nr_entries = cpu_to_le32(nr_left + nr_right);
		delete_at(parent, r->index);

		/*
		 * We need to decrement the right block, but not its
		 * children, since they're still referenced by left.
		 */
		dm_tm_dec(info->tm, dm_block_location(r->block));
	} else {
		/*
		 * Rebalance.
		 */
		unsigned target_left = (nr_left + nr_right) / 2;
		unsigned shift_ = nr_left - target_left;
		BUG_ON(le32_to_cpu(left->header.max_entries) <= nr_left - shift_);
		BUG_ON(le32_to_cpu(right->header.max_entries) <= nr_right + shift_);
		shift(left, right, nr_left - target_left);
		*key_ptr(parent, r->index) = right->keys[0];
	}
}

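/*
 * Shadows the two children at left_index and left_index + 1 of the
 * current spine node and rebalances them with __rebalance2().
 */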
static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info,
		      unsigned left_index)
{
	int r;
	struct node *parent;
	struct child left, right;

	parent = dm_block_data(shadow_current(s));

	r = init_child(info, parent, left_index, &left);
	if (r)
		return r;

	r = init_child(info, parent, left_index + 1, &right);
	if (r) {
		exit_child(info, &left);
		return r;
	}

	__rebalance2(info, parent, &left, &right);

	r = exit_child(info, &left);
	if (r) {
		exit_child(info, &right);
		return r;
	}

	return exit_child(info, &right);
}

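/*
 * Redistributes entries across three siblings.  If the total is small
 * enough the center node is emptied into its neighbours and the problem
 * is handed back to __rebalance2(); otherwise entries are spread evenly
 * and the separator keys in the parent are updated.
 */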
static void __rebalance3(struct dm_btree_info *info, struct node *parent,
			 struct child *l, struct child *c, struct child *r)
{
	struct node *left = l->n;
	struct node *center = c->n;
	struct node *right = r->n;

	uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
	uint32_t nr_center = le32_to_cpu(center->header.nr_entries);
	uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
	uint32_t max_entries = le32_to_cpu(left->header.max_entries);

	unsigned target;

	BUG_ON(left->header.max_entries != center->header.max_entries);
	BUG_ON(center->header.max_entries != right->header.max_entries);

	if (((nr_left + nr_center + nr_right) / 2) < merge_threshold(center)) {
		/*
		 * Delete center node:
		 *
		 * We dump as many entries from center as possible into
		 * left, then the rest in right, then rebalance2.  This
		 * wastes some cpu, but I want something simple atm.
		 */
		unsigned shift = min(max_entries - nr_left, nr_center);

		BUG_ON(nr_left + shift > max_entries);
		node_copy(left, center, -shift);
		left->header.nr_entries = cpu_to_le32(nr_left + shift);

		if (shift != nr_center) {
			shift = nr_center - shift;
			BUG_ON((nr_right + shift) >= max_entries);
			node_shift(right, shift);
			node_copy(center, right, shift);
			right->header.nr_entries = cpu_to_le32(nr_right + shift);
		}
		*key_ptr(parent, r->index) = right->keys[0];

		delete_at(parent, c->index);
		r->index--;

		dm_tm_dec(info->tm, dm_block_location(c->block));
		__rebalance2(info, parent, l, r);

		return;
	}

	/*
	 * Rebalance
	 */
	target = (nr_left + nr_center + nr_right) / 3;
	BUG_ON(target > max_entries);

	/*
	 * Adjust the left node
	 */
	shift(left, center, nr_left - target);

	/*
	 * Adjust the right node
	 */
	shift(center, right, target - nr_right);
	*key_ptr(parent, c->index) = center->keys[0];
	*key_ptr(parent, r->index) = right->keys[0];
}

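/*
 * Shadows the three children starting at left_index of the current spine
 * node and rebalances them with __rebalance3().
 */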
static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info,
		      unsigned left_index)
{
	int r;
	struct node *parent = dm_block_data(shadow_current(s));
	struct child left, center, right;

	/*
	 * FIXME: fill out an array?
	 */
	r = init_child(info, parent, left_index, &left);
	if (r)
		return r;

	r = init_child(info, parent, left_index + 1, &center);
	if (r) {
		exit_child(info, &left);
		return r;
	}

	r = init_child(info, parent, left_index + 2, &right);
	if (r) {
		exit_child(info, &left);
		exit_child(info, &center);
		return r;
	}

	__rebalance3(info, parent, &left, &center, &right);

	r = exit_child(info, &left);
	if (r) {
		exit_child(info, &center);
		exit_child(info, &right);
		return r;
	}

	r = exit_child(info, &center);
	if (r) {
		exit_child(info, &right);
		return r;
	}

	r = exit_child(info, &right);
	if (r)
		return r;

	return 0;
}

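/*
 * Reads the header of block 'b' to find out how many entries it holds.
 */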
static int get_nr_entries(struct dm_transaction_manager *tm,
			  dm_block_t b, uint32_t *result)
{
	int r;
	struct dm_block *block;
	struct node *n;

	r = dm_tm_read_lock(tm, b, &btree_node_validator, &block);
	if (r)
		return r;

	n = dm_block_data(block);
	*result = le32_to_cpu(n->header.nr_entries);

	return dm_tm_unlock(tm, block);
}

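/*
 * Makes sure the child we are about to descend into has more than
 * del_threshold() entries: if the current node has a single child the
 * child's contents are copied over it (case [A] above), otherwise the
 * child is rebalanced with one or two siblings (cases [B]-[E]).
 */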
static int rebalance_children(struct shadow_spine *s,
			      struct dm_btree_info *info, uint64_t key)
{
	int i, r, has_left_sibling, has_right_sibling;
	uint32_t child_entries;
	struct node *n;

	n = dm_block_data(shadow_current(s));

	if (le32_to_cpu(n->header.nr_entries) == 1) {
		struct dm_block *child;
		dm_block_t b = value64(n, 0);

		r = dm_tm_read_lock(info->tm, b, &btree_node_validator, &child);
		if (r)
			return r;

		memcpy(n, dm_block_data(child),
		       dm_bm_block_size(dm_tm_get_bm(info->tm)));
		r = dm_tm_unlock(info->tm, child);
		if (r)
			return r;

		dm_tm_dec(info->tm, dm_block_location(child));
		return 0;
	}

	i = lower_bound(n, key);
	if (i < 0)
		return -ENODATA;

	r = get_nr_entries(info->tm, value64(n, i), &child_entries);
	if (r)
		return r;

	if (child_entries > del_threshold(n))
		return 0;

	has_left_sibling = i > 0;
	has_right_sibling = i < (le32_to_cpu(n->header.nr_entries) - 1);

	if (!has_left_sibling)
		r = rebalance2(s, info, i);

	else if (!has_right_sibling)
		r = rebalance2(s, info, i - 1);

	else
		r = rebalance3(s, info, i - 1);

	return r;
}

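/*
 * Looks up 'key' in a leaf node, returning -ENODATA if it isn't there,
 * otherwise reporting its index to the caller.
 */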
static int do_leaf(struct node *n, uint64_t key, unsigned *index)
{
	int i = lower_bound(n, key);

	if ((i < 0) ||
	    (i >= le32_to_cpu(n->header.nr_entries)) ||
	    (le64_to_cpu(n->keys[i]) != key))
		return -ENODATA;

	*index = i;

	return 0;
}

/*
 * Prepares for removal from one level of the hierarchy.  The caller must
 * call delete_at() to remove the entry at index.
 */
static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
		      struct dm_btree_value_type *vt, dm_block_t root,
		      uint64_t key, unsigned *index)
{
	int i = *index, r;
	struct node *n;

	for (;;) {
		r = shadow_step(s, root, vt);
		if (r < 0)
			break;

		/*
		 * We have to patch up the parent node, ugly, but I don't
		 * see a way to do this automatically as part of the spine
		 * op.
		 */
		if (shadow_has_parent(s)) {
			__le64 location = cpu_to_le64(dm_block_location(shadow_current(s)));
			memcpy(value_ptr(dm_block_data(shadow_parent(s)), i, sizeof(__le64)),
			       &location, sizeof(__le64));
		}

		n = dm_block_data(shadow_current(s));

		if (le32_to_cpu(n->header.flags) & LEAF_NODE)
			return do_leaf(n, key, index);

		r = rebalance_children(s, info, key);
		if (r)
			break;

		n = dm_block_data(shadow_current(s));
		if (le32_to_cpu(n->header.flags) & LEAF_NODE)
			return do_leaf(n, key, index);

		i = lower_bound(n, key);

		/*
		 * We know the key is present, or else
		 * rebalance_children would have returned
		 * -ENODATA
		 */
		root = value64(n, i);
	}

	return r;
}

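/*
 * Removes the value for the given key path: the first info->levels - 1
 * keys select successive sub-trees, and the entry for the final key is
 * deleted from the bottom-level btree, calling the value type's dec
 * method (if any) on the value being removed.  The new root is returned
 * via 'new_root'.
 *
 * A minimal caller sketch (identifiers other than dm_btree_remove are
 * illustrative, not part of this file):
 *
 *	uint64_t keys[1] = { key_to_remove };
 *	dm_block_t new_root;
 *	int r = dm_btree_remove(&my_btree_info, old_root, keys, &new_root);
 *	if (!r)
 *		old_root = new_root;
 */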
int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
		    uint64_t *keys, dm_block_t *new_root)
{
	unsigned level, last_level = info->levels - 1;
	int index = 0, r = 0;
	struct shadow_spine spine;
	struct node *n;

	init_shadow_spine(&spine, info);
	for (level = 0; level < info->levels; level++) {
		r = remove_raw(&spine, info,
			       (level == last_level ?
				&info->value_type : &le64_type),
			       root, keys[level], (unsigned *)&index);
		if (r < 0)
			break;

		n = dm_block_data(shadow_current(&spine));
		if (level != last_level) {
			root = value64(n, index);
			continue;
		}

		BUG_ON(index < 0 || index >= le32_to_cpu(n->header.nr_entries));

		if (info->value_type.dec)
			info->value_type.dec(info->value_type.context,
					     value_ptr(n, index, info->value_type.size));

		delete_at(n, index);
	}

	*new_root = shadow_root(&spine);
	exit_shadow_spine(&spine);

	return r;
}
EXPORT_SYMBOL_GPL(dm_btree_remove);