/*
 * btree.c - NILFS B-tree.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Koji Sato <koji@osrg.net>.
 */
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/pagevec.h>
#include "nilfs.h"
#include "page.h"
#include "btnode.h"
#include "btree.h"
#include "alloc.h"
#include "dat.h"
static void __nilfs_btree_init(struct nilfs_bmap *bmap);
static struct nilfs_btree_path *nilfs_btree_alloc_path(void)
{
	struct nilfs_btree_path *path;
	int level = NILFS_BTREE_LEVEL_DATA;

	path = kmem_cache_alloc(nilfs_btree_path_cache, GFP_NOFS);
	if (path == NULL)
		goto out;

	for (; level < NILFS_BTREE_LEVEL_MAX; level++) {
		path[level].bp_bh = NULL;
		path[level].bp_sib_bh = NULL;
		path[level].bp_index = 0;
		path[level].bp_oldreq.bpr_ptr = NILFS_BMAP_INVALID_PTR;
		path[level].bp_newreq.bpr_ptr = NILFS_BMAP_INVALID_PTR;
		path[level].bp_op = NULL;
	}

out:
	return path;
}
static void nilfs_btree_free_path(struct nilfs_btree_path *path)
{
	int level = NILFS_BTREE_LEVEL_DATA;

	for (; level < NILFS_BTREE_LEVEL_MAX; level++)
		brelse(path[level].bp_bh);

	kmem_cache_free(nilfs_btree_path_cache, path);
}
/*
 * B-tree node operations
 */
static int nilfs_btree_get_new_block(const struct nilfs_bmap *btree,
				     __u64 ptr, struct buffer_head **bhp)
{
	struct address_space *btnc = &NILFS_BMAP_I(btree)->i_btnode_cache;
	struct buffer_head *bh;

	bh = nilfs_btnode_create_block(btnc, ptr);
	if (!bh)
		return -ENOMEM;

	set_buffer_nilfs_volatile(bh);
	*bhp = bh;
	return 0;
}
static int nilfs_btree_node_get_flags(const struct nilfs_btree_node *node)
{
	return node->bn_flags;
}

static void
nilfs_btree_node_set_flags(struct nilfs_btree_node *node, int flags)
{
	node->bn_flags = flags;
}

static int nilfs_btree_node_root(const struct nilfs_btree_node *node)
{
	return nilfs_btree_node_get_flags(node) & NILFS_BTREE_NODE_ROOT;
}

static int nilfs_btree_node_get_level(const struct nilfs_btree_node *node)
{
	return node->bn_level;
}

static void
nilfs_btree_node_set_level(struct nilfs_btree_node *node, int level)
{
	node->bn_level = level;
}

static int nilfs_btree_node_get_nchildren(const struct nilfs_btree_node *node)
{
	return le16_to_cpu(node->bn_nchildren);
}

static void
nilfs_btree_node_set_nchildren(struct nilfs_btree_node *node, int nchildren)
{
	node->bn_nchildren = cpu_to_le16(nchildren);
}

static int nilfs_btree_node_size(const struct nilfs_bmap *btree)
{
	return 1 << btree->b_inode->i_blkbits;
}

static int nilfs_btree_nchildren_per_block(const struct nilfs_bmap *btree)
{
	return btree->b_nchildren_per_block;
}
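
/*
 * A b-tree node block holds a struct nilfs_btree_node header, padding
 * (except in the root node, which is embedded in the bmap union), then
 * an array of ncmax 64-bit keys followed by an array of ncmax 64-bit
 * pointers.  The helpers below return pointers into those two arrays.
 */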
static __le64 *
nilfs_btree_node_dkeys(const struct nilfs_btree_node *node)
{
	return (__le64 *)((char *)(node + 1) +
			  (nilfs_btree_node_root(node) ?
			   0 : NILFS_BTREE_NODE_EXTRA_PAD_SIZE));
}

static __le64 *
nilfs_btree_node_dptrs(const struct nilfs_btree_node *node, int ncmax)
{
	return (__le64 *)(nilfs_btree_node_dkeys(node) + ncmax);
}
static __u64
nilfs_btree_node_get_key(const struct nilfs_btree_node *node, int index)
{
	return le64_to_cpu(*(nilfs_btree_node_dkeys(node) + index));
}

static void
nilfs_btree_node_set_key(struct nilfs_btree_node *node, int index, __u64 key)
{
	*(nilfs_btree_node_dkeys(node) + index) = cpu_to_le64(key);
}

static __u64
nilfs_btree_node_get_ptr(const struct nilfs_btree_node *node, int index,
			 int ncmax)
{
	return le64_to_cpu(*(nilfs_btree_node_dptrs(node, ncmax) + index));
}

static void
nilfs_btree_node_set_ptr(struct nilfs_btree_node *node, int index, __u64 ptr,
			 int ncmax)
{
	*(nilfs_btree_node_dptrs(node, ncmax) + index) = cpu_to_le64(ptr);
}
static void nilfs_btree_node_init(struct nilfs_btree_node *node, int flags,
				  int level, int nchildren, int ncmax,
				  const __u64 *keys, const __u64 *ptrs)
{
	__le64 *dkeys;
	__le64 *dptrs;
	int i;

	nilfs_btree_node_set_flags(node, flags);
	nilfs_btree_node_set_level(node, level);
	nilfs_btree_node_set_nchildren(node, nchildren);

	dkeys = nilfs_btree_node_dkeys(node);
	dptrs = nilfs_btree_node_dptrs(node, ncmax);
	for (i = 0; i < nchildren; i++) {
		dkeys[i] = cpu_to_le64(keys[i]);
		dptrs[i] = cpu_to_le64(ptrs[i]);
	}
}
/* Assume the buffer heads corresponding to left and right are locked. */
static void nilfs_btree_node_move_left(struct nilfs_btree_node *left,
				       struct nilfs_btree_node *right,
				       int n, int lncmax, int rncmax)
{
	__le64 *ldkeys, *rdkeys;
	__le64 *ldptrs, *rdptrs;
	int lnchildren, rnchildren;

	ldkeys = nilfs_btree_node_dkeys(left);
	ldptrs = nilfs_btree_node_dptrs(left, lncmax);
	lnchildren = nilfs_btree_node_get_nchildren(left);

	rdkeys = nilfs_btree_node_dkeys(right);
	rdptrs = nilfs_btree_node_dptrs(right, rncmax);
	rnchildren = nilfs_btree_node_get_nchildren(right);

	memcpy(ldkeys + lnchildren, rdkeys, n * sizeof(*rdkeys));
	memcpy(ldptrs + lnchildren, rdptrs, n * sizeof(*rdptrs));
	memmove(rdkeys, rdkeys + n, (rnchildren - n) * sizeof(*rdkeys));
	memmove(rdptrs, rdptrs + n, (rnchildren - n) * sizeof(*rdptrs));

	lnchildren += n;
	rnchildren -= n;
	nilfs_btree_node_set_nchildren(left, lnchildren);
	nilfs_btree_node_set_nchildren(right, rnchildren);
}
/* Assume that the buffer heads corresponding to left and right are locked. */
static void nilfs_btree_node_move_right(struct nilfs_btree_node *left,
					struct nilfs_btree_node *right,
					int n, int lncmax, int rncmax)
{
	__le64 *ldkeys, *rdkeys;
	__le64 *ldptrs, *rdptrs;
	int lnchildren, rnchildren;

	ldkeys = nilfs_btree_node_dkeys(left);
	ldptrs = nilfs_btree_node_dptrs(left, lncmax);
	lnchildren = nilfs_btree_node_get_nchildren(left);

	rdkeys = nilfs_btree_node_dkeys(right);
	rdptrs = nilfs_btree_node_dptrs(right, rncmax);
	rnchildren = nilfs_btree_node_get_nchildren(right);

	memmove(rdkeys + n, rdkeys, rnchildren * sizeof(*rdkeys));
	memmove(rdptrs + n, rdptrs, rnchildren * sizeof(*rdptrs));
	memcpy(rdkeys, ldkeys + lnchildren - n, n * sizeof(*rdkeys));
	memcpy(rdptrs, ldptrs + lnchildren - n, n * sizeof(*rdptrs));

	lnchildren -= n;
	rnchildren += n;
	nilfs_btree_node_set_nchildren(left, lnchildren);
	nilfs_btree_node_set_nchildren(right, rnchildren);
}
/* Assume that the buffer head corresponding to node is locked. */
static void nilfs_btree_node_insert(struct nilfs_btree_node *node, int index,
				    __u64 key, __u64 ptr, int ncmax)
{
	__le64 *dkeys;
	__le64 *dptrs;
	int nchildren;

	dkeys = nilfs_btree_node_dkeys(node);
	dptrs = nilfs_btree_node_dptrs(node, ncmax);
	nchildren = nilfs_btree_node_get_nchildren(node);
	if (index < nchildren) {
		memmove(dkeys + index + 1, dkeys + index,
			(nchildren - index) * sizeof(*dkeys));
		memmove(dptrs + index + 1, dptrs + index,
			(nchildren - index) * sizeof(*dptrs));
	}
	dkeys[index] = cpu_to_le64(key);
	dptrs[index] = cpu_to_le64(ptr);
	nchildren++;
	nilfs_btree_node_set_nchildren(node, nchildren);
}
/* Assume that the buffer head corresponding to node is locked. */
static void nilfs_btree_node_delete(struct nilfs_btree_node *node, int index,
				    __u64 *keyp, __u64 *ptrp, int ncmax)
{
	__u64 key;
	__u64 ptr;
	__le64 *dkeys;
	__le64 *dptrs;
	int nchildren;

	dkeys = nilfs_btree_node_dkeys(node);
	dptrs = nilfs_btree_node_dptrs(node, ncmax);
	key = le64_to_cpu(dkeys[index]);
	ptr = le64_to_cpu(dptrs[index]);
	nchildren = nilfs_btree_node_get_nchildren(node);
	if (keyp != NULL)
		*keyp = key;
	if (ptrp != NULL)
		*ptrp = ptr;

	if (index < nchildren - 1) {
		memmove(dkeys + index, dkeys + index + 1,
			(nchildren - index - 1) * sizeof(*dkeys));
		memmove(dptrs + index, dptrs + index + 1,
			(nchildren - index - 1) * sizeof(*dptrs));
	}
	nchildren--;
	nilfs_btree_node_set_nchildren(node, nchildren);
}
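
/*
 * nilfs_btree_node_lookup() does a binary search over the keys of the
 * node.  It returns nonzero when an exact match for @key is found; in
 * either case *indexp is set to the child slot to descend into (for
 * intermediate nodes) or to the insertion point (for leaf-level nodes).
 */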
static int nilfs_btree_node_lookup(const struct nilfs_btree_node *node,
				   __u64 key, int *indexp)
{
	__u64 nkey;
	int index, low, high, s;

	/* binary search */
	low = 0;
	high = nilfs_btree_node_get_nchildren(node) - 1;
	index = 0;
	s = 0;
	while (low <= high) {
		index = (low + high) / 2;
		nkey = nilfs_btree_node_get_key(node, index);
		if (nkey == key) {
			s = 0;
			goto out;
		} else if (nkey < key) {
			low = index + 1;
			s = -1;
		} else {
			high = index - 1;
			s = 1;
		}
	}

	/* adjust index */
	if (nilfs_btree_node_get_level(node) > NILFS_BTREE_LEVEL_NODE_MIN) {
		if (s > 0 && index > 0)
			index--;
	} else if (s < 0)
		index++;

 out:
	*indexp = index;

	return s == 0;
}
/**
 * nilfs_btree_node_broken - verify consistency of btree node
 * @node: btree node block to be examined
 * @size: node size (in bytes)
 * @blocknr: block number
 *
 * Return Value: If node is broken, 1 is returned. Otherwise, 0 is returned.
 */
static int nilfs_btree_node_broken(const struct nilfs_btree_node *node,
				   size_t size, sector_t blocknr)
{
	int level, flags, nchildren;
	int ret = 0;

	level = nilfs_btree_node_get_level(node);
	flags = nilfs_btree_node_get_flags(node);
	nchildren = nilfs_btree_node_get_nchildren(node);

	if (unlikely(level < NILFS_BTREE_LEVEL_NODE_MIN ||
		     level >= NILFS_BTREE_LEVEL_MAX ||
		     (flags & NILFS_BTREE_NODE_ROOT) ||
		     nchildren < 0 ||
		     nchildren > NILFS_BTREE_NODE_NCHILDREN_MAX(size))) {
		printk(KERN_CRIT "NILFS: bad btree node (blocknr=%llu): "
		       "level = %d, flags = 0x%x, nchildren = %d\n",
		       (unsigned long long)blocknr, level, flags, nchildren);
		ret = 1;
	}
	return ret;
}
/**
 * nilfs_btree_root_broken - verify consistency of btree root node
 * @node: btree root node to be examined
 * @ino: inode number
 *
 * Return Value: If node is broken, 1 is returned. Otherwise, 0 is returned.
 */
static int nilfs_btree_root_broken(const struct nilfs_btree_node *node,
				   unsigned long ino)
{
	int level, flags, nchildren;
	int ret = 0;

	level = nilfs_btree_node_get_level(node);
	flags = nilfs_btree_node_get_flags(node);
	nchildren = nilfs_btree_node_get_nchildren(node);

	if (unlikely(level < NILFS_BTREE_LEVEL_NODE_MIN ||
		     level >= NILFS_BTREE_LEVEL_MAX ||
		     nchildren < 0 ||
		     nchildren > NILFS_BTREE_ROOT_NCHILDREN_MAX)) {
		pr_crit("NILFS: bad btree root (inode number=%lu): level = %d, flags = 0x%x, nchildren = %d\n",
			ino, level, flags, nchildren);
		ret = 1;
	}
	return ret;
}
int nilfs_btree_broken_node_block(struct buffer_head *bh)
{
	int ret;

	if (buffer_nilfs_checked(bh))
		return 0;

	ret = nilfs_btree_node_broken((struct nilfs_btree_node *)bh->b_data,
				      bh->b_size, bh->b_blocknr);
	if (likely(!ret))
		set_buffer_nilfs_checked(bh);
	return ret;
}
static struct nilfs_btree_node *
nilfs_btree_get_root(const struct nilfs_bmap *btree)
{
	return (struct nilfs_btree_node *)btree->b_u.u_data;
}

static struct nilfs_btree_node *
nilfs_btree_get_nonroot_node(const struct nilfs_btree_path *path, int level)
{
	return (struct nilfs_btree_node *)path[level].bp_bh->b_data;
}

static struct nilfs_btree_node *
nilfs_btree_get_sib_node(const struct nilfs_btree_path *path, int level)
{
	return (struct nilfs_btree_node *)path[level].bp_sib_bh->b_data;
}

static int nilfs_btree_height(const struct nilfs_bmap *btree)
{
	return nilfs_btree_node_get_level(nilfs_btree_get_root(btree)) + 1;
}
static struct nilfs_btree_node *
nilfs_btree_get_node(const struct nilfs_bmap *btree,
		     const struct nilfs_btree_path *path,
		     int level, int *ncmaxp)
{
	struct nilfs_btree_node *node;

	if (level == nilfs_btree_height(btree) - 1) {
		node = nilfs_btree_get_root(btree);
		*ncmaxp = NILFS_BTREE_ROOT_NCHILDREN_MAX;
	} else {
		node = nilfs_btree_get_nonroot_node(path, level);
		*ncmaxp = nilfs_btree_nchildren_per_block(btree);
	}
	return node;
}
static int
nilfs_btree_bad_node(struct nilfs_btree_node *node, int level)
{
	if (unlikely(nilfs_btree_node_get_level(node) != level)) {
		dump_stack();
		printk(KERN_CRIT "NILFS: btree level mismatch: %d != %d\n",
		       nilfs_btree_node_get_level(node), level);
		return 1;
	}
	return 0;
}
struct nilfs_btree_readahead_info {
	struct nilfs_btree_node *node;	/* parent node */
	int max_ra_blocks;		/* max nof blocks to read ahead */
	int index;			/* current index on the parent node */
	int ncmax;			/* nof children in the parent node */
};
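
/*
 * __nilfs_btree_get_block() reads the node block at @ptr into the btnode
 * cache.  When @ra is given, read requests for the sibling blocks that
 * follow @ra->index in the parent node are submitted as readahead while
 * the target block is being read.
 */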
static int __nilfs_btree_get_block(const struct nilfs_bmap *btree, __u64 ptr,
				   struct buffer_head **bhp,
				   const struct nilfs_btree_readahead_info *ra)
{
	struct address_space *btnc = &NILFS_BMAP_I(btree)->i_btnode_cache;
	struct buffer_head *bh, *ra_bh;
	sector_t submit_ptr = 0;
	int ret;

	ret = nilfs_btnode_submit_block(btnc, ptr, 0, READ, &bh, &submit_ptr);
	if (ret) {
		if (ret != -EEXIST)
			return ret;
		goto out_check;
	}

	if (ra) {
		int i, n;
		__u64 ptr2;

		/* read ahead sibling nodes */
		for (n = ra->max_ra_blocks, i = ra->index + 1;
		     n > 0 && i < ra->ncmax; n--, i++) {
			ptr2 = nilfs_btree_node_get_ptr(ra->node, i, ra->ncmax);

			ret = nilfs_btnode_submit_block(btnc, ptr2, 0, READA,
							&ra_bh, &submit_ptr);
			if (likely(!ret || ret == -EEXIST))
				brelse(ra_bh);
			else if (ret != -EBUSY)
				break;
			if (!buffer_locked(bh))
				goto out_no_wait;
		}
	}

	wait_on_buffer(bh);

 out_no_wait:
	if (!buffer_uptodate(bh)) {
		brelse(bh);
		return -EIO;
	}

 out_check:
	if (nilfs_btree_broken_node_block(bh)) {
		clear_buffer_uptodate(bh);
		brelse(bh);
		return -EINVAL;
	}

	*bhp = bh;
	return 0;
}
static int nilfs_btree_get_block(const struct nilfs_bmap *btree, __u64 ptr,
				 struct buffer_head **bhp)
{
	return __nilfs_btree_get_block(btree, ptr, bhp, NULL);
}
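
/*
 * nilfs_btree_do_lookup() walks down from the root to @minlevel looking
 * up @key, recording the buffer head and child index visited at each
 * level in @path.  It returns -ENOENT when the key is not present.
 */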
static int nilfs_btree_do_lookup(const struct nilfs_bmap *btree,
				 struct nilfs_btree_path *path,
				 __u64 key, __u64 *ptrp, int minlevel,
				 int readahead)
{
	struct nilfs_btree_node *node;
	struct nilfs_btree_readahead_info p, *ra;
	__u64 ptr;
	int level, index, found, ncmax, ret;

	node = nilfs_btree_get_root(btree);
	level = nilfs_btree_node_get_level(node);
	if (level < minlevel || nilfs_btree_node_get_nchildren(node) <= 0)
		return -ENOENT;

	found = nilfs_btree_node_lookup(node, key, &index);
	ptr = nilfs_btree_node_get_ptr(node, index,
				       NILFS_BTREE_ROOT_NCHILDREN_MAX);
	path[level].bp_bh = NULL;
	path[level].bp_index = index;

	ncmax = nilfs_btree_nchildren_per_block(btree);

	while (--level >= minlevel) {
		ra = NULL;
		if (level == NILFS_BTREE_LEVEL_NODE_MIN && readahead) {
			p.node = nilfs_btree_get_node(btree, path, level + 1,
						      &p.ncmax);
			p.index = index;
			p.max_ra_blocks = 7;
			ra = &p;
		}
		ret = __nilfs_btree_get_block(btree, ptr, &path[level].bp_bh,
					      ra);
		if (ret < 0)
			return ret;

		node = nilfs_btree_get_nonroot_node(path, level);
		if (nilfs_btree_bad_node(node, level))
			return -EINVAL;
		if (!found)
			found = nilfs_btree_node_lookup(node, key, &index);
		else
			index = 0;
		if (index < ncmax) {
			ptr = nilfs_btree_node_get_ptr(node, index, ncmax);
		} else {
			WARN_ON(found || level != NILFS_BTREE_LEVEL_NODE_MIN);
			/* insert */
			ptr = NILFS_BMAP_INVALID_PTR;
		}
		path[level].bp_index = index;
	}
	if (!found)
		return -ENOENT;

	if (ptrp != NULL)
		*ptrp = ptr;

	return 0;
}
static int nilfs_btree_do_lookup_last(const struct nilfs_bmap *btree,
				      struct nilfs_btree_path *path,
				      __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *node;
	__u64 ptr;
	int index, level, ncmax, ret;

	node = nilfs_btree_get_root(btree);
	index = nilfs_btree_node_get_nchildren(node) - 1;
	if (index < 0)
		return -ENOENT;
	level = nilfs_btree_node_get_level(node);
	ptr = nilfs_btree_node_get_ptr(node, index,
				       NILFS_BTREE_ROOT_NCHILDREN_MAX);
	path[level].bp_bh = NULL;
	path[level].bp_index = index;
	ncmax = nilfs_btree_nchildren_per_block(btree);

	for (level--; level > 0; level--) {
		ret = nilfs_btree_get_block(btree, ptr, &path[level].bp_bh);
		if (ret < 0)
			return ret;
		node = nilfs_btree_get_nonroot_node(path, level);
		if (nilfs_btree_bad_node(node, level))
			return -EINVAL;
		index = nilfs_btree_node_get_nchildren(node) - 1;
		ptr = nilfs_btree_node_get_ptr(node, index, ncmax);
		path[level].bp_index = index;
	}

	if (keyp != NULL)
		*keyp = nilfs_btree_node_get_key(node, index);
	if (ptrp != NULL)
		*ptrp = ptr;

	return 0;
}
/**
 * nilfs_btree_get_next_key - get next valid key from btree path array
 * @btree: bmap struct of btree
 * @path: array of nilfs_btree_path struct
 * @minlevel: start level
 * @nextkey: place to store the next valid key
 *
 * Return Value: If a next key was found, 0 is returned. Otherwise,
 * -ENOENT is returned.
 */
static int nilfs_btree_get_next_key(const struct nilfs_bmap *btree,
				    const struct nilfs_btree_path *path,
				    int minlevel, __u64 *nextkey)
{
	struct nilfs_btree_node *node;
	int maxlevel = nilfs_btree_height(btree) - 1;
	int index, next_adj, level;

	/* Next index is already set to bp_index for leaf nodes. */
	next_adj = 0;
	for (level = minlevel; level <= maxlevel; level++) {
		if (level == maxlevel)
			node = nilfs_btree_get_root(btree);
		else
			node = nilfs_btree_get_nonroot_node(path, level);

		index = path[level].bp_index + next_adj;
		if (index < nilfs_btree_node_get_nchildren(node)) {
			/* Next key is in this node */
			*nextkey = nilfs_btree_node_get_key(node, index);
			return 0;
		}
		/* For non-leaf nodes, next index is stored at bp_index + 1. */
		next_adj = 1;
	}
	return -ENOENT;
}
static int nilfs_btree_lookup(const struct nilfs_bmap *btree,
			      __u64 key, int level, __u64 *ptrp)
{
	struct nilfs_btree_path *path;
	int ret;

	path = nilfs_btree_alloc_path();
	if (path == NULL)
		return -ENOMEM;

	ret = nilfs_btree_do_lookup(btree, path, key, ptrp, level, 0);

	nilfs_btree_free_path(path);

	return ret;
}
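
/*
 * nilfs_btree_lookup_contig() looks up @key and then counts how many of
 * the following keys map to consecutive block addresses, scanning across
 * leaf-level sibling nodes if necessary.  It returns the length of the
 * contiguous run (at most @maxblocks) or a negative error code.
 */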
static int nilfs_btree_lookup_contig(const struct nilfs_bmap *btree,
				     __u64 key, __u64 *ptrp, unsigned maxblocks)
{
	struct nilfs_btree_path *path;
	struct nilfs_btree_node *node;
	struct inode *dat = NULL;
	__u64 ptr, ptr2;
	sector_t blocknr;
	int level = NILFS_BTREE_LEVEL_NODE_MIN;
	int ret, cnt, index, maxlevel, ncmax;
	struct nilfs_btree_readahead_info p;

	path = nilfs_btree_alloc_path();
	if (path == NULL)
		return -ENOMEM;

	ret = nilfs_btree_do_lookup(btree, path, key, &ptr, level, 1);
	if (ret < 0)
		goto out;

	if (NILFS_BMAP_USE_VBN(btree)) {
		dat = nilfs_bmap_get_dat(btree);
		ret = nilfs_dat_translate(dat, ptr, &blocknr);
		if (ret < 0)
			goto out;
		ptr = blocknr;
	}
	cnt = 1;
	if (cnt == maxblocks)
		goto end;

	maxlevel = nilfs_btree_height(btree) - 1;
	node = nilfs_btree_get_node(btree, path, level, &ncmax);
	index = path[level].bp_index + 1;
	for (;;) {
		while (index < nilfs_btree_node_get_nchildren(node)) {
			if (nilfs_btree_node_get_key(node, index) !=
			    key + cnt)
				goto end;
			ptr2 = nilfs_btree_node_get_ptr(node, index, ncmax);
			if (dat) {
				ret = nilfs_dat_translate(dat, ptr2, &blocknr);
				if (ret < 0)
					goto out;
				ptr2 = blocknr;
			}
			if (ptr2 != ptr + cnt || ++cnt == maxblocks)
				goto end;
			index++;
		}
		if (level == maxlevel)
			break;

		/* look-up right sibling node */
		p.node = nilfs_btree_get_node(btree, path, level + 1,
					      &p.ncmax);
		p.index = path[level + 1].bp_index + 1;
		p.max_ra_blocks = 7;
		if (p.index >= nilfs_btree_node_get_nchildren(p.node) ||
		    nilfs_btree_node_get_key(p.node, p.index) != key + cnt)
			break;
		ptr2 = nilfs_btree_node_get_ptr(p.node, p.index, p.ncmax);
		path[level + 1].bp_index = p.index;

		brelse(path[level].bp_bh);
		path[level].bp_bh = NULL;

		ret = __nilfs_btree_get_block(btree, ptr2, &path[level].bp_bh,
					      &p);
		if (ret < 0)
			goto out;
		node = nilfs_btree_get_nonroot_node(path, level);
		ncmax = nilfs_btree_nchildren_per_block(btree);
		index = 0;
		path[level].bp_index = index;
	}
 end:
	*ptrp = ptr;
	ret = cnt;
 out:
	nilfs_btree_free_path(path);
	return ret;
}
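
/*
 * nilfs_btree_promote_key() propagates a new first key of a child node
 * into the index entries of its ancestors, continuing upward as long as
 * the affected entry is the leftmost one at each level.
 */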
static void nilfs_btree_promote_key(struct nilfs_bmap *btree,
				    struct nilfs_btree_path *path,
				    int level, __u64 key)
{
	if (level < nilfs_btree_height(btree) - 1) {
		do {
			nilfs_btree_node_set_key(
				nilfs_btree_get_nonroot_node(path, level),
				path[level].bp_index, key);
			if (!buffer_dirty(path[level].bp_bh))
				mark_buffer_dirty(path[level].bp_bh);
		} while ((path[level].bp_index == 0) &&
			 (++level < nilfs_btree_height(btree) - 1));
	}

	/* root */
	if (level == nilfs_btree_height(btree) - 1) {
		nilfs_btree_node_set_key(nilfs_btree_get_root(btree),
					 path[level].bp_index, key);
	}
}
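
/*
 * nilfs_btree_do_insert() inserts a key/pointer pair into the node at
 * @level without any rebalancing; the caller guarantees that the node
 * still has room for one more entry.
 */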
static void nilfs_btree_do_insert(struct nilfs_bmap *btree,
				  struct nilfs_btree_path *path,
				  int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *node;
	int ncblk;

	if (level < nilfs_btree_height(btree) - 1) {
		node = nilfs_btree_get_nonroot_node(path, level);
		ncblk = nilfs_btree_nchildren_per_block(btree);
		nilfs_btree_node_insert(node, path[level].bp_index,
					*keyp, *ptrp, ncblk);
		if (!buffer_dirty(path[level].bp_bh))
			mark_buffer_dirty(path[level].bp_bh);

		if (path[level].bp_index == 0)
			nilfs_btree_promote_key(btree, path, level + 1,
						nilfs_btree_node_get_key(node,
									 0));
	} else {
		node = nilfs_btree_get_root(btree);
		nilfs_btree_node_insert(node, path[level].bp_index,
					*keyp, *ptrp,
					NILFS_BTREE_ROOT_NCHILDREN_MAX);
	}
}
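
/*
 * nilfs_btree_carry_left() and nilfs_btree_carry_right() make room for
 * the new entry by shifting entries into a sibling node that still has
 * free slots, then perform the insertion with nilfs_btree_do_insert().
 */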
static void nilfs_btree_carry_left(struct nilfs_bmap *btree,
				   struct nilfs_btree_path *path,
				   int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *node, *left;
	int nchildren, lnchildren, n, move, ncblk;

	node = nilfs_btree_get_nonroot_node(path, level);
	left = nilfs_btree_get_sib_node(path, level);
	nchildren = nilfs_btree_node_get_nchildren(node);
	lnchildren = nilfs_btree_node_get_nchildren(left);
	ncblk = nilfs_btree_nchildren_per_block(btree);
	move = 0;

	n = (nchildren + lnchildren + 1) / 2 - lnchildren;
	if (n > path[level].bp_index) {
		/* move insert point */
		n--;
		move = 1;
	}

	nilfs_btree_node_move_left(left, node, n, ncblk, ncblk);

	if (!buffer_dirty(path[level].bp_bh))
		mark_buffer_dirty(path[level].bp_bh);
	if (!buffer_dirty(path[level].bp_sib_bh))
		mark_buffer_dirty(path[level].bp_sib_bh);

	nilfs_btree_promote_key(btree, path, level + 1,
				nilfs_btree_node_get_key(node, 0));

	if (move) {
		brelse(path[level].bp_bh);
		path[level].bp_bh = path[level].bp_sib_bh;
		path[level].bp_sib_bh = NULL;
		path[level].bp_index += lnchildren;
		path[level + 1].bp_index--;
	} else {
		brelse(path[level].bp_sib_bh);
		path[level].bp_sib_bh = NULL;
		path[level].bp_index -= n;
	}

	nilfs_btree_do_insert(btree, path, level, keyp, ptrp);
}
static void nilfs_btree_carry_right(struct nilfs_bmap *btree,
				    struct nilfs_btree_path *path,
				    int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *node, *right;
	int nchildren, rnchildren, n, move, ncblk;

	node = nilfs_btree_get_nonroot_node(path, level);
	right = nilfs_btree_get_sib_node(path, level);
	nchildren = nilfs_btree_node_get_nchildren(node);
	rnchildren = nilfs_btree_node_get_nchildren(right);
	ncblk = nilfs_btree_nchildren_per_block(btree);
	move = 0;

	n = (nchildren + rnchildren + 1) / 2 - rnchildren;
	if (n > nchildren - path[level].bp_index) {
		/* move insert point */
		n--;
		move = 1;
	}

	nilfs_btree_node_move_right(node, right, n, ncblk, ncblk);

	if (!buffer_dirty(path[level].bp_bh))
		mark_buffer_dirty(path[level].bp_bh);
	if (!buffer_dirty(path[level].bp_sib_bh))
		mark_buffer_dirty(path[level].bp_sib_bh);

	path[level + 1].bp_index++;
	nilfs_btree_promote_key(btree, path, level + 1,
				nilfs_btree_node_get_key(right, 0));
	path[level + 1].bp_index--;

	if (move) {
		brelse(path[level].bp_bh);
		path[level].bp_bh = path[level].bp_sib_bh;
		path[level].bp_sib_bh = NULL;
		path[level].bp_index -= nilfs_btree_node_get_nchildren(node);
		path[level + 1].bp_index++;
	} else {
		brelse(path[level].bp_sib_bh);
		path[level].bp_sib_bh = NULL;
	}

	nilfs_btree_do_insert(btree, path, level, keyp, ptrp);
}
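
/*
 * nilfs_btree_split() moves the upper half of the entries of a full node
 * into a freshly allocated right sibling and inserts the new entry into
 * whichever of the two nodes now covers it.  The first key of the new
 * node and its block pointer are handed back through @keyp/@ptrp so that
 * the caller can insert them into the parent level.
 */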
static void nilfs_btree_split(struct nilfs_bmap *btree,
			      struct nilfs_btree_path *path,
			      int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *node, *right;
	__u64 newkey;
	__u64 newptr;
	int nchildren, n, move, ncblk;

	node = nilfs_btree_get_nonroot_node(path, level);
	right = nilfs_btree_get_sib_node(path, level);
	nchildren = nilfs_btree_node_get_nchildren(node);
	ncblk = nilfs_btree_nchildren_per_block(btree);
	move = 0;

	n = (nchildren + 1) / 2;
	if (n > nchildren - path[level].bp_index) {
		n--;
		move = 1;
	}

	nilfs_btree_node_move_right(node, right, n, ncblk, ncblk);

	if (!buffer_dirty(path[level].bp_bh))
		mark_buffer_dirty(path[level].bp_bh);
	if (!buffer_dirty(path[level].bp_sib_bh))
		mark_buffer_dirty(path[level].bp_sib_bh);

	newkey = nilfs_btree_node_get_key(right, 0);
	newptr = path[level].bp_newreq.bpr_ptr;

	if (move) {
		path[level].bp_index -= nilfs_btree_node_get_nchildren(node);
		nilfs_btree_node_insert(right, path[level].bp_index,
					*keyp, *ptrp, ncblk);

		*keyp = nilfs_btree_node_get_key(right, 0);
		*ptrp = path[level].bp_newreq.bpr_ptr;

		brelse(path[level].bp_bh);
		path[level].bp_bh = path[level].bp_sib_bh;
		path[level].bp_sib_bh = NULL;
	} else {
		nilfs_btree_do_insert(btree, path, level, keyp, ptrp);

		*keyp = nilfs_btree_node_get_key(right, 0);
		*ptrp = path[level].bp_newreq.bpr_ptr;

		brelse(path[level].bp_sib_bh);
		path[level].bp_sib_bh = NULL;
	}

	path[level + 1].bp_index++;
}
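
/*
 * nilfs_btree_grow() increases the height of the tree: the entries of
 * the root are moved into a new child node and the root is left pointing
 * to that child one level up.
 */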
static void nilfs_btree_grow(struct nilfs_bmap *btree,
			     struct nilfs_btree_path *path,
			     int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *root, *child;
	int n, ncblk;

	root = nilfs_btree_get_root(btree);
	child = nilfs_btree_get_sib_node(path, level);
	ncblk = nilfs_btree_nchildren_per_block(btree);

	n = nilfs_btree_node_get_nchildren(root);

	nilfs_btree_node_move_right(root, child, n,
				    NILFS_BTREE_ROOT_NCHILDREN_MAX, ncblk);
	nilfs_btree_node_set_level(root, level + 1);

	if (!buffer_dirty(path[level].bp_sib_bh))
		mark_buffer_dirty(path[level].bp_sib_bh);

	path[level].bp_bh = path[level].bp_sib_bh;
	path[level].bp_sib_bh = NULL;

	nilfs_btree_do_insert(btree, path, level, keyp, ptrp);

	*keyp = nilfs_btree_node_get_key(child, 0);
	*ptrp = path[level].bp_newreq.bpr_ptr;
}
static __u64 nilfs_btree_find_near(const struct nilfs_bmap *btree,
				   const struct nilfs_btree_path *path)
{
	struct nilfs_btree_node *node;
	int level, ncmax;

	if (path == NULL)
		return NILFS_BMAP_INVALID_PTR;

	/* left sibling */
	level = NILFS_BTREE_LEVEL_NODE_MIN;
	if (path[level].bp_index > 0) {
		node = nilfs_btree_get_node(btree, path, level, &ncmax);
		return nilfs_btree_node_get_ptr(node,
						path[level].bp_index - 1,
						ncmax);
	}

	/* parent */
	level = NILFS_BTREE_LEVEL_NODE_MIN + 1;
	if (level <= nilfs_btree_height(btree) - 1) {
		node = nilfs_btree_get_node(btree, path, level, &ncmax);
		return nilfs_btree_node_get_ptr(node, path[level].bp_index,
						ncmax);
	}

	return NILFS_BMAP_INVALID_PTR;
}
static __u64 nilfs_btree_find_target_v(const struct nilfs_bmap *btree,
				       const struct nilfs_btree_path *path,
				       __u64 key)
{
	__u64 ptr;

	ptr = nilfs_bmap_find_target_seq(btree, key);
	if (ptr != NILFS_BMAP_INVALID_PTR)
		/* sequential access */
		return ptr;

	ptr = nilfs_btree_find_near(btree, path);
	if (ptr != NILFS_BMAP_INVALID_PTR)
		/* near */
		return ptr;

	/* block group */
	return nilfs_bmap_find_target_in_group(btree);
}
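
/*
 * Insertion is done in two phases.  nilfs_btree_prepare_insert() walks
 * up from the leaf level, reserves any new pointers and node blocks that
 * will be needed, and records in path[level].bp_op which operation
 * (do_insert, carry_left/right, split or grow) to apply at each level.
 * nilfs_btree_commit_insert() then applies those operations bottom-up.
 */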
static int nilfs_btree_prepare_insert(struct nilfs_bmap *btree,
				      struct nilfs_btree_path *path,
				      int *levelp, __u64 key, __u64 ptr,
				      struct nilfs_bmap_stats *stats)
{
	struct buffer_head *bh;
	struct nilfs_btree_node *node, *parent, *sib;
	__u64 sibptr;
	int pindex, level, ncmax, ncblk, ret;
	struct inode *dat = NULL;

	stats->bs_nblocks = 0;
	level = NILFS_BTREE_LEVEL_DATA;

	/* allocate a new ptr for data block */
	if (NILFS_BMAP_USE_VBN(btree)) {
		path[level].bp_newreq.bpr_ptr =
			nilfs_btree_find_target_v(btree, path, key);
		dat = nilfs_bmap_get_dat(btree);
	}

	ret = nilfs_bmap_prepare_alloc_ptr(btree, &path[level].bp_newreq, dat);
	if (ret < 0)
		goto err_out_data;

	ncblk = nilfs_btree_nchildren_per_block(btree);

	for (level = NILFS_BTREE_LEVEL_NODE_MIN;
	     level < nilfs_btree_height(btree) - 1;
	     level++) {
		node = nilfs_btree_get_nonroot_node(path, level);
		if (nilfs_btree_node_get_nchildren(node) < ncblk) {
			path[level].bp_op = nilfs_btree_do_insert;
			stats->bs_nblocks++;
			goto out;
		}

		parent = nilfs_btree_get_node(btree, path, level + 1, &ncmax);
		pindex = path[level + 1].bp_index;

		/* left sibling */
		if (pindex > 0) {
			sibptr = nilfs_btree_node_get_ptr(parent, pindex - 1,
							  ncmax);
			ret = nilfs_btree_get_block(btree, sibptr, &bh);
			if (ret < 0)
				goto err_out_child_node;
			sib = (struct nilfs_btree_node *)bh->b_data;
			if (nilfs_btree_node_get_nchildren(sib) < ncblk) {
				path[level].bp_sib_bh = bh;
				path[level].bp_op = nilfs_btree_carry_left;
				stats->bs_nblocks++;
				goto out;
			} else {
				brelse(bh);
			}
		}

		/* right sibling */
		if (pindex < nilfs_btree_node_get_nchildren(parent) - 1) {
			sibptr = nilfs_btree_node_get_ptr(parent, pindex + 1,
							  ncmax);
			ret = nilfs_btree_get_block(btree, sibptr, &bh);
			if (ret < 0)
				goto err_out_child_node;
			sib = (struct nilfs_btree_node *)bh->b_data;
			if (nilfs_btree_node_get_nchildren(sib) < ncblk) {
				path[level].bp_sib_bh = bh;
				path[level].bp_op = nilfs_btree_carry_right;
				stats->bs_nblocks++;
				goto out;
			} else {
				brelse(bh);
			}
		}

		/* split */
		path[level].bp_newreq.bpr_ptr =
			path[level - 1].bp_newreq.bpr_ptr + 1;
		ret = nilfs_bmap_prepare_alloc_ptr(btree,
						   &path[level].bp_newreq,
						   dat);
		if (ret < 0)
			goto err_out_child_node;
		ret = nilfs_btree_get_new_block(btree,
						path[level].bp_newreq.bpr_ptr,
						&bh);
		if (ret < 0)
			goto err_out_curr_node;

		stats->bs_nblocks++;

		sib = (struct nilfs_btree_node *)bh->b_data;
		nilfs_btree_node_init(sib, 0, level, 0, ncblk, NULL, NULL);
		path[level].bp_sib_bh = bh;
		path[level].bp_op = nilfs_btree_split;
	}

	/* root */
	node = nilfs_btree_get_root(btree);
	if (nilfs_btree_node_get_nchildren(node) <
	    NILFS_BTREE_ROOT_NCHILDREN_MAX) {
		path[level].bp_op = nilfs_btree_do_insert;
		stats->bs_nblocks++;
		goto out;
	}

	/* grow */
	path[level].bp_newreq.bpr_ptr = path[level - 1].bp_newreq.bpr_ptr + 1;
	ret = nilfs_bmap_prepare_alloc_ptr(btree, &path[level].bp_newreq, dat);
	if (ret < 0)
		goto err_out_child_node;
	ret = nilfs_btree_get_new_block(btree, path[level].bp_newreq.bpr_ptr,
					&bh);
	if (ret < 0)
		goto err_out_curr_node;

	nilfs_btree_node_init((struct nilfs_btree_node *)bh->b_data,
			      0, level, 0, ncblk, NULL, NULL);
	path[level].bp_sib_bh = bh;
	path[level].bp_op = nilfs_btree_grow;

	level++;
	path[level].bp_op = nilfs_btree_do_insert;

	/* a newly-created node block and a data block are added */
	stats->bs_nblocks += 2;

	/* success */
 out:
	*levelp = level;
	return ret;

	/* error */
 err_out_curr_node:
	nilfs_bmap_abort_alloc_ptr(btree, &path[level].bp_newreq, dat);
 err_out_child_node:
	for (level--; level > NILFS_BTREE_LEVEL_DATA; level--) {
		nilfs_btnode_delete(path[level].bp_sib_bh);
		nilfs_bmap_abort_alloc_ptr(btree, &path[level].bp_newreq, dat);
	}

	nilfs_bmap_abort_alloc_ptr(btree, &path[level].bp_newreq, dat);
 err_out_data:
	*levelp = level;
	stats->bs_nblocks = 0;
	return ret;
}
static void nilfs_btree_commit_insert(struct nilfs_bmap *btree,
				      struct nilfs_btree_path *path,
				      int maxlevel, __u64 key, __u64 ptr)
{
	struct inode *dat = NULL;
	int level;

	set_buffer_nilfs_volatile((struct buffer_head *)((unsigned long)ptr));
	ptr = path[NILFS_BTREE_LEVEL_DATA].bp_newreq.bpr_ptr;
	if (NILFS_BMAP_USE_VBN(btree)) {
		nilfs_bmap_set_target_v(btree, key, ptr);
		dat = nilfs_bmap_get_dat(btree);
	}

	for (level = NILFS_BTREE_LEVEL_NODE_MIN; level <= maxlevel; level++) {
		nilfs_bmap_commit_alloc_ptr(btree,
					    &path[level - 1].bp_newreq, dat);
		path[level].bp_op(btree, path, level, &key, &ptr);
	}

	if (!nilfs_bmap_dirty(btree))
		nilfs_bmap_set_dirty(btree);
}
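
/*
 * nilfs_btree_insert() is the bop_insert operation of the btree bmap:
 * it looks up the insertion point, prepares and commits the insertion,
 * and charges the newly added blocks to the inode.
 */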
static int nilfs_btree_insert(struct nilfs_bmap *btree, __u64 key, __u64 ptr)
{
	struct nilfs_btree_path *path;
	struct nilfs_bmap_stats stats;
	int level, ret;

	path = nilfs_btree_alloc_path();
	if (path == NULL)
		return -ENOMEM;

	ret = nilfs_btree_do_lookup(btree, path, key, NULL,
				    NILFS_BTREE_LEVEL_NODE_MIN, 0);
	if (ret != -ENOENT) {
		if (ret == 0)
			ret = -EEXIST;
		goto out;
	}

	ret = nilfs_btree_prepare_insert(btree, path, &level, key, ptr, &stats);
	if (ret < 0)
		goto out;
	nilfs_btree_commit_insert(btree, path, level, key, ptr);
	nilfs_inode_add_blocks(btree->b_inode, stats.bs_nblocks);

 out:
	nilfs_btree_free_path(path);
	return ret;
}
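
/*
 * nilfs_btree_do_delete() removes the entry at path[level].bp_index from
 * the node at @level and, when the leftmost entry was removed, promotes
 * the new first key to the parent level.
 */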
static void nilfs_btree_do_delete(struct nilfs_bmap *btree,
				  struct nilfs_btree_path *path,
				  int level, __u64 *keyp, __u64 *ptrp)
{
	struct nilfs_btree_node *node;
	int ncblk;

	if (level < nilfs_btree_height(btree) - 1) {
		node = nilfs_btree_get_nonroot_node(path, level);
		ncblk = nilfs_btree_nchildren_per_block(btree);
		nilfs_btree_node_delete(node, path[level].bp_index,
					keyp, ptrp, ncblk);
		if (!buffer_dirty(path[level].bp_bh))
			mark_buffer_dirty(path[level].bp_bh);
		if (path[level].bp_index == 0)
			nilfs_btree_promote_key(btree, path, level + 1,
					nilfs_btree_node_get_key(node, 0));
	} else {
		node = nilfs_btree_get_root(btree);
		nilfs_btree_node_delete(node, path[level].bp_index,
					keyp, ptrp,
					NILFS_BTREE_ROOT_NCHILDREN_MAX);
	}
}
1277 static void nilfs_btree_borrow_left(struct nilfs_bmap
*btree
,
1278 struct nilfs_btree_path
*path
,
1279 int level
, __u64
*keyp
, __u64
*ptrp
)
1281 struct nilfs_btree_node
*node
, *left
;
1282 int nchildren
, lnchildren
, n
, ncblk
;
1284 nilfs_btree_do_delete(btree
, path
, level
, keyp
, ptrp
);
1286 node
= nilfs_btree_get_nonroot_node(path
, level
);
1287 left
= nilfs_btree_get_sib_node(path
, level
);
1288 nchildren
= nilfs_btree_node_get_nchildren(node
);
1289 lnchildren
= nilfs_btree_node_get_nchildren(left
);
1290 ncblk
= nilfs_btree_nchildren_per_block(btree
);
1292 n
= (nchildren
+ lnchildren
) / 2 - nchildren
;
1294 nilfs_btree_node_move_right(left
, node
, n
, ncblk
, ncblk
);
1296 if (!buffer_dirty(path
[level
].bp_bh
))
1297 mark_buffer_dirty(path
[level
].bp_bh
);
1298 if (!buffer_dirty(path
[level
].bp_sib_bh
))
1299 mark_buffer_dirty(path
[level
].bp_sib_bh
);
1301 nilfs_btree_promote_key(btree
, path
, level
+ 1,
1302 nilfs_btree_node_get_key(node
, 0));
1304 brelse(path
[level
].bp_sib_bh
);
1305 path
[level
].bp_sib_bh
= NULL
;
1306 path
[level
].bp_index
+= n
;
1309 static void nilfs_btree_borrow_right(struct nilfs_bmap
*btree
,
1310 struct nilfs_btree_path
*path
,
1311 int level
, __u64
*keyp
, __u64
*ptrp
)
1313 struct nilfs_btree_node
*node
, *right
;
1314 int nchildren
, rnchildren
, n
, ncblk
;
1316 nilfs_btree_do_delete(btree
, path
, level
, keyp
, ptrp
);
1318 node
= nilfs_btree_get_nonroot_node(path
, level
);
1319 right
= nilfs_btree_get_sib_node(path
, level
);
1320 nchildren
= nilfs_btree_node_get_nchildren(node
);
1321 rnchildren
= nilfs_btree_node_get_nchildren(right
);
1322 ncblk
= nilfs_btree_nchildren_per_block(btree
);
1324 n
= (nchildren
+ rnchildren
) / 2 - nchildren
;
1326 nilfs_btree_node_move_left(node
, right
, n
, ncblk
, ncblk
);
1328 if (!buffer_dirty(path
[level
].bp_bh
))
1329 mark_buffer_dirty(path
[level
].bp_bh
);
1330 if (!buffer_dirty(path
[level
].bp_sib_bh
))
1331 mark_buffer_dirty(path
[level
].bp_sib_bh
);
1333 path
[level
+ 1].bp_index
++;
1334 nilfs_btree_promote_key(btree
, path
, level
+ 1,
1335 nilfs_btree_node_get_key(right
, 0));
1336 path
[level
+ 1].bp_index
--;
1338 brelse(path
[level
].bp_sib_bh
);
1339 path
[level
].bp_sib_bh
= NULL
;
1342 static void nilfs_btree_concat_left(struct nilfs_bmap
*btree
,
1343 struct nilfs_btree_path
*path
,
1344 int level
, __u64
*keyp
, __u64
*ptrp
)
1346 struct nilfs_btree_node
*node
, *left
;
1349 nilfs_btree_do_delete(btree
, path
, level
, keyp
, ptrp
);
1351 node
= nilfs_btree_get_nonroot_node(path
, level
);
1352 left
= nilfs_btree_get_sib_node(path
, level
);
1353 ncblk
= nilfs_btree_nchildren_per_block(btree
);
1355 n
= nilfs_btree_node_get_nchildren(node
);
1357 nilfs_btree_node_move_left(left
, node
, n
, ncblk
, ncblk
);
1359 if (!buffer_dirty(path
[level
].bp_sib_bh
))
1360 mark_buffer_dirty(path
[level
].bp_sib_bh
);
1362 nilfs_btnode_delete(path
[level
].bp_bh
);
1363 path
[level
].bp_bh
= path
[level
].bp_sib_bh
;
1364 path
[level
].bp_sib_bh
= NULL
;
1365 path
[level
].bp_index
+= nilfs_btree_node_get_nchildren(left
);
1368 static void nilfs_btree_concat_right(struct nilfs_bmap
*btree
,
1369 struct nilfs_btree_path
*path
,
1370 int level
, __u64
*keyp
, __u64
*ptrp
)
1372 struct nilfs_btree_node
*node
, *right
;
1375 nilfs_btree_do_delete(btree
, path
, level
, keyp
, ptrp
);
1377 node
= nilfs_btree_get_nonroot_node(path
, level
);
1378 right
= nilfs_btree_get_sib_node(path
, level
);
1379 ncblk
= nilfs_btree_nchildren_per_block(btree
);
1381 n
= nilfs_btree_node_get_nchildren(right
);
1383 nilfs_btree_node_move_left(node
, right
, n
, ncblk
, ncblk
);
1385 if (!buffer_dirty(path
[level
].bp_bh
))
1386 mark_buffer_dirty(path
[level
].bp_bh
);
1388 nilfs_btnode_delete(path
[level
].bp_sib_bh
);
1389 path
[level
].bp_sib_bh
= NULL
;
1390 path
[level
+ 1].bp_index
++;
1393 static void nilfs_btree_shrink(struct nilfs_bmap
*btree
,
1394 struct nilfs_btree_path
*path
,
1395 int level
, __u64
*keyp
, __u64
*ptrp
)
1397 struct nilfs_btree_node
*root
, *child
;
1400 nilfs_btree_do_delete(btree
, path
, level
, keyp
, ptrp
);
1402 root
= nilfs_btree_get_root(btree
);
1403 child
= nilfs_btree_get_nonroot_node(path
, level
);
1404 ncblk
= nilfs_btree_nchildren_per_block(btree
);
1406 nilfs_btree_node_delete(root
, 0, NULL
, NULL
,
1407 NILFS_BTREE_ROOT_NCHILDREN_MAX
);
1408 nilfs_btree_node_set_level(root
, level
);
1409 n
= nilfs_btree_node_get_nchildren(child
);
1410 nilfs_btree_node_move_left(root
, child
, n
,
1411 NILFS_BTREE_ROOT_NCHILDREN_MAX
, ncblk
);
1413 nilfs_btnode_delete(path
[level
].bp_bh
);
1414 path
[level
].bp_bh
= NULL
;
1417 static void nilfs_btree_nop(struct nilfs_bmap
*btree
,
1418 struct nilfs_btree_path
*path
,
1419 int level
, __u64
*keyp
, __u64
*ptrp
)
1423 static int nilfs_btree_prepare_delete(struct nilfs_bmap
*btree
,
1424 struct nilfs_btree_path
*path
,
1426 struct nilfs_bmap_stats
*stats
,
1429 struct buffer_head
*bh
;
1430 struct nilfs_btree_node
*node
, *parent
, *sib
;
1432 int pindex
, dindex
, level
, ncmin
, ncmax
, ncblk
, ret
;
1435 stats
->bs_nblocks
= 0;
1436 ncmin
= NILFS_BTREE_NODE_NCHILDREN_MIN(nilfs_btree_node_size(btree
));
1437 ncblk
= nilfs_btree_nchildren_per_block(btree
);
1439 for (level
= NILFS_BTREE_LEVEL_NODE_MIN
, dindex
= path
[level
].bp_index
;
1440 level
< nilfs_btree_height(btree
) - 1;
1442 node
= nilfs_btree_get_nonroot_node(path
, level
);
1443 path
[level
].bp_oldreq
.bpr_ptr
=
1444 nilfs_btree_node_get_ptr(node
, dindex
, ncblk
);
1445 ret
= nilfs_bmap_prepare_end_ptr(btree
,
1446 &path
[level
].bp_oldreq
, dat
);
1448 goto err_out_child_node
;
1450 if (nilfs_btree_node_get_nchildren(node
) > ncmin
) {
1451 path
[level
].bp_op
= nilfs_btree_do_delete
;
1452 stats
->bs_nblocks
++;
1456 parent
= nilfs_btree_get_node(btree
, path
, level
+ 1, &ncmax
);
1457 pindex
= path
[level
+ 1].bp_index
;
1462 sibptr
= nilfs_btree_node_get_ptr(parent
, pindex
- 1,
1464 ret
= nilfs_btree_get_block(btree
, sibptr
, &bh
);
1466 goto err_out_curr_node
;
1467 sib
= (struct nilfs_btree_node
*)bh
->b_data
;
1468 if (nilfs_btree_node_get_nchildren(sib
) > ncmin
) {
1469 path
[level
].bp_sib_bh
= bh
;
1470 path
[level
].bp_op
= nilfs_btree_borrow_left
;
1471 stats
->bs_nblocks
++;
1474 path
[level
].bp_sib_bh
= bh
;
1475 path
[level
].bp_op
= nilfs_btree_concat_left
;
1476 stats
->bs_nblocks
++;
1480 nilfs_btree_node_get_nchildren(parent
) - 1) {
1482 sibptr
= nilfs_btree_node_get_ptr(parent
, pindex
+ 1,
1484 ret
= nilfs_btree_get_block(btree
, sibptr
, &bh
);
1486 goto err_out_curr_node
;
1487 sib
= (struct nilfs_btree_node
*)bh
->b_data
;
1488 if (nilfs_btree_node_get_nchildren(sib
) > ncmin
) {
1489 path
[level
].bp_sib_bh
= bh
;
1490 path
[level
].bp_op
= nilfs_btree_borrow_right
;
1491 stats
->bs_nblocks
++;
1494 path
[level
].bp_sib_bh
= bh
;
1495 path
[level
].bp_op
= nilfs_btree_concat_right
;
1496 stats
->bs_nblocks
++;
1498 * When merging right sibling node
1499 * into the current node, pointer to
1500 * the right sibling node must be
1501 * terminated instead. The adjustment
1502 * below is required for that.
1504 dindex
= pindex
+ 1;
1509 /* the only child of the root node */
1510 WARN_ON(level
!= nilfs_btree_height(btree
) - 2);
1511 if (nilfs_btree_node_get_nchildren(node
) - 1 <=
1512 NILFS_BTREE_ROOT_NCHILDREN_MAX
) {
1513 path
[level
].bp_op
= nilfs_btree_shrink
;
1514 stats
->bs_nblocks
+= 2;
1516 path
[level
].bp_op
= nilfs_btree_nop
;
1517 goto shrink_root_child
;
1519 path
[level
].bp_op
= nilfs_btree_do_delete
;
1520 stats
->bs_nblocks
++;
1526 /* child of the root node is deleted */
1527 path
[level
].bp_op
= nilfs_btree_do_delete
;
1528 stats
->bs_nblocks
++;
1531 node
= nilfs_btree_get_root(btree
);
1532 path
[level
].bp_oldreq
.bpr_ptr
=
1533 nilfs_btree_node_get_ptr(node
, dindex
,
1534 NILFS_BTREE_ROOT_NCHILDREN_MAX
);
1536 ret
= nilfs_bmap_prepare_end_ptr(btree
, &path
[level
].bp_oldreq
, dat
);
1538 goto err_out_child_node
;
1547 nilfs_bmap_abort_end_ptr(btree
, &path
[level
].bp_oldreq
, dat
);
1549 for (level
--; level
>= NILFS_BTREE_LEVEL_NODE_MIN
; level
--) {
1550 brelse(path
[level
].bp_sib_bh
);
1551 nilfs_bmap_abort_end_ptr(btree
, &path
[level
].bp_oldreq
, dat
);
1554 stats
->bs_nblocks
= 0;
1558 static void nilfs_btree_commit_delete(struct nilfs_bmap
*btree
,
1559 struct nilfs_btree_path
*path
,
1560 int maxlevel
, struct inode
*dat
)
1564 for (level
= NILFS_BTREE_LEVEL_NODE_MIN
; level
<= maxlevel
; level
++) {
1565 nilfs_bmap_commit_end_ptr(btree
, &path
[level
].bp_oldreq
, dat
);
1566 path
[level
].bp_op(btree
, path
, level
, NULL
, NULL
);
1569 if (!nilfs_bmap_dirty(btree
))
1570 nilfs_bmap_set_dirty(btree
);
1573 static int nilfs_btree_delete(struct nilfs_bmap
*btree
, __u64 key
)
1576 struct nilfs_btree_path
*path
;
1577 struct nilfs_bmap_stats stats
;
1581 path
= nilfs_btree_alloc_path();
1585 ret
= nilfs_btree_do_lookup(btree
, path
, key
, NULL
,
1586 NILFS_BTREE_LEVEL_NODE_MIN
, 0);
1591 dat
= NILFS_BMAP_USE_VBN(btree
) ? nilfs_bmap_get_dat(btree
) : NULL
;
1593 ret
= nilfs_btree_prepare_delete(btree
, path
, &level
, &stats
, dat
);
1596 nilfs_btree_commit_delete(btree
, path
, level
, dat
);
1597 nilfs_inode_sub_blocks(btree
->b_inode
, stats
.bs_nblocks
);
1600 nilfs_btree_free_path(path
);
1604 static int nilfs_btree_seek_key(const struct nilfs_bmap
*btree
, __u64 start
,
1607 struct nilfs_btree_path
*path
;
1608 const int minlevel
= NILFS_BTREE_LEVEL_NODE_MIN
;
1611 path
= nilfs_btree_alloc_path();
1615 ret
= nilfs_btree_do_lookup(btree
, path
, start
, NULL
, minlevel
, 0);
1618 else if (ret
== -ENOENT
)
1619 ret
= nilfs_btree_get_next_key(btree
, path
, minlevel
, keyp
);
1621 nilfs_btree_free_path(path
);
1625 static int nilfs_btree_last_key(const struct nilfs_bmap
*btree
, __u64
*keyp
)
1627 struct nilfs_btree_path
*path
;
1630 path
= nilfs_btree_alloc_path();
1634 ret
= nilfs_btree_do_lookup_last(btree
, path
, keyp
, NULL
);
1636 nilfs_btree_free_path(path
);
1641 static int nilfs_btree_check_delete(struct nilfs_bmap
*btree
, __u64 key
)
1643 struct buffer_head
*bh
;
1644 struct nilfs_btree_node
*root
, *node
;
1645 __u64 maxkey
, nextmaxkey
;
1649 root
= nilfs_btree_get_root(btree
);
1650 switch (nilfs_btree_height(btree
)) {
1656 nchildren
= nilfs_btree_node_get_nchildren(root
);
1659 ptr
= nilfs_btree_node_get_ptr(root
, nchildren
- 1,
1660 NILFS_BTREE_ROOT_NCHILDREN_MAX
);
1661 ret
= nilfs_btree_get_block(btree
, ptr
, &bh
);
1664 node
= (struct nilfs_btree_node
*)bh
->b_data
;
1670 nchildren
= nilfs_btree_node_get_nchildren(node
);
1671 maxkey
= nilfs_btree_node_get_key(node
, nchildren
- 1);
1672 nextmaxkey
= (nchildren
> 1) ?
1673 nilfs_btree_node_get_key(node
, nchildren
- 2) : 0;
1677 return (maxkey
== key
) && (nextmaxkey
< NILFS_BMAP_LARGE_LOW
);
1680 static int nilfs_btree_gather_data(struct nilfs_bmap
*btree
,
1681 __u64
*keys
, __u64
*ptrs
, int nitems
)
1683 struct buffer_head
*bh
;
1684 struct nilfs_btree_node
*node
, *root
;
1688 int nchildren
, ncmax
, i
, ret
;
1690 root
= nilfs_btree_get_root(btree
);
1691 switch (nilfs_btree_height(btree
)) {
1695 ncmax
= NILFS_BTREE_ROOT_NCHILDREN_MAX
;
1698 nchildren
= nilfs_btree_node_get_nchildren(root
);
1699 WARN_ON(nchildren
> 1);
1700 ptr
= nilfs_btree_node_get_ptr(root
, nchildren
- 1,
1701 NILFS_BTREE_ROOT_NCHILDREN_MAX
);
1702 ret
= nilfs_btree_get_block(btree
, ptr
, &bh
);
1705 node
= (struct nilfs_btree_node
*)bh
->b_data
;
1706 ncmax
= nilfs_btree_nchildren_per_block(btree
);
1713 nchildren
= nilfs_btree_node_get_nchildren(node
);
1714 if (nchildren
< nitems
)
1716 dkeys
= nilfs_btree_node_dkeys(node
);
1717 dptrs
= nilfs_btree_node_dptrs(node
, ncmax
);
1718 for (i
= 0; i
< nitems
; i
++) {
1719 keys
[i
] = le64_to_cpu(dkeys
[i
]);
1720 ptrs
[i
] = le64_to_cpu(dptrs
[i
]);
1730 nilfs_btree_prepare_convert_and_insert(struct nilfs_bmap
*btree
, __u64 key
,
1731 union nilfs_bmap_ptr_req
*dreq
,
1732 union nilfs_bmap_ptr_req
*nreq
,
1733 struct buffer_head
**bhp
,
1734 struct nilfs_bmap_stats
*stats
)
1736 struct buffer_head
*bh
;
1737 struct inode
*dat
= NULL
;
1740 stats
->bs_nblocks
= 0;
1743 /* cannot find near ptr */
1744 if (NILFS_BMAP_USE_VBN(btree
)) {
1745 dreq
->bpr_ptr
= nilfs_btree_find_target_v(btree
, NULL
, key
);
1746 dat
= nilfs_bmap_get_dat(btree
);
1749 ret
= nilfs_bmap_prepare_alloc_ptr(btree
, dreq
, dat
);
1754 stats
->bs_nblocks
++;
1756 nreq
->bpr_ptr
= dreq
->bpr_ptr
+ 1;
1757 ret
= nilfs_bmap_prepare_alloc_ptr(btree
, nreq
, dat
);
1761 ret
= nilfs_btree_get_new_block(btree
, nreq
->bpr_ptr
, &bh
);
1766 stats
->bs_nblocks
++;
1774 nilfs_bmap_abort_alloc_ptr(btree
, nreq
, dat
);
1776 nilfs_bmap_abort_alloc_ptr(btree
, dreq
, dat
);
1777 stats
->bs_nblocks
= 0;
1783 nilfs_btree_commit_convert_and_insert(struct nilfs_bmap
*btree
,
1784 __u64 key
, __u64 ptr
,
1785 const __u64
*keys
, const __u64
*ptrs
,
1787 union nilfs_bmap_ptr_req
*dreq
,
1788 union nilfs_bmap_ptr_req
*nreq
,
1789 struct buffer_head
*bh
)
1791 struct nilfs_btree_node
*node
;
1796 /* free resources */
1797 if (btree
->b_ops
->bop_clear
!= NULL
)
1798 btree
->b_ops
->bop_clear(btree
);
1800 /* ptr must be a pointer to a buffer head. */
1801 set_buffer_nilfs_volatile((struct buffer_head
*)((unsigned long)ptr
));
1803 /* convert and insert */
1804 dat
= NILFS_BMAP_USE_VBN(btree
) ? nilfs_bmap_get_dat(btree
) : NULL
;
1805 __nilfs_btree_init(btree
);
1807 nilfs_bmap_commit_alloc_ptr(btree
, dreq
, dat
);
1808 nilfs_bmap_commit_alloc_ptr(btree
, nreq
, dat
);
1810 /* create child node at level 1 */
1811 node
= (struct nilfs_btree_node
*)bh
->b_data
;
1812 ncblk
= nilfs_btree_nchildren_per_block(btree
);
1813 nilfs_btree_node_init(node
, 0, 1, n
, ncblk
, keys
, ptrs
);
1814 nilfs_btree_node_insert(node
, n
, key
, dreq
->bpr_ptr
, ncblk
);
1815 if (!buffer_dirty(bh
))
1816 mark_buffer_dirty(bh
);
1817 if (!nilfs_bmap_dirty(btree
))
1818 nilfs_bmap_set_dirty(btree
);
1822 /* create root node at level 2 */
1823 node
= nilfs_btree_get_root(btree
);
1824 tmpptr
= nreq
->bpr_ptr
;
1825 nilfs_btree_node_init(node
, NILFS_BTREE_NODE_ROOT
, 2, 1,
1826 NILFS_BTREE_ROOT_NCHILDREN_MAX
,
1829 nilfs_bmap_commit_alloc_ptr(btree
, dreq
, dat
);
1831 /* create root node at level 1 */
1832 node
= nilfs_btree_get_root(btree
);
1833 nilfs_btree_node_init(node
, NILFS_BTREE_NODE_ROOT
, 1, n
,
1834 NILFS_BTREE_ROOT_NCHILDREN_MAX
,
1836 nilfs_btree_node_insert(node
, n
, key
, dreq
->bpr_ptr
,
1837 NILFS_BTREE_ROOT_NCHILDREN_MAX
);
1838 if (!nilfs_bmap_dirty(btree
))
1839 nilfs_bmap_set_dirty(btree
);
1842 if (NILFS_BMAP_USE_VBN(btree
))
1843 nilfs_bmap_set_target_v(btree
, key
, dreq
->bpr_ptr
);
1847 * nilfs_btree_convert_and_insert -
1855 int nilfs_btree_convert_and_insert(struct nilfs_bmap
*btree
,
1856 __u64 key
, __u64 ptr
,
1857 const __u64
*keys
, const __u64
*ptrs
, int n
)
1859 struct buffer_head
*bh
;
1860 union nilfs_bmap_ptr_req dreq
, nreq
, *di
, *ni
;
1861 struct nilfs_bmap_stats stats
;
1864 if (n
+ 1 <= NILFS_BTREE_ROOT_NCHILDREN_MAX
) {
1867 } else if ((n
+ 1) <= NILFS_BTREE_NODE_NCHILDREN_MAX(
1868 1 << btree
->b_inode
->i_blkbits
)) {
1877 ret
= nilfs_btree_prepare_convert_and_insert(btree
, key
, di
, ni
, &bh
,
1881 nilfs_btree_commit_convert_and_insert(btree
, key
, ptr
, keys
, ptrs
, n
,
1883 nilfs_inode_add_blocks(btree
->b_inode
, stats
.bs_nblocks
);
1887 static int nilfs_btree_propagate_p(struct nilfs_bmap
*btree
,
1888 struct nilfs_btree_path
*path
,
1890 struct buffer_head
*bh
)
1892 while ((++level
< nilfs_btree_height(btree
) - 1) &&
1893 !buffer_dirty(path
[level
].bp_bh
))
1894 mark_buffer_dirty(path
[level
].bp_bh
);
1899 static int nilfs_btree_prepare_update_v(struct nilfs_bmap
*btree
,
1900 struct nilfs_btree_path
*path
,
1901 int level
, struct inode
*dat
)
1903 struct nilfs_btree_node
*parent
;
1906 parent
= nilfs_btree_get_node(btree
, path
, level
+ 1, &ncmax
);
1907 path
[level
].bp_oldreq
.bpr_ptr
=
1908 nilfs_btree_node_get_ptr(parent
, path
[level
+ 1].bp_index
,
1910 path
[level
].bp_newreq
.bpr_ptr
= path
[level
].bp_oldreq
.bpr_ptr
+ 1;
1911 ret
= nilfs_dat_prepare_update(dat
, &path
[level
].bp_oldreq
.bpr_req
,
1912 &path
[level
].bp_newreq
.bpr_req
);
1916 if (buffer_nilfs_node(path
[level
].bp_bh
)) {
1917 path
[level
].bp_ctxt
.oldkey
= path
[level
].bp_oldreq
.bpr_ptr
;
1918 path
[level
].bp_ctxt
.newkey
= path
[level
].bp_newreq
.bpr_ptr
;
1919 path
[level
].bp_ctxt
.bh
= path
[level
].bp_bh
;
1920 ret
= nilfs_btnode_prepare_change_key(
1921 &NILFS_BMAP_I(btree
)->i_btnode_cache
,
1922 &path
[level
].bp_ctxt
);
1924 nilfs_dat_abort_update(dat
,
1925 &path
[level
].bp_oldreq
.bpr_req
,
1926 &path
[level
].bp_newreq
.bpr_req
);
1934 static void nilfs_btree_commit_update_v(struct nilfs_bmap
*btree
,
1935 struct nilfs_btree_path
*path
,
1936 int level
, struct inode
*dat
)
1938 struct nilfs_btree_node
*parent
;
1941 nilfs_dat_commit_update(dat
, &path
[level
].bp_oldreq
.bpr_req
,
1942 &path
[level
].bp_newreq
.bpr_req
,
1943 btree
->b_ptr_type
== NILFS_BMAP_PTR_VS
);
1945 if (buffer_nilfs_node(path
[level
].bp_bh
)) {
1946 nilfs_btnode_commit_change_key(
1947 &NILFS_BMAP_I(btree
)->i_btnode_cache
,
1948 &path
[level
].bp_ctxt
);
1949 path
[level
].bp_bh
= path
[level
].bp_ctxt
.bh
;
1951 set_buffer_nilfs_volatile(path
[level
].bp_bh
);
1953 parent
= nilfs_btree_get_node(btree
, path
, level
+ 1, &ncmax
);
1954 nilfs_btree_node_set_ptr(parent
, path
[level
+ 1].bp_index
,
1955 path
[level
].bp_newreq
.bpr_ptr
, ncmax
);
1958 static void nilfs_btree_abort_update_v(struct nilfs_bmap
*btree
,
1959 struct nilfs_btree_path
*path
,
1960 int level
, struct inode
*dat
)
1962 nilfs_dat_abort_update(dat
, &path
[level
].bp_oldreq
.bpr_req
,
1963 &path
[level
].bp_newreq
.bpr_req
);
1964 if (buffer_nilfs_node(path
[level
].bp_bh
))
1965 nilfs_btnode_abort_change_key(
1966 &NILFS_BMAP_I(btree
)->i_btnode_cache
,
1967 &path
[level
].bp_ctxt
);
1970 static int nilfs_btree_prepare_propagate_v(struct nilfs_bmap
*btree
,
1971 struct nilfs_btree_path
*path
,
1972 int minlevel
, int *maxlevelp
,
1978 if (!buffer_nilfs_volatile(path
[level
].bp_bh
)) {
1979 ret
= nilfs_btree_prepare_update_v(btree
, path
, level
, dat
);
1983 while ((++level
< nilfs_btree_height(btree
) - 1) &&
1984 !buffer_dirty(path
[level
].bp_bh
)) {
1986 WARN_ON(buffer_nilfs_volatile(path
[level
].bp_bh
));
1987 ret
= nilfs_btree_prepare_update_v(btree
, path
, level
, dat
);
1993 *maxlevelp
= level
- 1;
1998 while (--level
> minlevel
)
1999 nilfs_btree_abort_update_v(btree
, path
, level
, dat
);
2000 if (!buffer_nilfs_volatile(path
[level
].bp_bh
))
2001 nilfs_btree_abort_update_v(btree
, path
, level
, dat
);
2005 static void nilfs_btree_commit_propagate_v(struct nilfs_bmap
*btree
,
2006 struct nilfs_btree_path
*path
,
2007 int minlevel
, int maxlevel
,
2008 struct buffer_head
*bh
,
2013 if (!buffer_nilfs_volatile(path
[minlevel
].bp_bh
))
2014 nilfs_btree_commit_update_v(btree
, path
, minlevel
, dat
);
2016 for (level
= minlevel
+ 1; level
<= maxlevel
; level
++)
2017 nilfs_btree_commit_update_v(btree
, path
, level
, dat
);
2020 static int nilfs_btree_propagate_v(struct nilfs_bmap
*btree
,
2021 struct nilfs_btree_path
*path
,
2022 int level
, struct buffer_head
*bh
)
2024 int maxlevel
= 0, ret
;
2025 struct nilfs_btree_node
*parent
;
2026 struct inode
*dat
= nilfs_bmap_get_dat(btree
);
2031 path
[level
].bp_bh
= bh
;
2032 ret
= nilfs_btree_prepare_propagate_v(btree
, path
, level
, &maxlevel
,
2037 if (buffer_nilfs_volatile(path
[level
].bp_bh
)) {
2038 parent
= nilfs_btree_get_node(btree
, path
, level
+ 1, &ncmax
);
2039 ptr
= nilfs_btree_node_get_ptr(parent
,
2040 path
[level
+ 1].bp_index
,
2042 ret
= nilfs_dat_mark_dirty(dat
, ptr
);
2047 nilfs_btree_commit_propagate_v(btree
, path
, level
, maxlevel
, bh
, dat
);
2050 brelse(path
[level
].bp_bh
);
2051 path
[level
].bp_bh
= NULL
;
2055 static int nilfs_btree_propagate(struct nilfs_bmap
*btree
,
2056 struct buffer_head
*bh
)
2058 struct nilfs_btree_path
*path
;
2059 struct nilfs_btree_node
*node
;
2063 WARN_ON(!buffer_dirty(bh
));
2065 path
= nilfs_btree_alloc_path();
2069 if (buffer_nilfs_node(bh
)) {
2070 node
= (struct nilfs_btree_node
*)bh
->b_data
;
2071 key
= nilfs_btree_node_get_key(node
, 0);
2072 level
= nilfs_btree_node_get_level(node
);
2074 key
= nilfs_bmap_data_get_key(btree
, bh
);
2075 level
= NILFS_BTREE_LEVEL_DATA
;
2078 ret
= nilfs_btree_do_lookup(btree
, path
, key
, NULL
, level
+ 1, 0);
2080 if (unlikely(ret
== -ENOENT
))
2081 printk(KERN_CRIT
"%s: key = %llu, level == %d\n",
2082 __func__
, (unsigned long long)key
, level
);
2086 ret
= NILFS_BMAP_USE_VBN(btree
) ?
2087 nilfs_btree_propagate_v(btree
, path
, level
, bh
) :
2088 nilfs_btree_propagate_p(btree
, path
, level
, bh
);
2091 nilfs_btree_free_path(path
);
2096 static int nilfs_btree_propagate_gc(struct nilfs_bmap
*btree
,
2097 struct buffer_head
*bh
)
2099 return nilfs_dat_mark_dirty(nilfs_bmap_get_dat(btree
), bh
->b_blocknr
);
2102 static void nilfs_btree_add_dirty_buffer(struct nilfs_bmap
*btree
,
2103 struct list_head
*lists
,
2104 struct buffer_head
*bh
)
2106 struct list_head
*head
;
2107 struct buffer_head
*cbh
;
2108 struct nilfs_btree_node
*node
, *cnode
;
2113 node
= (struct nilfs_btree_node
*)bh
->b_data
;
2114 key
= nilfs_btree_node_get_key(node
, 0);
2115 level
= nilfs_btree_node_get_level(node
);
2116 if (level
< NILFS_BTREE_LEVEL_NODE_MIN
||
2117 level
>= NILFS_BTREE_LEVEL_MAX
) {
2120 "%s: invalid btree level: %d (key=%llu, ino=%lu, "
2122 __func__
, level
, (unsigned long long)key
,
2123 NILFS_BMAP_I(btree
)->vfs_inode
.i_ino
,
2124 (unsigned long long)bh
->b_blocknr
);
2128 list_for_each(head
, &lists
[level
]) {
2129 cbh
= list_entry(head
, struct buffer_head
, b_assoc_buffers
);
2130 cnode
= (struct nilfs_btree_node
*)cbh
->b_data
;
2131 ckey
= nilfs_btree_node_get_key(cnode
, 0);
2135 list_add_tail(&bh
->b_assoc_buffers
, head
);
2138 static void nilfs_btree_lookup_dirty_buffers(struct nilfs_bmap
*btree
,
2139 struct list_head
*listp
)
2141 struct address_space
*btcache
= &NILFS_BMAP_I(btree
)->i_btnode_cache
;
2142 struct list_head lists
[NILFS_BTREE_LEVEL_MAX
];
2143 struct pagevec pvec
;
2144 struct buffer_head
*bh
, *head
;
2148 for (level
= NILFS_BTREE_LEVEL_NODE_MIN
;
2149 level
< NILFS_BTREE_LEVEL_MAX
;
2151 INIT_LIST_HEAD(&lists
[level
]);
2153 pagevec_init(&pvec
, 0);
2155 while (pagevec_lookup_tag(&pvec
, btcache
, &index
, PAGECACHE_TAG_DIRTY
,
2157 for (i
= 0; i
< pagevec_count(&pvec
); i
++) {
2158 bh
= head
= page_buffers(pvec
.pages
[i
]);
2160 if (buffer_dirty(bh
))
2161 nilfs_btree_add_dirty_buffer(btree
,
2163 } while ((bh
= bh
->b_this_page
) != head
);
2165 pagevec_release(&pvec
);
2169 for (level
= NILFS_BTREE_LEVEL_NODE_MIN
;
2170 level
< NILFS_BTREE_LEVEL_MAX
;
2172 list_splice_tail(&lists
[level
], listp
);
2175 static int nilfs_btree_assign_p(struct nilfs_bmap
*btree
,
2176 struct nilfs_btree_path
*path
,
2178 struct buffer_head
**bh
,
2180 union nilfs_binfo
*binfo
)
2182 struct nilfs_btree_node
*parent
;
2187 parent
= nilfs_btree_get_node(btree
, path
, level
+ 1, &ncmax
);
2188 ptr
= nilfs_btree_node_get_ptr(parent
, path
[level
+ 1].bp_index
,
2190 if (buffer_nilfs_node(*bh
)) {
2191 path
[level
].bp_ctxt
.oldkey
= ptr
;
2192 path
[level
].bp_ctxt
.newkey
= blocknr
;
2193 path
[level
].bp_ctxt
.bh
= *bh
;
2194 ret
= nilfs_btnode_prepare_change_key(
2195 &NILFS_BMAP_I(btree
)->i_btnode_cache
,
2196 &path
[level
].bp_ctxt
);
2199 nilfs_btnode_commit_change_key(
2200 &NILFS_BMAP_I(btree
)->i_btnode_cache
,
2201 &path
[level
].bp_ctxt
);
2202 *bh
= path
[level
].bp_ctxt
.bh
;
2205 nilfs_btree_node_set_ptr(parent
, path
[level
+ 1].bp_index
, blocknr
,
2208 key
= nilfs_btree_node_get_key(parent
, path
[level
+ 1].bp_index
);
2209 /* on-disk format */
2210 binfo
->bi_dat
.bi_blkoff
= cpu_to_le64(key
);
2211 binfo
->bi_dat
.bi_level
= level
;
2216 static int nilfs_btree_assign_v(struct nilfs_bmap
*btree
,
2217 struct nilfs_btree_path
*path
,
2219 struct buffer_head
**bh
,
2221 union nilfs_binfo
*binfo
)
2223 struct nilfs_btree_node
*parent
;
2224 struct inode
*dat
= nilfs_bmap_get_dat(btree
);
2227 union nilfs_bmap_ptr_req req
;
2230 parent
= nilfs_btree_get_node(btree
, path
, level
+ 1, &ncmax
);
2231 ptr
= nilfs_btree_node_get_ptr(parent
, path
[level
+ 1].bp_index
,
2234 ret
= nilfs_dat_prepare_start(dat
, &req
.bpr_req
);
2237 nilfs_dat_commit_start(dat
, &req
.bpr_req
, blocknr
);
2239 key
= nilfs_btree_node_get_key(parent
, path
[level
+ 1].bp_index
);
2240 /* on-disk format */
2241 binfo
->bi_v
.bi_vblocknr
= cpu_to_le64(ptr
);
2242 binfo
->bi_v
.bi_blkoff
= cpu_to_le64(key
);
2247 static int nilfs_btree_assign(struct nilfs_bmap
*btree
,
2248 struct buffer_head
**bh
,
2250 union nilfs_binfo
*binfo
)
2252 struct nilfs_btree_path
*path
;
2253 struct nilfs_btree_node
*node
;
2257 path
= nilfs_btree_alloc_path();
2261 if (buffer_nilfs_node(*bh
)) {
2262 node
= (struct nilfs_btree_node
*)(*bh
)->b_data
;
2263 key
= nilfs_btree_node_get_key(node
, 0);
2264 level
= nilfs_btree_node_get_level(node
);
2266 key
= nilfs_bmap_data_get_key(btree
, *bh
);
2267 level
= NILFS_BTREE_LEVEL_DATA
;
2270 ret
= nilfs_btree_do_lookup(btree
, path
, key
, NULL
, level
+ 1, 0);
2272 WARN_ON(ret
== -ENOENT
);
2276 ret
= NILFS_BMAP_USE_VBN(btree
) ?
2277 nilfs_btree_assign_v(btree
, path
, level
, bh
, blocknr
, binfo
) :
2278 nilfs_btree_assign_p(btree
, path
, level
, bh
, blocknr
, binfo
);
2281 nilfs_btree_free_path(path
);
2286 static int nilfs_btree_assign_gc(struct nilfs_bmap
*btree
,
2287 struct buffer_head
**bh
,
2289 union nilfs_binfo
*binfo
)
2291 struct nilfs_btree_node
*node
;
2295 ret
= nilfs_dat_move(nilfs_bmap_get_dat(btree
), (*bh
)->b_blocknr
,
2300 if (buffer_nilfs_node(*bh
)) {
2301 node
= (struct nilfs_btree_node
*)(*bh
)->b_data
;
2302 key
= nilfs_btree_node_get_key(node
, 0);
2304 key
= nilfs_bmap_data_get_key(btree
, *bh
);
2306 /* on-disk format */
2307 binfo
->bi_v
.bi_vblocknr
= cpu_to_le64((*bh
)->b_blocknr
);
2308 binfo
->bi_v
.bi_blkoff
= cpu_to_le64(key
);
2313 static int nilfs_btree_mark(struct nilfs_bmap
*btree
, __u64 key
, int level
)
2315 struct buffer_head
*bh
;
2316 struct nilfs_btree_path
*path
;
2320 path
= nilfs_btree_alloc_path();
2324 ret
= nilfs_btree_do_lookup(btree
, path
, key
, &ptr
, level
+ 1, 0);
2326 WARN_ON(ret
== -ENOENT
);
2329 ret
= nilfs_btree_get_block(btree
, ptr
, &bh
);
2331 WARN_ON(ret
== -ENOENT
);
2335 if (!buffer_dirty(bh
))
2336 mark_buffer_dirty(bh
);
2338 if (!nilfs_bmap_dirty(btree
))
2339 nilfs_bmap_set_dirty(btree
);
2342 nilfs_btree_free_path(path
);
2346 static const struct nilfs_bmap_operations nilfs_btree_ops
= {
2347 .bop_lookup
= nilfs_btree_lookup
,
2348 .bop_lookup_contig
= nilfs_btree_lookup_contig
,
2349 .bop_insert
= nilfs_btree_insert
,
2350 .bop_delete
= nilfs_btree_delete
,
2353 .bop_propagate
= nilfs_btree_propagate
,
2355 .bop_lookup_dirty_buffers
= nilfs_btree_lookup_dirty_buffers
,
2357 .bop_assign
= nilfs_btree_assign
,
2358 .bop_mark
= nilfs_btree_mark
,
2360 .bop_seek_key
= nilfs_btree_seek_key
,
2361 .bop_last_key
= nilfs_btree_last_key
,
2363 .bop_check_insert
= NULL
,
2364 .bop_check_delete
= nilfs_btree_check_delete
,
2365 .bop_gather_data
= nilfs_btree_gather_data
,
2368 static const struct nilfs_bmap_operations nilfs_btree_ops_gc
= {
2370 .bop_lookup_contig
= NULL
,
2375 .bop_propagate
= nilfs_btree_propagate_gc
,
2377 .bop_lookup_dirty_buffers
= nilfs_btree_lookup_dirty_buffers
,
2379 .bop_assign
= nilfs_btree_assign_gc
,
2382 .bop_seek_key
= NULL
,
2383 .bop_last_key
= NULL
,
2385 .bop_check_insert
= NULL
,
2386 .bop_check_delete
= NULL
,
2387 .bop_gather_data
= NULL
,
2390 static void __nilfs_btree_init(struct nilfs_bmap
*bmap
)
2392 bmap
->b_ops
= &nilfs_btree_ops
;
2393 bmap
->b_nchildren_per_block
=
2394 NILFS_BTREE_NODE_NCHILDREN_MAX(nilfs_btree_node_size(bmap
));
2397 int nilfs_btree_init(struct nilfs_bmap
*bmap
)
2401 __nilfs_btree_init(bmap
);
2403 if (nilfs_btree_root_broken(nilfs_btree_get_root(bmap
),
2404 bmap
->b_inode
->i_ino
))
2409 void nilfs_btree_init_gc(struct nilfs_bmap
*bmap
)
2411 bmap
->b_ops
= &nilfs_btree_ops_gc
;
2412 bmap
->b_nchildren_per_block
=
2413 NILFS_BTREE_NODE_NCHILDREN_MAX(nilfs_btree_node_size(bmap
));