/*
 * Copyright (C) 2007,2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "locking.h"
static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_path *path, int level);
static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *ins_key,
		      struct btrfs_path *path, int data_size, int extend);
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *dst,
			  struct extent_buffer *src, int empty);
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct extent_buffer *dst_buf,
			      struct extent_buffer *src_buf);
static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		   struct btrfs_path *path, int level, int slot);
struct btrfs_path *btrfs_alloc_path(void)
{
	struct btrfs_path *path;
	path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
	return path;
}
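/*
 * Usage note (illustrative, not part of the original file): paths are heap
 * objects, so the usual pattern is btrfs_alloc_path()/btrfs_free_path().
 * btrfs_release_path() can be called in between to drop all locks and
 * extent buffer references while keeping the allocation for reuse.
 */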
/*
 * set all locked nodes in the path to blocking locks.  This should
 * be done before scheduling
 */
noinline void btrfs_set_path_blocking(struct btrfs_path *p)
{
	int i;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		if (!p->nodes[i] || !p->locks[i])
			continue;
		btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]);
		if (p->locks[i] == BTRFS_READ_LOCK)
			p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
		else if (p->locks[i] == BTRFS_WRITE_LOCK)
			p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
	}
}
/*
 * reset all the locked nodes in the path to spinning locks.
 *
 * held is used to keep lockdep happy, when lockdep is enabled
 * we set held to a blocking lock before we go around and
 * retake all the spinlocks in the path.  You can safely use NULL
 * for held
 */
noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
					struct extent_buffer *held, int held_rw)
{
	int i;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/* lockdep really cares that we take all of these spinlocks
	 * in the right order.  If any of the locks in the path are not
	 * currently blocking, it is going to complain.  So, make really
	 * really sure by forcing the path to blocking before we clear
	 * the path blocking.
	 */
	if (held) {
		btrfs_set_lock_blocking_rw(held, held_rw);
		if (held_rw == BTRFS_WRITE_LOCK)
			held_rw = BTRFS_WRITE_LOCK_BLOCKING;
		else if (held_rw == BTRFS_READ_LOCK)
			held_rw = BTRFS_READ_LOCK_BLOCKING;
	}
	btrfs_set_path_blocking(p);
#endif

	for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
		if (p->nodes[i] && p->locks[i]) {
			btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]);
			if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING)
				p->locks[i] = BTRFS_WRITE_LOCK;
			else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING)
				p->locks[i] = BTRFS_READ_LOCK;
		}
	}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (held)
		btrfs_clear_lock_blocking_rw(held, held_rw);
#endif
}
/* this also releases the path */
void btrfs_free_path(struct btrfs_path *p)
{
	if (!p)
		return;
	btrfs_release_path(p);
	kmem_cache_free(btrfs_path_cachep, p);
}
/*
 * path release drops references on the extent buffers in the path
 * and it drops any locks held by this path
 *
 * It is safe to call this on paths that hold no locks or extent buffers.
 */
noinline void btrfs_release_path(struct btrfs_path *p)
{
	int i;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		p->slots[i] = 0;
		if (!p->nodes[i])
			continue;
		if (p->locks[i]) {
			btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
			p->locks[i] = 0;
		}
		free_extent_buffer(p->nodes[i]);
		p->nodes[i] = NULL;
	}
}
/*
 * safely gets a reference on the root node of a tree.  A lock
 * is not taken, so a concurrent writer may put a different node
 * at the root of the tree.  See btrfs_lock_root_node for the
 * looping required.
 *
 * The extent buffer returned by this has a reference taken, so
 * it won't disappear.  It may stop being the root of the tree
 * at any time because there are no locks held.
 */
struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	rcu_read_lock();
	eb = rcu_dereference(root->node);
	extent_buffer_get(eb);
	rcu_read_unlock();
	return eb;
}
/* loop around taking references on and locking the root node of the
 * tree until you end up with a lock on the root.  A locked buffer
 * is returned, with a reference held.
 */
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}
/* loop around taking references on and locking the root node of the
 * tree until you end up with a lock on the root.  A locked buffer
 * is returned, with a reference held.
 */
struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_read_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}
/* cowonly root (everything not a reference counted cow subvolume), just get
 * put onto a simple dirty list.  transaction.c walks this to make sure they
 * get properly updated on disk.
 */
static void add_root_to_dirty_list(struct btrfs_root *root)
{
	if (root->track_dirty && list_empty(&root->dirty_list)) {
		list_add(&root->dirty_list,
			 &root->fs_info->dirty_cowonly_roots);
	}
}
/*
 * used by snapshot creation to make a copy of a root for a tree with
 * a given objectid.  The buffer with the new root node is returned in
 * cow_ret, and this func returns zero on success or a negative error code.
 */
int btrfs_copy_root(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root,
		      struct extent_buffer *buf,
		      struct extent_buffer **cow_ret, u64 new_root_objectid)
{
	struct extent_buffer *cow;
	int ret = 0;
	int level;
	struct btrfs_disk_key disk_key;

	WARN_ON(root->ref_cows && trans->transid !=
		root->fs_info->running_transaction->transid);
	WARN_ON(root->ref_cows && trans->transid != root->last_trans);

	level = btrfs_header_level(buf);
	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	cow = btrfs_alloc_free_block(trans, root, buf->len, 0,
				     new_root_objectid, &disk_key, level,
				     buf->start, 0, 1);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	copy_extent_buffer(cow, buf, 0, 0, cow->len);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, new_root_objectid);

	write_extent_buffer(cow, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(cow),
			    BTRFS_FSID_SIZE);

	WARN_ON(btrfs_header_generation(buf) > trans->transid);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		ret = btrfs_inc_ref(trans, root, cow, 1, 1);
	else
		ret = btrfs_inc_ref(trans, root, cow, 0, 1);
	if (ret)
		return ret;

	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}
/*
 * check if the tree block can be shared by multiple trees
 */
int btrfs_block_can_be_shared(struct btrfs_root *root,
			      struct extent_buffer *buf)
{
	/*
	 * Tree blocks not in reference counted trees and tree roots
	 * are never shared. If a block was allocated after the last
	 * snapshot and the block was not allocated by tree relocation,
	 * we know the block is not shared.
	 */
	if (root->ref_cows &&
	    buf != root->node && buf != root->commit_root &&
	    (btrfs_header_generation(buf) <=
	     btrfs_root_last_snapshot(&root->root_item) ||
	     btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
		return 1;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (root->ref_cows &&
	    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
		return 1;
#endif
	return 0;
}
static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct extent_buffer *buf,
				       struct extent_buffer *cow,
				       int *last_ref)
{
	u64 refs;
	u64 owner;
	u64 flags;
	u64 new_flags = 0;
	int ret;

	/*
	 * Backrefs update rules:
	 *
	 * Always use full backrefs for extent pointers in tree block
	 * allocated by tree relocation.
	 *
	 * If a shared tree block is no longer referenced by its owner
	 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
	 * use full backrefs for extent pointers in tree block.
	 *
	 * If a tree block is being relocated
	 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
	 * use full backrefs for extent pointers in tree block.
	 * The reason for this is some operations (such as drop tree)
	 * are only allowed for blocks that use full backrefs.
	 */

	if (btrfs_block_can_be_shared(root, buf)) {
		ret = btrfs_lookup_extent_info(trans, root, buf->start,
					       buf->len, &refs, &flags);
		BUG_ON(ret);
		BUG_ON(refs == 0);
	} else {
		refs = 1;
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
		else
			flags = 0;
	}

	owner = btrfs_header_owner(buf);
	BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
	       !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));

	if (refs > 1) {
		if ((owner == root->root_key.objectid ||
		     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
		    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
			ret = btrfs_inc_ref(trans, root, buf, 1, 1);
			BUG_ON(ret);

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID) {
				ret = btrfs_dec_ref(trans, root, buf, 0, 1);
				BUG_ON(ret);
				ret = btrfs_inc_ref(trans, root, cow, 1, 1);
				BUG_ON(ret);
			}
			new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
		} else {
			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0, 1);
			BUG_ON(ret);
		}
		if (new_flags != 0) {
			ret = btrfs_set_disk_extent_flags(trans, root,
							  buf->start,
							  buf->len,
							  new_flags, 0);
			BUG_ON(ret);
		}
	} else {
		if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0, 1);
			BUG_ON(ret);
			ret = btrfs_dec_ref(trans, root, buf, 1, 1);
			BUG_ON(ret);
		}
		clean_tree_block(trans, root, buf);
		*last_ref = 1;
	}
	return 0;
}
/*
 * does the dirty work in cow of a single block.  The parent block (if
 * supplied) is updated to point to the new cow copy.  The new buffer is marked
 * dirty and returned locked.  If you modify the block it needs to be marked
 * dirty again.
 *
 * search_start -- an allocation hint for the new block
 *
 * empty_size -- a hint that you plan on doing more cow.  This is the size in
 * bytes the allocator should try to find free next to the block it returns.
 * This is just a hint and may be ignored by the allocator.
 */
static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct extent_buffer *buf,
			     struct extent_buffer *parent, int parent_slot,
			     struct extent_buffer **cow_ret,
			     u64 search_start, u64 empty_size)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *cow;
	int level;
	int last_ref = 0;
	int unlock_orig = 0;
	u64 parent_start;

	if (*cow_ret == buf)
		unlock_orig = 1;

	btrfs_assert_tree_locked(buf);

	WARN_ON(root->ref_cows && trans->transid !=
		root->fs_info->running_transaction->transid);
	WARN_ON(root->ref_cows && trans->transid != root->last_trans);

	level = btrfs_header_level(buf);

	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
		if (parent)
			parent_start = parent->start;
		else
			parent_start = 0;
	} else
		parent_start = 0;

	cow = btrfs_alloc_free_block(trans, root, buf->len, parent_start,
				     root->root_key.objectid, &disk_key,
				     level, search_start, empty_size, 1);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	/* cow is set to blocking by btrfs_init_new_buffer */

	copy_extent_buffer(cow, buf, 0, 0, cow->len);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, root->root_key.objectid);

	write_extent_buffer(cow, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(cow),
			    BTRFS_FSID_SIZE);

	update_ref_for_cow(trans, root, buf, cow, &last_ref);

	if (root->ref_cows)
		btrfs_reloc_cow_block(trans, root, buf, cow);

	if (buf == root->node) {
		WARN_ON(parent && parent != buf);
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			parent_start = buf->start;
		else
			parent_start = 0;

		extent_buffer_get(cow);
		rcu_assign_pointer(root->node, cow);

		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref, 1);
		free_extent_buffer(buf);
		add_root_to_dirty_list(root);
	} else {
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
			parent_start = parent->start;
		else
			parent_start = 0;

		WARN_ON(trans->transid != btrfs_header_generation(parent));
		btrfs_set_node_blockptr(parent, parent_slot,
					cow->start);
		btrfs_set_node_ptr_generation(parent, parent_slot,
					      trans->transid);
		btrfs_mark_buffer_dirty(parent);
		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref, 1);
	}
	if (unlock_orig)
		btrfs_tree_unlock(buf);
	free_extent_buffer(buf);
	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}
static inline int should_cow_block(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct extent_buffer *buf)
{
	/* ensure we can see the force_cow */
	smp_rmb();

	/*
	 * We do not need to cow a block if
	 * 1) this block is not created or changed in this transaction;
	 * 2) this block does not belong to TREE_RELOC tree;
	 * 3) the root is not forced COW.
	 *
	 * What is forced COW:
	 *    when we create snapshot during committing the transaction,
	 *    after we've finished copying src root, we must COW the shared
	 *    block to ensure the metadata consistency.
	 */
	if (btrfs_header_generation(buf) == trans->transid &&
	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
	    !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
	      btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
	    !root->force_cow)
		return 0;
	return 1;
}
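/*
 * Example of the decision above: a leaf created in an earlier,
 * already-committed transaction fails the generation check and must be
 * COWed before it can be modified, while a block allocated in the current
 * transaction (and not yet written back) can be modified in place.
 */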
/*
 * cows a single block, see __btrfs_cow_block for the real work.
 * This version of it has extra checks so that a block isn't cow'd more than
 * once per transaction, as long as it hasn't been written yet
 */
noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, struct extent_buffer *buf,
		    struct extent_buffer *parent, int parent_slot,
		    struct extent_buffer **cow_ret)
{
	u64 search_start;
	int ret;

	if (trans->transaction != root->fs_info->running_transaction) {
		printk(KERN_CRIT "trans %llu running %llu\n",
		       (unsigned long long)trans->transid,
		       (unsigned long long)
		       root->fs_info->running_transaction->transid);
		WARN_ON(1);
	}
	if (trans->transid != root->fs_info->generation) {
		printk(KERN_CRIT "trans %llu running %llu\n",
		       (unsigned long long)trans->transid,
		       (unsigned long long)root->fs_info->generation);
		WARN_ON(1);
	}

	if (!should_cow_block(trans, root, buf)) {
		*cow_ret = buf;
		return 0;
	}

	search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1);

	if (parent)
		btrfs_set_lock_blocking(parent);
	btrfs_set_lock_blocking(buf);

	ret = __btrfs_cow_block(trans, root, buf, parent,
				 parent_slot, cow_ret, search_start, 0);

	trace_btrfs_cow_block(root, buf, *cow_ret);

	return ret;
}
/*
 * helper function for defrag to decide if two blocks pointed to by a
 * node are actually close by
 */
static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
{
	if (blocknr < other && other - (blocknr + blocksize) < 32768)
		return 1;
	if (blocknr > other && blocknr - (other + blocksize) < 32768)
		return 1;
	return 0;
}
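/*
 * Worked example (illustrative): with a 4K blocksize, blocknr 100K and
 * other 120K are "close" (120K - (100K + 4K) = 16K < 32K), while blocknr
 * 100K and other 200K are not (200K - (100K + 4K) = 96K >= 32K).
 */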
/*
 * compare two keys in a memcmp fashion
 */
static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
{
	struct btrfs_key k1;

	btrfs_disk_key_to_cpu(&k1, disk);

	return btrfs_comp_cpu_keys(&k1, k2);
}
/*
 * same as comp_keys only with two btrfs_key's
 */
int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
{
	if (k1->objectid > k2->objectid)
		return 1;
	if (k1->objectid < k2->objectid)
		return -1;
	if (k1->type > k2->type)
		return 1;
	if (k1->type < k2->type)
		return -1;
	if (k1->offset > k2->offset)
		return 1;
	if (k1->offset < k2->offset)
		return -1;
	return 0;
}
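/*
 * Example of the resulting ordering: keys compare first by objectid, then
 * by type, then by offset, so (256, BTRFS_EXTENT_DATA_KEY, 0) sorts before
 * (256, BTRFS_EXTENT_DATA_KEY, 4096), and both sort before any key with
 * objectid 257.
 */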
/*
 * this is used by the defrag code to go through all the
 * leaves pointed to by a node and reallocate them so that
 * disk order is close to key order
 */
int btrfs_realloc_node(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct extent_buffer *parent,
		       int start_slot, int cache_only, u64 *last_ret,
		       struct btrfs_key *progress)
{
	struct extent_buffer *cur;
	u64 blocknr;
	u64 gen;
	u64 search_start = *last_ret;
	u64 last_block = 0;
	u64 other;
	u32 parent_nritems;
	int end_slot;
	int i;
	int err = 0;
	int parent_level;
	int uptodate;
	u32 blocksize;
	int progress_passed = 0;
	struct btrfs_disk_key disk_key;

	parent_level = btrfs_header_level(parent);
	if (cache_only && parent_level != 1)
		return 0;

	if (trans->transaction != root->fs_info->running_transaction)
		WARN_ON(1);
	if (trans->transid != root->fs_info->generation)
		WARN_ON(1);

	parent_nritems = btrfs_header_nritems(parent);
	blocksize = btrfs_level_size(root, parent_level - 1);
	end_slot = parent_nritems;

	if (parent_nritems == 1)
		return 0;

	btrfs_set_lock_blocking(parent);

	for (i = start_slot; i < end_slot; i++) {
		int close = 1;

		btrfs_node_key(parent, &disk_key, i);
		if (!progress_passed && comp_keys(&disk_key, progress) < 0)
			continue;

		progress_passed = 1;
		blocknr = btrfs_node_blockptr(parent, i);
		gen = btrfs_node_ptr_generation(parent, i);
		if (last_block == 0)
			last_block = blocknr;

		if (i > 0) {
			other = btrfs_node_blockptr(parent, i - 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (!close && i < end_slot - 2) {
			other = btrfs_node_blockptr(parent, i + 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (close) {
			last_block = blocknr;
			continue;
		}

		cur = btrfs_find_tree_block(root, blocknr, blocksize);
		if (cur)
			uptodate = btrfs_buffer_uptodate(cur, gen);
		else
			uptodate = 0;
		if (!cur || !uptodate) {
			if (cache_only) {
				free_extent_buffer(cur);
				continue;
			}
			if (!cur) {
				cur = read_tree_block(root, blocknr,
							 blocksize, gen);
				if (!cur)
					return -EIO;
			} else if (!uptodate) {
				btrfs_read_buffer(cur, gen);
			}
		}
		if (search_start == 0)
			search_start = last_block;

		btrfs_tree_lock(cur);
		btrfs_set_lock_blocking(cur);
		err = __btrfs_cow_block(trans, root, cur, parent, i,
					&cur, search_start,
					min(16 * blocksize,
					    (end_slot - i) * blocksize));
		if (err) {
			btrfs_tree_unlock(cur);
			free_extent_buffer(cur);
			break;
		}
		search_start = cur->start;
		last_block = cur->start;
		*last_ret = search_start;
		btrfs_tree_unlock(cur);
		free_extent_buffer(cur);
	}
	return err;
}
/*
 * The leaf data grows from end-to-front in the node.
 * this returns the address of the start of the last item,
 * which is the stop of the leaf data stack
 */
static inline unsigned int leaf_data_end(struct btrfs_root *root,
					 struct extent_buffer *leaf)
{
	u32 nr = btrfs_header_nritems(leaf);
	if (nr == 0)
		return BTRFS_LEAF_DATA_SIZE(root);
	return btrfs_item_offset_nr(leaf, nr - 1);
}
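/*
 * Leaf layout for reference: the array of struct btrfs_item headers grows
 * forward from the header, while the item data grows backward from the
 * end of the block, so the free space is the gap in the middle:
 *
 *	[header][item 0][item 1]...[free space]...[data 1][data 0]
 *
 * leaf_data_end() returns the offset where that backward-growing data
 * region currently starts.
 */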
/*
 * search for key in the extent_buffer.  The items start at offset p,
 * and they are item_size apart.  There are 'max' items in p.
 *
 * the slot in the array is returned via slot, and it points to
 * the place where you would insert key if it is not found in
 * the array.
 *
 * slot may point to max if the key is bigger than all of the keys
 */
static noinline int generic_bin_search(struct extent_buffer *eb,
				       unsigned long p,
				       int item_size, struct btrfs_key *key,
				       int max, int *slot)
{
	int low = 0;
	int high = max;
	int mid;
	int ret;
	struct btrfs_disk_key *tmp = NULL;
	struct btrfs_disk_key unaligned;
	unsigned long offset;
	char *kaddr = NULL;
	unsigned long map_start = 0;
	unsigned long map_len = 0;
	int err;

	while (low < high) {
		mid = (low + high) / 2;
		offset = p + mid * item_size;

		if (!kaddr || offset < map_start ||
		    (offset + sizeof(struct btrfs_disk_key)) >
		    map_start + map_len) {

			err = map_private_extent_buffer(eb, offset,
						sizeof(struct btrfs_disk_key),
						&kaddr, &map_start, &map_len);

			if (!err) {
				tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
			} else {
				read_extent_buffer(eb, &unaligned,
						   offset, sizeof(unaligned));
				tmp = &unaligned;
			}

		} else {
			tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
		}
		ret = comp_keys(tmp, key);

		if (ret < 0)
			low = mid + 1;
		else if (ret > 0)
			high = mid;
		else {
			*slot = mid;
			return 0;
		}
	}
	*slot = low;
	return 1;
}
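/*
 * The contract mirrors a classic lower-bound binary search: on an exact
 * match the function returns 0 with *slot set to the matching index;
 * otherwise it returns 1 with *slot set to the index where the key would
 * be inserted (possibly 'max', one past the last item).
 */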
/*
 * simple bin_search frontend that does the right thing for
 * leaves vs nodes
 */
static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
		      int level, int *slot)
{
	if (level == 0) {
		return generic_bin_search(eb,
					  offsetof(struct btrfs_leaf, items),
					  sizeof(struct btrfs_item),
					  key, btrfs_header_nritems(eb),
					  slot);
	} else {
		return generic_bin_search(eb,
					  offsetof(struct btrfs_node, ptrs),
					  sizeof(struct btrfs_key_ptr),
					  key, btrfs_header_nritems(eb),
					  slot);
	}
}

int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
		     int level, int *slot)
{
	return bin_search(eb, key, level, slot);
}
static void root_add_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) + size);
	spin_unlock(&root->accounting_lock);
}

static void root_sub_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) - size);
	spin_unlock(&root->accounting_lock);
}
/* given a node and slot number, this reads the blocks it points to.  The
 * extent buffer is returned with a reference taken (but unlocked).
 * NULL is returned on error.
 */
static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
				   struct extent_buffer *parent, int slot)
{
	int level = btrfs_header_level(parent);
	if (slot < 0)
		return NULL;
	if (slot >= btrfs_header_nritems(parent))
		return NULL;

	BUG_ON(level == 0);

	return read_tree_block(root, btrfs_node_blockptr(parent, slot),
		       btrfs_level_size(root, level - 1),
		       btrfs_node_ptr_generation(parent, slot));
}
/*
 * node level balancing, used to make sure nodes are in proper order for
 * item deletion.  We balance from the top down, so we have to make sure
 * that a deletion won't leave a node completely empty later on.
 */
static noinline int balance_level(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 struct btrfs_path *path, int level)
{
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];
	u64 orig_ptr;

	if (level == 0)
		return 0;

	mid = path->nodes[level];

	WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK &&
		path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING);
	WARN_ON(btrfs_header_generation(mid) != trans->transid);

	orig_ptr = btrfs_node_blockptr(mid, orig_slot);

	if (level < BTRFS_MAX_LEVEL - 1) {
		parent = path->nodes[level + 1];
		pslot = path->slots[level + 1];
	}

	/*
	 * deal with the case where there is only one pointer in the root
	 * by promoting the node below to a root
	 */
	if (!parent) {
		struct extent_buffer *child;

		if (btrfs_header_nritems(mid) != 1)
			return 0;

		/* promote the child to a root */
		child = read_node_slot(root, mid, 0);
		BUG_ON(!child);
		btrfs_tree_lock(child);
		btrfs_set_lock_blocking(child);
		ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
		if (ret) {
			btrfs_tree_unlock(child);
			free_extent_buffer(child);
			goto enospc;
		}

		rcu_assign_pointer(root->node, child);

		add_root_to_dirty_list(root);
		btrfs_tree_unlock(child);

		path->locks[level] = 0;
		path->nodes[level] = NULL;
		clean_tree_block(trans, root, mid);
		btrfs_tree_unlock(mid);
		/* once for the path */
		free_extent_buffer(mid);

		root_sub_used(root, mid->len);
		btrfs_free_tree_block(trans, root, mid, 0, 1, 0);
		/* once for the root ptr */
		free_extent_buffer(mid);
		return 0;
	}
	if (btrfs_header_nritems(mid) >
	    BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
		return 0;

	btrfs_header_nritems(mid);

	left = read_node_slot(root, parent, pslot - 1);
	if (left) {
		btrfs_tree_lock(left);
		btrfs_set_lock_blocking(left);
		wret = btrfs_cow_block(trans, root, left,
				       parent, pslot - 1, &left);
		if (wret) {
			ret = wret;
			goto enospc;
		}
	}
	right = read_node_slot(root, parent, pslot + 1);
	if (right) {
		btrfs_tree_lock(right);
		btrfs_set_lock_blocking(right);
		wret = btrfs_cow_block(trans, root, right,
				       parent, pslot + 1, &right);
		if (wret) {
			ret = wret;
			goto enospc;
		}
	}

	/* first, try to make some room in the middle buffer */
	if (left) {
		orig_slot += btrfs_header_nritems(left);
		wret = push_node_left(trans, root, left, mid, 1);
		if (wret < 0)
			ret = wret;
		btrfs_header_nritems(mid);
	}

	/*
	 * then try to empty the right most buffer into the middle
	 */
	if (right) {
		wret = push_node_left(trans, root, mid, right, 1);
		if (wret < 0 && wret != -ENOSPC)
			ret = wret;
		if (btrfs_header_nritems(right) == 0) {
			clean_tree_block(trans, root, right);
			btrfs_tree_unlock(right);
			wret = del_ptr(trans, root, path, level + 1, pslot +
				       1);
			if (wret)
				ret = wret;
			root_sub_used(root, right->len);
			btrfs_free_tree_block(trans, root, right, 0, 1, 0);
			free_extent_buffer(right);
			right = NULL;
		} else {
			struct btrfs_disk_key right_key;
			btrfs_node_key(right, &right_key, 0);
			btrfs_set_node_key(parent, &right_key, pslot + 1);
			btrfs_mark_buffer_dirty(parent);
		}
	}
	if (btrfs_header_nritems(mid) == 1) {
		/*
		 * we're not allowed to leave a node with one item in the
		 * tree during a delete.  A deletion from lower in the tree
		 * could try to delete the only pointer in this node.
		 * So, pull some keys from the left.
		 * There has to be a left pointer at this point because
		 * otherwise we would have pulled some pointers from the
		 * right
		 */
		BUG_ON(!left);
		wret = balance_node_right(trans, root, mid, left);
		if (wret < 0) {
			ret = wret;
			goto enospc;
		}
		if (wret == 1) {
			wret = push_node_left(trans, root, left, mid, 1);
			if (wret < 0)
				ret = wret;
		}
		BUG_ON(wret == 1);
	}
	if (btrfs_header_nritems(mid) == 0) {
		clean_tree_block(trans, root, mid);
		btrfs_tree_unlock(mid);
		wret = del_ptr(trans, root, path, level + 1, pslot);
		if (wret)
			ret = wret;
		root_sub_used(root, mid->len);
		btrfs_free_tree_block(trans, root, mid, 0, 1, 0);
		free_extent_buffer(mid);
		mid = NULL;
	} else {
		/* update the parent key to reflect our changes */
		struct btrfs_disk_key mid_key;
		btrfs_node_key(mid, &mid_key, 0);
		btrfs_set_node_key(parent, &mid_key, pslot);
		btrfs_mark_buffer_dirty(parent);
	}

	/* update the path */
	if (left) {
		if (btrfs_header_nritems(left) > orig_slot) {
			extent_buffer_get(left);
			/* left was locked after cow */
			path->nodes[level] = left;
			path->slots[level + 1] -= 1;
			path->slots[level] = orig_slot;
			if (mid) {
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			}
		} else {
			orig_slot -= btrfs_header_nritems(left);
			path->slots[level] = orig_slot;
		}
	}
	/* double check we haven't messed things up */
	if (orig_ptr !=
	    btrfs_node_blockptr(path->nodes[level], path->slots[level]))
		BUG();
enospc:
	if (right) {
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	if (left) {
		if (path->nodes[level] != left)
			btrfs_tree_unlock(left);
		free_extent_buffer(left);
	}
	return ret;
}
/* Node balancing for insertion.  Here we only split or push nodes around
 * when they are completely full.  This is also done top down, so we
 * have to be pessimistic.
 */
static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path, int level)
{
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];

	if (level == 0)
		return 1;

	mid = path->nodes[level];
	WARN_ON(btrfs_header_generation(mid) != trans->transid);

	if (level < BTRFS_MAX_LEVEL - 1) {
		parent = path->nodes[level + 1];
		pslot = path->slots[level + 1];
	}

	if (!parent)
		return 1;

	left = read_node_slot(root, parent, pslot - 1);

	/* first, try to make some room in the middle buffer */
	if (left) {
		u32 left_nr;

		btrfs_tree_lock(left);
		btrfs_set_lock_blocking(left);

		left_nr = btrfs_header_nritems(left);
		if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, left, parent,
					      pslot - 1, &left);
			if (ret)
				wret = 1;
			else {
				wret = push_node_left(trans, root,
						      left, mid, 0);
			}
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			struct btrfs_disk_key disk_key;
			orig_slot += left_nr;
			btrfs_node_key(mid, &disk_key, 0);
			btrfs_set_node_key(parent, &disk_key, pslot);
			btrfs_mark_buffer_dirty(parent);
			if (btrfs_header_nritems(left) > orig_slot) {
				path->nodes[level] = left;
				path->slots[level + 1] -= 1;
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			} else {
				orig_slot -=
					btrfs_header_nritems(left);
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(left);
				free_extent_buffer(left);
			}
			return 0;
		}
		btrfs_tree_unlock(left);
		free_extent_buffer(left);
	}
	right = read_node_slot(root, parent, pslot + 1);

	/*
	 * then try to empty the right most buffer into the middle
	 */
	if (right) {
		u32 right_nr;

		btrfs_tree_lock(right);
		btrfs_set_lock_blocking(right);

		right_nr = btrfs_header_nritems(right);
		if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, right,
					      parent, pslot + 1,
					      &right);
			if (ret)
				wret = 1;
			else {
				wret = balance_node_right(trans, root,
							  right, mid);
			}
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			struct btrfs_disk_key disk_key;

			btrfs_node_key(right, &disk_key, 0);
			btrfs_set_node_key(parent, &disk_key, pslot + 1);
			btrfs_mark_buffer_dirty(parent);

			if (btrfs_header_nritems(mid) <= orig_slot) {
				path->nodes[level] = right;
				path->slots[level + 1] += 1;
				path->slots[level] = orig_slot -
					btrfs_header_nritems(mid);
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			} else {
				btrfs_tree_unlock(right);
				free_extent_buffer(right);
			}
			return 0;
		}
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	return 1;
}
/*
 * readahead one full node of leaves, finding things that are close
 * to the block in 'slot', and triggering ra on them.
 */
static void reada_for_search(struct btrfs_root *root,
			     struct btrfs_path *path,
			     int level, int slot, u64 objectid)
{
	struct extent_buffer *node;
	struct btrfs_disk_key disk_key;
	u32 nritems;
	u64 search;
	u64 target;
	u64 nread = 0;
	u64 gen;
	int direction = path->reada;
	struct extent_buffer *eb;
	u32 nr;
	u32 blocksize;
	u32 nscan = 0;

	if (level != 1)
		return;

	if (!path->nodes[level])
		return;

	node = path->nodes[level];

	search = btrfs_node_blockptr(node, slot);
	blocksize = btrfs_level_size(root, level - 1);
	eb = btrfs_find_tree_block(root, search, blocksize);
	if (eb) {
		free_extent_buffer(eb);
		return;
	}

	target = search;

	nritems = btrfs_header_nritems(node);
	nr = slot;

	while (1) {
		if (direction < 0) {
			if (nr == 0)
				break;
			nr--;
		} else if (direction > 0) {
			nr++;
			if (nr >= nritems)
				break;
		}
		if (path->reada < 0 && objectid) {
			btrfs_node_key(node, &disk_key, nr);
			if (btrfs_disk_key_objectid(&disk_key) != objectid)
				break;
		}
		search = btrfs_node_blockptr(node, nr);
		if ((search <= target && target - search <= 65536) ||
		    (search > target && search - target <= 65536)) {
			gen = btrfs_node_ptr_generation(node, nr);
			readahead_tree_block(root, search, blocksize, gen);
			nread += blocksize;
		}
		nscan++;
		if ((nread > 65536 || nscan > 32))
			break;
	}
}
/*
 * returns -EAGAIN if it had to drop the path, or zero if everything was in
 * cache
 */
static noinline int reada_for_balance(struct btrfs_root *root,
				      struct btrfs_path *path, int level)
{
	int slot;
	int nritems;
	struct extent_buffer *parent;
	struct extent_buffer *eb;
	u64 gen;
	u64 block1 = 0;
	u64 block2 = 0;
	int ret = 0;
	int blocksize;

	parent = path->nodes[level + 1];
	if (!parent)
		return 0;

	nritems = btrfs_header_nritems(parent);
	slot = path->slots[level + 1];
	blocksize = btrfs_level_size(root, level);

	if (slot > 0) {
		block1 = btrfs_node_blockptr(parent, slot - 1);
		gen = btrfs_node_ptr_generation(parent, slot - 1);
		eb = btrfs_find_tree_block(root, block1, blocksize);
		if (eb && btrfs_buffer_uptodate(eb, gen))
			block1 = 0;
		free_extent_buffer(eb);
	}
	if (slot + 1 < nritems) {
		block2 = btrfs_node_blockptr(parent, slot + 1);
		gen = btrfs_node_ptr_generation(parent, slot + 1);
		eb = btrfs_find_tree_block(root, block2, blocksize);
		if (eb && btrfs_buffer_uptodate(eb, gen))
			block2 = 0;
		free_extent_buffer(eb);
	}
	if (block1 || block2) {
		ret = -EAGAIN;

		/* release the whole path */
		btrfs_release_path(path);

		/* read the blocks */
		if (block1)
			readahead_tree_block(root, block1, blocksize, 0);
		if (block2)
			readahead_tree_block(root, block2, blocksize, 0);

		if (block1) {
			eb = read_tree_block(root, block1, blocksize, 0);
			free_extent_buffer(eb);
		}
		if (block2) {
			eb = read_tree_block(root, block2, blocksize, 0);
			free_extent_buffer(eb);
		}
	}
	return ret;
}
/*
 * when we walk down the tree, it is usually safe to unlock the higher layers
 * in the tree.  The exceptions are when our path goes through slot 0, because
 * operations on the tree might require changing key pointers higher up in the
 * tree.
 *
 * callers might also have set path->keep_locks, which tells this code to keep
 * the lock if the path points to the last slot in the block.  This is part of
 * walking through the tree, and selecting the next slot in the higher block.
 *
 * lowest_unlock sets the lowest level in the tree we're allowed to unlock.  so
 * if lowest_unlock is 1, level 0 won't be unlocked
 */
static noinline void unlock_up(struct btrfs_path *path, int level,
			       int lowest_unlock)
{
	int i;
	int skip_level = level;
	int no_skips = 0;
	struct extent_buffer *t;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		if (!path->nodes[i])
			break;
		if (!path->locks[i])
			break;
		if (!no_skips && path->slots[i] == 0) {
			skip_level = i + 1;
			continue;
		}
		if (!no_skips && path->keep_locks) {
			u32 nritems;
			t = path->nodes[i];
			nritems = btrfs_header_nritems(t);
			if (nritems < 1 || path->slots[i] >= nritems - 1) {
				skip_level = i + 1;
				continue;
			}
		}
		if (skip_level < i && i >= lowest_unlock)
			no_skips = 1;

		t = path->nodes[i];
		if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
			btrfs_tree_unlock_rw(t, path->locks[i]);
			path->locks[i] = 0;
		}
	}
}
/*
 * This releases any locks held in the path starting at level and
 * going all the way up to the root.
 *
 * btrfs_search_slot will keep the lock held on higher nodes in a few
 * corner cases, such as COW of the block at slot zero in the node.  This
 * ignores those rules, and it should only be called when there are no
 * more updates to be done higher up in the tree.
 */
noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
{
	int i;

	if (path->keep_locks)
		return;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		if (!path->nodes[i])
			continue;
		if (!path->locks[i])
			continue;
		btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
		path->locks[i] = 0;
	}
}
/*
 * helper function for btrfs_search_slot.  The goal is to find a block
 * in cache without setting the path to blocking.  If we find the block
 * we return zero and the path is unchanged.
 *
 * If we can't find the block, we set the path blocking and do some
 * reada.  -EAGAIN is returned and the search must be repeated.
 */
static int
read_block_for_search(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_path *p,
		       struct extent_buffer **eb_ret, int level, int slot,
		       struct btrfs_key *key)
{
	u64 blocknr;
	u64 gen;
	u32 blocksize;
	struct extent_buffer *b = *eb_ret;
	struct extent_buffer *tmp;
	int ret;

	blocknr = btrfs_node_blockptr(b, slot);
	gen = btrfs_node_ptr_generation(b, slot);
	blocksize = btrfs_level_size(root, level - 1);

	tmp = btrfs_find_tree_block(root, blocknr, blocksize);
	if (tmp) {
		if (btrfs_buffer_uptodate(tmp, 0)) {
			if (btrfs_buffer_uptodate(tmp, gen)) {
				/*
				 * we found an up to date block without
				 * sleeping, return right away
				 */
				*eb_ret = tmp;
				return 0;
			}
			/* the pages were up to date, but we failed
			 * the generation number check.  Do a full
			 * read for the generation number that is correct.
			 * We must do this without dropping locks so
			 * we can trust our generation number
			 */
			free_extent_buffer(tmp);
			btrfs_set_path_blocking(p);

			tmp = read_tree_block(root, blocknr, blocksize, gen);
			if (tmp && btrfs_buffer_uptodate(tmp, gen)) {
				*eb_ret = tmp;
				return 0;
			}
			free_extent_buffer(tmp);
			btrfs_release_path(p);
			return -EIO;
		}
	}

	/*
	 * reduce lock contention at high levels
	 * of the btree by dropping locks before
	 * we read.  Don't release the lock on the current
	 * level because we need to walk this node to figure
	 * out which blocks to read.
	 */
	btrfs_unlock_up_safe(p, level + 1);
	btrfs_set_path_blocking(p);

	free_extent_buffer(tmp);
	if (p->reada)
		reada_for_search(root, p, level, slot, key->objectid);

	btrfs_release_path(p);

	ret = -EAGAIN;
	tmp = read_tree_block(root, blocknr, blocksize, 0);
	if (tmp) {
		/*
		 * If the read above didn't mark this buffer up to date,
		 * it will never end up being up to date.  Set ret to EIO now
		 * and give up so that our caller doesn't loop forever
		 * on our EAGAINs.
		 */
		if (!btrfs_buffer_uptodate(tmp, 0))
			ret = -EIO;
		free_extent_buffer(tmp);
	}
	return ret;
}
/*
 * helper function for btrfs_search_slot.  This does all of the checks
 * for node-level blocks and does any balancing required based on
 * the ins_len.
 *
 * If no extra work was required, zero is returned.  If we had to
 * drop the path, -EAGAIN is returned and btrfs_search_slot must
 * start over
 */
static int
setup_nodes_for_search(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_path *p,
		       struct extent_buffer *b, int level, int ins_len,
		       int *write_lock_level)
{
	int ret;

	if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
	    BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
		int sret;

		if (*write_lock_level < level + 1) {
			*write_lock_level = level + 1;
			btrfs_release_path(p);
			goto again;
		}

		sret = reada_for_balance(root, p, level);
		if (sret)
			goto again;

		btrfs_set_path_blocking(p);
		sret = split_node(trans, root, p, level);
		btrfs_clear_path_blocking(p, NULL, 0);

		BUG_ON(sret > 0);
		if (sret) {
			ret = sret;
			goto done;
		}
		b = p->nodes[level];
	} else if (ins_len < 0 && btrfs_header_nritems(b) <
		   BTRFS_NODEPTRS_PER_BLOCK(root) / 2) {
		int sret;

		if (*write_lock_level < level + 1) {
			*write_lock_level = level + 1;
			btrfs_release_path(p);
			goto again;
		}

		sret = reada_for_balance(root, p, level);
		if (sret)
			goto again;

		btrfs_set_path_blocking(p);
		sret = balance_level(trans, root, p, level);
		btrfs_clear_path_blocking(p, NULL, 0);

		if (sret) {
			ret = sret;
			goto done;
		}
		b = p->nodes[level];
		if (!b) {
			btrfs_release_path(p);
			goto again;
		}
		BUG_ON(btrfs_header_nritems(b) == 1);
	}
	return 0;

again:
	ret = -EAGAIN;
done:
	return ret;
}
/*
 * look for key in the tree.  path is filled in with nodes along the way
 * if key is found, we return zero and you can find the item in the leaf
 * level of the path (level 0)
 *
 * If the key isn't found, the path points to the slot where it should
 * be inserted, and 1 is returned.  If there are other errors during the
 * search a negative error number is returned.
 *
 * if ins_len > 0, nodes and leaves will be split as we walk down the
 * tree.  if ins_len < 0, nodes will be merged as we walk down the tree (if
 * possible)
 */
int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *key, struct btrfs_path *p, int
		      ins_len, int cow)
{
	struct extent_buffer *b;
	int slot;
	int ret;
	int err;
	int level;
	int lowest_unlock = 1;
	int root_lock;
	/* everything at write_lock_level or lower must be write locked */
	int write_lock_level = 0;
	u8 lowest_level = 0;

	lowest_level = p->lowest_level;
	WARN_ON(lowest_level && ins_len > 0);
	WARN_ON(p->nodes[0] != NULL);

	if (ins_len < 0) {
		lowest_unlock = 2;

		/* when we are removing items, we might have to go up to level
		 * two as we update tree pointers  Make sure we keep write
		 * for those levels as well
		 */
		write_lock_level = 2;
	} else if (ins_len > 0) {
		/*
		 * for inserting items, make sure we have a write lock on
		 * level 1 so we can update keys
		 */
		write_lock_level = 1;
	}

	if (!cow)
		write_lock_level = -1;

	if (cow && (p->keep_locks || p->lowest_level))
		write_lock_level = BTRFS_MAX_LEVEL;

again:
	/*
	 * we try very hard to do read locks on the root
	 */
	root_lock = BTRFS_READ_LOCK;
	level = 0;
	if (p->search_commit_root) {
		/*
		 * the commit roots are read only
		 * so we always do read locks
		 */
		b = root->commit_root;
		extent_buffer_get(b);
		level = btrfs_header_level(b);
		if (!p->skip_locking)
			btrfs_tree_read_lock(b);
	} else {
		if (p->skip_locking) {
			b = btrfs_root_node(root);
			level = btrfs_header_level(b);
		} else {
			/* we don't know the level of the root node
			 * until we actually have it read locked
			 */
			b = btrfs_read_lock_root_node(root);
			level = btrfs_header_level(b);
			if (level <= write_lock_level) {
				/* whoops, must trade for write lock */
				btrfs_tree_read_unlock(b);
				free_extent_buffer(b);
				b = btrfs_lock_root_node(root);
				root_lock = BTRFS_WRITE_LOCK;

				/* the level might have changed, check again */
				level = btrfs_header_level(b);
			}
		}
	}
	p->nodes[level] = b;
	if (!p->skip_locking)
		p->locks[level] = root_lock;

	while (b) {
		level = btrfs_header_level(b);

		/*
		 * setup the path here so we can release it under lock
		 * contention with the cow code
		 */
		if (cow) {
			/*
			 * if we don't really need to cow this block
			 * then we don't want to set the path blocking,
			 * so we test it here
			 */
			if (!should_cow_block(trans, root, b))
				goto cow_done;

			btrfs_set_path_blocking(p);

			/*
			 * must have write locks on this node and the
			 * parent
			 */
			if (level + 1 > write_lock_level) {
				write_lock_level = level + 1;
				btrfs_release_path(p);
				goto again;
			}

			err = btrfs_cow_block(trans, root, b,
					      p->nodes[level + 1],
					      p->slots[level + 1], &b);
			if (err) {
				ret = err;
				goto done;
			}
		}
cow_done:
		BUG_ON(!cow && ins_len);

		p->nodes[level] = b;
		btrfs_clear_path_blocking(p, NULL, 0);

		/*
		 * we have a lock on b and as long as we aren't changing
		 * the tree, there is no way for the items in b to change.
		 * It is safe to drop the lock on our parent before we
		 * go through the expensive btree search on b.
		 *
		 * If cow is true, then we might be changing slot zero,
		 * which may require changing the parent.  So, we can't
		 * drop the lock until after we know which slot we're
		 * operating on.
		 */
		if (!cow)
			btrfs_unlock_up_safe(p, level + 1);

		ret = bin_search(b, key, level, &slot);

		if (level != 0) {
			int dec = 0;
			if (ret && slot > 0) {
				dec = 1;
				slot -= 1;
			}
			p->slots[level] = slot;
			err = setup_nodes_for_search(trans, root, p, b, level,
						     ins_len, &write_lock_level);
			if (err == -EAGAIN)
				goto again;
			if (err) {
				ret = err;
				goto done;
			}
			b = p->nodes[level];
			slot = p->slots[level];

			/*
			 * slot 0 is special, if we change the key
			 * we have to update the parent pointer
			 * which means we must have a write lock
			 * on the parent
			 */
			if (slot == 0 && cow &&
			    write_lock_level < level + 1) {
				write_lock_level = level + 1;
				btrfs_release_path(p);
				goto again;
			}

			unlock_up(p, level, lowest_unlock);

			if (level == lowest_level) {
				if (dec)
					p->slots[level]++;
				goto done;
			}

			err = read_block_for_search(trans, root, p,
						    &b, level, slot, key);
			if (err == -EAGAIN)
				goto again;
			if (err) {
				ret = err;
				goto done;
			}

			if (!p->skip_locking) {
				level = btrfs_header_level(b);
				if (level <= write_lock_level) {
					err = btrfs_try_tree_write_lock(b);
					if (!err) {
						btrfs_set_path_blocking(p);
						btrfs_tree_lock(b);
						btrfs_clear_path_blocking(p, b,
								  BTRFS_WRITE_LOCK);
					}
					p->locks[level] = BTRFS_WRITE_LOCK;
				} else {
					err = btrfs_try_tree_read_lock(b);
					if (!err) {
						btrfs_set_path_blocking(p);
						btrfs_tree_read_lock(b);
						btrfs_clear_path_blocking(p, b,
								  BTRFS_READ_LOCK);
					}
					p->locks[level] = BTRFS_READ_LOCK;
				}
				p->nodes[level] = b;
			}
		} else {
			p->slots[level] = slot;
			if (ins_len > 0 &&
			    btrfs_leaf_free_space(root, b) < ins_len) {
				if (write_lock_level < 1) {
					write_lock_level = 1;
					btrfs_release_path(p);
					goto again;
				}

				btrfs_set_path_blocking(p);
				err = split_leaf(trans, root, key,
						 p, ins_len, ret == 0);
				btrfs_clear_path_blocking(p, NULL, 0);

				BUG_ON(err > 0);
				if (err) {
					ret = err;
					goto done;
				}
			}
			if (!p->search_for_split)
				unlock_up(p, level, lowest_unlock);
			goto done;
		}
	}
	ret = 1;
done:
	/*
	 * we don't really know what they plan on doing with the path
	 * from here on, so for now just mark it as blocking
	 */
	if (!p->leave_spinning)
		btrfs_set_path_blocking(p);
	if (ret < 0)
		btrfs_release_path(p);
	return ret;
}
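/*
 * Illustrative caller sketch (assumed typical usage, not from this file):
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	struct btrfs_key key = { .objectid = ino,
 *				 .type = BTRFS_INODE_ITEM_KEY,
 *				 .offset = 0 };
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret == 0)
 *		item is at path->nodes[0], path->slots[0]
 *	else if (ret == 1)
 *		not found: the slot is the would-be insertion point
 *	btrfs_free_path(path);
 */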
/*
 * adjust the pointers going up the tree, starting at level
 * making sure the right key of each node points to 'key'.
 * This is used after shifting pointers to the left, so it stops
 * fixing up pointers when a given leaf/node is not in slot 0 of the
 * higher levels
 *
 * If this fails to write a tree block, it returns -1, but continues
 * fixing up the blocks in ram so the tree is consistent.
 */
static int fixup_low_keys(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct btrfs_path *path,
			  struct btrfs_disk_key *key, int level)
{
	int i;
	int ret = 0;
	struct extent_buffer *t;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		int tslot = path->slots[i];
		if (!path->nodes[i])
			break;
		t = path->nodes[i];
		btrfs_set_node_key(t, key, tslot);
		btrfs_mark_buffer_dirty(path->nodes[i]);
		if (tslot != 0)
			break;
	}
	return ret;
}
/*
 * update item key.
 *
 * This function isn't completely safe. It's the caller's responsibility
 * that the new key won't break the order
 */
int btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root, struct btrfs_path *path,
			    struct btrfs_key *new_key)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *eb;
	int slot;

	eb = path->nodes[0];
	slot = path->slots[0];
	if (slot > 0) {
		btrfs_item_key(eb, &disk_key, slot - 1);
		if (comp_keys(&disk_key, new_key) >= 0)
			return -1;
	}
	if (slot < btrfs_header_nritems(eb) - 1) {
		btrfs_item_key(eb, &disk_key, slot + 1);
		if (comp_keys(&disk_key, new_key) <= 0)
			return -1;
	}

	btrfs_cpu_key_to_disk(&disk_key, new_key);
	btrfs_set_item_key(eb, &disk_key, slot);
	btrfs_mark_buffer_dirty(eb);
	if (slot == 0)
		fixup_low_keys(trans, root, path, &disk_key, 1);
	return 0;
}
/*
 * try to push data from one node into the next node left in the
 * tree.
 *
 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
 * error, and > 0 if there was no room in the left hand block.
 */
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *dst,
			  struct extent_buffer *src, int empty)
{
	int push_items = 0;
	int src_nritems;
	int dst_nritems;
	int ret = 0;

	src_nritems = btrfs_header_nritems(src);
	dst_nritems = btrfs_header_nritems(dst);
	push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
	WARN_ON(btrfs_header_generation(src) != trans->transid);
	WARN_ON(btrfs_header_generation(dst) != trans->transid);

	if (!empty && src_nritems <= 8)
		return 1;

	if (push_items <= 0)
		return 1;

	if (empty) {
		push_items = min(src_nritems, push_items);
		if (push_items < src_nritems) {
			/* leave at least 8 pointers in the node if
			 * we aren't going to empty it
			 */
			if (src_nritems - push_items < 8) {
				if (push_items <= 8)
					return 1;
				push_items -= 8;
			}
		}
	} else
		push_items = min(src_nritems - 8, push_items);

	copy_extent_buffer(dst, src,
			   btrfs_node_key_ptr_offset(dst_nritems),
			   btrfs_node_key_ptr_offset(0),
			   push_items * sizeof(struct btrfs_key_ptr));

	if (push_items < src_nritems) {
		memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
				      btrfs_node_key_ptr_offset(push_items),
				      (src_nritems - push_items) *
				      sizeof(struct btrfs_key_ptr));
	}
	btrfs_set_header_nritems(src, src_nritems - push_items);
	btrfs_set_header_nritems(dst, dst_nritems + push_items);
	btrfs_mark_buffer_dirty(src);
	btrfs_mark_buffer_dirty(dst);

	return ret;
}
/*
 * try to push data from one node into the next node right in the
 * tree.
 *
 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
 * error, and > 0 if there was no room in the right hand block.
 *
 * this will only push up to 1/2 the contents of the left node over
 */
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct extent_buffer *dst,
			      struct extent_buffer *src)
{
	int push_items = 0;
	int max_push;
	int src_nritems;
	int dst_nritems;
	int ret = 0;

	WARN_ON(btrfs_header_generation(src) != trans->transid);
	WARN_ON(btrfs_header_generation(dst) != trans->transid);

	src_nritems = btrfs_header_nritems(src);
	dst_nritems = btrfs_header_nritems(dst);
	push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
	if (push_items <= 0)
		return 1;

	if (src_nritems < 4)
		return 1;

	max_push = src_nritems / 2 + 1;
	/* don't try to empty the node */
	if (max_push >= src_nritems)
		return 1;

	if (max_push < push_items)
		push_items = max_push;

	memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
			      btrfs_node_key_ptr_offset(0),
			      (dst_nritems) *
			      sizeof(struct btrfs_key_ptr));

	copy_extent_buffer(dst, src,
			   btrfs_node_key_ptr_offset(0),
			   btrfs_node_key_ptr_offset(src_nritems - push_items),
			   push_items * sizeof(struct btrfs_key_ptr));

	btrfs_set_header_nritems(src, src_nritems - push_items);
	btrfs_set_header_nritems(dst, dst_nritems + push_items);

	btrfs_mark_buffer_dirty(src);
	btrfs_mark_buffer_dirty(dst);

	return ret;
}
/*
 * helper function to insert a new root level in the tree.
 * A new node is allocated, and a single item is inserted to
 * point to the existing root
 *
 * returns zero on success or < 0 on failure.
 */
static noinline int insert_new_root(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_path *path, int level)
{
	u64 lower_gen;
	struct extent_buffer *lower;
	struct extent_buffer *c;
	struct extent_buffer *old;
	struct btrfs_disk_key lower_key;

	BUG_ON(path->nodes[level]);
	BUG_ON(path->nodes[level-1] != root->node);

	lower = path->nodes[level-1];
	if (level == 1)
		btrfs_item_key(lower, &lower_key, 0);
	else
		btrfs_node_key(lower, &lower_key, 0);

	c = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
				   root->root_key.objectid, &lower_key,
				   level, root->node->start, 0, 0);
	if (IS_ERR(c))
		return PTR_ERR(c);

	root_add_used(root, root->nodesize);

	memset_extent_buffer(c, 0, 0, sizeof(struct btrfs_header));
	btrfs_set_header_nritems(c, 1);
	btrfs_set_header_level(c, level);
	btrfs_set_header_bytenr(c, c->start);
	btrfs_set_header_generation(c, trans->transid);
	btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(c, root->root_key.objectid);

	write_extent_buffer(c, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(c),
			    BTRFS_FSID_SIZE);

	write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
			    (unsigned long)btrfs_header_chunk_tree_uuid(c),
			    BTRFS_UUID_SIZE);

	btrfs_set_node_key(c, &lower_key, 0);
	btrfs_set_node_blockptr(c, 0, lower->start);
	lower_gen = btrfs_header_generation(lower);
	WARN_ON(lower_gen != trans->transid);

	btrfs_set_node_ptr_generation(c, 0, lower_gen);

	btrfs_mark_buffer_dirty(c);

	old = root->node;
	rcu_assign_pointer(root->node, c);

	/* the super has an extra ref to root->node */
	free_extent_buffer(old);

	add_root_to_dirty_list(root);
	extent_buffer_get(c);
	path->nodes[level] = c;
	path->locks[level] = BTRFS_WRITE_LOCK;
	path->slots[level] = 0;
	return 0;
}
/*
 * worker function to insert a single pointer in a node.
 * the node should have enough room for the pointer already
 *
 * slot and level indicate where you want the key to go, and
 * blocknr is the block the key points to.
 *
 * returns zero on success and < 0 on any error
 */
static int insert_ptr(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_path *path, struct btrfs_disk_key
		      *key, u64 bytenr, int slot, int level)
{
	struct extent_buffer *lower;
	int nritems;

	BUG_ON(!path->nodes[level]);
	btrfs_assert_tree_locked(path->nodes[level]);
	lower = path->nodes[level];
	nritems = btrfs_header_nritems(lower);
	BUG_ON(slot > nritems);
	if (nritems == BTRFS_NODEPTRS_PER_BLOCK(root))
		BUG();
	if (slot != nritems) {
		memmove_extent_buffer(lower,
			      btrfs_node_key_ptr_offset(slot + 1),
			      btrfs_node_key_ptr_offset(slot),
			      (nritems - slot) * sizeof(struct btrfs_key_ptr));
	}
	btrfs_set_node_key(lower, key, slot);
	btrfs_set_node_blockptr(lower, slot, bytenr);
	WARN_ON(trans->transid == 0);
	btrfs_set_node_ptr_generation(lower, slot, trans->transid);
	btrfs_set_header_nritems(lower, nritems + 1);
	btrfs_mark_buffer_dirty(lower);
	return 0;
}
/*
 * split the node at the specified level in path in two.
 * The path is corrected to point to the appropriate node after the split
 *
 * Before splitting this tries to make some room in the node by pushing
 * left and right, if either one works, it returns right away.
 *
 * returns 0 on success and < 0 on failure
 */
static noinline int split_node(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_path *path, int level)
{
	struct extent_buffer *c;
	struct extent_buffer *split;
	struct btrfs_disk_key disk_key;
	int mid;
	int ret;
	int wret;
	u32 c_nritems;

	c = path->nodes[level];
	WARN_ON(btrfs_header_generation(c) != trans->transid);
	if (c == root->node) {
		/* trying to split the root, lets make a new one */
		ret = insert_new_root(trans, root, path, level + 1);
		if (ret)
			return ret;
	} else {
		ret = push_nodes_for_insert(trans, root, path, level);
		c = path->nodes[level];
		if (!ret && btrfs_header_nritems(c) <
		    BTRFS_NODEPTRS_PER_BLOCK(root) - 3)
			return 0;
		if (ret < 0)
			return ret;
	}

	c_nritems = btrfs_header_nritems(c);
	mid = (c_nritems + 1) / 2;
	btrfs_node_key(c, &disk_key, mid);

	split = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
					root->root_key.objectid,
					&disk_key, level, c->start, 0, 0);
	if (IS_ERR(split))
		return PTR_ERR(split);

	root_add_used(root, root->nodesize);

	memset_extent_buffer(split, 0, 0, sizeof(struct btrfs_header));
	btrfs_set_header_level(split, btrfs_header_level(c));
	btrfs_set_header_bytenr(split, split->start);
	btrfs_set_header_generation(split, trans->transid);
	btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(split, root->root_key.objectid);
	write_extent_buffer(split, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(split),
			    BTRFS_FSID_SIZE);
	write_extent_buffer(split, root->fs_info->chunk_tree_uuid,
			    (unsigned long)btrfs_header_chunk_tree_uuid(split),
			    BTRFS_UUID_SIZE);

	copy_extent_buffer(split, c,
			   btrfs_node_key_ptr_offset(0),
			   btrfs_node_key_ptr_offset(mid),
			   (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
	btrfs_set_header_nritems(split, c_nritems - mid);
	btrfs_set_header_nritems(c, mid);
	ret = 0;

	btrfs_mark_buffer_dirty(c);
	btrfs_mark_buffer_dirty(split);

	wret = insert_ptr(trans, root, path, &disk_key, split->start,
			  path->slots[level + 1] + 1,
			  level + 1);
	if (wret)
		ret = wret;

	if (path->slots[level] >= mid) {
		path->slots[level] -= mid;
		btrfs_tree_unlock(c);
		free_extent_buffer(c);
		path->nodes[level] = split;
		path->slots[level + 1] += 1;
	} else {
		btrfs_tree_unlock(split);
		free_extent_buffer(split);
	}
	return ret;
}
/*
 * how many bytes are required to store the items in a leaf.  start
 * and nr indicate which items in the leaf to check.  This totals up the
 * space used both by the item structs and the item data
 */
static int leaf_space_used(struct extent_buffer *l, int start, int nr)
{
	int data_len;
	int nritems = btrfs_header_nritems(l);
	int end = min(nritems, start + nr) - 1;

	if (!nr)
		return 0;
	data_len = btrfs_item_end_nr(l, start);
	data_len = data_len - btrfs_item_offset_nr(l, end);
	data_len += sizeof(struct btrfs_item) * nr;
	WARN_ON(data_len < 0);
	return data_len;
}
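/*
 * Example: for a leaf holding two items with 50 and 30 bytes of data,
 * checking start=0, nr=2 totals 80 bytes of data plus
 * 2 * sizeof(struct btrfs_item) of header space, which is exactly what
 * btrfs_leaf_free_space() below subtracts from BTRFS_LEAF_DATA_SIZE().
 */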
/*
 * The space between the end of the leaf items and
 * the start of the leaf data.  IOW, how much room
 * the leaf has left for both items and data
 */
noinline int btrfs_leaf_free_space(struct btrfs_root *root,
				   struct extent_buffer *leaf)
{
	int nritems = btrfs_header_nritems(leaf);
	int ret;
	ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
	if (ret < 0) {
		printk(KERN_CRIT "leaf free space ret %d, leaf data size %lu, "
		       "used %d nritems %d\n",
		       ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
		       leaf_space_used(leaf, 0, nritems), nritems);
	}
	return ret;
}
/*
 * min slot controls the lowest index we're willing to push to the
 * right.  We'll push up to and including min_slot, but no lower
 */
static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      int data_size, int empty,
				      struct extent_buffer *right,
				      int free_space, u32 left_nritems,
				      u32 min_slot)
{
	struct extent_buffer *left = path->nodes[0];
	struct extent_buffer *upper = path->nodes[1];
	struct btrfs_disk_key disk_key;
	int slot;
	u32 i;
	int push_space = 0;
	int push_items = 0;
	struct btrfs_item *item;
	u32 nr;
	u32 right_nritems;
	u32 data_end;
	u32 this_item_size;

	if (empty)
		nr = 0;
	else
		nr = max_t(u32, 1, min_slot);

	if (path->slots[0] >= left_nritems)
		push_space += data_size;

	slot = path->slots[1];
	i = left_nritems - 1;
	while (i >= nr) {
		item = btrfs_item_nr(left, i);

		if (!empty && push_items > 0) {
			if (path->slots[0] > i)
				break;
			if (path->slots[0] == i) {
				int space = btrfs_leaf_free_space(root, left);
				if (space + push_space * 2 > free_space)
					break;
			}
		}

		if (path->slots[0] == i)
			push_space += data_size;

		this_item_size = btrfs_item_size(left, item);
		if (this_item_size + sizeof(*item) + push_space > free_space)
			break;

		push_items++;
		push_space += this_item_size + sizeof(*item);
		if (i == 0)
			break;
		i--;
	}

	if (push_items == 0)
		goto out_unlock;

	if (!empty && push_items == left_nritems)
		WARN_ON(1);

	/* push left to right */
	right_nritems = btrfs_header_nritems(right);

	push_space = btrfs_item_end_nr(left, left_nritems - push_items);
	push_space -= leaf_data_end(root, left);

	/* make room in the right data area */
	data_end = leaf_data_end(root, right);
	memmove_extent_buffer(right,
			      btrfs_leaf_data(right) + data_end - push_space,
			      btrfs_leaf_data(right) + data_end,
			      BTRFS_LEAF_DATA_SIZE(root) - data_end);

	/* copy from the left data area */
	copy_extent_buffer(right, left, btrfs_leaf_data(right) +
		     BTRFS_LEAF_DATA_SIZE(root) - push_space,
		     btrfs_leaf_data(left) + leaf_data_end(root, left),
		     push_space);

	memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
			      btrfs_item_nr_offset(0),
			      right_nritems * sizeof(struct btrfs_item));

	/* copy the items from left to right */
	copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
		   btrfs_item_nr_offset(left_nritems - push_items),
		   push_items * sizeof(struct btrfs_item));

	/* update the item pointers */
	right_nritems += push_items;
	btrfs_set_header_nritems(right, right_nritems);
	push_space = BTRFS_LEAF_DATA_SIZE(root);
	for (i = 0; i < right_nritems; i++) {
		item = btrfs_item_nr(right, i);
		push_space -= btrfs_item_size(right, item);
		btrfs_set_item_offset(right, item, push_space);
	}

	left_nritems -= push_items;
	btrfs_set_header_nritems(left, left_nritems);

	if (left_nritems)
		btrfs_mark_buffer_dirty(left);
	else
		clean_tree_block(trans, root, left);

	btrfs_mark_buffer_dirty(right);

	btrfs_item_key(right, &disk_key, 0);
	btrfs_set_node_key(upper, &disk_key, slot + 1);
	btrfs_mark_buffer_dirty(upper);

	/* then fixup the leaf pointer in the path */
	if (path->slots[0] >= left_nritems) {
		path->slots[0] -= left_nritems;
		if (btrfs_header_nritems(path->nodes[0]) == 0)
			clean_tree_block(trans, root, path->nodes[0]);
		btrfs_tree_unlock(path->nodes[0]);
		free_extent_buffer(path->nodes[0]);
		path->nodes[0] = right;
		path->slots[1] += 1;
	} else {
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	return 0;

out_unlock:
	btrfs_tree_unlock(right);
	free_extent_buffer(right);
	return 1;
}
/*
 * push some data in the path leaf to the right, trying to free up at
 * least data_size bytes.  returns zero if the push worked, nonzero otherwise
 *
 * returns 1 if the push failed because the other node didn't have enough
 * room, 0 if everything worked out and < 0 if there were major errors.
 *
 * this will push starting from min_slot to the end of the leaf.  It won't
 * push any slot lower than min_slot
 */
static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
			   *root, struct btrfs_path *path,
			   int min_data_size, int data_size,
			   int empty, u32 min_slot)
{
	struct extent_buffer *left = path->nodes[0];
	struct extent_buffer *right;
	struct extent_buffer *upper;
	int slot;
	int free_space;
	u32 left_nritems;
	int ret;

	if (!path->nodes[1])
		return 1;

	slot = path->slots[1];
	upper = path->nodes[1];
	if (slot >= btrfs_header_nritems(upper) - 1)
		return 1;

	btrfs_assert_tree_locked(path->nodes[1]);

	right = read_node_slot(root, upper, slot + 1);
	if (right == NULL)
		return 1;

	btrfs_tree_lock(right);
	btrfs_set_lock_blocking(right);

	free_space = btrfs_leaf_free_space(root, right);
	if (free_space < data_size)
		goto out_unlock;

	/* cow and double check */
	ret = btrfs_cow_block(trans, root, right, upper,
			      slot + 1, &right);
	if (ret)
		goto out_unlock;

	free_space = btrfs_leaf_free_space(root, right);
	if (free_space < data_size)
		goto out_unlock;

	left_nritems = btrfs_header_nritems(left);
	if (left_nritems == 0)
		goto out_unlock;

	return __push_leaf_right(trans, root, path, min_data_size, empty,
				 right, free_space, left_nritems, min_slot);
out_unlock:
	btrfs_tree_unlock(right);
	free_extent_buffer(right);
	return 1;
}
/*
 * push some data in the path leaf to the left, trying to free up at
 * least data_size bytes.  returns zero if the push worked, nonzero otherwise
 *
 * max_slot can put a limit on how far into the leaf we'll push items.  The
 * item at 'max_slot' won't be touched.  Use (u32)-1 to make us do all the
 * items
 */
static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path, int data_size,
				     int empty, struct extent_buffer *left,
				     int free_space, u32 right_nritems,
				     u32 max_slot)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *right = path->nodes[0];
	int i;
	int push_space = 0;
	int push_items = 0;
	struct btrfs_item *item;
	u32 old_left_nritems;
	u32 nr;
	int ret = 0;
	int wret;
	u32 this_item_size;
	u32 old_left_item_size;

	if (empty)
		nr = min(right_nritems, max_slot);
	else
		nr = min(right_nritems - 1, max_slot);

	for (i = 0; i < nr; i++) {
		item = btrfs_item_nr(right, i);

		if (!empty && push_items > 0) {
			if (path->slots[0] < i)
				break;
			if (path->slots[0] == i) {
				int space = btrfs_leaf_free_space(root, right);
				if (space + push_space * 2 > free_space)
					break;
			}
		}

		if (path->slots[0] == i)
			push_space += data_size;

		this_item_size = btrfs_item_size(right, item);
		if (this_item_size + sizeof(*item) + push_space > free_space)
			break;

		push_items++;
		push_space += this_item_size + sizeof(*item);
	}

	if (push_items == 0) {
		ret = 1;
		goto out;
	}
	if (!empty && push_items == btrfs_header_nritems(right))
		WARN_ON(1);

	/* push data from right to left */
	copy_extent_buffer(left, right,
			   btrfs_item_nr_offset(btrfs_header_nritems(left)),
			   btrfs_item_nr_offset(0),
			   push_items * sizeof(struct btrfs_item));

	push_space = BTRFS_LEAF_DATA_SIZE(root) -
		     btrfs_item_offset_nr(right, push_items - 1);

	copy_extent_buffer(left, right, btrfs_leaf_data(left) +
		     leaf_data_end(root, left) - push_space,
		     btrfs_leaf_data(right) +
		     btrfs_item_offset_nr(right, push_items - 1),
		     push_space);
	old_left_nritems = btrfs_header_nritems(left);
	BUG_ON(old_left_nritems <= 0);

	old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
	for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
		u32 ioff;

		item = btrfs_item_nr(left, i);

		ioff = btrfs_item_offset(left, item);
		btrfs_set_item_offset(left, item,
		      ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size));
	}
	btrfs_set_header_nritems(left, old_left_nritems + push_items);

	/* fixup right node */
	if (push_items > right_nritems) {
		printk(KERN_CRIT "push items %d nr %u\n", push_items,
		       right_nritems);
		WARN_ON(1);
	}

	if (push_items < right_nritems) {
		push_space = btrfs_item_offset_nr(right, push_items - 1) -
						  leaf_data_end(root, right);
		memmove_extent_buffer(right, btrfs_leaf_data(right) +
				      BTRFS_LEAF_DATA_SIZE(root) - push_space,
				      btrfs_leaf_data(right) +
				      leaf_data_end(root, right), push_space);

		memmove_extent_buffer(right, btrfs_item_nr_offset(0),
			      btrfs_item_nr_offset(push_items),
			     (btrfs_header_nritems(right) - push_items) *
			     sizeof(struct btrfs_item));
	}
	right_nritems -= push_items;
	btrfs_set_header_nritems(right, right_nritems);
	push_space = BTRFS_LEAF_DATA_SIZE(root);
	for (i = 0; i < right_nritems; i++) {
		item = btrfs_item_nr(right, i);

		push_space = push_space - btrfs_item_size(right, item);
		btrfs_set_item_offset(right, item, push_space);
	}

	btrfs_mark_buffer_dirty(left);
	if (right_nritems)
		btrfs_mark_buffer_dirty(right);
	else
		clean_tree_block(trans, root, right);

	btrfs_item_key(right, &disk_key, 0);
	wret = fixup_low_keys(trans, root, path, &disk_key, 1);
	if (wret)
		ret = wret;

	/* then fixup the leaf pointer in the path */
	if (path->slots[0] < push_items) {
		path->slots[0] += old_left_nritems;
		btrfs_tree_unlock(path->nodes[0]);
		free_extent_buffer(path->nodes[0]);
		path->nodes[0] = left;
		path->slots[1] -= 1;
	} else {
		btrfs_tree_unlock(left);
		free_extent_buffer(left);
		path->slots[0] -= push_items;
	}
	BUG_ON(path->slots[0] < 0);
	return ret;
out:
	btrfs_tree_unlock(left);
	free_extent_buffer(left);
	return ret;
}
/*
 * push some data in the path leaf to the left, trying to free up at
 * least data_size bytes.  returns zero if the push worked, nonzero otherwise
 *
 * max_slot can put a limit on how far into the leaf we'll push items.  The
 * item at 'max_slot' won't be touched.  Use (u32)-1 to make us push all the
 * items
 */
static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
			  *root, struct btrfs_path *path, int min_data_size,
			  int data_size, int empty, u32 max_slot)
{
	struct extent_buffer *right = path->nodes[0];
	struct extent_buffer *left;
	int slot;
	int free_space;
	u32 right_nritems;
	int ret = 0;

	slot = path->slots[1];
	if (slot == 0)
		return 1;
	if (!path->nodes[1])
		return 1;

	right_nritems = btrfs_header_nritems(right);
	if (right_nritems == 0)
		return 1;

	btrfs_assert_tree_locked(path->nodes[1]);

	left = read_node_slot(root, path->nodes[1], slot - 1);
	if (left == NULL)
		return 1;

	btrfs_tree_lock(left);
	btrfs_set_lock_blocking(left);

	free_space = btrfs_leaf_free_space(root, left);
	if (free_space < data_size) {
		ret = 1;
		goto out;
	}

	/* cow and double check */
	ret = btrfs_cow_block(trans, root, left,
			      path->nodes[1], slot - 1, &left);
	if (ret) {
		/* we hit -ENOSPC, but it isn't fatal here */
		ret = 1;
		goto out;
	}

	free_space = btrfs_leaf_free_space(root, left);
	if (free_space < data_size) {
		ret = 1;
		goto out;
	}

	return __push_leaf_left(trans, root, path, min_data_size,
			       empty, left, free_space, right_nritems,
			       max_slot);
out:
	btrfs_tree_unlock(left);
	free_extent_buffer(left);
	return ret;
}
/*
 * split the path's leaf in two, making sure there is at least data_size
 * available for the resulting leaf level of the path.
 *
 * returns 0 if all went well and < 0 on failure.
 */
static noinline int copy_for_split(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_path *path,
			       struct extent_buffer *l,
			       struct extent_buffer *right,
			       int slot, int mid, int nritems)
{
	int data_copy_size;
	int rt_data_off;
	int i;
	int ret = 0;
	int wret;
	struct btrfs_disk_key disk_key;

	nritems = nritems - mid;
	btrfs_set_header_nritems(right, nritems);
	data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l);

	copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
			   btrfs_item_nr_offset(mid),
			   nritems * sizeof(struct btrfs_item));

	copy_extent_buffer(right, l,
		     btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) -
		     data_copy_size, btrfs_leaf_data(l) +
		     leaf_data_end(root, l), data_copy_size);

	rt_data_off = BTRFS_LEAF_DATA_SIZE(root) -
		      btrfs_item_end_nr(l, mid);

	for (i = 0; i < nritems; i++) {
		struct btrfs_item *item = btrfs_item_nr(right, i);
		u32 ioff;

		ioff = btrfs_item_offset(right, item);
		btrfs_set_item_offset(right, item, ioff + rt_data_off);
	}

	btrfs_set_header_nritems(l, mid);
	ret = 0;
	btrfs_item_key(right, &disk_key, 0);
	wret = insert_ptr(trans, root, path, &disk_key, right->start,
			  path->slots[1] + 1, 1);
	if (wret)
		ret = wret;

	btrfs_mark_buffer_dirty(right);
	btrfs_mark_buffer_dirty(l);
	BUG_ON(path->slots[0] != slot);

	if (mid <= slot) {
		btrfs_tree_unlock(path->nodes[0]);
		free_extent_buffer(path->nodes[0]);
		path->nodes[0] = right;
		path->slots[0] -= mid;
		path->slots[1] += 1;
	} else {
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}

	BUG_ON(path->slots[0] < 0);

	return ret;
}
/*
 * double splits happen when we need to insert a big item in the middle
 * of a leaf.  A double split can leave us with 3 mostly empty leaves:
 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
 *          A                 B                 C
 *
 * We avoid this by trying to push the items on either side of our target
 * into the adjacent leaves.  If all goes well we can avoid the double split
 * completely.
 */
static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  int data_size)
{
	int ret;
	int progress = 0;
	int slot;
	u32 nritems;

	slot = path->slots[0];

	/*
	 * try to push all the items after our slot into the
	 * right leaf
	 */
	ret = push_leaf_right(trans, root, path, 1, data_size, 0, slot);
	if (ret < 0)
		return ret;

	if (ret == 0)
		progress++;

	nritems = btrfs_header_nritems(path->nodes[0]);
	/*
	 * our goal is to get our slot at the start or end of a leaf.  If
	 * we've done so we're done
	 */
	if (path->slots[0] == 0 || path->slots[0] == nritems)
		return 0;

	if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
		return 0;

	/* try to push all the items before our slot into the next leaf */
	slot = path->slots[0];
	ret = push_leaf_left(trans, root, path, 1, data_size, 0, slot);
	if (ret < 0)
		return ret;

	if (ret == 0)
		progress++;

	if (progress)
		return 0;
	return 1;
}
/*
 * split the path's leaf in two, making sure there is at least data_size
 * available for the resulting leaf level of the path.
 *
 * returns 0 if all went well and < 0 on failure.
 */
static noinline int split_leaf(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_key *ins_key,
			       struct btrfs_path *path, int data_size,
			       int extend)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *l;
	u32 nritems;
	int mid;
	int slot;
	struct extent_buffer *right;
	int ret = 0;
	int wret;
	int split;
	int num_doubles = 0;
	int tried_avoid_double = 0;

	l = path->nodes[0];
	slot = path->slots[0];
	if (extend && data_size + btrfs_item_size_nr(l, slot) +
	    sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root))
		return -EOVERFLOW;

	/* first try to make some room by pushing left and right */
	if (data_size) {
		wret = push_leaf_right(trans, root, path, data_size,
				       data_size, 0, 0);
		if (wret < 0)
			return wret;
		if (wret) {
			wret = push_leaf_left(trans, root, path, data_size,
					      data_size, 0, (u32)-1);
			if (wret < 0)
				return wret;
		}
		l = path->nodes[0];

		/* did the pushes work? */
		if (btrfs_leaf_free_space(root, l) >= data_size)
			return 0;
	}

	if (!path->nodes[1]) {
		ret = insert_new_root(trans, root, path, 1);
		if (ret)
			return ret;
	}
again:
	split = 1;
	l = path->nodes[0];
	slot = path->slots[0];
	nritems = btrfs_header_nritems(l);
	mid = (nritems + 1) / 2;

	if (mid <= slot) {
		if (nritems == 1 ||
		    leaf_space_used(l, mid, nritems - mid) + data_size >
			BTRFS_LEAF_DATA_SIZE(root)) {
			if (slot >= nritems) {
				split = 0;
			} else {
				mid = slot;
				if (mid != nritems &&
				    leaf_space_used(l, mid, nritems - mid) +
				    data_size > BTRFS_LEAF_DATA_SIZE(root)) {
					if (data_size && !tried_avoid_double)
						goto push_for_double;
					split = 2;
				}
			}
		}
	} else {
		if (leaf_space_used(l, 0, mid) + data_size >
			BTRFS_LEAF_DATA_SIZE(root)) {
			if (!extend && data_size && slot == 0) {
				split = 0;
			} else if ((extend || !data_size) && slot == 0) {
				mid = 1;
			} else {
				mid = slot;
				if (mid != nritems &&
				    leaf_space_used(l, mid, nritems - mid) +
				    data_size > BTRFS_LEAF_DATA_SIZE(root)) {
					if (data_size && !tried_avoid_double)
						goto push_for_double;
					split = 2;
				}
			}
		}
	}

	if (split == 0)
		btrfs_cpu_key_to_disk(&disk_key, ins_key);
	else
		btrfs_item_key(l, &disk_key, mid);

	right = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
					root->root_key.objectid,
					&disk_key, 0, l->start, 0, 0);
	if (IS_ERR(right))
		return PTR_ERR(right);

	root_add_used(root, root->leafsize);

	memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header));
	btrfs_set_header_bytenr(right, right->start);
	btrfs_set_header_generation(right, trans->transid);
	btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(right, root->root_key.objectid);
	btrfs_set_header_level(right, 0);
	write_extent_buffer(right, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(right),
			    BTRFS_FSID_SIZE);

	write_extent_buffer(right, root->fs_info->chunk_tree_uuid,
			    (unsigned long)btrfs_header_chunk_tree_uuid(right),
			    BTRFS_UUID_SIZE);

	if (split == 0) {
		if (mid <= slot) {
			btrfs_set_header_nritems(right, 0);
			wret = insert_ptr(trans, root, path,
					  &disk_key, right->start,
					  path->slots[1] + 1, 1);
			if (wret)
				ret = wret;

			btrfs_tree_unlock(path->nodes[0]);
			free_extent_buffer(path->nodes[0]);
			path->nodes[0] = right;
			path->slots[0] = 0;
			path->slots[1] += 1;
		} else {
			btrfs_set_header_nritems(right, 0);
			wret = insert_ptr(trans, root, path,
					  &disk_key, right->start,
					  path->slots[1], 1);
			if (wret)
				ret = wret;
			btrfs_tree_unlock(path->nodes[0]);
			free_extent_buffer(path->nodes[0]);
			path->nodes[0] = right;
			path->slots[0] = 0;
			if (path->slots[1] == 0) {
				wret = fixup_low_keys(trans, root,
						path, &disk_key, 1);
				if (wret)
					ret = wret;
			}
		}
		btrfs_mark_buffer_dirty(right);
		return ret;
	}

	ret = copy_for_split(trans, root, path, l, right, slot, mid, nritems);
	BUG_ON(ret);

	if (split == 2) {
		BUG_ON(num_doubles != 0);
		num_doubles++;
		goto again;
	}

	return ret;

push_for_double:
	push_for_double_split(trans, root, path, data_size);
	tried_avoid_double = 1;
	if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
		return 0;
	goto again;
}
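/*
 * Illustrative sketch, not part of the original file: the fit test that
 * split_leaf() keeps re-evaluating when choosing a split point.  A split
 * point 'mid' only helps if the half that will receive the new item can
 * hold its existing items plus data_size more bytes.  The helper name is
 * invented for the example.
 */
static int __maybe_unused example_split_point_ok(struct btrfs_root *root,
						 struct extent_buffer *l,
						 int mid, int nritems,
						 int data_size)
{
	/* space the right half would use, plus the incoming item's data */
	return leaf_space_used(l, mid, nritems - mid) + data_size <=
	       BTRFS_LEAF_DATA_SIZE(root);
}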
static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root,
					 struct btrfs_path *path, int ins_len)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	u64 extent_len = 0;
	u32 item_size;
	int ret;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
	       key.type != BTRFS_EXTENT_CSUM_KEY);

	if (btrfs_leaf_free_space(root, leaf) >= ins_len)
		return 0;

	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	if (key.type == BTRFS_EXTENT_DATA_KEY) {
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_len = btrfs_file_extent_num_bytes(leaf, fi);
	}
	btrfs_release_path(path);

	path->keep_locks = 1;
	path->search_for_split = 1;
	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	path->search_for_split = 0;
	if (ret < 0)
		goto err;

	ret = -EAGAIN;
	leaf = path->nodes[0];
	/* if our item isn't there or got smaller, return now */
	if (ret > 0 || item_size != btrfs_item_size_nr(leaf, path->slots[0]))
		goto err;

	/* the leaf has changed, it now has room.  return now */
	if (btrfs_leaf_free_space(root, path->nodes[0]) >= ins_len)
		goto err;

	if (key.type == BTRFS_EXTENT_DATA_KEY) {
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
			goto err;
	}

	btrfs_set_path_blocking(path);
	ret = split_leaf(trans, root, &key, path, ins_len, 1);
	if (ret)
		goto err;

	path->keep_locks = 0;
	btrfs_unlock_up_safe(path, 1);
	return 0;
err:
	path->keep_locks = 0;
	return ret;
}
static noinline int split_item(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_path *path,
			       struct btrfs_key *new_key,
			       unsigned long split_offset)
{
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	struct btrfs_item *new_item;
	int slot;
	char *buf;
	u32 nritems;
	u32 item_size;
	u32 orig_offset;
	struct btrfs_disk_key disk_key;

	leaf = path->nodes[0];
	BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));

	btrfs_set_path_blocking(path);

	item = btrfs_item_nr(leaf, path->slots[0]);
	orig_offset = btrfs_item_offset(leaf, item);
	item_size = btrfs_item_size(leaf, item);

	buf = kmalloc(item_size, GFP_NOFS);
	if (!buf)
		return -ENOMEM;

	read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
			    path->slots[0]), item_size);

	slot = path->slots[0] + 1;
	nritems = btrfs_header_nritems(leaf);
	if (slot != nritems) {
		/* shift the items */
		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
				btrfs_item_nr_offset(slot),
				(nritems - slot) * sizeof(struct btrfs_item));
	}

	btrfs_cpu_key_to_disk(&disk_key, new_key);
	btrfs_set_item_key(leaf, &disk_key, slot);

	new_item = btrfs_item_nr(leaf, slot);

	btrfs_set_item_offset(leaf, new_item, orig_offset);
	btrfs_set_item_size(leaf, new_item, item_size - split_offset);

	btrfs_set_item_offset(leaf, item,
			      orig_offset + item_size - split_offset);
	btrfs_set_item_size(leaf, item, split_offset);

	btrfs_set_header_nritems(leaf, nritems + 1);

	/* write the data for the start of the original item */
	write_extent_buffer(leaf, buf,
			    btrfs_item_ptr_offset(leaf, path->slots[0]),
			    split_offset);

	/* write the data for the new item */
	write_extent_buffer(leaf, buf + split_offset,
			    btrfs_item_ptr_offset(leaf, slot),
			    item_size - split_offset);
	btrfs_mark_buffer_dirty(leaf);

	BUG_ON(btrfs_leaf_free_space(root, leaf) < 0);
	kfree(buf);
	return 0;
}
/*
 * This function splits a single item into two items,
 * giving 'new_key' to the new item and splitting the
 * old one at split_offset (from the start of the item).
 *
 * The path may be released by this operation.  After
 * the split, the path is pointing to the old item.  The
 * new item is going to be in the same node as the old one.
 *
 * Note, the item being split must be small enough to live alone on
 * a tree block with room for one extra struct btrfs_item
 *
 * This allows us to split the item in place, keeping a lock on the
 * leaf the entire time.
 */
int btrfs_split_item(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_path *path,
		     struct btrfs_key *new_key,
		     unsigned long split_offset)
{
	int ret;

	ret = setup_leaf_for_split(trans, root, path,
				   sizeof(struct btrfs_item));
	if (ret)
		return ret;

	ret = split_item(trans, root, path, new_key, split_offset);
	return ret;
}
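/*
 * Illustrative sketch, not part of the original file: splitting the item
 * the path points at, 'split_offset' bytes in.  The new key keeps the
 * same objectid and type; bumping the key offset by the split point is
 * only meaningful for key types whose offset is a byte offset, which is
 * an assumption of this example, as is the helper name.
 */
static int __maybe_unused example_split_at(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   unsigned long split_offset)
{
	struct btrfs_key new_key;

	btrfs_item_key_to_cpu(path->nodes[0], &new_key, path->slots[0]);
	new_key.offset += split_offset;

	/* after this, the path still points at the (now shorter) old item */
	return btrfs_split_item(trans, root, path, &new_key, split_offset);
}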
/*
 * This function duplicates an item, giving 'new_key' to the new item.
 * It guarantees both items live in the same tree leaf and the new item
 * is contiguous with the original item.
 *
 * This allows us to split a file extent in place, keeping a lock on the
 * leaf the entire time.
 */
int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 struct btrfs_path *path,
			 struct btrfs_key *new_key)
{
	struct extent_buffer *leaf;
	int ret;
	u32 item_size;

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	ret = setup_leaf_for_split(trans, root, path,
				   item_size + sizeof(struct btrfs_item));
	if (ret)
		return ret;

	path->slots[0]++;
	ret = setup_items_for_insert(trans, root, path, new_key, &item_size,
				     item_size, item_size +
				     sizeof(struct btrfs_item), 1);
	BUG_ON(ret);

	leaf = path->nodes[0];
	memcpy_extent_buffer(leaf,
			     btrfs_item_ptr_offset(leaf, path->slots[0]),
			     btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
			     item_size);
	return 0;
}
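/*
 * Illustrative sketch, not part of the original file: duplicating the
 * item the path points at under a new key, the way file-extent
 * manipulation code can use btrfs_duplicate_item().  'new_offset' is a
 * caller-chosen key offset for the copy; the helper name is invented
 * for the example.
 */
static int __maybe_unused example_dup_item(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 new_offset)
{
	struct btrfs_key new_key;

	btrfs_item_key_to_cpu(path->nodes[0], &new_key, path->slots[0]);
	new_key.offset = new_offset;

	/* on success the path points at the new copy */
	return btrfs_duplicate_item(trans, root, path, &new_key);
}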
/*
 * make the item pointed to by the path smaller.  new_size indicates
 * how small to make it, and from_end tells us if we just chop bytes
 * off the end of the item or if we shift the item to chop bytes off
 * the front.
 */
int btrfs_truncate_item(struct btrfs_trans_handle *trans,
			struct btrfs_root *root,
			struct btrfs_path *path,
			u32 new_size, int from_end)
{
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	u32 nritems;
	unsigned int data_end;
	unsigned int old_data_start;
	unsigned int old_size;
	unsigned int size_diff;
	int i;

	leaf = path->nodes[0];
	slot = path->slots[0];

	old_size = btrfs_item_size_nr(leaf, slot);
	if (old_size == new_size)
		return 0;

	nritems = btrfs_header_nritems(leaf);
	data_end = leaf_data_end(root, leaf);

	old_data_start = btrfs_item_offset_nr(leaf, slot);

	size_diff = old_size - new_size;

	BUG_ON(slot < 0);
	BUG_ON(slot >= nritems);

	/*
	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
	 */
	/* first correct the data pointers */
	for (i = slot; i < nritems; i++) {
		u32 ioff;

		item = btrfs_item_nr(leaf, i);

		ioff = btrfs_item_offset(leaf, item);
		btrfs_set_item_offset(leaf, item, ioff + size_diff);
	}

	/* shift the data */
	if (from_end) {
		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
			      data_end + size_diff, btrfs_leaf_data(leaf) +
			      data_end, old_data_start + new_size - data_end);
	} else {
		struct btrfs_disk_key disk_key;
		u64 offset;

		btrfs_item_key(leaf, &disk_key, slot);

		if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
			unsigned long ptr;
			struct btrfs_file_extent_item *fi;

			fi = btrfs_item_ptr(leaf, slot,
					    struct btrfs_file_extent_item);
			fi = (struct btrfs_file_extent_item *)(
			     (unsigned long)fi - size_diff);

			if (btrfs_file_extent_type(leaf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE) {
				ptr = btrfs_item_ptr_offset(leaf, slot);
				memmove_extent_buffer(leaf, ptr,
				      (unsigned long)fi,
				      offsetof(struct btrfs_file_extent_item,
						 disk_bytenr));
			}
		}

		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
			      data_end + size_diff, btrfs_leaf_data(leaf) +
			      data_end, old_data_start - data_end);

		offset = btrfs_disk_key_offset(&disk_key);
		btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
		btrfs_set_item_key(leaf, &disk_key, slot);
		if (slot == 0)
			fixup_low_keys(trans, root, path, &disk_key, 1);
	}

	item = btrfs_item_nr(leaf, slot);
	btrfs_set_item_size(leaf, item, new_size);
	btrfs_mark_buffer_dirty(leaf);

	if (btrfs_leaf_free_space(root, leaf) < 0) {
		btrfs_print_leaf(root, leaf);
		BUG();
	}
	return 0;
}
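/*
 * Illustrative sketch, not part of the original file: dropping the tail
 * of the item the path points at.  from_end = 1 keeps the item's key and
 * chops bytes off the end; from_end = 0 would instead shift the data and
 * move the key's offset forward.  The helper name and the -EINVAL check
 * are inventions of this example.
 */
static int __maybe_unused example_chop_tail(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    struct btrfs_path *path,
					    u32 bytes_to_drop)
{
	u32 old_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);

	if (bytes_to_drop >= old_size)
		return -EINVAL;	/* example-only sanity check */
	return btrfs_truncate_item(trans, root, path,
				   old_size - bytes_to_drop, 1);
}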
/*
 * make the item pointed to by the path bigger, data_size is the new size.
 */
int btrfs_extend_item(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root, struct btrfs_path *path,
		      u32 data_size)
{
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	u32 nritems;
	unsigned int data_end;
	unsigned int old_data;
	unsigned int old_size;
	int i;

	leaf = path->nodes[0];

	nritems = btrfs_header_nritems(leaf);
	data_end = leaf_data_end(root, leaf);

	if (btrfs_leaf_free_space(root, leaf) < data_size) {
		btrfs_print_leaf(root, leaf);
		BUG();
	}
	slot = path->slots[0];
	old_data = btrfs_item_end_nr(leaf, slot);

	BUG_ON(slot < 0);
	if (slot >= nritems) {
		btrfs_print_leaf(root, leaf);
		printk(KERN_CRIT "slot %d too large, nritems %d\n",
		       slot, nritems);
		BUG_ON(1);
	}

	/*
	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
	 */
	/* first correct the data pointers */
	for (i = slot; i < nritems; i++) {
		u32 ioff;

		item = btrfs_item_nr(leaf, i);

		ioff = btrfs_item_offset(leaf, item);
		btrfs_set_item_offset(leaf, item, ioff - data_size);
	}

	/* shift the data */
	memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
		      data_end - data_size, btrfs_leaf_data(leaf) +
		      data_end, old_data - data_end);

	data_end = old_data;
	old_size = btrfs_item_size_nr(leaf, slot);
	item = btrfs_item_nr(leaf, slot);
	btrfs_set_item_size(leaf, item, old_size + data_size);
	btrfs_mark_buffer_dirty(leaf);

	if (btrfs_leaf_free_space(root, leaf) < 0) {
		btrfs_print_leaf(root, leaf);
		BUG();
	}
	return 0;
}
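/*
 * Illustrative sketch, not part of the original file: growing the item
 * at the current path position by 'grow' bytes.  Real callers reserve
 * the space via the ins_len argument of btrfs_search_slot() so the leaf
 * already has room; btrfs_extend_item() BUGs if it does not.  The helper
 * name and the -ENOSPC guard are inventions of this example.
 */
static int __maybe_unused example_grow_item(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    struct btrfs_path *path,
					    u32 grow)
{
	if (btrfs_leaf_free_space(root, path->nodes[0]) < (int)grow)
		return -ENOSPC;	/* avoid the BUG() in btrfs_extend_item */
	return btrfs_extend_item(trans, root, path, grow);
}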
/*
 * Given a key and some data, insert items into the tree.
 * This does all the path init required, making room in the tree if needed.
 * Returns the number of keys that were inserted.
 */
int btrfs_insert_some_items(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    struct btrfs_path *path,
			    struct btrfs_key *cpu_key, u32 *data_size,
			    int nr)
{
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	int ret = 0;
	int slot;
	int i;
	u32 nritems;
	u32 total_data = 0;
	u32 total_size = 0;
	unsigned int data_end;
	struct btrfs_disk_key disk_key;
	struct btrfs_key found_key;

	for (i = 0; i < nr; i++) {
		if (total_size + data_size[i] + sizeof(struct btrfs_item) >
		    BTRFS_LEAF_DATA_SIZE(root)) {
			nr = i;
			break;
		}
		total_data += data_size[i];
		total_size += data_size[i] + sizeof(struct btrfs_item);
	}
	BUG_ON(nr == 0);

	ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
	if (ret == 0)
		return -EEXIST;
	if (ret < 0)
		goto out;

	leaf = path->nodes[0];

	nritems = btrfs_header_nritems(leaf);
	data_end = leaf_data_end(root, leaf);

	if (btrfs_leaf_free_space(root, leaf) < total_size) {
		for (i = nr; i >= 0; i--) {
			total_data -= data_size[i];
			total_size -= data_size[i] + sizeof(struct btrfs_item);
			if (total_size < btrfs_leaf_free_space(root, leaf))
				break;
		}
		nr = i;
	}

	slot = path->slots[0];
	BUG_ON(slot < 0);

	if (slot != nritems) {
		unsigned int old_data = btrfs_item_end_nr(leaf, slot);

		item = btrfs_item_nr(leaf, slot);
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		/* figure out how many keys we can insert in here */
		total_data = data_size[0];
		for (i = 1; i < nr; i++) {
			if (btrfs_comp_cpu_keys(&found_key, cpu_key + i) <= 0)
				break;
			total_data += data_size[i];
		}
		nr = i;

		if (old_data < data_end) {
			btrfs_print_leaf(root, leaf);
			printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
			       slot, old_data, data_end);
			BUG_ON(1);
		}
		/*
		 * item0..itemN ... dataN.offset..dataN.size .. data0.size
		 */
		/* first correct the data pointers */
		for (i = slot; i < nritems; i++) {
			u32 ioff;

			item = btrfs_item_nr(leaf, i);
			ioff = btrfs_item_offset(leaf, item);
			btrfs_set_item_offset(leaf, item, ioff - total_data);
		}
		/* shift the items */
		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
			      btrfs_item_nr_offset(slot),
			      (nritems - slot) * sizeof(struct btrfs_item));

		/* shift the data */
		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
			      data_end - total_data, btrfs_leaf_data(leaf) +
			      data_end, old_data - data_end);
		data_end = old_data;
	} else {
		/*
		 * this sucks but it has to be done, if we are inserting at
		 * the end of the leaf only insert 1 of the items, since we
		 * have no way of knowing what's on the next leaf and we'd have
		 * to drop our current locks to figure it out
		 */
		nr = 1;
	}

	/* setup the item for the new data */
	for (i = 0; i < nr; i++) {
		btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
		btrfs_set_item_key(leaf, &disk_key, slot + i);
		item = btrfs_item_nr(leaf, slot + i);
		btrfs_set_item_offset(leaf, item, data_end - data_size[i]);
		data_end -= data_size[i];
		btrfs_set_item_size(leaf, item, data_size[i]);
	}
	btrfs_set_header_nritems(leaf, nritems + nr);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
	if (slot == 0) {
		btrfs_cpu_key_to_disk(&disk_key, cpu_key);
		ret = fixup_low_keys(trans, root, path, &disk_key, 1);
	}

	if (btrfs_leaf_free_space(root, leaf) < 0) {
		btrfs_print_leaf(root, leaf);
		BUG();
	}
out:
	if (!ret)
		ret = nr;
	return ret;
}
/*
 * this is a helper for btrfs_insert_empty_items, the main goal here is
 * to save stack depth by doing the bulk of the work in a function
 * that doesn't call btrfs_search_slot
 */
int setup_items_for_insert(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, struct btrfs_path *path,
			   struct btrfs_key *cpu_key, u32 *data_size,
			   u32 total_data, u32 total_size, int nr)
{
	struct btrfs_item *item;
	int i;
	u32 nritems;
	unsigned int data_end;
	struct btrfs_disk_key disk_key;
	int ret;
	struct extent_buffer *leaf;
	int slot;

	leaf = path->nodes[0];
	slot = path->slots[0];

	nritems = btrfs_header_nritems(leaf);
	data_end = leaf_data_end(root, leaf);

	if (btrfs_leaf_free_space(root, leaf) < total_size) {
		btrfs_print_leaf(root, leaf);
		printk(KERN_CRIT "not enough freespace need %u have %d\n",
		       total_size, btrfs_leaf_free_space(root, leaf));
		BUG();
	}

	if (slot != nritems) {
		unsigned int old_data = btrfs_item_end_nr(leaf, slot);

		if (old_data < data_end) {
			btrfs_print_leaf(root, leaf);
			printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
			       slot, old_data, data_end);
			BUG_ON(1);
		}
		/*
		 * item0..itemN ... dataN.offset..dataN.size .. data0.size
		 */
		/* first correct the data pointers */
		for (i = slot; i < nritems; i++) {
			u32 ioff;

			item = btrfs_item_nr(leaf, i);
			ioff = btrfs_item_offset(leaf, item);
			btrfs_set_item_offset(leaf, item, ioff - total_data);
		}
		/* shift the items */
		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
			      btrfs_item_nr_offset(slot),
			      (nritems - slot) * sizeof(struct btrfs_item));

		/* shift the data */
		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
			      data_end - total_data, btrfs_leaf_data(leaf) +
			      data_end, old_data - data_end);
		data_end = old_data;
	}

	/* setup the item for the new data */
	for (i = 0; i < nr; i++) {
		btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
		btrfs_set_item_key(leaf, &disk_key, slot + i);
		item = btrfs_item_nr(leaf, slot + i);
		btrfs_set_item_offset(leaf, item, data_end - data_size[i]);
		data_end -= data_size[i];
		btrfs_set_item_size(leaf, item, data_size[i]);
	}

	btrfs_set_header_nritems(leaf, nritems + nr);

	ret = 0;
	if (slot == 0) {
		btrfs_cpu_key_to_disk(&disk_key, cpu_key);
		ret = fixup_low_keys(trans, root, path, &disk_key, 1);
	}
	btrfs_unlock_up_safe(path, 1);
	btrfs_mark_buffer_dirty(leaf);

	if (btrfs_leaf_free_space(root, leaf) < 0) {
		btrfs_print_leaf(root, leaf);
		BUG();
	}
	return ret;
}
/*
 * Given a key and some data, insert items into the tree.
 * This does all the path init required, making room in the tree if needed.
 */
int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path,
			     struct btrfs_key *cpu_key, u32 *data_size,
			     int nr)
{
	int ret = 0;
	int slot;
	int i;
	u32 total_size = 0;
	u32 total_data = 0;

	for (i = 0; i < nr; i++)
		total_data += data_size[i];

	total_size = total_data + (nr * sizeof(struct btrfs_item));
	ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
	if (ret == 0)
		return -EEXIST;
	if (ret < 0)
		goto out;

	slot = path->slots[0];
	BUG_ON(slot < 0);

	ret = setup_items_for_insert(trans, root, path, cpu_key, data_size,
			       total_data, total_size, nr);
out:
	return ret;
}
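/*
 * Illustrative sketch, not part of the original file: reserving room for
 * two adjacent items in a single balance, then filling them in.  The
 * helper name is invented; the point is that the key array and the
 * data_size[] array travel together and the path comes back pointing at
 * the first reserved slot.
 */
static int __maybe_unused example_insert_two(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     struct btrfs_path *path,
					     struct btrfs_key *keys,
					     void *data0, u32 len0,
					     void *data1, u32 len1)
{
	u32 sizes[2];
	unsigned long ptr;
	int ret;

	sizes[0] = len0;
	sizes[1] = len1;
	ret = btrfs_insert_empty_items(trans, root, path, keys, sizes, 2);
	if (ret)
		return ret;

	/* copy the payloads into the two freshly reserved items */
	ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
	write_extent_buffer(path->nodes[0], data0, ptr, len0);
	ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0] + 1);
	write_extent_buffer(path->nodes[0], data1, ptr, len1);
	btrfs_mark_buffer_dirty(path->nodes[0]);
	return 0;
}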
/*
 * Given a key and some data, insert an item into the tree.
 * This does all the path init required, making room in the tree if needed.
 */
int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *cpu_key, void *data, u32
		      data_size)
{
	int ret = 0;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	unsigned long ptr;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
	if (!ret) {
		leaf = path->nodes[0];
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		write_extent_buffer(leaf, data, ptr, data_size);
		btrfs_mark_buffer_dirty(leaf);
	}
	btrfs_free_path(path);
	return ret;
}
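/*
 * Illustrative sketch, not part of the original file: inserting a small
 * blob with btrfs_insert_item().  The key values are made up for the
 * example; real callers pick objectid/type/offset according to the tree
 * they are modifying.
 */
static int __maybe_unused example_insert_blob(struct btrfs_trans_handle *trans,
					      struct btrfs_root *root,
					      u64 objectid, void *blob,
					      u32 blob_len)
{
	struct btrfs_key key;

	key.objectid = objectid;
	key.type = BTRFS_XATTR_ITEM_KEY;	/* arbitrary example type */
	key.offset = 0;

	/* allocates a path, makes room in the leaf, copies the data */
	return btrfs_insert_item(trans, root, &key, blob, blob_len);
}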
/*
 * delete the pointer from a given node.
 *
 * the tree should have been previously balanced so the deletion does not
 * empty a node.
 */
static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		   struct btrfs_path *path, int level, int slot)
{
	struct extent_buffer *parent = path->nodes[level];
	u32 nritems;
	int ret = 0;
	int wret;

	nritems = btrfs_header_nritems(parent);
	if (slot != nritems - 1) {
		memmove_extent_buffer(parent,
			      btrfs_node_key_ptr_offset(slot),
			      btrfs_node_key_ptr_offset(slot + 1),
			      sizeof(struct btrfs_key_ptr) *
			      (nritems - slot - 1));
	}
	nritems--;
	btrfs_set_header_nritems(parent, nritems);
	if (nritems == 0 && parent == root->node) {
		BUG_ON(btrfs_header_level(root->node) != 1);
		/* just turn the root into a leaf and break */
		btrfs_set_header_level(root->node, 0);
	} else if (slot == 0) {
		struct btrfs_disk_key disk_key;

		btrfs_node_key(parent, &disk_key, 0);
		wret = fixup_low_keys(trans, root, path, &disk_key, level + 1);
		if (wret)
			ret = wret;
	}
	btrfs_mark_buffer_dirty(parent);
	return ret;
}
/*
 * a helper function to delete the leaf pointed to by path->slots[1] and
 * path->nodes[1].
 *
 * This deletes the pointer in path->nodes[1] and frees the leaf
 * block extent.  zero is returned if it all worked out, < 0 otherwise.
 *
 * The path must have already been setup for deleting the leaf, including
 * all the proper balancing.  path->nodes[1] must be locked.
 */
static noinline int btrfs_del_leaf(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct extent_buffer *leaf)
{
	int ret;

	WARN_ON(btrfs_header_generation(leaf) != trans->transid);
	ret = del_ptr(trans, root, path, 1, path->slots[1]);
	if (ret)
		return ret;

	/*
	 * btrfs_free_extent is expensive, we want to make sure we
	 * aren't holding any locks when we call it
	 */
	btrfs_unlock_up_safe(path, 0);

	root_sub_used(root, leaf->len);

	btrfs_free_tree_block(trans, root, leaf, 0, 1, 0);
	return 0;
}
/*
 * delete the item at the leaf level in path.  If that empties
 * the leaf, remove it from the tree
 */
int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct btrfs_path *path, int slot, int nr)
{
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	int last_off;
	int dsize = 0;
	int ret = 0;
	int wret;
	int i;
	u32 nritems;

	leaf = path->nodes[0];
	last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);

	for (i = 0; i < nr; i++)
		dsize += btrfs_item_size_nr(leaf, slot + i);

	nritems = btrfs_header_nritems(leaf);

	if (slot + nr != nritems) {
		int data_end = leaf_data_end(root, leaf);

		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
			      data_end + dsize,
			      btrfs_leaf_data(leaf) + data_end,
			      last_off - data_end);

		for (i = slot + nr; i < nritems; i++) {
			u32 ioff;

			item = btrfs_item_nr(leaf, i);
			ioff = btrfs_item_offset(leaf, item);
			btrfs_set_item_offset(leaf, item, ioff + dsize);
		}

		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
			      btrfs_item_nr_offset(slot + nr),
			      sizeof(struct btrfs_item) *
			      (nritems - slot - nr));
	}
	btrfs_set_header_nritems(leaf, nritems - nr);
	nritems -= nr;

	/* delete the leaf if we've emptied it */
	if (nritems == 0) {
		if (leaf == root->node) {
			btrfs_set_header_level(leaf, 0);
		} else {
			btrfs_set_path_blocking(path);
			clean_tree_block(trans, root, leaf);
			ret = btrfs_del_leaf(trans, root, path, leaf);
			BUG_ON(ret);
		}
	} else {
		int used = leaf_space_used(leaf, 0, nritems);
		if (slot == 0) {
			struct btrfs_disk_key disk_key;

			btrfs_item_key(leaf, &disk_key, 0);
			wret = fixup_low_keys(trans, root, path,
					      &disk_key, 1);
			if (wret)
				ret = wret;
		}

		/* delete the leaf if it is mostly empty */
		if (used < BTRFS_LEAF_DATA_SIZE(root) / 3) {
			/* push_leaf_left fixes the path.
			 * make sure the path still points to our leaf
			 * for possible call to del_ptr below
			 */
			slot = path->slots[1];
			extent_buffer_get(leaf);

			btrfs_set_path_blocking(path);
			wret = push_leaf_left(trans, root, path, 1, 1,
					      1, (u32)-1);
			if (wret < 0 && wret != -ENOSPC)
				ret = wret;

			if (path->nodes[0] == leaf &&
			    btrfs_header_nritems(leaf)) {
				wret = push_leaf_right(trans, root, path, 1,
						       1, 1, 0);
				if (wret < 0 && wret != -ENOSPC)
					ret = wret;
			}

			if (btrfs_header_nritems(leaf) == 0) {
				path->slots[1] = slot;
				ret = btrfs_del_leaf(trans, root, path, leaf);
				BUG_ON(ret);
				free_extent_buffer(leaf);
			} else {
				/* if we're still in the path, make sure
				 * we're dirty.  Otherwise, one of the
				 * push_leaf functions must have already
				 * dirtied this buffer
				 */
				if (path->nodes[0] == leaf)
					btrfs_mark_buffer_dirty(leaf);
				free_extent_buffer(leaf);
			}
		} else {
			btrfs_mark_buffer_dirty(leaf);
		}
	}
	return ret;
}
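/*
 * Illustrative sketch, not part of the original file: deleting the single
 * item the path currently points at.  This mirrors the btrfs_del_item()
 * wrapper in ctree.h, which passes nr = 1; the helper name here is
 * invented for the example.
 */
static int __maybe_unused example_del_current(struct btrfs_trans_handle *trans,
					      struct btrfs_root *root,
					      struct btrfs_path *path)
{
	return btrfs_del_items(trans, root, path, path->slots[0], 1);
}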
/*
 * search the tree again to find a leaf with lesser keys
 * returns 0 if it found something or 1 if there are no lesser leaves.
 * returns < 0 on io errors.
 *
 * This may release the path, and so you may lose any locks held at the
 * time you call it.
 */
int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
{
	struct btrfs_key key;
	struct btrfs_disk_key found_key;
	int ret;

	btrfs_item_key_to_cpu(path->nodes[0], &key, 0);

	if (key.offset > 0)
		key.offset--;
	else if (key.type > 0)
		key.type--;
	else if (key.objectid > 0)
		key.objectid--;
	else
		return 1;

	btrfs_release_path(path);
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	btrfs_item_key(path->nodes[0], &found_key, 0);
	ret = comp_keys(&found_key, &key);
	if (ret < 0)
		return 0;
	return 1;
}
/*
 * A helper function to walk down the tree starting at min_key, and looking
 * for nodes or leaves that are either in cache or have a minimum
 * transaction id.  This is used by the btree defrag code, and tree logging
 *
 * This does not cow, but it does stuff the starting key it finds back
 * into min_key, so you can call btrfs_search_slot with cow=1 on the
 * key and get a writable path.
 *
 * This does lock as it descends, and path->keep_locks should be set
 * to 1 by the caller.
 *
 * This honors path->lowest_level to prevent descent past a given level
 * of the tree.
 *
 * min_trans indicates the oldest transaction that you are interested
 * in walking through.  Any nodes or leaves older than min_trans are
 * skipped over (without reading them).
 *
 * returns zero if something useful was found, < 0 on error and 1 if there
 * was nothing in the tree that matched the search criteria.
 */
int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
			 struct btrfs_key *max_key,
			 struct btrfs_path *path, int cache_only,
			 u64 min_trans)
{
	struct extent_buffer *cur;
	struct btrfs_key found_key;
	int slot;
	int sret;
	u32 nritems;
	int level;
	int ret = 1;

	WARN_ON(!path->keep_locks);
again:
	cur = btrfs_read_lock_root_node(root);
	level = btrfs_header_level(cur);
	WARN_ON(path->nodes[level]);
	path->nodes[level] = cur;
	path->locks[level] = BTRFS_READ_LOCK;

	if (btrfs_header_generation(cur) < min_trans) {
		ret = 1;
		goto out;
	}
	while (1) {
		nritems = btrfs_header_nritems(cur);
		level = btrfs_header_level(cur);
		sret = bin_search(cur, min_key, level, &slot);

		/* at the lowest level, we're done, setup the path and exit */
		if (level == path->lowest_level) {
			if (slot >= nritems)
				goto find_next_key;
			ret = 0;
			path->slots[level] = slot;
			btrfs_item_key_to_cpu(cur, &found_key, slot);
			goto out;
		}
		if (sret && slot > 0)
			slot--;
		/*
		 * check this node pointer against the cache_only and
		 * min_trans parameters.  If it isn't in cache or is too
		 * old, skip to the next one.
		 */
		while (slot < nritems) {
			u64 blockptr;
			u64 gen;
			struct extent_buffer *tmp;
			struct btrfs_disk_key disk_key;

			blockptr = btrfs_node_blockptr(cur, slot);
			gen = btrfs_node_ptr_generation(cur, slot);
			if (gen < min_trans) {
				slot++;
				continue;
			}
			if (!cache_only)
				break;

			if (max_key) {
				btrfs_node_key(cur, &disk_key, slot);
				if (comp_keys(&disk_key, max_key) >= 0) {
					ret = 1;
					goto out;
				}
			}

			tmp = btrfs_find_tree_block(root, blockptr,
					    btrfs_level_size(root, level - 1));

			if (tmp && btrfs_buffer_uptodate(tmp, gen)) {
				free_extent_buffer(tmp);
				break;
			}
			if (tmp)
				free_extent_buffer(tmp);
			slot++;
		}
find_next_key:
		/*
		 * we didn't find a candidate key in this node, walk forward
		 * and find another one
		 */
		if (slot >= nritems) {
			path->slots[level] = slot;
			btrfs_set_path_blocking(path);
			sret = btrfs_find_next_key(root, path, min_key, level,
						  cache_only, min_trans);
			if (sret == 0) {
				btrfs_release_path(path);
				goto again;
			} else {
				goto out;
			}
		}
		/* save our key for returning back */
		btrfs_node_key_to_cpu(cur, &found_key, slot);
		path->slots[level] = slot;
		if (level == path->lowest_level) {
			ret = 0;
			unlock_up(path, level, 1);
			goto out;
		}
		btrfs_set_path_blocking(path);
		cur = read_node_slot(root, cur, slot);
		BUG_ON(!cur);

		btrfs_tree_read_lock(cur);

		path->locks[level - 1] = BTRFS_READ_LOCK;
		path->nodes[level - 1] = cur;
		unlock_up(path, level, 1);
		btrfs_clear_path_blocking(path, NULL, 0);
	}
out:
	if (ret == 0)
		memcpy(min_key, &found_key, sizeof(found_key));
	btrfs_set_path_blocking(path);
	return ret;
}
/*
 * this is similar to btrfs_next_leaf, but does not try to preserve
 * and fixup the path.  It looks for and returns the next key in the
 * tree based on the current path and the cache_only and min_trans
 * parameters.
 *
 * 0 is returned if another key is found, < 0 if there are any errors
 * and 1 is returned if there are no higher keys in the tree
 *
 * path->keep_locks should be set to 1 on the search made before
 * calling this function.
 */
int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
			struct btrfs_key *key, int level,
			int cache_only, u64 min_trans)
{
	int slot;
	struct extent_buffer *c;

	WARN_ON(!path->keep_locks);
	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level])
			return 1;

		slot = path->slots[level] + 1;
		c = path->nodes[level];
next:
		if (slot >= btrfs_header_nritems(c)) {
			int ret;
			int orig_lowest;
			struct btrfs_key cur_key;
			if (level + 1 >= BTRFS_MAX_LEVEL ||
			    !path->nodes[level + 1])
				return 1;

			if (path->locks[level + 1]) {
				level++;
				continue;
			}

			slot = btrfs_header_nritems(c) - 1;
			if (level == 0)
				btrfs_item_key_to_cpu(c, &cur_key, slot);
			else
				btrfs_node_key_to_cpu(c, &cur_key, slot);

			orig_lowest = path->lowest_level;
			btrfs_release_path(path);
			path->lowest_level = level;
			ret = btrfs_search_slot(NULL, root, &cur_key, path,
						0, 0);
			path->lowest_level = orig_lowest;
			if (ret < 0)
				return ret;

			c = path->nodes[level];
			slot = path->slots[level];
			if (ret == 0)
				slot++;
			goto next;
		}

		if (level == 0) {
			btrfs_item_key_to_cpu(c, key, slot);
		} else {
			u64 blockptr = btrfs_node_blockptr(c, slot);
			u64 gen = btrfs_node_ptr_generation(c, slot);

			if (cache_only) {
				struct extent_buffer *cur;
				cur = btrfs_find_tree_block(root, blockptr,
					    btrfs_level_size(root, level - 1));
				if (!cur || !btrfs_buffer_uptodate(cur, gen)) {
					slot++;
					if (cur)
						free_extent_buffer(cur);
					goto next;
				}
				free_extent_buffer(cur);
			}
			if (gen < min_trans) {
				slot++;
				goto next;
			}
			btrfs_node_key_to_cpu(c, key, slot);
		}
		return 0;
	}
	return 1;
}
/*
 * search the tree again to find a leaf with greater keys
 * returns 0 if it found something or 1 if there are no greater leaves.
 * returns < 0 on io errors.
 */
int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
{
	int slot;
	int level;
	struct extent_buffer *c;
	struct extent_buffer *next;
	struct btrfs_key key;
	u32 nritems;
	int ret;
	int old_spinning = path->leave_spinning;
	int next_rw_lock = 0;

	nritems = btrfs_header_nritems(path->nodes[0]);
	if (nritems == 0)
		return 1;

	btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
again:
	level = 1;
	next = NULL;
	next_rw_lock = 0;
	btrfs_release_path(path);

	path->keep_locks = 1;
	path->leave_spinning = 1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	path->keep_locks = 0;

	if (ret < 0)
		return ret;

	nritems = btrfs_header_nritems(path->nodes[0]);
	/*
	 * by releasing the path above we dropped all our locks.  A balance
	 * could have added more items next to the key that used to be
	 * at the very end of the block.  So, check again here and
	 * advance the path if there are now more items available.
	 */
	if (nritems > 0 && path->slots[0] < nritems - 1) {
		if (ret == 0)
			path->slots[0]++;
		ret = 0;
		goto done;
	}

	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level]) {
			ret = 1;
			goto done;
		}

		slot = path->slots[level] + 1;
		c = path->nodes[level];
		if (slot >= btrfs_header_nritems(c)) {
			level++;
			if (level == BTRFS_MAX_LEVEL) {
				ret = 1;
				goto done;
			}
			continue;
		}

		if (next) {
			btrfs_tree_unlock_rw(next, next_rw_lock);
			free_extent_buffer(next);
		}

		next = c;
		next_rw_lock = path->locks[level];
		ret = read_block_for_search(NULL, root, path, &next, level,
					    slot, &key);
		if (ret == -EAGAIN)
			goto again;

		if (ret < 0) {
			btrfs_release_path(path);
			goto done;
		}

		if (!path->skip_locking) {
			ret = btrfs_try_tree_read_lock(next);
			if (!ret) {
				btrfs_set_path_blocking(path);
				btrfs_tree_read_lock(next);
				btrfs_clear_path_blocking(path, next,
							  BTRFS_READ_LOCK);
			}
			next_rw_lock = BTRFS_READ_LOCK;
		}
		break;
	}
	path->slots[level] = slot;
	while (1) {
		level--;
		c = path->nodes[level];
		if (path->locks[level])
			btrfs_tree_unlock_rw(c, path->locks[level]);

		free_extent_buffer(c);
		path->nodes[level] = next;
		path->slots[level] = 0;
		if (!path->skip_locking)
			path->locks[level] = next_rw_lock;
		if (!level)
			break;

		ret = read_block_for_search(NULL, root, path, &next, level,
					    0, &key);
		if (ret == -EAGAIN)
			goto again;

		if (ret < 0) {
			btrfs_release_path(path);
			goto done;
		}

		if (!path->skip_locking) {
			ret = btrfs_try_tree_read_lock(next);
			if (!ret) {
				btrfs_set_path_blocking(path);
				btrfs_tree_read_lock(next);
				btrfs_clear_path_blocking(path, next,
							  BTRFS_READ_LOCK);
			}
			next_rw_lock = BTRFS_READ_LOCK;
		}
	}
	ret = 0;
done:
	unlock_up(path, 0, 1);
	path->leave_spinning = old_spinning;
	if (!old_spinning)
		btrfs_set_path_blocking(path);

	return ret;
}
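/*
 * Illustrative sketch, not part of the original file: the common pattern
 * for scanning every item in a tree with btrfs_next_leaf().
 * 'process_item' is a hypothetical callback and the helper name is
 * invented; error handling is reduced to the minimum needed to show the
 * loop structure.
 */
static int __maybe_unused example_walk_tree(struct btrfs_root *root,
				int (*process_item)(struct extent_buffer *leaf,
						    int slot))
{
	struct btrfs_path *path;
	struct btrfs_key key = { 0 };
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* find the first item at or after the all-zero key */
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
			/* walked off this leaf, move to the next one */
			ret = btrfs_next_leaf(root, path);
			if (ret)	/* 1 == no more leaves, < 0 == error */
				break;
			continue;
		}
		ret = process_item(path->nodes[0], path->slots[0]);
		if (ret)
			break;
		path->slots[0]++;
	}
out:
	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}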
/*
 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
 * searching until it gets past min_objectid or finds an item of 'type'
 *
 * returns 0 if something is found, 1 if nothing was found and < 0 on error
 */
int btrfs_previous_item(struct btrfs_root *root,
			struct btrfs_path *path, u64 min_objectid,
			int type)
{
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;

	while (1) {
		if (path->slots[0] == 0) {
			btrfs_set_path_blocking(path);
			ret = btrfs_prev_leaf(root, path);
			if (ret != 0)
				return ret;
		} else {
			path->slots[0]--;
		}
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (nritems == 0)
			return 1;
		if (path->slots[0] == nritems)
			path->slots[0]--;

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid < min_objectid)
			break;
		if (found_key.type == type)
			return 0;
		if (found_key.objectid == min_objectid &&
		    found_key.type < type)
			break;
	}
	return 1;
}