// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007,2008 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "locking.h"
static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_path *path, int level);
static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      const struct btrfs_key *ins_key, struct btrfs_path *path,
		      int data_size, int extend);
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct btrfs_fs_info *fs_info,
			  struct extent_buffer *dst,
			  struct extent_buffer *src, int empty);
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info,
			      struct extent_buffer *dst_buf,
			      struct extent_buffer *src_buf);
static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
		    int level, int slot);
struct btrfs_path *btrfs_alloc_path(void)
{
	return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
}
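/*
 * Usage sketch (illustrative only, not part of the original file): callers
 * normally pair btrfs_alloc_path() with btrfs_free_path(), and may call
 * btrfs_release_path() in between to drop locks and references while keeping
 * the path structure for another search.  The surrounding caller here is
 * hypothetical; only the helpers themselves are defined in this file.
 *
 *	struct btrfs_path *path;
 *
 *	path = btrfs_alloc_path();
 *	if (!path)
 *		return -ENOMEM;
 *	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
 *	...
 *	btrfs_free_path(path);
 */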
/*
 * set all locked nodes in the path to blocking locks.  This should
 * be done before scheduling
 */
noinline void btrfs_set_path_blocking(struct btrfs_path *p)
{
	int i;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		if (!p->nodes[i] || !p->locks[i])
			continue;
		btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]);
		if (p->locks[i] == BTRFS_READ_LOCK)
			p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
		else if (p->locks[i] == BTRFS_WRITE_LOCK)
			p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
	}
}
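/*
 * Illustrative pairing (an assumption about typical callers, not code from
 * this file): code that is about to sleep while holding a path usually does
 *
 *	btrfs_set_path_blocking(p);
 *	...  sleeping work such as memory allocation or reading a block  ...
 *	btrfs_clear_path_blocking(p, NULL, 0);
 *
 * which is the pattern the search helpers further down in this file follow
 * around node splitting and balancing.
 */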
/*
 * reset all the locked nodes in the path to spinning locks.
 *
 * held is used to keep lockdep happy, when lockdep is enabled
 * we set held to a blocking lock before we go around and
 * retake all the spinlocks in the path.  You can safely use NULL
 * for held
 */
noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
					struct extent_buffer *held, int held_rw)
{
	int i;

	if (held) {
		btrfs_set_lock_blocking_rw(held, held_rw);
		if (held_rw == BTRFS_WRITE_LOCK)
			held_rw = BTRFS_WRITE_LOCK_BLOCKING;
		else if (held_rw == BTRFS_READ_LOCK)
			held_rw = BTRFS_READ_LOCK_BLOCKING;
	}
	btrfs_set_path_blocking(p);

	for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
		if (p->nodes[i] && p->locks[i]) {
			btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]);
			if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING)
				p->locks[i] = BTRFS_WRITE_LOCK;
			else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING)
				p->locks[i] = BTRFS_READ_LOCK;
		}
	}

	if (held)
		btrfs_clear_lock_blocking_rw(held, held_rw);
}
/* this also releases the path */
void btrfs_free_path(struct btrfs_path *p)
{
	if (!p)
		return;
	btrfs_release_path(p);
	kmem_cache_free(btrfs_path_cachep, p);
}
/*
 * path release drops references on the extent buffers in the path
 * and it drops any locks held by this path
 *
 * It is safe to call this on paths where no locks or extent buffers are held.
 */
noinline void btrfs_release_path(struct btrfs_path *p)
{
	int i;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		p->slots[i] = 0;
		if (!p->nodes[i])
			continue;
		if (p->locks[i]) {
			btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
			p->locks[i] = 0;
		}
		free_extent_buffer(p->nodes[i]);
		p->nodes[i] = NULL;
	}
}
/*
 * safely gets a reference on the root node of a tree.  A lock
 * is not taken, so a concurrent writer may put a different node
 * at the root of the tree.  See btrfs_lock_root_node for the
 * looping required.
 *
 * The extent buffer returned by this has a reference taken, so
 * it won't disappear.  It may stop being the root of the tree
 * at any time because there are no locks held.
 */
struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		rcu_read_lock();
		eb = rcu_dereference(root->node);

		/*
		 * RCU really hurts here, we could free up the root node because
		 * it was COWed but we may not get the new root node yet so do
		 * the inc_not_zero dance and if it doesn't work then
		 * synchronize_rcu and try again.
		 */
		if (atomic_inc_not_zero(&eb->refs)) {
			rcu_read_unlock();
			break;
		}
		rcu_read_unlock();
		synchronize_rcu();
	}
	return eb;
}
/* loop around taking references on and locking the root node of the
 * tree until you end up with a lock on the root.  A locked buffer
 * is returned, with a reference held.
 */
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}
/* loop around taking references on and locking the root node of the
 * tree until you end up with a lock on the root.  A locked buffer
 * is returned, with a reference held.
 */
struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_read_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}
/* cowonly root (everything not a reference counted cow subvolume), just get
 * put onto a simple dirty list.  transaction.c walks this to make sure they
 * get properly updated on disk.
 */
static void add_root_to_dirty_list(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (test_bit(BTRFS_ROOT_DIRTY, &root->state) ||
	    !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state))
		return;

	spin_lock(&fs_info->trans_lock);
	if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) {
		/* Want the extent tree to be the last on the list */
		if (root->objectid == BTRFS_EXTENT_TREE_OBJECTID)
			list_move_tail(&root->dirty_list,
				       &fs_info->dirty_cowonly_roots);
		else
			list_move(&root->dirty_list,
				  &fs_info->dirty_cowonly_roots);
	}
	spin_unlock(&fs_info->trans_lock);
}
/*
 * used by snapshot creation to make a copy of a root for a tree with
 * a given objectid.  The buffer with the new root node is returned in
 * cow_ret, and this func returns zero on success or a negative error code.
 */
int btrfs_copy_root(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root,
		      struct extent_buffer *buf,
		      struct extent_buffer **cow_ret, u64 new_root_objectid)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *cow;
	int ret = 0;
	int level;
	struct btrfs_disk_key disk_key;

	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != fs_info->running_transaction->transid);
	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != root->last_trans);

	level = btrfs_header_level(buf);
	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	cow = btrfs_alloc_tree_block(trans, root, 0, new_root_objectid,
			&disk_key, level, buf->start, 0);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	copy_extent_buffer_full(cow, buf);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, new_root_objectid);

	write_extent_buffer_fsid(cow, fs_info->fsid);

	WARN_ON(btrfs_header_generation(buf) > trans->transid);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		ret = btrfs_inc_ref(trans, root, cow, 1);
	else
		ret = btrfs_inc_ref(trans, root, cow, 0);

	if (ret)
		return ret;

	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}
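/*
 * Caller sketch (an assumption for illustration; the actual call site lives
 * in transaction.c, not in this file): snapshot creation copies the locked
 * source root node into a new block owned by the snapshot's objectid,
 * roughly as
 *
 *	old = btrfs_lock_root_node(root);
 *	ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
 *
 * and then writes 'tmp' out as the root of the new snapshot.
 */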
enum mod_log_op {
	MOD_LOG_KEY_REPLACE,
	MOD_LOG_KEY_ADD,
	MOD_LOG_KEY_REMOVE,
	MOD_LOG_KEY_REMOVE_WHILE_FREEING,
	MOD_LOG_KEY_REMOVE_WHILE_MOVING,
	MOD_LOG_MOVE_KEYS,
	MOD_LOG_ROOT_REPLACE,
};

struct tree_mod_root {
	u64 logical;
	u8 level;
};

struct tree_mod_elem {
	struct rb_node node;
	u64 logical;
	u64 seq;
	enum mod_log_op op;

	/* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
	int slot;

	/* this is used for MOD_LOG_KEY* and MOD_LOG_ROOT_REPLACE */
	u64 generation;

	/* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
	struct btrfs_disk_key key;
	u64 blockptr;

	/* this is used for op == MOD_LOG_MOVE_KEYS */
	struct {
		int dst_slot;
		int nr_items;
	} move;

	/* this is used for op == MOD_LOG_ROOT_REPLACE */
	struct tree_mod_root old_root;
};
/*
 * Pull a new tree mod seq number for our operation.
 */
static inline u64 btrfs_inc_tree_mod_seq(struct btrfs_fs_info *fs_info)
{
	return atomic64_inc_return(&fs_info->tree_mod_seq);
}
/*
 * This adds a new blocker to the tree mod log's blocker list if the @elem
 * passed does not already have a sequence number set. So when a caller expects
 * to record tree modifications, it should ensure to set elem->seq to zero
 * before calling btrfs_get_tree_mod_seq.
 * Returns a fresh, unused tree log modification sequence number, even if no new
 * blocker was added.
 */
u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
			   struct seq_list *elem)
{
	write_lock(&fs_info->tree_mod_log_lock);
	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!elem->seq) {
		elem->seq = btrfs_inc_tree_mod_seq(fs_info);
		list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);
	write_unlock(&fs_info->tree_mod_log_lock);

	return elem->seq;
}
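/*
 * Usage sketch (illustrative, not from this file): callers that need a
 * stable view of past tree states, e.g. backref walking, bracket their work
 * with a seq_list element so btrfs_put_tree_mod_seq() below knows which log
 * entries are still needed:
 *
 *	struct seq_list elem = SEQ_LIST_INIT(elem);
 *
 *	btrfs_get_tree_mod_seq(fs_info, &elem);
 *	...  use elem.seq with the rewind helpers in this file  ...
 *	btrfs_put_tree_mod_seq(fs_info, &elem);
 */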
void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
			    struct seq_list *elem)
{
	struct rb_root *tm_root;
	struct rb_node *node;
	struct rb_node *next;
	struct seq_list *cur_elem;
	struct tree_mod_elem *tm;
	u64 min_seq = (u64)-1;
	u64 seq_putting = elem->seq;

	if (!seq_putting)
		return;

	spin_lock(&fs_info->tree_mod_seq_lock);
	list_del(&elem->list);
	elem->seq = 0;

	list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) {
		if (cur_elem->seq < min_seq) {
			if (seq_putting > cur_elem->seq) {
				/*
				 * blocker with lower sequence number exists, we
				 * cannot remove anything from the log
				 */
				spin_unlock(&fs_info->tree_mod_seq_lock);
				return;
			}
			min_seq = cur_elem->seq;
		}
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);

	/*
	 * anything that's lower than the lowest existing (read: blocked)
	 * sequence number can be removed from the tree.
	 */
	write_lock(&fs_info->tree_mod_log_lock);
	tm_root = &fs_info->tree_mod_log;
	for (node = rb_first(tm_root); node; node = next) {
		next = rb_next(node);
		tm = rb_entry(node, struct tree_mod_elem, node);
		if (tm->seq > min_seq)
			continue;
		rb_erase(node, tm_root);
		kfree(tm);
	}
	write_unlock(&fs_info->tree_mod_log_lock);
}
/*
 * key order of the log:
 *       node/leaf start address -> sequence
 *
 * The 'start address' is the logical address of the *new* root node
 * for root replace operations, or the logical address of the affected
 * block for all other operations.
 *
 * Note: must be called with write lock for fs_info::tree_mod_log_lock.
 */
static noinline int
__tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
{
	struct rb_root *tm_root;
	struct rb_node **new;
	struct rb_node *parent = NULL;
	struct tree_mod_elem *cur;

	tm->seq = btrfs_inc_tree_mod_seq(fs_info);

	tm_root = &fs_info->tree_mod_log;
	new = &tm_root->rb_node;
	while (*new) {
		cur = rb_entry(*new, struct tree_mod_elem, node);
		parent = *new;
		if (cur->logical < tm->logical)
			new = &((*new)->rb_left);
		else if (cur->logical > tm->logical)
			new = &((*new)->rb_right);
		else if (cur->seq < tm->seq)
			new = &((*new)->rb_left);
		else if (cur->seq > tm->seq)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&tm->node, parent, new);
	rb_insert_color(&tm->node, tm_root);
	return 0;
}
/*
 * Determines if logging can be omitted. Returns 1 if it can. Otherwise, it
 * returns zero with the tree_mod_log_lock acquired. The caller must hold
 * this until all tree mod log insertions are recorded in the rb tree and then
 * write unlock fs_info::tree_mod_log_lock.
 */
static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
				    struct extent_buffer *eb) {
	smp_mb();
	if (list_empty(&(fs_info)->tree_mod_seq_list))
		return 1;
	if (eb && btrfs_header_level(eb) == 0)
		return 1;

	write_lock(&fs_info->tree_mod_log_lock);
	if (list_empty(&(fs_info)->tree_mod_seq_list)) {
		write_unlock(&fs_info->tree_mod_log_lock);
		return 1;
	}

	return 0;
}
/* Similar to tree_mod_dont_log, but doesn't acquire any locks. */
static inline int tree_mod_need_log(const struct btrfs_fs_info *fs_info,
				    struct extent_buffer *eb)
{
	smp_mb();
	if (list_empty(&(fs_info)->tree_mod_seq_list))
		return 0;
	if (eb && btrfs_header_level(eb) == 0)
		return 0;

	return 1;
}
static struct tree_mod_elem *
alloc_tree_mod_elem(struct extent_buffer *eb, int slot,
		    enum mod_log_op op, gfp_t flags)
{
	struct tree_mod_elem *tm;

	tm = kzalloc(sizeof(*tm), flags);
	if (!tm)
		return NULL;

	tm->logical = eb->start;
	if (op != MOD_LOG_KEY_ADD) {
		btrfs_node_key(eb, &tm->key, slot);
		tm->blockptr = btrfs_node_blockptr(eb, slot);
	}
	tm->op = op;
	tm->slot = slot;
	tm->generation = btrfs_node_ptr_generation(eb, slot);
	RB_CLEAR_NODE(&tm->node);

	return tm;
}
static noinline int tree_mod_log_insert_key(struct extent_buffer *eb, int slot,
		enum mod_log_op op, gfp_t flags)
{
	struct tree_mod_elem *tm;
	int ret;

	if (!tree_mod_need_log(eb->fs_info, eb))
		return 0;

	tm = alloc_tree_mod_elem(eb, slot, op, flags);
	if (!tm)
		return -ENOMEM;

	if (tree_mod_dont_log(eb->fs_info, eb)) {
		kfree(tm);
		return 0;
	}

	ret = __tree_mod_log_insert(eb->fs_info, tm);
	write_unlock(&eb->fs_info->tree_mod_log_lock);
	if (ret)
		kfree(tm);

	return ret;
}
static noinline int tree_mod_log_insert_move(struct extent_buffer *eb,
		int dst_slot, int src_slot, int nr_items)
{
	struct tree_mod_elem *tm = NULL;
	struct tree_mod_elem **tm_list = NULL;
	int ret = 0;
	int i;
	int locked = 0;

	if (!tree_mod_need_log(eb->fs_info, eb))
		return 0;

	tm_list = kcalloc(nr_items, sizeof(struct tree_mod_elem *), GFP_NOFS);
	if (!tm_list)
		return -ENOMEM;

	tm = kzalloc(sizeof(*tm), GFP_NOFS);
	if (!tm) {
		ret = -ENOMEM;
		goto free_tms;
	}

	tm->logical = eb->start;
	tm->slot = src_slot;
	tm->move.dst_slot = dst_slot;
	tm->move.nr_items = nr_items;
	tm->op = MOD_LOG_MOVE_KEYS;

	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
		tm_list[i] = alloc_tree_mod_elem(eb, i + dst_slot,
		    MOD_LOG_KEY_REMOVE_WHILE_MOVING, GFP_NOFS);
		if (!tm_list[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}
	}

	if (tree_mod_dont_log(eb->fs_info, eb))
		goto free_tms;
	locked = 1;

	/*
	 * When we override something during the move, we log these removals.
	 * This can only happen when we move towards the beginning of the
	 * buffer, i.e. dst_slot < src_slot.
	 */
	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
		ret = __tree_mod_log_insert(eb->fs_info, tm_list[i]);
		if (ret)
			goto free_tms;
	}

	ret = __tree_mod_log_insert(eb->fs_info, tm);
	if (ret)
		goto free_tms;
	write_unlock(&eb->fs_info->tree_mod_log_lock);
	kfree(tm_list);

	return 0;
free_tms:
	for (i = 0; i < nr_items; i++) {
		if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
			rb_erase(&tm_list[i]->node, &eb->fs_info->tree_mod_log);
		kfree(tm_list[i]);
	}
	if (locked)
		write_unlock(&eb->fs_info->tree_mod_log_lock);
	kfree(tm_list);
	kfree(tm);

	return ret;
}
static inline int
__tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
		       struct tree_mod_elem **tm_list,
		       int nritems)
{
	int i, j;
	int ret;

	for (i = nritems - 1; i >= 0; i--) {
		ret = __tree_mod_log_insert(fs_info, tm_list[i]);
		if (ret) {
			for (j = nritems - 1; j > i; j--)
				rb_erase(&tm_list[j]->node,
					 &fs_info->tree_mod_log);
			return ret;
		}
	}

	return 0;
}
static noinline int tree_mod_log_insert_root(struct extent_buffer *old_root,
			 struct extent_buffer *new_root, int log_removal)
{
	struct btrfs_fs_info *fs_info = old_root->fs_info;
	struct tree_mod_elem *tm = NULL;
	struct tree_mod_elem **tm_list = NULL;
	int nritems = 0;
	int ret = 0;
	int i;

	if (!tree_mod_need_log(fs_info, NULL))
		return 0;

	if (log_removal && btrfs_header_level(old_root) > 0) {
		nritems = btrfs_header_nritems(old_root);
		tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *),
				  GFP_NOFS);
		if (!tm_list) {
			ret = -ENOMEM;
			goto free_tms;
		}
		for (i = 0; i < nritems; i++) {
			tm_list[i] = alloc_tree_mod_elem(old_root, i,
			    MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
			if (!tm_list[i]) {
				ret = -ENOMEM;
				goto free_tms;
			}
		}
	}

	tm = kzalloc(sizeof(*tm), GFP_NOFS);
	if (!tm) {
		ret = -ENOMEM;
		goto free_tms;
	}

	tm->logical = new_root->start;
	tm->old_root.logical = old_root->start;
	tm->old_root.level = btrfs_header_level(old_root);
	tm->generation = btrfs_header_generation(old_root);
	tm->op = MOD_LOG_ROOT_REPLACE;

	if (tree_mod_dont_log(fs_info, NULL))
		goto free_tms;

	if (tm_list)
		ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
	if (!ret)
		ret = __tree_mod_log_insert(fs_info, tm);

	write_unlock(&fs_info->tree_mod_log_lock);
	if (ret)
		goto free_tms;
	kfree(tm_list);

	return ret;

free_tms:
	if (tm_list) {
		for (i = 0; i < nritems; i++)
			kfree(tm_list[i]);
		kfree(tm_list);
	}
	kfree(tm);

	return ret;
}
static struct tree_mod_elem *
__tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
		      int smallest)
{
	struct rb_root *tm_root;
	struct rb_node *node;
	struct tree_mod_elem *cur = NULL;
	struct tree_mod_elem *found = NULL;

	read_lock(&fs_info->tree_mod_log_lock);
	tm_root = &fs_info->tree_mod_log;
	node = tm_root->rb_node;
	while (node) {
		cur = rb_entry(node, struct tree_mod_elem, node);
		if (cur->logical < start) {
			node = node->rb_left;
		} else if (cur->logical > start) {
			node = node->rb_right;
		} else if (cur->seq < min_seq) {
			node = node->rb_left;
		} else if (!smallest) {
			/* we want the node with the highest seq */
			if (found)
				BUG_ON(found->seq > cur->seq);
			found = cur;
			node = node->rb_left;
		} else if (cur->seq > min_seq) {
			/* we want the node with the smallest seq */
			if (found)
				BUG_ON(found->seq < cur->seq);
			found = cur;
			node = node->rb_right;
		} else {
			found = cur;
			break;
		}
	}
	read_unlock(&fs_info->tree_mod_log_lock);

	return found;
}
/*
 * this returns the element from the log with the smallest time sequence
 * value that's in the log (the oldest log item). any element with a time
 * sequence lower than min_seq will be ignored.
 */
static struct tree_mod_elem *
tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start,
			   u64 min_seq)
{
	return __tree_mod_log_search(fs_info, start, min_seq, 1);
}

/*
 * this returns the element from the log with the largest time sequence
 * value that's in the log (the most recent log item). any element with
 * a time sequence lower than min_seq will be ignored.
 */
static struct tree_mod_elem *
tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
{
	return __tree_mod_log_search(fs_info, start, min_seq, 0);
}
static noinline int
tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
		     struct extent_buffer *src, unsigned long dst_offset,
		     unsigned long src_offset, int nr_items)
{
	int ret = 0;
	struct tree_mod_elem **tm_list = NULL;
	struct tree_mod_elem **tm_list_add, **tm_list_rem;
	int i;
	int locked = 0;

	if (!tree_mod_need_log(fs_info, NULL))
		return 0;

	if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
		return 0;

	tm_list = kcalloc(nr_items * 2, sizeof(struct tree_mod_elem *),
			  GFP_NOFS);
	if (!tm_list)
		return -ENOMEM;

	tm_list_add = tm_list;
	tm_list_rem = tm_list + nr_items;
	for (i = 0; i < nr_items; i++) {
		tm_list_rem[i] = alloc_tree_mod_elem(src, i + src_offset,
		    MOD_LOG_KEY_REMOVE, GFP_NOFS);
		if (!tm_list_rem[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}

		tm_list_add[i] = alloc_tree_mod_elem(dst, i + dst_offset,
		    MOD_LOG_KEY_ADD, GFP_NOFS);
		if (!tm_list_add[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}
	}

	if (tree_mod_dont_log(fs_info, NULL))
		goto free_tms;
	locked = 1;

	for (i = 0; i < nr_items; i++) {
		ret = __tree_mod_log_insert(fs_info, tm_list_rem[i]);
		if (ret)
			goto free_tms;
		ret = __tree_mod_log_insert(fs_info, tm_list_add[i]);
		if (ret)
			goto free_tms;
	}

	write_unlock(&fs_info->tree_mod_log_lock);
	kfree(tm_list);

	return 0;

free_tms:
	for (i = 0; i < nr_items * 2; i++) {
		if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
			rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
		kfree(tm_list[i]);
	}
	if (locked)
		write_unlock(&fs_info->tree_mod_log_lock);
	kfree(tm_list);

	return ret;
}
static noinline int tree_mod_log_free_eb(struct extent_buffer *eb)
{
	struct tree_mod_elem **tm_list = NULL;
	int nritems = 0;
	int i;
	int ret = 0;

	if (btrfs_header_level(eb) == 0)
		return 0;

	if (!tree_mod_need_log(eb->fs_info, NULL))
		return 0;

	nritems = btrfs_header_nritems(eb);
	tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *), GFP_NOFS);
	if (!tm_list)
		return -ENOMEM;

	for (i = 0; i < nritems; i++) {
		tm_list[i] = alloc_tree_mod_elem(eb, i,
		    MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
		if (!tm_list[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}
	}

	if (tree_mod_dont_log(eb->fs_info, eb))
		goto free_tms;

	ret = __tree_mod_log_free_eb(eb->fs_info, tm_list, nritems);
	write_unlock(&eb->fs_info->tree_mod_log_lock);
	if (ret)
		goto free_tms;
	kfree(tm_list);

	return 0;

free_tms:
	for (i = 0; i < nritems; i++)
		kfree(tm_list[i]);
	kfree(tm_list);

	return ret;
}
/*
 * check if the tree block can be shared by multiple trees
 */
int btrfs_block_can_be_shared(struct btrfs_root *root,
			      struct extent_buffer *buf)
{
	/*
	 * Tree blocks not in reference counted trees and tree roots
	 * are never shared. If a block was allocated after the last
	 * snapshot and the block was not allocated by tree relocation,
	 * we know the block is not shared.
	 */
	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
	    buf != root->node && buf != root->commit_root &&
	    (btrfs_header_generation(buf) <=
	     btrfs_root_last_snapshot(&root->root_item) ||
	     btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
		return 1;

	return 0;
}
static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct extent_buffer *buf,
				       struct extent_buffer *cow,
				       int *last_ref)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 refs;
	u64 owner;
	u64 flags;
	u64 new_flags = 0;
	int ret;

	/*
	 * Backrefs update rules:
	 *
	 * Always use full backrefs for extent pointers in tree block
	 * allocated by tree relocation.
	 *
	 * If a shared tree block is no longer referenced by its owner
	 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
	 * use full backrefs for extent pointers in tree block.
	 *
	 * If a tree block is being relocated
	 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
	 * use full backrefs for extent pointers in tree block.
	 * The reason for this is some operations (such as drop tree)
	 * are only allowed for blocks that use full backrefs.
	 */

	if (btrfs_block_can_be_shared(root, buf)) {
		ret = btrfs_lookup_extent_info(trans, fs_info, buf->start,
					       btrfs_header_level(buf), 1,
					       &refs, &flags);
		if (ret)
			return ret;
		if (refs == 0) {
			ret = -EROFS;
			btrfs_handle_fs_error(fs_info, ret, NULL);
			return ret;
		}
	} else {
		refs = 1;
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
		else
			flags = 0;
	}

	owner = btrfs_header_owner(buf);
	BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
	       !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));

	if (refs > 1) {
		if ((owner == root->root_key.objectid ||
		     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
		    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
			ret = btrfs_inc_ref(trans, root, buf, 1);
			if (ret)
				return ret;

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID) {
				ret = btrfs_dec_ref(trans, root, buf, 0);
				if (ret)
					return ret;
				ret = btrfs_inc_ref(trans, root, cow, 1);
				if (ret)
					return ret;
			}
			new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
		} else {

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			if (ret)
				return ret;
		}
		if (new_flags != 0) {
			int level = btrfs_header_level(buf);

			ret = btrfs_set_disk_extent_flags(trans, fs_info,
							  buf->start,
							  buf->len,
							  new_flags, level, 0);
			if (ret)
				return ret;
		}
	} else {
		if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			if (ret)
				return ret;
			ret = btrfs_dec_ref(trans, root, buf, 1);
			if (ret)
				return ret;
		}
		clean_tree_block(fs_info, buf);
		*last_ref = 1;
	}
	return 0;
}
/*
 * does the dirty work in cow of a single block.  The parent block (if
 * supplied) is updated to point to the new cow copy.  The new buffer is marked
 * dirty and returned locked.  If you modify the block it needs to be marked
 * dirty again.
 *
 * search_start -- an allocation hint for the new block
 *
 * empty_size -- a hint that you plan on doing more cow.  This is the size in
 * bytes the allocator should try to find free next to the block it returns.
 * This is just a hint and may be ignored by the allocator.
 */
static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct extent_buffer *buf,
			     struct extent_buffer *parent, int parent_slot,
			     struct extent_buffer **cow_ret,
			     u64 search_start, u64 empty_size)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_disk_key disk_key;
	struct extent_buffer *cow;
	int level, ret;
	int last_ref = 0;
	int unlock_orig = 0;
	u64 parent_start = 0;

	if (*cow_ret == buf)
		unlock_orig = 1;

	btrfs_assert_tree_locked(buf);

	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != fs_info->running_transaction->transid);
	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != root->last_trans);

	level = btrfs_header_level(buf);

	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent)
		parent_start = parent->start;

	cow = btrfs_alloc_tree_block(trans, root, parent_start,
			root->root_key.objectid, &disk_key, level,
			search_start, empty_size);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	/* cow is set to blocking by btrfs_init_new_buffer */

	copy_extent_buffer_full(cow, buf);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, root->root_key.objectid);

	write_extent_buffer_fsid(cow, fs_info->fsid);

	ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) {
		ret = btrfs_reloc_cow_block(trans, root, buf, cow);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			return ret;
		}
	}

	if (buf == root->node) {
		WARN_ON(parent && parent != buf);
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			parent_start = buf->start;

		extent_buffer_get(cow);
		ret = tree_mod_log_insert_root(root->node, cow, 1);
		BUG_ON(ret < 0);
		rcu_assign_pointer(root->node, cow);

		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
		free_extent_buffer(buf);
		add_root_to_dirty_list(root);
	} else {
		WARN_ON(trans->transid != btrfs_header_generation(parent));
		tree_mod_log_insert_key(parent, parent_slot,
					MOD_LOG_KEY_REPLACE, GFP_NOFS);
		btrfs_set_node_blockptr(parent, parent_slot,
					cow->start);
		btrfs_set_node_ptr_generation(parent, parent_slot,
					      trans->transid);
		btrfs_mark_buffer_dirty(parent);
		if (last_ref) {
			ret = tree_mod_log_free_eb(buf);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				return ret;
			}
		}
		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
	}
	if (unlock_orig)
		btrfs_tree_unlock(buf);
	free_extent_buffer_stale(buf);
	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}
/*
 * returns the logical address of the oldest predecessor of the given root.
 * entries older than time_seq are ignored.
 */
static struct tree_mod_elem *__tree_mod_log_oldest_root(
		struct extent_buffer *eb_root, u64 time_seq)
{
	struct tree_mod_elem *tm;
	struct tree_mod_elem *found = NULL;
	u64 root_logical = eb_root->start;
	int looped = 0;

	if (!time_seq)
		return NULL;

	/*
	 * the very last operation that's logged for a root is the
	 * replacement operation (if it is replaced at all). this has
	 * the logical address of the *new* root, making it the very
	 * first operation that's logged for this root.
	 */
	while (1) {
		tm = tree_mod_log_search_oldest(eb_root->fs_info, root_logical,
						time_seq);
		if (!looped && !tm)
			return NULL;
		/*
		 * if there are no tree operation for the oldest root, we simply
		 * return it. this should only happen if that (old) root is at
		 * level 0.
		 */
		if (!tm)
			break;

		/*
		 * if there's an operation that's not a root replacement, we
		 * found the oldest version of our root. normally, we'll find a
		 * MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
		 */
		if (tm->op != MOD_LOG_ROOT_REPLACE)
			break;

		found = tm;
		root_logical = tm->old_root.logical;
		looped = 1;
	}

	/* if there's no old root to return, return what we found instead */
	if (!found)
		found = tm;

	return found;
}
/*
 * tm is a pointer to the first operation to rewind within eb. then, all
 * previous operations will be rewound (until we reach something older than
 * time_seq).
 */
static void
__tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
		      u64 time_seq, struct tree_mod_elem *first_tm)
{
	u32 n;
	struct rb_node *next;
	struct tree_mod_elem *tm = first_tm;
	unsigned long o_dst;
	unsigned long o_src;
	unsigned long p_size = sizeof(struct btrfs_key_ptr);

	n = btrfs_header_nritems(eb);
	read_lock(&fs_info->tree_mod_log_lock);
	while (tm && tm->seq >= time_seq) {
		/*
		 * all the operations are recorded with the operator used for
		 * the modification. as we're going backwards, we do the
		 * opposite of each operation here.
		 */
		switch (tm->op) {
		case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
			BUG_ON(tm->slot < n);
			/* Fallthrough */
		case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
		case MOD_LOG_KEY_REMOVE:
			btrfs_set_node_key(eb, &tm->key, tm->slot);
			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
			btrfs_set_node_ptr_generation(eb, tm->slot,
						      tm->generation);
			n++;
			break;
		case MOD_LOG_KEY_REPLACE:
			BUG_ON(tm->slot >= n);
			btrfs_set_node_key(eb, &tm->key, tm->slot);
			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
			btrfs_set_node_ptr_generation(eb, tm->slot,
						      tm->generation);
			break;
		case MOD_LOG_KEY_ADD:
			/* if a move operation is needed it's in the log */
			n--;
			break;
		case MOD_LOG_MOVE_KEYS:
			o_dst = btrfs_node_key_ptr_offset(tm->slot);
			o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot);
			memmove_extent_buffer(eb, o_dst, o_src,
					      tm->move.nr_items * p_size);
			break;
		case MOD_LOG_ROOT_REPLACE:
			/*
			 * this operation is special. for roots, this must be
			 * handled explicitly before rewinding.
			 * for non-roots, this operation may exist if the node
			 * was a root: root A -> child B; then A gets empty and
			 * B is promoted to the new root. in the mod log, we'll
			 * have a root-replace operation for B, a tree block
			 * that is no root. we simply ignore that operation.
			 */
			break;
		}
		next = rb_next(&tm->node);
		if (!next)
			break;
		tm = rb_entry(next, struct tree_mod_elem, node);
		if (tm->logical != first_tm->logical)
			break;
	}
	read_unlock(&fs_info->tree_mod_log_lock);
	btrfs_set_header_nritems(eb, n);
}
/*
 * Called with eb read locked. If the buffer cannot be rewound, the same buffer
 * is returned. If rewind operations happen, a fresh buffer is returned. The
 * returned buffer is always read-locked. If the returned buffer is not the
 * input buffer, the lock on the input buffer is released and the input buffer
 * is freed (its refcount is decremented).
 */
static struct extent_buffer *
tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
		    struct extent_buffer *eb, u64 time_seq)
{
	struct extent_buffer *eb_rewin;
	struct tree_mod_elem *tm;

	if (!time_seq)
		return eb;

	if (btrfs_header_level(eb) == 0)
		return eb;

	tm = tree_mod_log_search(fs_info, eb->start, time_seq);
	if (!tm)
		return eb;

	btrfs_set_path_blocking(path);
	btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);

	if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
		BUG_ON(tm->slot != 0);
		eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start);
		if (!eb_rewin) {
			btrfs_tree_read_unlock_blocking(eb);
			free_extent_buffer(eb);
			return NULL;
		}
		btrfs_set_header_bytenr(eb_rewin, eb->start);
		btrfs_set_header_backref_rev(eb_rewin,
					     btrfs_header_backref_rev(eb));
		btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb));
		btrfs_set_header_level(eb_rewin, btrfs_header_level(eb));
	} else {
		eb_rewin = btrfs_clone_extent_buffer(eb);
		if (!eb_rewin) {
			btrfs_tree_read_unlock_blocking(eb);
			free_extent_buffer(eb);
			return NULL;
		}
	}

	btrfs_clear_path_blocking(path, NULL, BTRFS_READ_LOCK);
	btrfs_tree_read_unlock_blocking(eb);
	free_extent_buffer(eb);

	extent_buffer_get(eb_rewin);
	btrfs_tree_read_lock(eb_rewin);
	__tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm);
	WARN_ON(btrfs_header_nritems(eb_rewin) >
		BTRFS_NODEPTRS_PER_BLOCK(fs_info));

	return eb_rewin;
}
1323 * get_old_root() rewinds the state of @root's root node to the given @time_seq
1324 * value. If there are no changes, the current root->root_node is returned. If
1325 * anything changed in between, there's a fresh buffer allocated on which the
1326 * rewind operations are done. In any case, the returned buffer is read locked.
1327 * Returns NULL on error (with no locks held).
1329 static inline struct extent_buffer
*
1330 get_old_root(struct btrfs_root
*root
, u64 time_seq
)
1332 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
1333 struct tree_mod_elem
*tm
;
1334 struct extent_buffer
*eb
= NULL
;
1335 struct extent_buffer
*eb_root
;
1336 struct extent_buffer
*old
;
1337 struct tree_mod_root
*old_root
= NULL
;
1338 u64 old_generation
= 0;
1342 eb_root
= btrfs_read_lock_root_node(root
);
1343 tm
= __tree_mod_log_oldest_root(eb_root
, time_seq
);
1347 if (tm
->op
== MOD_LOG_ROOT_REPLACE
) {
1348 old_root
= &tm
->old_root
;
1349 old_generation
= tm
->generation
;
1350 logical
= old_root
->logical
;
1351 level
= old_root
->level
;
1353 logical
= eb_root
->start
;
1354 level
= btrfs_header_level(eb_root
);
1357 tm
= tree_mod_log_search(fs_info
, logical
, time_seq
);
1358 if (old_root
&& tm
&& tm
->op
!= MOD_LOG_KEY_REMOVE_WHILE_FREEING
) {
1359 btrfs_tree_read_unlock(eb_root
);
1360 free_extent_buffer(eb_root
);
1361 old
= read_tree_block(fs_info
, logical
, 0, level
, NULL
);
1362 if (WARN_ON(IS_ERR(old
) || !extent_buffer_uptodate(old
))) {
1364 free_extent_buffer(old
);
1366 "failed to read tree block %llu from get_old_root",
1369 eb
= btrfs_clone_extent_buffer(old
);
1370 free_extent_buffer(old
);
1372 } else if (old_root
) {
1373 btrfs_tree_read_unlock(eb_root
);
1374 free_extent_buffer(eb_root
);
1375 eb
= alloc_dummy_extent_buffer(fs_info
, logical
);
1377 btrfs_set_lock_blocking_rw(eb_root
, BTRFS_READ_LOCK
);
1378 eb
= btrfs_clone_extent_buffer(eb_root
);
1379 btrfs_tree_read_unlock_blocking(eb_root
);
1380 free_extent_buffer(eb_root
);
1385 extent_buffer_get(eb
);
1386 btrfs_tree_read_lock(eb
);
1388 btrfs_set_header_bytenr(eb
, eb
->start
);
1389 btrfs_set_header_backref_rev(eb
, BTRFS_MIXED_BACKREF_REV
);
1390 btrfs_set_header_owner(eb
, btrfs_header_owner(eb_root
));
1391 btrfs_set_header_level(eb
, old_root
->level
);
1392 btrfs_set_header_generation(eb
, old_generation
);
1395 __tree_mod_log_rewind(fs_info
, eb
, time_seq
, tm
);
1397 WARN_ON(btrfs_header_level(eb
) != 0);
1398 WARN_ON(btrfs_header_nritems(eb
) > BTRFS_NODEPTRS_PER_BLOCK(fs_info
));
1403 int btrfs_old_root_level(struct btrfs_root
*root
, u64 time_seq
)
1405 struct tree_mod_elem
*tm
;
1407 struct extent_buffer
*eb_root
= btrfs_root_node(root
);
1409 tm
= __tree_mod_log_oldest_root(eb_root
, time_seq
);
1410 if (tm
&& tm
->op
== MOD_LOG_ROOT_REPLACE
) {
1411 level
= tm
->old_root
.level
;
1413 level
= btrfs_header_level(eb_root
);
1415 free_extent_buffer(eb_root
);
1420 static inline int should_cow_block(struct btrfs_trans_handle
*trans
,
1421 struct btrfs_root
*root
,
1422 struct extent_buffer
*buf
)
1424 if (btrfs_is_testing(root
->fs_info
))
1427 /* Ensure we can see the FORCE_COW bit */
1428 smp_mb__before_atomic();
1431 * We do not need to cow a block if
1432 * 1) this block is not created or changed in this transaction;
1433 * 2) this block does not belong to TREE_RELOC tree;
1434 * 3) the root is not forced COW.
1436 * What is forced COW:
1437 * when we create snapshot during committing the transaction,
1438 * after we've finished coping src root, we must COW the shared
1439 * block to ensure the metadata consistency.
1441 if (btrfs_header_generation(buf
) == trans
->transid
&&
1442 !btrfs_header_flag(buf
, BTRFS_HEADER_FLAG_WRITTEN
) &&
1443 !(root
->root_key
.objectid
!= BTRFS_TREE_RELOC_OBJECTID
&&
1444 btrfs_header_flag(buf
, BTRFS_HEADER_FLAG_RELOC
)) &&
1445 !test_bit(BTRFS_ROOT_FORCE_COW
, &root
->state
))
1451 * cows a single block, see __btrfs_cow_block for the real work.
1452 * This version of it has extra checks so that a block isn't COWed more than
1453 * once per transaction, as long as it hasn't been written yet
1455 noinline
int btrfs_cow_block(struct btrfs_trans_handle
*trans
,
1456 struct btrfs_root
*root
, struct extent_buffer
*buf
,
1457 struct extent_buffer
*parent
, int parent_slot
,
1458 struct extent_buffer
**cow_ret
)
1460 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
1464 if (trans
->transaction
!= fs_info
->running_transaction
)
1465 WARN(1, KERN_CRIT
"trans %llu running %llu\n",
1467 fs_info
->running_transaction
->transid
);
1469 if (trans
->transid
!= fs_info
->generation
)
1470 WARN(1, KERN_CRIT
"trans %llu running %llu\n",
1471 trans
->transid
, fs_info
->generation
);
1473 if (!should_cow_block(trans
, root
, buf
)) {
1474 trans
->dirty
= true;
1479 search_start
= buf
->start
& ~((u64
)SZ_1G
- 1);
1482 btrfs_set_lock_blocking(parent
);
1483 btrfs_set_lock_blocking(buf
);
1485 ret
= __btrfs_cow_block(trans
, root
, buf
, parent
,
1486 parent_slot
, cow_ret
, search_start
, 0);
1488 trace_btrfs_cow_block(root
, buf
, *cow_ret
);
1494 * helper function for defrag to decide if two blocks pointed to by a
1495 * node are actually close by
1497 static int close_blocks(u64 blocknr
, u64 other
, u32 blocksize
)
1499 if (blocknr
< other
&& other
- (blocknr
+ blocksize
) < 32768)
1501 if (blocknr
> other
&& blocknr
- (other
+ blocksize
) < 32768)
1507 * compare two keys in a memcmp fashion
1509 static int comp_keys(const struct btrfs_disk_key
*disk
,
1510 const struct btrfs_key
*k2
)
1512 struct btrfs_key k1
;
1514 btrfs_disk_key_to_cpu(&k1
, disk
);
1516 return btrfs_comp_cpu_keys(&k1
, k2
);
1520 * same as comp_keys only with two btrfs_key's
1522 int btrfs_comp_cpu_keys(const struct btrfs_key
*k1
, const struct btrfs_key
*k2
)
1524 if (k1
->objectid
> k2
->objectid
)
1526 if (k1
->objectid
< k2
->objectid
)
1528 if (k1
->type
> k2
->type
)
1530 if (k1
->type
< k2
->type
)
1532 if (k1
->offset
> k2
->offset
)
1534 if (k1
->offset
< k2
->offset
)
1540 * this is used by the defrag code to go through all the
1541 * leaves pointed to by a node and reallocate them so that
1542 * disk order is close to key order
1544 int btrfs_realloc_node(struct btrfs_trans_handle
*trans
,
1545 struct btrfs_root
*root
, struct extent_buffer
*parent
,
1546 int start_slot
, u64
*last_ret
,
1547 struct btrfs_key
*progress
)
1549 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
1550 struct extent_buffer
*cur
;
1553 u64 search_start
= *last_ret
;
1563 int progress_passed
= 0;
1564 struct btrfs_disk_key disk_key
;
1566 parent_level
= btrfs_header_level(parent
);
1568 WARN_ON(trans
->transaction
!= fs_info
->running_transaction
);
1569 WARN_ON(trans
->transid
!= fs_info
->generation
);
1571 parent_nritems
= btrfs_header_nritems(parent
);
1572 blocksize
= fs_info
->nodesize
;
1573 end_slot
= parent_nritems
- 1;
1575 if (parent_nritems
<= 1)
1578 btrfs_set_lock_blocking(parent
);
1580 for (i
= start_slot
; i
<= end_slot
; i
++) {
1581 struct btrfs_key first_key
;
1584 btrfs_node_key(parent
, &disk_key
, i
);
1585 if (!progress_passed
&& comp_keys(&disk_key
, progress
) < 0)
1588 progress_passed
= 1;
1589 blocknr
= btrfs_node_blockptr(parent
, i
);
1590 gen
= btrfs_node_ptr_generation(parent
, i
);
1591 btrfs_node_key_to_cpu(parent
, &first_key
, i
);
1592 if (last_block
== 0)
1593 last_block
= blocknr
;
1596 other
= btrfs_node_blockptr(parent
, i
- 1);
1597 close
= close_blocks(blocknr
, other
, blocksize
);
1599 if (!close
&& i
< end_slot
) {
1600 other
= btrfs_node_blockptr(parent
, i
+ 1);
1601 close
= close_blocks(blocknr
, other
, blocksize
);
1604 last_block
= blocknr
;
1608 cur
= find_extent_buffer(fs_info
, blocknr
);
1610 uptodate
= btrfs_buffer_uptodate(cur
, gen
, 0);
1613 if (!cur
|| !uptodate
) {
1615 cur
= read_tree_block(fs_info
, blocknr
, gen
,
1619 return PTR_ERR(cur
);
1620 } else if (!extent_buffer_uptodate(cur
)) {
1621 free_extent_buffer(cur
);
1624 } else if (!uptodate
) {
1625 err
= btrfs_read_buffer(cur
, gen
,
1626 parent_level
- 1,&first_key
);
1628 free_extent_buffer(cur
);
1633 if (search_start
== 0)
1634 search_start
= last_block
;
1636 btrfs_tree_lock(cur
);
1637 btrfs_set_lock_blocking(cur
);
1638 err
= __btrfs_cow_block(trans
, root
, cur
, parent
, i
,
1641 (end_slot
- i
) * blocksize
));
1643 btrfs_tree_unlock(cur
);
1644 free_extent_buffer(cur
);
1647 search_start
= cur
->start
;
1648 last_block
= cur
->start
;
1649 *last_ret
= search_start
;
1650 btrfs_tree_unlock(cur
);
1651 free_extent_buffer(cur
);
1657 * search for key in the extent_buffer. The items start at offset p,
1658 * and they are item_size apart. There are 'max' items in p.
1660 * the slot in the array is returned via slot, and it points to
1661 * the place where you would insert key if it is not found in
1664 * slot may point to max if the key is bigger than all of the keys
1666 static noinline
int generic_bin_search(struct extent_buffer
*eb
,
1667 unsigned long p
, int item_size
,
1668 const struct btrfs_key
*key
,
1675 struct btrfs_disk_key
*tmp
= NULL
;
1676 struct btrfs_disk_key unaligned
;
1677 unsigned long offset
;
1679 unsigned long map_start
= 0;
1680 unsigned long map_len
= 0;
1684 btrfs_err(eb
->fs_info
,
1685 "%s: low (%d) > high (%d) eb %llu owner %llu level %d",
1686 __func__
, low
, high
, eb
->start
,
1687 btrfs_header_owner(eb
), btrfs_header_level(eb
));
1691 while (low
< high
) {
1692 mid
= (low
+ high
) / 2;
1693 offset
= p
+ mid
* item_size
;
1695 if (!kaddr
|| offset
< map_start
||
1696 (offset
+ sizeof(struct btrfs_disk_key
)) >
1697 map_start
+ map_len
) {
1699 err
= map_private_extent_buffer(eb
, offset
,
1700 sizeof(struct btrfs_disk_key
),
1701 &kaddr
, &map_start
, &map_len
);
1704 tmp
= (struct btrfs_disk_key
*)(kaddr
+ offset
-
1706 } else if (err
== 1) {
1707 read_extent_buffer(eb
, &unaligned
,
1708 offset
, sizeof(unaligned
));
1715 tmp
= (struct btrfs_disk_key
*)(kaddr
+ offset
-
1718 ret
= comp_keys(tmp
, key
);
1734 * simple bin_search frontend that does the right thing for
1737 int btrfs_bin_search(struct extent_buffer
*eb
, const struct btrfs_key
*key
,
1738 int level
, int *slot
)
1741 return generic_bin_search(eb
,
1742 offsetof(struct btrfs_leaf
, items
),
1743 sizeof(struct btrfs_item
),
1744 key
, btrfs_header_nritems(eb
),
1747 return generic_bin_search(eb
,
1748 offsetof(struct btrfs_node
, ptrs
),
1749 sizeof(struct btrfs_key_ptr
),
1750 key
, btrfs_header_nritems(eb
),
1754 static void root_add_used(struct btrfs_root
*root
, u32 size
)
1756 spin_lock(&root
->accounting_lock
);
1757 btrfs_set_root_used(&root
->root_item
,
1758 btrfs_root_used(&root
->root_item
) + size
);
1759 spin_unlock(&root
->accounting_lock
);
1762 static void root_sub_used(struct btrfs_root
*root
, u32 size
)
1764 spin_lock(&root
->accounting_lock
);
1765 btrfs_set_root_used(&root
->root_item
,
1766 btrfs_root_used(&root
->root_item
) - size
);
1767 spin_unlock(&root
->accounting_lock
);
1770 /* given a node and slot number, this reads the blocks it points to. The
1771 * extent buffer is returned with a reference taken (but unlocked).
1773 static noinline
struct extent_buffer
*
1774 read_node_slot(struct btrfs_fs_info
*fs_info
, struct extent_buffer
*parent
,
1777 int level
= btrfs_header_level(parent
);
1778 struct extent_buffer
*eb
;
1779 struct btrfs_key first_key
;
1781 if (slot
< 0 || slot
>= btrfs_header_nritems(parent
))
1782 return ERR_PTR(-ENOENT
);
1786 btrfs_node_key_to_cpu(parent
, &first_key
, slot
);
1787 eb
= read_tree_block(fs_info
, btrfs_node_blockptr(parent
, slot
),
1788 btrfs_node_ptr_generation(parent
, slot
),
1789 level
- 1, &first_key
);
1790 if (!IS_ERR(eb
) && !extent_buffer_uptodate(eb
)) {
1791 free_extent_buffer(eb
);
1799 * node level balancing, used to make sure nodes are in proper order for
1800 * item deletion. We balance from the top down, so we have to make sure
1801 * that a deletion won't leave an node completely empty later on.
1803 static noinline
int balance_level(struct btrfs_trans_handle
*trans
,
1804 struct btrfs_root
*root
,
1805 struct btrfs_path
*path
, int level
)
1807 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
1808 struct extent_buffer
*right
= NULL
;
1809 struct extent_buffer
*mid
;
1810 struct extent_buffer
*left
= NULL
;
1811 struct extent_buffer
*parent
= NULL
;
1815 int orig_slot
= path
->slots
[level
];
1821 mid
= path
->nodes
[level
];
1823 WARN_ON(path
->locks
[level
] != BTRFS_WRITE_LOCK
&&
1824 path
->locks
[level
] != BTRFS_WRITE_LOCK_BLOCKING
);
1825 WARN_ON(btrfs_header_generation(mid
) != trans
->transid
);
1827 orig_ptr
= btrfs_node_blockptr(mid
, orig_slot
);
1829 if (level
< BTRFS_MAX_LEVEL
- 1) {
1830 parent
= path
->nodes
[level
+ 1];
1831 pslot
= path
->slots
[level
+ 1];
1835 * deal with the case where there is only one pointer in the root
1836 * by promoting the node below to a root
1839 struct extent_buffer
*child
;
1841 if (btrfs_header_nritems(mid
) != 1)
1844 /* promote the child to a root */
1845 child
= read_node_slot(fs_info
, mid
, 0);
1846 if (IS_ERR(child
)) {
1847 ret
= PTR_ERR(child
);
1848 btrfs_handle_fs_error(fs_info
, ret
, NULL
);
1852 btrfs_tree_lock(child
);
1853 btrfs_set_lock_blocking(child
);
1854 ret
= btrfs_cow_block(trans
, root
, child
, mid
, 0, &child
);
1856 btrfs_tree_unlock(child
);
1857 free_extent_buffer(child
);
1861 ret
= tree_mod_log_insert_root(root
->node
, child
, 1);
1863 rcu_assign_pointer(root
->node
, child
);
1865 add_root_to_dirty_list(root
);
1866 btrfs_tree_unlock(child
);
1868 path
->locks
[level
] = 0;
1869 path
->nodes
[level
] = NULL
;
1870 clean_tree_block(fs_info
, mid
);
1871 btrfs_tree_unlock(mid
);
1872 /* once for the path */
1873 free_extent_buffer(mid
);
1875 root_sub_used(root
, mid
->len
);
1876 btrfs_free_tree_block(trans
, root
, mid
, 0, 1);
1877 /* once for the root ptr */
1878 free_extent_buffer_stale(mid
);
1881 if (btrfs_header_nritems(mid
) >
1882 BTRFS_NODEPTRS_PER_BLOCK(fs_info
) / 4)
1885 left
= read_node_slot(fs_info
, parent
, pslot
- 1);
1890 btrfs_tree_lock(left
);
1891 btrfs_set_lock_blocking(left
);
1892 wret
= btrfs_cow_block(trans
, root
, left
,
1893 parent
, pslot
- 1, &left
);
1900 right
= read_node_slot(fs_info
, parent
, pslot
+ 1);
1905 btrfs_tree_lock(right
);
1906 btrfs_set_lock_blocking(right
);
1907 wret
= btrfs_cow_block(trans
, root
, right
,
1908 parent
, pslot
+ 1, &right
);
1915 /* first, try to make some room in the middle buffer */
1917 orig_slot
+= btrfs_header_nritems(left
);
1918 wret
= push_node_left(trans
, fs_info
, left
, mid
, 1);
1924 * then try to empty the right most buffer into the middle
1927 wret
= push_node_left(trans
, fs_info
, mid
, right
, 1);
1928 if (wret
< 0 && wret
!= -ENOSPC
)
1930 if (btrfs_header_nritems(right
) == 0) {
1931 clean_tree_block(fs_info
, right
);
1932 btrfs_tree_unlock(right
);
1933 del_ptr(root
, path
, level
+ 1, pslot
+ 1);
1934 root_sub_used(root
, right
->len
);
1935 btrfs_free_tree_block(trans
, root
, right
, 0, 1);
1936 free_extent_buffer_stale(right
);
1939 struct btrfs_disk_key right_key
;
1940 btrfs_node_key(right
, &right_key
, 0);
1941 ret
= tree_mod_log_insert_key(parent
, pslot
+ 1,
1942 MOD_LOG_KEY_REPLACE
, GFP_NOFS
);
1944 btrfs_set_node_key(parent
, &right_key
, pslot
+ 1);
1945 btrfs_mark_buffer_dirty(parent
);
1948 if (btrfs_header_nritems(mid
) == 1) {
1950 * we're not allowed to leave a node with one item in the
1951 * tree during a delete. A deletion from lower in the tree
1952 * could try to delete the only pointer in this node.
1953 * So, pull some keys from the left.
1954 * There has to be a left pointer at this point because
1955 * otherwise we would have pulled some pointers from the
1960 btrfs_handle_fs_error(fs_info
, ret
, NULL
);
1963 wret
= balance_node_right(trans
, fs_info
, mid
, left
);
1969 wret
= push_node_left(trans
, fs_info
, left
, mid
, 1);
1975 if (btrfs_header_nritems(mid
) == 0) {
1976 clean_tree_block(fs_info
, mid
);
1977 btrfs_tree_unlock(mid
);
1978 del_ptr(root
, path
, level
+ 1, pslot
);
1979 root_sub_used(root
, mid
->len
);
1980 btrfs_free_tree_block(trans
, root
, mid
, 0, 1);
1981 free_extent_buffer_stale(mid
);
1984 /* update the parent key to reflect our changes */
1985 struct btrfs_disk_key mid_key
;
1986 btrfs_node_key(mid
, &mid_key
, 0);
1987 ret
= tree_mod_log_insert_key(parent
, pslot
,
1988 MOD_LOG_KEY_REPLACE
, GFP_NOFS
);
1990 btrfs_set_node_key(parent
, &mid_key
, pslot
);
1991 btrfs_mark_buffer_dirty(parent
);
1994 /* update the path */
1996 if (btrfs_header_nritems(left
) > orig_slot
) {
1997 extent_buffer_get(left
);
1998 /* left was locked after cow */
1999 path
->nodes
[level
] = left
;
2000 path
->slots
[level
+ 1] -= 1;
2001 path
->slots
[level
] = orig_slot
;
2003 btrfs_tree_unlock(mid
);
2004 free_extent_buffer(mid
);
2007 orig_slot
-= btrfs_header_nritems(left
);
2008 path
->slots
[level
] = orig_slot
;
2011 /* double check we haven't messed things up */
2013 btrfs_node_blockptr(path
->nodes
[level
], path
->slots
[level
]))
2017 btrfs_tree_unlock(right
);
2018 free_extent_buffer(right
);
2021 if (path
->nodes
[level
] != left
)
2022 btrfs_tree_unlock(left
);
2023 free_extent_buffer(left
);
2028 /* Node balancing for insertion. Here we only split or push nodes around
2029 * when they are completely full. This is also done top down, so we
2030 * have to be pessimistic.
2032 static noinline
int push_nodes_for_insert(struct btrfs_trans_handle
*trans
,
2033 struct btrfs_root
*root
,
2034 struct btrfs_path
*path
, int level
)
2036 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
2037 struct extent_buffer
*right
= NULL
;
2038 struct extent_buffer
*mid
;
2039 struct extent_buffer
*left
= NULL
;
2040 struct extent_buffer
*parent
= NULL
;
2044 int orig_slot
= path
->slots
[level
];
2049 mid
= path
->nodes
[level
];
2050 WARN_ON(btrfs_header_generation(mid
) != trans
->transid
);
2052 if (level
< BTRFS_MAX_LEVEL
- 1) {
2053 parent
= path
->nodes
[level
+ 1];
2054 pslot
= path
->slots
[level
+ 1];
2060 left
= read_node_slot(fs_info
, parent
, pslot
- 1);
2064 /* first, try to make some room in the middle buffer */
2068 btrfs_tree_lock(left
);
2069 btrfs_set_lock_blocking(left
);
2071 left_nr
= btrfs_header_nritems(left
);
2072 if (left_nr
>= BTRFS_NODEPTRS_PER_BLOCK(fs_info
) - 1) {
2075 ret
= btrfs_cow_block(trans
, root
, left
, parent
,
2080 wret
= push_node_left(trans
, fs_info
,
2087 struct btrfs_disk_key disk_key
;
2088 orig_slot
+= left_nr
;
2089 btrfs_node_key(mid
, &disk_key
, 0);
2090 ret
= tree_mod_log_insert_key(parent
, pslot
,
2091 MOD_LOG_KEY_REPLACE
, GFP_NOFS
);
2093 btrfs_set_node_key(parent
, &disk_key
, pslot
);
2094 btrfs_mark_buffer_dirty(parent
);
2095 if (btrfs_header_nritems(left
) > orig_slot
) {
2096 path
->nodes
[level
] = left
;
2097 path
->slots
[level
+ 1] -= 1;
2098 path
->slots
[level
] = orig_slot
;
2099 btrfs_tree_unlock(mid
);
2100 free_extent_buffer(mid
);
2103 btrfs_header_nritems(left
);
2104 path
->slots
[level
] = orig_slot
;
2105 btrfs_tree_unlock(left
);
2106 free_extent_buffer(left
);
2110 btrfs_tree_unlock(left
);
2111 free_extent_buffer(left
);
2113 right
= read_node_slot(fs_info
, parent
, pslot
+ 1);
2118 * then try to empty the right most buffer into the middle
2123 btrfs_tree_lock(right
);
2124 btrfs_set_lock_blocking(right
);
2126 right_nr
= btrfs_header_nritems(right
);
2127 if (right_nr
>= BTRFS_NODEPTRS_PER_BLOCK(fs_info
) - 1) {
2130 ret
= btrfs_cow_block(trans
, root
, right
,
2136 wret
= balance_node_right(trans
, fs_info
,
2143 struct btrfs_disk_key disk_key
;
2145 btrfs_node_key(right
, &disk_key
, 0);
2146 ret
= tree_mod_log_insert_key(parent
, pslot
+ 1,
2147 MOD_LOG_KEY_REPLACE
, GFP_NOFS
);
2149 btrfs_set_node_key(parent
, &disk_key
, pslot
+ 1);
2150 btrfs_mark_buffer_dirty(parent
);
2152 if (btrfs_header_nritems(mid
) <= orig_slot
) {
2153 path
->nodes
[level
] = right
;
2154 path
->slots
[level
+ 1] += 1;
2155 path
->slots
[level
] = orig_slot
-
2156 btrfs_header_nritems(mid
);
2157 btrfs_tree_unlock(mid
);
2158 free_extent_buffer(mid
);
2160 btrfs_tree_unlock(right
);
2161 free_extent_buffer(right
);
2165 btrfs_tree_unlock(right
);
2166 free_extent_buffer(right
);
2172 * readahead one full node of leaves, finding things that are close
2173 * to the block in 'slot', and triggering ra on them.
2175 static void reada_for_search(struct btrfs_fs_info
*fs_info
,
2176 struct btrfs_path
*path
,
2177 int level
, int slot
, u64 objectid
)
2179 struct extent_buffer
*node
;
2180 struct btrfs_disk_key disk_key
;
2185 struct extent_buffer
*eb
;
2193 if (!path
->nodes
[level
])
2196 node
= path
->nodes
[level
];
2198 search
= btrfs_node_blockptr(node
, slot
);
2199 blocksize
= fs_info
->nodesize
;
2200 eb
= find_extent_buffer(fs_info
, search
);
2202 free_extent_buffer(eb
);
2208 nritems
= btrfs_header_nritems(node
);
2212 if (path
->reada
== READA_BACK
) {
2216 } else if (path
->reada
== READA_FORWARD
) {
2221 if (path
->reada
== READA_BACK
&& objectid
) {
2222 btrfs_node_key(node
, &disk_key
, nr
);
2223 if (btrfs_disk_key_objectid(&disk_key
) != objectid
)
2226 search
= btrfs_node_blockptr(node
, nr
);
2227 if ((search
<= target
&& target
- search
<= 65536) ||
2228 (search
> target
&& search
- target
<= 65536)) {
2229 readahead_tree_block(fs_info
, search
);
2233 if ((nread
> 65536 || nscan
> 32))
2238 static noinline
void reada_for_balance(struct btrfs_fs_info
*fs_info
,
2239 struct btrfs_path
*path
, int level
)
2243 struct extent_buffer
*parent
;
2244 struct extent_buffer
*eb
;
2249 parent
= path
->nodes
[level
+ 1];
2253 nritems
= btrfs_header_nritems(parent
);
2254 slot
= path
->slots
[level
+ 1];
2257 block1
= btrfs_node_blockptr(parent
, slot
- 1);
2258 gen
= btrfs_node_ptr_generation(parent
, slot
- 1);
2259 eb
= find_extent_buffer(fs_info
, block1
);
2261 * if we get -eagain from btrfs_buffer_uptodate, we
2262 * don't want to return eagain here. That will loop
2265 if (eb
&& btrfs_buffer_uptodate(eb
, gen
, 1) != 0)
2267 free_extent_buffer(eb
);
2269 if (slot
+ 1 < nritems
) {
2270 block2
= btrfs_node_blockptr(parent
, slot
+ 1);
2271 gen
= btrfs_node_ptr_generation(parent
, slot
+ 1);
2272 eb
= find_extent_buffer(fs_info
, block2
);
2273 if (eb
&& btrfs_buffer_uptodate(eb
, gen
, 1) != 0)
2275 free_extent_buffer(eb
);
2279 readahead_tree_block(fs_info
, block1
);
2281 readahead_tree_block(fs_info
, block2
);
2286 * when we walk down the tree, it is usually safe to unlock the higher layers
2287 * in the tree. The exceptions are when our path goes through slot 0, because
2288 * operations on the tree might require changing key pointers higher up in the
2291 * callers might also have set path->keep_locks, which tells this code to keep
2292 * the lock if the path points to the last slot in the block. This is part of
2293 * walking through the tree, and selecting the next slot in the higher block.
2295 * lowest_unlock sets the lowest level in the tree we're allowed to unlock. so
2296 * if lowest_unlock is 1, level 0 won't be unlocked
2298 static noinline
void unlock_up(struct btrfs_path
*path
, int level
,
2299 int lowest_unlock
, int min_write_lock_level
,
2300 int *write_lock_level
)
2303 int skip_level
= level
;
2305 struct extent_buffer
*t
;
2307 for (i
= level
; i
< BTRFS_MAX_LEVEL
; i
++) {
2308 if (!path
->nodes
[i
])
2310 if (!path
->locks
[i
])
2312 if (!no_skips
&& path
->slots
[i
] == 0) {
2316 if (!no_skips
&& path
->keep_locks
) {
2319 nritems
= btrfs_header_nritems(t
);
2320 if (nritems
< 1 || path
->slots
[i
] >= nritems
- 1) {
2325 if (skip_level
< i
&& i
>= lowest_unlock
)
2329 if (i
>= lowest_unlock
&& i
> skip_level
) {
2330 btrfs_tree_unlock_rw(t
, path
->locks
[i
]);
2332 if (write_lock_level
&&
2333 i
> min_write_lock_level
&&
2334 i
<= *write_lock_level
) {
2335 *write_lock_level
= i
- 1;
2342 * This releases any locks held in the path starting at level and
2343 * going all the way up to the root.
2345 * btrfs_search_slot will keep the lock held on higher nodes in a few
2346 * corner cases, such as COW of the block at slot zero in the node. This
2347 * ignores those rules, and it should only be called when there are no
2348 * more updates to be done higher up in the tree.
2350 noinline
void btrfs_unlock_up_safe(struct btrfs_path
*path
, int level
)
2354 if (path
->keep_locks
)
2357 for (i
= level
; i
< BTRFS_MAX_LEVEL
; i
++) {
2358 if (!path
->nodes
[i
])
2360 if (!path
->locks
[i
])
2362 btrfs_tree_unlock_rw(path
->nodes
[i
], path
->locks
[i
]);
2368 * helper function for btrfs_search_slot. The goal is to find a block
2369 * in cache without setting the path to blocking. If we find the block
2370 * we return zero and the path is unchanged.
2372 * If we can't find the block, we set the path blocking and do some
2373 * reada. -EAGAIN is returned and the search must be repeated.
2376 read_block_for_search(struct btrfs_root
*root
, struct btrfs_path
*p
,
2377 struct extent_buffer
**eb_ret
, int level
, int slot
,
2378 const struct btrfs_key
*key
)
2380 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
2383 struct extent_buffer
*b
= *eb_ret
;
2384 struct extent_buffer
*tmp
;
2385 struct btrfs_key first_key
;
2389 blocknr
= btrfs_node_blockptr(b
, slot
);
2390 gen
= btrfs_node_ptr_generation(b
, slot
);
2391 parent_level
= btrfs_header_level(b
);
2392 btrfs_node_key_to_cpu(b
, &first_key
, slot
);
2394 tmp
= find_extent_buffer(fs_info
, blocknr
);
2396 /* first we do an atomic uptodate check */
2397 if (btrfs_buffer_uptodate(tmp
, gen
, 1) > 0) {
2402 /* the pages were up to date, but we failed
2403 * the generation number check. Do a full
2404 * read for the generation number that is correct.
2405 * We must do this without dropping locks so
2406 * we can trust our generation number
2408 btrfs_set_path_blocking(p
);
2410 /* now we're allowed to do a blocking uptodate check */
2411 ret
= btrfs_read_buffer(tmp
, gen
, parent_level
- 1, &first_key
);
2416 free_extent_buffer(tmp
);
2417 btrfs_release_path(p
);
2422 * reduce lock contention at high levels
2423 * of the btree by dropping locks before
2424 * we read. Don't release the lock on the current
2425 * level because we need to walk this node to figure
2426 * out which blocks to read.
2428 btrfs_unlock_up_safe(p
, level
+ 1);
2429 btrfs_set_path_blocking(p
);
2431 if (p
->reada
!= READA_NONE
)
2432 reada_for_search(fs_info
, p
, level
, slot
, key
->objectid
);
2435 tmp
= read_tree_block(fs_info
, blocknr
, gen
, parent_level
- 1,
2439 * If the read above didn't mark this buffer up to date,
2440 * it will never end up being up to date. Set ret to EIO now
2441 * and give up so that our caller doesn't loop forever
2444 if (!extent_buffer_uptodate(tmp
))
2446 free_extent_buffer(tmp
);
2451 btrfs_release_path(p
);
/*
 * helper function for btrfs_search_slot.  This does all of the checks
 * for node-level blocks and does any balancing required based on
 * the ins_len.
 *
 * If no extra work was required, zero is returned.  If we had to
 * drop the path, -EAGAIN is returned and btrfs_search_slot must
 * loop back around.
 */
2465 setup_nodes_for_search(struct btrfs_trans_handle
*trans
,
2466 struct btrfs_root
*root
, struct btrfs_path
*p
,
2467 struct extent_buffer
*b
, int level
, int ins_len
,
2468 int *write_lock_level
)
2470 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
2473 if ((p
->search_for_split
|| ins_len
> 0) && btrfs_header_nritems(b
) >=
2474 BTRFS_NODEPTRS_PER_BLOCK(fs_info
) - 3) {
2477 if (*write_lock_level
< level
+ 1) {
2478 *write_lock_level
= level
+ 1;
2479 btrfs_release_path(p
);
2483 btrfs_set_path_blocking(p
);
2484 reada_for_balance(fs_info
, p
, level
);
2485 sret
= split_node(trans
, root
, p
, level
);
2486 btrfs_clear_path_blocking(p
, NULL
, 0);
2493 b
= p
->nodes
[level
];
2494 } else if (ins_len
< 0 && btrfs_header_nritems(b
) <
2495 BTRFS_NODEPTRS_PER_BLOCK(fs_info
) / 2) {
2498 if (*write_lock_level
< level
+ 1) {
2499 *write_lock_level
= level
+ 1;
2500 btrfs_release_path(p
);
2504 btrfs_set_path_blocking(p
);
2505 reada_for_balance(fs_info
, p
, level
);
2506 sret
= balance_level(trans
, root
, p
, level
);
2507 btrfs_clear_path_blocking(p
, NULL
, 0);
2513 b
= p
->nodes
[level
];
2515 btrfs_release_path(p
);
2518 BUG_ON(btrfs_header_nritems(b
) == 1);
static void key_search_validate(struct extent_buffer *b,
				const struct btrfs_key *key,
				int level)
{
#ifdef CONFIG_BTRFS_ASSERT
	struct btrfs_disk_key disk_key;

	btrfs_cpu_key_to_disk(&disk_key, key);

	if (level == 0)
		ASSERT(!memcmp_extent_buffer(b, &disk_key,
		    offsetof(struct btrfs_leaf, items[0].key),
		    sizeof(disk_key)));
	else
		ASSERT(!memcmp_extent_buffer(b, &disk_key,
		    offsetof(struct btrfs_node, ptrs[0].key),
		    sizeof(disk_key)));
#endif
}

static int key_search(struct extent_buffer *b, const struct btrfs_key *key,
		      int level, int *prev_cmp, int *slot)
{
	if (*prev_cmp != 0) {
		*prev_cmp = btrfs_bin_search(b, key, level, slot);
		return *prev_cmp;
	}

	key_search_validate(b, key, level);
	*slot = 0;

	return 0;
}
int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
		u64 iobjectid, u64 ioff, u8 key_type,
		struct btrfs_key *found_key)
{
	int ret;
	struct btrfs_key key;
	struct extent_buffer *eb;

	ASSERT(path);
	ASSERT(found_key);

	key.type = key_type;
	key.objectid = iobjectid;
	key.offset = ioff;

	ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	eb = path->nodes[0];
	if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
		ret = btrfs_next_leaf(fs_root, path);
		if (ret)
			return ret;
		eb = path->nodes[0];
	}

	btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
	if (found_key->type != key.type ||
			found_key->objectid != key.objectid)
		return 1;

	return 0;
}
2597 static struct extent_buffer
*btrfs_search_slot_get_root(struct btrfs_root
*root
,
2598 struct btrfs_path
*p
,
2599 int write_lock_level
)
2601 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
2602 struct extent_buffer
*b
;
2606 /* We try very hard to do read locks on the root */
2607 root_lock
= BTRFS_READ_LOCK
;
2609 if (p
->search_commit_root
) {
2610 /* The commit roots are read only so we always do read locks */
2611 if (p
->need_commit_sem
)
2612 down_read(&fs_info
->commit_root_sem
);
2613 b
= root
->commit_root
;
2614 extent_buffer_get(b
);
2615 level
= btrfs_header_level(b
);
2616 if (p
->need_commit_sem
)
2617 up_read(&fs_info
->commit_root_sem
);
2619 * Ensure that all callers have set skip_locking when
2620 * p->search_commit_root = 1.
2622 ASSERT(p
->skip_locking
== 1);
2627 if (p
->skip_locking
) {
2628 b
= btrfs_root_node(root
);
2629 level
= btrfs_header_level(b
);
2634 * If the level is set to maximum, we can skip trying to get the read
2637 if (write_lock_level
< BTRFS_MAX_LEVEL
) {
2639 * We don't know the level of the root node until we actually
2640 * have it read locked
2642 b
= btrfs_read_lock_root_node(root
);
2643 level
= btrfs_header_level(b
);
2644 if (level
> write_lock_level
)
2647 /* Whoops, must trade for write lock */
2648 btrfs_tree_read_unlock(b
);
2649 free_extent_buffer(b
);
2652 b
= btrfs_lock_root_node(root
);
2653 root_lock
= BTRFS_WRITE_LOCK
;
2655 /* The level might have changed, check again */
2656 level
= btrfs_header_level(b
);
2659 p
->nodes
[level
] = b
;
2660 if (!p
->skip_locking
)
2661 p
->locks
[level
] = root_lock
;
2663 * Callers are responsible for dropping b's references.
/*
 * btrfs_search_slot - look for a key in a tree and perform necessary
 * modifications to preserve tree invariants.
 *
 * @trans:	Handle of transaction, used when modifying the tree
 * @p:		Holds all btree nodes along the search path
 * @root:	The root node of the tree
 * @key:	The key we are looking for
 * @ins_len:	Indicates purpose of search, for inserts it is 1, for
 *		deletions it's -1. 0 for plain searches
 * @cow:	boolean should CoW operations be performed. Must always be 1
 *		when modifying the tree.
 *
 * If @ins_len > 0, nodes and leaves will be split as we walk down the tree.
 * If @ins_len < 0, nodes will be merged as we walk down the tree (if possible)
 *
 * If @key is found, 0 is returned and you can find the item in the leaf level
 * of the path (level 0)
 *
 * If @key isn't found, 1 is returned and the leaf level of the path (level 0)
 * points to the slot where it should be inserted
 *
 * If an error is encountered while searching the tree a negative error number
 * is returned.
 */
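/*
 * Example (added sketch, not part of the original source): a plain read-only
 * lookup, with no transaction and cow == 0.  "root" and "objectid" here are
 * placeholders, not values taken from this file:
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	struct btrfs_key key;
 *	int ret;
 *
 *	if (!path)
 *		return -ENOMEM;
 *	key.objectid = objectid;
 *	key.type = BTRFS_INODE_ITEM_KEY;
 *	key.offset = 0;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret == 0)
 *		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
 *	else if (ret > 0)
 *		ret = -ENOENT;	(key missing, slot is the insert position)
 *	btrfs_free_path(path);
 *	return ret;
 */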
2694 int btrfs_search_slot(struct btrfs_trans_handle
*trans
, struct btrfs_root
*root
,
2695 const struct btrfs_key
*key
, struct btrfs_path
*p
,
2696 int ins_len
, int cow
)
2698 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
2699 struct extent_buffer
*b
;
2704 int lowest_unlock
= 1;
2705 /* everything at write_lock_level or lower must be write locked */
2706 int write_lock_level
= 0;
2707 u8 lowest_level
= 0;
2708 int min_write_lock_level
;
2711 lowest_level
= p
->lowest_level
;
2712 WARN_ON(lowest_level
&& ins_len
> 0);
2713 WARN_ON(p
->nodes
[0] != NULL
);
2714 BUG_ON(!cow
&& ins_len
);
2719 /* when we are removing items, we might have to go up to level
2720 * two as we update tree pointers Make sure we keep write
2721 * for those levels as well
2723 write_lock_level
= 2;
2724 } else if (ins_len
> 0) {
2726 * for inserting items, make sure we have a write lock on
2727 * level 1 so we can update keys
2729 write_lock_level
= 1;
2733 write_lock_level
= -1;
2735 if (cow
&& (p
->keep_locks
|| p
->lowest_level
))
2736 write_lock_level
= BTRFS_MAX_LEVEL
;
2738 min_write_lock_level
= write_lock_level
;
2742 b
= btrfs_search_slot_get_root(root
, p
, write_lock_level
);
2745 level
= btrfs_header_level(b
);
2748 * setup the path here so we can release it under lock
2749 * contention with the cow code
2752 bool last_level
= (level
== (BTRFS_MAX_LEVEL
- 1));
2755 * if we don't really need to cow this block
2756 * then we don't want to set the path blocking,
2757 * so we test it here
2759 if (!should_cow_block(trans
, root
, b
)) {
2760 trans
->dirty
= true;
2765 * must have write locks on this node and the
2768 if (level
> write_lock_level
||
2769 (level
+ 1 > write_lock_level
&&
2770 level
+ 1 < BTRFS_MAX_LEVEL
&&
2771 p
->nodes
[level
+ 1])) {
2772 write_lock_level
= level
+ 1;
2773 btrfs_release_path(p
);
2777 btrfs_set_path_blocking(p
);
2779 err
= btrfs_cow_block(trans
, root
, b
, NULL
, 0,
2782 err
= btrfs_cow_block(trans
, root
, b
,
2783 p
->nodes
[level
+ 1],
2784 p
->slots
[level
+ 1], &b
);
2791 p
->nodes
[level
] = b
;
2792 btrfs_clear_path_blocking(p
, NULL
, 0);
2795 * we have a lock on b and as long as we aren't changing
2796 * the tree, there is no way to for the items in b to change.
2797 * It is safe to drop the lock on our parent before we
2798 * go through the expensive btree search on b.
2800 * If we're inserting or deleting (ins_len != 0), then we might
2801 * be changing slot zero, which may require changing the parent.
2802 * So, we can't drop the lock until after we know which slot
2803 * we're operating on.
2805 if (!ins_len
&& !p
->keep_locks
) {
2808 if (u
< BTRFS_MAX_LEVEL
&& p
->locks
[u
]) {
2809 btrfs_tree_unlock_rw(p
->nodes
[u
], p
->locks
[u
]);
2814 ret
= key_search(b
, key
, level
, &prev_cmp
, &slot
);
2820 if (ret
&& slot
> 0) {
2824 p
->slots
[level
] = slot
;
2825 err
= setup_nodes_for_search(trans
, root
, p
, b
, level
,
2826 ins_len
, &write_lock_level
);
2833 b
= p
->nodes
[level
];
2834 slot
= p
->slots
[level
];
2837 * slot 0 is special, if we change the key
2838 * we have to update the parent pointer
2839 * which means we must have a write lock
2842 if (slot
== 0 && ins_len
&&
2843 write_lock_level
< level
+ 1) {
2844 write_lock_level
= level
+ 1;
2845 btrfs_release_path(p
);
2849 unlock_up(p
, level
, lowest_unlock
,
2850 min_write_lock_level
, &write_lock_level
);
2852 if (level
== lowest_level
) {
2858 err
= read_block_for_search(root
, p
, &b
, level
,
2867 if (!p
->skip_locking
) {
2868 level
= btrfs_header_level(b
);
2869 if (level
<= write_lock_level
) {
2870 err
= btrfs_try_tree_write_lock(b
);
2872 btrfs_set_path_blocking(p
);
2874 btrfs_clear_path_blocking(p
, b
,
2877 p
->locks
[level
] = BTRFS_WRITE_LOCK
;
2879 err
= btrfs_tree_read_lock_atomic(b
);
2881 btrfs_set_path_blocking(p
);
2882 btrfs_tree_read_lock(b
);
2883 btrfs_clear_path_blocking(p
, b
,
2886 p
->locks
[level
] = BTRFS_READ_LOCK
;
2888 p
->nodes
[level
] = b
;
2891 p
->slots
[level
] = slot
;
2893 btrfs_leaf_free_space(fs_info
, b
) < ins_len
) {
2894 if (write_lock_level
< 1) {
2895 write_lock_level
= 1;
2896 btrfs_release_path(p
);
2900 btrfs_set_path_blocking(p
);
2901 err
= split_leaf(trans
, root
, key
,
2902 p
, ins_len
, ret
== 0);
2903 btrfs_clear_path_blocking(p
, NULL
, 0);
2911 if (!p
->search_for_split
)
2912 unlock_up(p
, level
, lowest_unlock
,
2913 min_write_lock_level
, &write_lock_level
);
2920 * we don't really know what they plan on doing with the path
2921 * from here on, so for now just mark it as blocking
2923 if (!p
->leave_spinning
)
2924 btrfs_set_path_blocking(p
);
2925 if (ret
< 0 && !p
->skip_release_on_error
)
2926 btrfs_release_path(p
);
/*
 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
 * current state of the tree together with the operations recorded in the tree
 * modification log to search for the key in a previous version of this tree, as
 * denoted by the time_seq parameter.
 *
 * Naturally, there is no support for insert, delete or cow operations.
 *
 * The resulting path and return value will be set up as if we called
 * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
 */
2941 int btrfs_search_old_slot(struct btrfs_root
*root
, const struct btrfs_key
*key
,
2942 struct btrfs_path
*p
, u64 time_seq
)
2944 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
2945 struct extent_buffer
*b
;
2950 int lowest_unlock
= 1;
2951 u8 lowest_level
= 0;
2954 lowest_level
= p
->lowest_level
;
2955 WARN_ON(p
->nodes
[0] != NULL
);
2957 if (p
->search_commit_root
) {
2959 return btrfs_search_slot(NULL
, root
, key
, p
, 0, 0);
2963 b
= get_old_root(root
, time_seq
);
2964 level
= btrfs_header_level(b
);
2965 p
->locks
[level
] = BTRFS_READ_LOCK
;
2968 level
= btrfs_header_level(b
);
2969 p
->nodes
[level
] = b
;
2970 btrfs_clear_path_blocking(p
, NULL
, 0);
2973 * we have a lock on b and as long as we aren't changing
2974 * the tree, there is no way to for the items in b to change.
2975 * It is safe to drop the lock on our parent before we
2976 * go through the expensive btree search on b.
2978 btrfs_unlock_up_safe(p
, level
+ 1);
2981 * Since we can unwind ebs we want to do a real search every
2985 ret
= key_search(b
, key
, level
, &prev_cmp
, &slot
);
2989 if (ret
&& slot
> 0) {
2993 p
->slots
[level
] = slot
;
2994 unlock_up(p
, level
, lowest_unlock
, 0, NULL
);
2996 if (level
== lowest_level
) {
3002 err
= read_block_for_search(root
, p
, &b
, level
,
3011 level
= btrfs_header_level(b
);
3012 err
= btrfs_tree_read_lock_atomic(b
);
3014 btrfs_set_path_blocking(p
);
3015 btrfs_tree_read_lock(b
);
3016 btrfs_clear_path_blocking(p
, b
,
3019 b
= tree_mod_log_rewind(fs_info
, p
, b
, time_seq
);
3024 p
->locks
[level
] = BTRFS_READ_LOCK
;
3025 p
->nodes
[level
] = b
;
3027 p
->slots
[level
] = slot
;
3028 unlock_up(p
, level
, lowest_unlock
, 0, NULL
);
3034 if (!p
->leave_spinning
)
3035 btrfs_set_path_blocking(p
);
3037 btrfs_release_path(p
);
/*
 * helper to use instead of search slot if no exact match is needed but
 * instead the next or previous item should be returned.
 * When find_higher is true, the next higher item is returned, the next lower
 * otherwise.
 * When return_any and find_higher are both true, and no higher item is found,
 * return the next lower instead.
 * When return_any is true and find_higher is false, and no lower item is found,
 * return the next higher instead.
 * It returns 0 if any item is found, 1 if none is found (tree empty), and
 * < 0 on error
 */
3054 int btrfs_search_slot_for_read(struct btrfs_root
*root
,
3055 const struct btrfs_key
*key
,
3056 struct btrfs_path
*p
, int find_higher
,
3060 struct extent_buffer
*leaf
;
3063 ret
= btrfs_search_slot(NULL
, root
, key
, p
, 0, 0);
3067 * a return value of 1 means the path is at the position where the
3068 * item should be inserted. Normally this is the next bigger item,
3069 * but in case the previous item is the last in a leaf, path points
3070 * to the first free slot in the previous leaf, i.e. at an invalid
3076 if (p
->slots
[0] >= btrfs_header_nritems(leaf
)) {
3077 ret
= btrfs_next_leaf(root
, p
);
3083 * no higher item found, return the next
3088 btrfs_release_path(p
);
3092 if (p
->slots
[0] == 0) {
3093 ret
= btrfs_prev_leaf(root
, p
);
3098 if (p
->slots
[0] == btrfs_header_nritems(leaf
))
3105 * no lower item found, return the next
3110 btrfs_release_path(p
);
/*
 * adjust the pointers going up the tree, starting at level
 * making sure the right key of each node points to 'key'.
 * This is used after shifting pointers to the left, so it stops
 * fixing up pointers when a given leaf/node is not in slot 0 of the
 * higher levels
 */
static void fixup_low_keys(struct btrfs_path *path,
			   struct btrfs_disk_key *key, int level)
{
	int i;
	struct extent_buffer *t;
	int ret;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		int tslot = path->slots[i];

		if (!path->nodes[i])
			break;
		t = path->nodes[i];
		ret = tree_mod_log_insert_key(t, tslot, MOD_LOG_KEY_REPLACE,
				GFP_ATOMIC);
		BUG_ON(ret < 0);
		btrfs_set_node_key(t, key, tslot);
		btrfs_mark_buffer_dirty(path->nodes[i]);
		if (tslot != 0)
			break;
	}
}
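/*
 * Added note (not from the original source): fixup_low_keys() is why searches
 * that may modify slot 0 keep write locks on the parents.  When the first key
 * of a block changes, the pointer key in every ancestor whose path slot is 0
 * has to be rewritten to match, and the loop above stops at the first level
 * where the path is no longer in slot 0.
 */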
/*
 * update item key.
 *
 * This function isn't completely safe. It's the caller's responsibility
 * that the new key won't break the order
 */
void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
			     struct btrfs_path *path,
			     const struct btrfs_key *new_key)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *eb;
	int slot;

	eb = path->nodes[0];
	slot = path->slots[0];
	if (slot > 0) {
		btrfs_item_key(eb, &disk_key, slot - 1);
		BUG_ON(comp_keys(&disk_key, new_key) >= 0);
	}
	if (slot < btrfs_header_nritems(eb) - 1) {
		btrfs_item_key(eb, &disk_key, slot + 1);
		BUG_ON(comp_keys(&disk_key, new_key) <= 0);
	}

	btrfs_cpu_key_to_disk(&disk_key, new_key);
	btrfs_set_item_key(eb, &disk_key, slot);
	btrfs_mark_buffer_dirty(eb);
	if (slot == 0)
		fixup_low_keys(path, &disk_key, 1);
}
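/*
 * Example (added sketch, not from the original source): callers typically use
 * this to move an existing item's key without re-inserting it, e.g. trimming
 * the front of a file extent item by bumping key.offset.  The new key must
 * still sort between the neighbouring items, which is what the comp_keys()
 * checks above assert.  "bytes_trimmed" is a hypothetical amount:
 *
 *	struct btrfs_key new_key;
 *
 *	btrfs_item_key_to_cpu(path->nodes[0], &new_key, path->slots[0]);
 *	new_key.offset += bytes_trimmed;
 *	btrfs_set_item_key_safe(fs_info, path, &new_key);
 */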
/*
 * try to push data from one node into the next node left in the
 * tree.
 *
 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
 * error, and > 0 if there was no room in the left hand block.
 */
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct btrfs_fs_info *fs_info,
			  struct extent_buffer *dst,
			  struct extent_buffer *src, int empty)
{
	int push_items = 0;
	int src_nritems;
	int dst_nritems;
	int ret = 0;

	src_nritems = btrfs_header_nritems(src);
	dst_nritems = btrfs_header_nritems(dst);
	push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
	WARN_ON(btrfs_header_generation(src) != trans->transid);
	WARN_ON(btrfs_header_generation(dst) != trans->transid);

	if (!empty && src_nritems <= 8)
		return 1;

	if (push_items <= 0)
		return 1;

	if (empty) {
		push_items = min(src_nritems, push_items);
		if (push_items < src_nritems) {
			/* leave at least 8 pointers in the node if
			 * we aren't going to empty it
			 */
			if (src_nritems - push_items < 8) {
				if (push_items <= 8)
					return 1;
				push_items -= 8;
			}
		}
	} else
		push_items = min(src_nritems - 8, push_items);

	ret = tree_mod_log_eb_copy(fs_info, dst, src, dst_nritems, 0,
				   push_items);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		return ret;
	}
	copy_extent_buffer(dst, src,
			   btrfs_node_key_ptr_offset(dst_nritems),
			   btrfs_node_key_ptr_offset(0),
			   push_items * sizeof(struct btrfs_key_ptr));

	if (push_items < src_nritems) {
		/*
		 * Don't call tree_mod_log_insert_move here, key removal was
		 * already fully logged by tree_mod_log_eb_copy above.
		 */
		memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
				      btrfs_node_key_ptr_offset(push_items),
				      (src_nritems - push_items) *
				      sizeof(struct btrfs_key_ptr));
	}
	btrfs_set_header_nritems(src, src_nritems - push_items);
	btrfs_set_header_nritems(dst, dst_nritems + push_items);
	btrfs_mark_buffer_dirty(src);
	btrfs_mark_buffer_dirty(dst);

	return ret;
}
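/*
 * Added note (not from the original source): unless the caller asked to empty
 * the source node completely, push_node_left() deliberately leaves at least 8
 * pointers behind.  A reasonable reading of the heuristic (it is not stated in
 * the original) is that keeping a small minimum in the source avoids thrashing
 * where a node is drained by one balance and immediately refilled by the next
 * insert.
 */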
/*
 * try to push data from one node into the next node right in the
 * tree.
 *
 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
 * error, and > 0 if there was no room in the right hand block.
 *
 * this will only push up to 1/2 the contents of the left node over
 */
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info,
			      struct extent_buffer *dst,
			      struct extent_buffer *src)
{
	int push_items = 0;
	int max_push;
	int src_nritems;
	int dst_nritems;
	int ret = 0;

	WARN_ON(btrfs_header_generation(src) != trans->transid);
	WARN_ON(btrfs_header_generation(dst) != trans->transid);

	src_nritems = btrfs_header_nritems(src);
	dst_nritems = btrfs_header_nritems(dst);
	push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
	if (push_items <= 0)
		return 1;

	if (src_nritems < 4)
		return 1;

	max_push = src_nritems / 2 + 1;
	/* don't try to empty the node */
	if (max_push >= src_nritems)
		return 1;

	if (max_push < push_items)
		push_items = max_push;

	ret = tree_mod_log_insert_move(dst, push_items, 0, dst_nritems);
	BUG_ON(ret < 0);
	memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
				      btrfs_node_key_ptr_offset(0),
				      (dst_nritems) *
				      sizeof(struct btrfs_key_ptr));

	ret = tree_mod_log_eb_copy(fs_info, dst, src, 0,
				   src_nritems - push_items, push_items);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		return ret;
	}
	copy_extent_buffer(dst, src,
			   btrfs_node_key_ptr_offset(0),
			   btrfs_node_key_ptr_offset(src_nritems - push_items),
			   push_items * sizeof(struct btrfs_key_ptr));

	btrfs_set_header_nritems(src, src_nritems - push_items);
	btrfs_set_header_nritems(dst, dst_nritems + push_items);

	btrfs_mark_buffer_dirty(src);
	btrfs_mark_buffer_dirty(dst);

	return ret;
}
/*
 * helper function to insert a new root level in the tree.
 * A new node is allocated, and a single item is inserted to
 * point to the existing root
 *
 * returns zero on success or < 0 on failure.
 */
static noinline int insert_new_root(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_path *path, int level)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 lower_gen;
	struct extent_buffer *lower;
	struct extent_buffer *c;
	struct extent_buffer *old;
	struct btrfs_disk_key lower_key;
	int ret;

	BUG_ON(path->nodes[level]);
	BUG_ON(path->nodes[level-1] != root->node);

	lower = path->nodes[level-1];
	if (level == 1)
		btrfs_item_key(lower, &lower_key, 0);
	else
		btrfs_node_key(lower, &lower_key, 0);

	c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
				   &lower_key, level, root->node->start, 0);
	if (IS_ERR(c))
		return PTR_ERR(c);

	root_add_used(root, fs_info->nodesize);

	btrfs_set_header_nritems(c, 1);
	btrfs_set_node_key(c, &lower_key, 0);
	btrfs_set_node_blockptr(c, 0, lower->start);
	lower_gen = btrfs_header_generation(lower);
	WARN_ON(lower_gen != trans->transid);

	btrfs_set_node_ptr_generation(c, 0, lower_gen);

	btrfs_mark_buffer_dirty(c);

	old = root->node;
	ret = tree_mod_log_insert_root(root->node, c, 0);
	BUG_ON(ret < 0);
	rcu_assign_pointer(root->node, c);

	/* the super has an extra ref to root->node */
	free_extent_buffer(old);

	add_root_to_dirty_list(root);
	extent_buffer_get(c);
	path->nodes[level] = c;
	path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
	path->slots[level] = 0;
	return 0;
}
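/*
 * Added note (not from the original source): this is the only place the tree
 * grows in height.  The old root becomes the single child of the new root, so
 * after insert_new_root() the path has one more level and the caller can go on
 * to split the old (now non-root) node as usual.
 */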
/*
 * worker function to insert a single pointer in a node.
 * the node should have enough room for the pointer already
 *
 * slot and level indicate where you want the key to go, and
 * blocknr is the block the key points to.
 */
static void insert_ptr(struct btrfs_trans_handle *trans,
		       struct btrfs_fs_info *fs_info, struct btrfs_path *path,
		       struct btrfs_disk_key *key, u64 bytenr,
		       int slot, int level)
{
	struct extent_buffer *lower;
	int nritems;
	int ret;

	BUG_ON(!path->nodes[level]);
	btrfs_assert_tree_locked(path->nodes[level]);
	lower = path->nodes[level];
	nritems = btrfs_header_nritems(lower);
	BUG_ON(slot > nritems);
	BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(fs_info));
	if (slot != nritems) {
		if (level) {
			ret = tree_mod_log_insert_move(lower, slot + 1, slot,
					nritems - slot);
			BUG_ON(ret < 0);
		}
		memmove_extent_buffer(lower,
			      btrfs_node_key_ptr_offset(slot + 1),
			      btrfs_node_key_ptr_offset(slot),
			      (nritems - slot) * sizeof(struct btrfs_key_ptr));
	}
	if (level) {
		ret = tree_mod_log_insert_key(lower, slot, MOD_LOG_KEY_ADD,
				GFP_NOFS);
		BUG_ON(ret < 0);
	}
	btrfs_set_node_key(lower, key, slot);
	btrfs_set_node_blockptr(lower, slot, bytenr);
	WARN_ON(trans->transid == 0);
	btrfs_set_node_ptr_generation(lower, slot, trans->transid);
	btrfs_set_header_nritems(lower, nritems + 1);
	btrfs_mark_buffer_dirty(lower);
}
/*
 * split the node at the specified level in path in two.
 * The path is corrected to point to the appropriate node after the split
 *
 * Before splitting this tries to make some room in the node by pushing
 * left and right, if either one works, it returns right away.
 *
 * returns 0 on success and < 0 on failure
 */
3438 static noinline
int split_node(struct btrfs_trans_handle
*trans
,
3439 struct btrfs_root
*root
,
3440 struct btrfs_path
*path
, int level
)
3442 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
3443 struct extent_buffer
*c
;
3444 struct extent_buffer
*split
;
3445 struct btrfs_disk_key disk_key
;
3450 c
= path
->nodes
[level
];
3451 WARN_ON(btrfs_header_generation(c
) != trans
->transid
);
3452 if (c
== root
->node
) {
3454 * trying to split the root, lets make a new one
3456 * tree mod log: We don't log_removal old root in
3457 * insert_new_root, because that root buffer will be kept as a
3458 * normal node. We are going to log removal of half of the
3459 * elements below with tree_mod_log_eb_copy. We're holding a
3460 * tree lock on the buffer, which is why we cannot race with
3461 * other tree_mod_log users.
3463 ret
= insert_new_root(trans
, root
, path
, level
+ 1);
3467 ret
= push_nodes_for_insert(trans
, root
, path
, level
);
3468 c
= path
->nodes
[level
];
3469 if (!ret
&& btrfs_header_nritems(c
) <
3470 BTRFS_NODEPTRS_PER_BLOCK(fs_info
) - 3)
3476 c_nritems
= btrfs_header_nritems(c
);
3477 mid
= (c_nritems
+ 1) / 2;
3478 btrfs_node_key(c
, &disk_key
, mid
);
3480 split
= btrfs_alloc_tree_block(trans
, root
, 0, root
->root_key
.objectid
,
3481 &disk_key
, level
, c
->start
, 0);
3483 return PTR_ERR(split
);
3485 root_add_used(root
, fs_info
->nodesize
);
3486 ASSERT(btrfs_header_level(c
) == level
);
3488 ret
= tree_mod_log_eb_copy(fs_info
, split
, c
, 0, mid
, c_nritems
- mid
);
3490 btrfs_abort_transaction(trans
, ret
);
3493 copy_extent_buffer(split
, c
,
3494 btrfs_node_key_ptr_offset(0),
3495 btrfs_node_key_ptr_offset(mid
),
3496 (c_nritems
- mid
) * sizeof(struct btrfs_key_ptr
));
3497 btrfs_set_header_nritems(split
, c_nritems
- mid
);
3498 btrfs_set_header_nritems(c
, mid
);
3501 btrfs_mark_buffer_dirty(c
);
3502 btrfs_mark_buffer_dirty(split
);
3504 insert_ptr(trans
, fs_info
, path
, &disk_key
, split
->start
,
3505 path
->slots
[level
+ 1] + 1, level
+ 1);
3507 if (path
->slots
[level
] >= mid
) {
3508 path
->slots
[level
] -= mid
;
3509 btrfs_tree_unlock(c
);
3510 free_extent_buffer(c
);
3511 path
->nodes
[level
] = split
;
3512 path
->slots
[level
+ 1] += 1;
3514 btrfs_tree_unlock(split
);
3515 free_extent_buffer(split
);
/*
 * how many bytes are required to store the items in a leaf.  start
 * and nr indicate which items in the leaf to check.  This totals up the
 * space used both by the item structs and the item data
 */
static int leaf_space_used(struct extent_buffer *l, int start, int nr)
{
	struct btrfs_item *start_item;
	struct btrfs_item *end_item;
	struct btrfs_map_token token;
	int data_len;
	int nritems = btrfs_header_nritems(l);
	int end = min(nritems, start + nr) - 1;

	if (!nr)
		return 0;
	btrfs_init_map_token(&token);
	start_item = btrfs_item_nr(start);
	end_item = btrfs_item_nr(end);
	data_len = btrfs_token_item_offset(l, start_item, &token) +
		btrfs_token_item_size(l, start_item, &token);
	data_len = data_len - btrfs_token_item_offset(l, end_item, &token);
	data_len += sizeof(struct btrfs_item) * nr;
	WARN_ON(data_len < 0);
	return data_len;
}
/*
 * The space between the end of the leaf items and
 * the start of the leaf data.  IOW, how much room
 * the leaf has left for both items and data
 */
noinline int btrfs_leaf_free_space(struct btrfs_fs_info *fs_info,
				   struct extent_buffer *leaf)
{
	int nritems = btrfs_header_nritems(leaf);
	int ret;

	ret = BTRFS_LEAF_DATA_SIZE(fs_info) - leaf_space_used(leaf, 0, nritems);
	if (ret < 0) {
		btrfs_crit(fs_info,
			   "leaf free space ret %d, leaf data size %lu, used %d nritems %d",
			   ret,
			   (unsigned long) BTRFS_LEAF_DATA_SIZE(fs_info),
			   leaf_space_used(leaf, 0, nritems), nritems);
	}
	return ret;
}
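/*
 * Example (added sketch, not from the original source): BTRFS_LEAF_DATA_SIZE()
 * is the node size minus the block header.  A leaf holding N items uses
 * N * sizeof(struct btrfs_item) bytes of item headers growing from the front
 * plus their data growing back from the end, and the free space is simply what
 * is left between the two regions:
 *
 *	free = BTRFS_LEAF_DATA_SIZE(fs_info) -
 *	       leaf_space_used(leaf, 0, btrfs_header_nritems(leaf));
 */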
/*
 * min slot controls the lowest index we're willing to push to the
 * right.  We'll push up to and including min_slot, but no lower
 */
3573 static noinline
int __push_leaf_right(struct btrfs_fs_info
*fs_info
,
3574 struct btrfs_path
*path
,
3575 int data_size
, int empty
,
3576 struct extent_buffer
*right
,
3577 int free_space
, u32 left_nritems
,
3580 struct extent_buffer
*left
= path
->nodes
[0];
3581 struct extent_buffer
*upper
= path
->nodes
[1];
3582 struct btrfs_map_token token
;
3583 struct btrfs_disk_key disk_key
;
3588 struct btrfs_item
*item
;
3594 btrfs_init_map_token(&token
);
3599 nr
= max_t(u32
, 1, min_slot
);
3601 if (path
->slots
[0] >= left_nritems
)
3602 push_space
+= data_size
;
3604 slot
= path
->slots
[1];
3605 i
= left_nritems
- 1;
3607 item
= btrfs_item_nr(i
);
3609 if (!empty
&& push_items
> 0) {
3610 if (path
->slots
[0] > i
)
3612 if (path
->slots
[0] == i
) {
3613 int space
= btrfs_leaf_free_space(fs_info
, left
);
3614 if (space
+ push_space
* 2 > free_space
)
3619 if (path
->slots
[0] == i
)
3620 push_space
+= data_size
;
3622 this_item_size
= btrfs_item_size(left
, item
);
3623 if (this_item_size
+ sizeof(*item
) + push_space
> free_space
)
3627 push_space
+= this_item_size
+ sizeof(*item
);
3633 if (push_items
== 0)
3636 WARN_ON(!empty
&& push_items
== left_nritems
);
3638 /* push left to right */
3639 right_nritems
= btrfs_header_nritems(right
);
3641 push_space
= btrfs_item_end_nr(left
, left_nritems
- push_items
);
3642 push_space
-= leaf_data_end(fs_info
, left
);
3644 /* make room in the right data area */
3645 data_end
= leaf_data_end(fs_info
, right
);
3646 memmove_extent_buffer(right
,
3647 BTRFS_LEAF_DATA_OFFSET
+ data_end
- push_space
,
3648 BTRFS_LEAF_DATA_OFFSET
+ data_end
,
3649 BTRFS_LEAF_DATA_SIZE(fs_info
) - data_end
);
3651 /* copy from the left data area */
3652 copy_extent_buffer(right
, left
, BTRFS_LEAF_DATA_OFFSET
+
3653 BTRFS_LEAF_DATA_SIZE(fs_info
) - push_space
,
3654 BTRFS_LEAF_DATA_OFFSET
+ leaf_data_end(fs_info
, left
),
3657 memmove_extent_buffer(right
, btrfs_item_nr_offset(push_items
),
3658 btrfs_item_nr_offset(0),
3659 right_nritems
* sizeof(struct btrfs_item
));
3661 /* copy the items from left to right */
3662 copy_extent_buffer(right
, left
, btrfs_item_nr_offset(0),
3663 btrfs_item_nr_offset(left_nritems
- push_items
),
3664 push_items
* sizeof(struct btrfs_item
));
3666 /* update the item pointers */
3667 right_nritems
+= push_items
;
3668 btrfs_set_header_nritems(right
, right_nritems
);
3669 push_space
= BTRFS_LEAF_DATA_SIZE(fs_info
);
3670 for (i
= 0; i
< right_nritems
; i
++) {
3671 item
= btrfs_item_nr(i
);
3672 push_space
-= btrfs_token_item_size(right
, item
, &token
);
3673 btrfs_set_token_item_offset(right
, item
, push_space
, &token
);
3676 left_nritems
-= push_items
;
3677 btrfs_set_header_nritems(left
, left_nritems
);
3680 btrfs_mark_buffer_dirty(left
);
3682 clean_tree_block(fs_info
, left
);
3684 btrfs_mark_buffer_dirty(right
);
3686 btrfs_item_key(right
, &disk_key
, 0);
3687 btrfs_set_node_key(upper
, &disk_key
, slot
+ 1);
3688 btrfs_mark_buffer_dirty(upper
);
3690 /* then fixup the leaf pointer in the path */
3691 if (path
->slots
[0] >= left_nritems
) {
3692 path
->slots
[0] -= left_nritems
;
3693 if (btrfs_header_nritems(path
->nodes
[0]) == 0)
3694 clean_tree_block(fs_info
, path
->nodes
[0]);
3695 btrfs_tree_unlock(path
->nodes
[0]);
3696 free_extent_buffer(path
->nodes
[0]);
3697 path
->nodes
[0] = right
;
3698 path
->slots
[1] += 1;
3700 btrfs_tree_unlock(right
);
3701 free_extent_buffer(right
);
3706 btrfs_tree_unlock(right
);
3707 free_extent_buffer(right
);
/*
 * push some data in the path leaf to the right, trying to free up at
 * least data_size bytes.  returns zero if the push worked, nonzero otherwise
 *
 * returns 1 if the push failed because the other node didn't have enough
 * room, 0 if everything worked out and < 0 if there were major errors.
 *
 * this will push starting from min_slot to the end of the leaf.  It won't
 * push any slot lower than min_slot
 */
3721 static int push_leaf_right(struct btrfs_trans_handle
*trans
, struct btrfs_root
3722 *root
, struct btrfs_path
*path
,
3723 int min_data_size
, int data_size
,
3724 int empty
, u32 min_slot
)
3726 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
3727 struct extent_buffer
*left
= path
->nodes
[0];
3728 struct extent_buffer
*right
;
3729 struct extent_buffer
*upper
;
3735 if (!path
->nodes
[1])
3738 slot
= path
->slots
[1];
3739 upper
= path
->nodes
[1];
3740 if (slot
>= btrfs_header_nritems(upper
) - 1)
3743 btrfs_assert_tree_locked(path
->nodes
[1]);
3745 right
= read_node_slot(fs_info
, upper
, slot
+ 1);
3747 * slot + 1 is not valid or we fail to read the right node,
3748 * no big deal, just return.
3753 btrfs_tree_lock(right
);
3754 btrfs_set_lock_blocking(right
);
3756 free_space
= btrfs_leaf_free_space(fs_info
, right
);
3757 if (free_space
< data_size
)
3760 /* cow and double check */
3761 ret
= btrfs_cow_block(trans
, root
, right
, upper
,
3766 free_space
= btrfs_leaf_free_space(fs_info
, right
);
3767 if (free_space
< data_size
)
3770 left_nritems
= btrfs_header_nritems(left
);
3771 if (left_nritems
== 0)
3774 if (path
->slots
[0] == left_nritems
&& !empty
) {
3775 /* Key greater than all keys in the leaf, right neighbor has
3776 * enough room for it and we're not emptying our leaf to delete
3777 * it, therefore use right neighbor to insert the new item and
3778 * no need to touch/dirty our left leaft. */
3779 btrfs_tree_unlock(left
);
3780 free_extent_buffer(left
);
3781 path
->nodes
[0] = right
;
3787 return __push_leaf_right(fs_info
, path
, min_data_size
, empty
,
3788 right
, free_space
, left_nritems
, min_slot
);
3790 btrfs_tree_unlock(right
);
3791 free_extent_buffer(right
);
/*
 * push some data in the path leaf to the left, trying to free up at
 * least data_size bytes.  returns zero if the push worked, nonzero otherwise
 *
 * max_slot can put a limit on how far into the leaf we'll push items.  The
 * item at 'max_slot' won't be touched.  Use (u32)-1 to make us do all the
 * items
 */
3803 static noinline
int __push_leaf_left(struct btrfs_fs_info
*fs_info
,
3804 struct btrfs_path
*path
, int data_size
,
3805 int empty
, struct extent_buffer
*left
,
3806 int free_space
, u32 right_nritems
,
3809 struct btrfs_disk_key disk_key
;
3810 struct extent_buffer
*right
= path
->nodes
[0];
3814 struct btrfs_item
*item
;
3815 u32 old_left_nritems
;
3819 u32 old_left_item_size
;
3820 struct btrfs_map_token token
;
3822 btrfs_init_map_token(&token
);
3825 nr
= min(right_nritems
, max_slot
);
3827 nr
= min(right_nritems
- 1, max_slot
);
3829 for (i
= 0; i
< nr
; i
++) {
3830 item
= btrfs_item_nr(i
);
3832 if (!empty
&& push_items
> 0) {
3833 if (path
->slots
[0] < i
)
3835 if (path
->slots
[0] == i
) {
3836 int space
= btrfs_leaf_free_space(fs_info
, right
);
3837 if (space
+ push_space
* 2 > free_space
)
3842 if (path
->slots
[0] == i
)
3843 push_space
+= data_size
;
3845 this_item_size
= btrfs_item_size(right
, item
);
3846 if (this_item_size
+ sizeof(*item
) + push_space
> free_space
)
3850 push_space
+= this_item_size
+ sizeof(*item
);
3853 if (push_items
== 0) {
3857 WARN_ON(!empty
&& push_items
== btrfs_header_nritems(right
));
3859 /* push data from right to left */
3860 copy_extent_buffer(left
, right
,
3861 btrfs_item_nr_offset(btrfs_header_nritems(left
)),
3862 btrfs_item_nr_offset(0),
3863 push_items
* sizeof(struct btrfs_item
));
3865 push_space
= BTRFS_LEAF_DATA_SIZE(fs_info
) -
3866 btrfs_item_offset_nr(right
, push_items
- 1);
3868 copy_extent_buffer(left
, right
, BTRFS_LEAF_DATA_OFFSET
+
3869 leaf_data_end(fs_info
, left
) - push_space
,
3870 BTRFS_LEAF_DATA_OFFSET
+
3871 btrfs_item_offset_nr(right
, push_items
- 1),
3873 old_left_nritems
= btrfs_header_nritems(left
);
3874 BUG_ON(old_left_nritems
<= 0);
3876 old_left_item_size
= btrfs_item_offset_nr(left
, old_left_nritems
- 1);
3877 for (i
= old_left_nritems
; i
< old_left_nritems
+ push_items
; i
++) {
3880 item
= btrfs_item_nr(i
);
3882 ioff
= btrfs_token_item_offset(left
, item
, &token
);
3883 btrfs_set_token_item_offset(left
, item
,
3884 ioff
- (BTRFS_LEAF_DATA_SIZE(fs_info
) - old_left_item_size
),
3887 btrfs_set_header_nritems(left
, old_left_nritems
+ push_items
);
3889 /* fixup right node */
3890 if (push_items
> right_nritems
)
3891 WARN(1, KERN_CRIT
"push items %d nr %u\n", push_items
,
3894 if (push_items
< right_nritems
) {
3895 push_space
= btrfs_item_offset_nr(right
, push_items
- 1) -
3896 leaf_data_end(fs_info
, right
);
3897 memmove_extent_buffer(right
, BTRFS_LEAF_DATA_OFFSET
+
3898 BTRFS_LEAF_DATA_SIZE(fs_info
) - push_space
,
3899 BTRFS_LEAF_DATA_OFFSET
+
3900 leaf_data_end(fs_info
, right
), push_space
);
3902 memmove_extent_buffer(right
, btrfs_item_nr_offset(0),
3903 btrfs_item_nr_offset(push_items
),
3904 (btrfs_header_nritems(right
) - push_items
) *
3905 sizeof(struct btrfs_item
));
3907 right_nritems
-= push_items
;
3908 btrfs_set_header_nritems(right
, right_nritems
);
3909 push_space
= BTRFS_LEAF_DATA_SIZE(fs_info
);
3910 for (i
= 0; i
< right_nritems
; i
++) {
3911 item
= btrfs_item_nr(i
);
3913 push_space
= push_space
- btrfs_token_item_size(right
,
3915 btrfs_set_token_item_offset(right
, item
, push_space
, &token
);
3918 btrfs_mark_buffer_dirty(left
);
3920 btrfs_mark_buffer_dirty(right
);
3922 clean_tree_block(fs_info
, right
);
3924 btrfs_item_key(right
, &disk_key
, 0);
3925 fixup_low_keys(path
, &disk_key
, 1);
3927 /* then fixup the leaf pointer in the path */
3928 if (path
->slots
[0] < push_items
) {
3929 path
->slots
[0] += old_left_nritems
;
3930 btrfs_tree_unlock(path
->nodes
[0]);
3931 free_extent_buffer(path
->nodes
[0]);
3932 path
->nodes
[0] = left
;
3933 path
->slots
[1] -= 1;
3935 btrfs_tree_unlock(left
);
3936 free_extent_buffer(left
);
3937 path
->slots
[0] -= push_items
;
3939 BUG_ON(path
->slots
[0] < 0);
3942 btrfs_tree_unlock(left
);
3943 free_extent_buffer(left
);
/*
 * push some data in the path leaf to the left, trying to free up at
 * least data_size bytes.  returns zero if the push worked, nonzero otherwise
 *
 * max_slot can put a limit on how far into the leaf we'll push items.  The
 * item at 'max_slot' won't be touched.  Use (u32)-1 to make us push all the
 * items
 */
3955 static int push_leaf_left(struct btrfs_trans_handle
*trans
, struct btrfs_root
3956 *root
, struct btrfs_path
*path
, int min_data_size
,
3957 int data_size
, int empty
, u32 max_slot
)
3959 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
3960 struct extent_buffer
*right
= path
->nodes
[0];
3961 struct extent_buffer
*left
;
3967 slot
= path
->slots
[1];
3970 if (!path
->nodes
[1])
3973 right_nritems
= btrfs_header_nritems(right
);
3974 if (right_nritems
== 0)
3977 btrfs_assert_tree_locked(path
->nodes
[1]);
3979 left
= read_node_slot(fs_info
, path
->nodes
[1], slot
- 1);
3981 * slot - 1 is not valid or we fail to read the left node,
3982 * no big deal, just return.
3987 btrfs_tree_lock(left
);
3988 btrfs_set_lock_blocking(left
);
3990 free_space
= btrfs_leaf_free_space(fs_info
, left
);
3991 if (free_space
< data_size
) {
3996 /* cow and double check */
3997 ret
= btrfs_cow_block(trans
, root
, left
,
3998 path
->nodes
[1], slot
- 1, &left
);
4000 /* we hit -ENOSPC, but it isn't fatal here */
4006 free_space
= btrfs_leaf_free_space(fs_info
, left
);
4007 if (free_space
< data_size
) {
4012 return __push_leaf_left(fs_info
, path
, min_data_size
,
4013 empty
, left
, free_space
, right_nritems
,
4016 btrfs_tree_unlock(left
);
4017 free_extent_buffer(left
);
/*
 * split the path's leaf in two, making sure there is at least data_size
 * available for the resulting leaf level of the path.
 */
4025 static noinline
void copy_for_split(struct btrfs_trans_handle
*trans
,
4026 struct btrfs_fs_info
*fs_info
,
4027 struct btrfs_path
*path
,
4028 struct extent_buffer
*l
,
4029 struct extent_buffer
*right
,
4030 int slot
, int mid
, int nritems
)
4035 struct btrfs_disk_key disk_key
;
4036 struct btrfs_map_token token
;
4038 btrfs_init_map_token(&token
);
4040 nritems
= nritems
- mid
;
4041 btrfs_set_header_nritems(right
, nritems
);
4042 data_copy_size
= btrfs_item_end_nr(l
, mid
) - leaf_data_end(fs_info
, l
);
4044 copy_extent_buffer(right
, l
, btrfs_item_nr_offset(0),
4045 btrfs_item_nr_offset(mid
),
4046 nritems
* sizeof(struct btrfs_item
));
4048 copy_extent_buffer(right
, l
,
4049 BTRFS_LEAF_DATA_OFFSET
+ BTRFS_LEAF_DATA_SIZE(fs_info
) -
4050 data_copy_size
, BTRFS_LEAF_DATA_OFFSET
+
4051 leaf_data_end(fs_info
, l
), data_copy_size
);
4053 rt_data_off
= BTRFS_LEAF_DATA_SIZE(fs_info
) - btrfs_item_end_nr(l
, mid
);
4055 for (i
= 0; i
< nritems
; i
++) {
4056 struct btrfs_item
*item
= btrfs_item_nr(i
);
4059 ioff
= btrfs_token_item_offset(right
, item
, &token
);
4060 btrfs_set_token_item_offset(right
, item
,
4061 ioff
+ rt_data_off
, &token
);
4064 btrfs_set_header_nritems(l
, mid
);
4065 btrfs_item_key(right
, &disk_key
, 0);
4066 insert_ptr(trans
, fs_info
, path
, &disk_key
, right
->start
,
4067 path
->slots
[1] + 1, 1);
4069 btrfs_mark_buffer_dirty(right
);
4070 btrfs_mark_buffer_dirty(l
);
4071 BUG_ON(path
->slots
[0] != slot
);
4074 btrfs_tree_unlock(path
->nodes
[0]);
4075 free_extent_buffer(path
->nodes
[0]);
4076 path
->nodes
[0] = right
;
4077 path
->slots
[0] -= mid
;
4078 path
->slots
[1] += 1;
4080 btrfs_tree_unlock(right
);
4081 free_extent_buffer(right
);
4084 BUG_ON(path
->slots
[0] < 0);
/*
 * double splits happen when we need to insert a big item in the middle
 * of a leaf.  A double split can leave us with 3 mostly empty leaves:
 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
 *          A                 B                 C
 *
 * We avoid this by trying to push the items on either side of our target
 * into the adjacent leaves.  If all goes well we can avoid the double split
 * completely.
 */
4097 static noinline
int push_for_double_split(struct btrfs_trans_handle
*trans
,
4098 struct btrfs_root
*root
,
4099 struct btrfs_path
*path
,
4102 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
4107 int space_needed
= data_size
;
4109 slot
= path
->slots
[0];
4110 if (slot
< btrfs_header_nritems(path
->nodes
[0]))
4111 space_needed
-= btrfs_leaf_free_space(fs_info
, path
->nodes
[0]);
4114 * try to push all the items after our slot into the
4117 ret
= push_leaf_right(trans
, root
, path
, 1, space_needed
, 0, slot
);
4124 nritems
= btrfs_header_nritems(path
->nodes
[0]);
4126 * our goal is to get our slot at the start or end of a leaf. If
4127 * we've done so we're done
4129 if (path
->slots
[0] == 0 || path
->slots
[0] == nritems
)
4132 if (btrfs_leaf_free_space(fs_info
, path
->nodes
[0]) >= data_size
)
4135 /* try to push all the items before our slot into the next leaf */
4136 slot
= path
->slots
[0];
4137 space_needed
= data_size
;
4139 space_needed
-= btrfs_leaf_free_space(fs_info
, path
->nodes
[0]);
4140 ret
= push_leaf_left(trans
, root
, path
, 1, space_needed
, 0, slot
);
/*
 * split the path's leaf in two, making sure there is at least data_size
 * available for the resulting leaf level of the path.
 *
 * returns 0 if all went well and < 0 on failure.
 */
4158 static noinline
int split_leaf(struct btrfs_trans_handle
*trans
,
4159 struct btrfs_root
*root
,
4160 const struct btrfs_key
*ins_key
,
4161 struct btrfs_path
*path
, int data_size
,
4164 struct btrfs_disk_key disk_key
;
4165 struct extent_buffer
*l
;
4169 struct extent_buffer
*right
;
4170 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
4174 int num_doubles
= 0;
4175 int tried_avoid_double
= 0;
4178 slot
= path
->slots
[0];
4179 if (extend
&& data_size
+ btrfs_item_size_nr(l
, slot
) +
4180 sizeof(struct btrfs_item
) > BTRFS_LEAF_DATA_SIZE(fs_info
))
4183 /* first try to make some room by pushing left and right */
4184 if (data_size
&& path
->nodes
[1]) {
4185 int space_needed
= data_size
;
4187 if (slot
< btrfs_header_nritems(l
))
4188 space_needed
-= btrfs_leaf_free_space(fs_info
, l
);
4190 wret
= push_leaf_right(trans
, root
, path
, space_needed
,
4191 space_needed
, 0, 0);
4195 space_needed
= data_size
;
4197 space_needed
-= btrfs_leaf_free_space(fs_info
,
4199 wret
= push_leaf_left(trans
, root
, path
, space_needed
,
4200 space_needed
, 0, (u32
)-1);
4206 /* did the pushes work? */
4207 if (btrfs_leaf_free_space(fs_info
, l
) >= data_size
)
4211 if (!path
->nodes
[1]) {
4212 ret
= insert_new_root(trans
, root
, path
, 1);
4219 slot
= path
->slots
[0];
4220 nritems
= btrfs_header_nritems(l
);
4221 mid
= (nritems
+ 1) / 2;
4225 leaf_space_used(l
, mid
, nritems
- mid
) + data_size
>
4226 BTRFS_LEAF_DATA_SIZE(fs_info
)) {
4227 if (slot
>= nritems
) {
4231 if (mid
!= nritems
&&
4232 leaf_space_used(l
, mid
, nritems
- mid
) +
4233 data_size
> BTRFS_LEAF_DATA_SIZE(fs_info
)) {
4234 if (data_size
&& !tried_avoid_double
)
4235 goto push_for_double
;
4241 if (leaf_space_used(l
, 0, mid
) + data_size
>
4242 BTRFS_LEAF_DATA_SIZE(fs_info
)) {
4243 if (!extend
&& data_size
&& slot
== 0) {
4245 } else if ((extend
|| !data_size
) && slot
== 0) {
4249 if (mid
!= nritems
&&
4250 leaf_space_used(l
, mid
, nritems
- mid
) +
4251 data_size
> BTRFS_LEAF_DATA_SIZE(fs_info
)) {
4252 if (data_size
&& !tried_avoid_double
)
4253 goto push_for_double
;
4261 btrfs_cpu_key_to_disk(&disk_key
, ins_key
);
4263 btrfs_item_key(l
, &disk_key
, mid
);
4265 right
= btrfs_alloc_tree_block(trans
, root
, 0, root
->root_key
.objectid
,
4266 &disk_key
, 0, l
->start
, 0);
4268 return PTR_ERR(right
);
4270 root_add_used(root
, fs_info
->nodesize
);
4274 btrfs_set_header_nritems(right
, 0);
4275 insert_ptr(trans
, fs_info
, path
, &disk_key
,
4276 right
->start
, path
->slots
[1] + 1, 1);
4277 btrfs_tree_unlock(path
->nodes
[0]);
4278 free_extent_buffer(path
->nodes
[0]);
4279 path
->nodes
[0] = right
;
4281 path
->slots
[1] += 1;
4283 btrfs_set_header_nritems(right
, 0);
4284 insert_ptr(trans
, fs_info
, path
, &disk_key
,
4285 right
->start
, path
->slots
[1], 1);
4286 btrfs_tree_unlock(path
->nodes
[0]);
4287 free_extent_buffer(path
->nodes
[0]);
4288 path
->nodes
[0] = right
;
4290 if (path
->slots
[1] == 0)
4291 fixup_low_keys(path
, &disk_key
, 1);
4294 * We create a new leaf 'right' for the required ins_len and
4295 * we'll do btrfs_mark_buffer_dirty() on this leaf after copying
4296 * the content of ins_len to 'right'.
4301 copy_for_split(trans
, fs_info
, path
, l
, right
, slot
, mid
, nritems
);
4304 BUG_ON(num_doubles
!= 0);
4312 push_for_double_split(trans
, root
, path
, data_size
);
4313 tried_avoid_double
= 1;
4314 if (btrfs_leaf_free_space(fs_info
, path
->nodes
[0]) >= data_size
)
4319 static noinline
int setup_leaf_for_split(struct btrfs_trans_handle
*trans
,
4320 struct btrfs_root
*root
,
4321 struct btrfs_path
*path
, int ins_len
)
4323 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
4324 struct btrfs_key key
;
4325 struct extent_buffer
*leaf
;
4326 struct btrfs_file_extent_item
*fi
;
4331 leaf
= path
->nodes
[0];
4332 btrfs_item_key_to_cpu(leaf
, &key
, path
->slots
[0]);
4334 BUG_ON(key
.type
!= BTRFS_EXTENT_DATA_KEY
&&
4335 key
.type
!= BTRFS_EXTENT_CSUM_KEY
);
4337 if (btrfs_leaf_free_space(fs_info
, leaf
) >= ins_len
)
4340 item_size
= btrfs_item_size_nr(leaf
, path
->slots
[0]);
4341 if (key
.type
== BTRFS_EXTENT_DATA_KEY
) {
4342 fi
= btrfs_item_ptr(leaf
, path
->slots
[0],
4343 struct btrfs_file_extent_item
);
4344 extent_len
= btrfs_file_extent_num_bytes(leaf
, fi
);
4346 btrfs_release_path(path
);
4348 path
->keep_locks
= 1;
4349 path
->search_for_split
= 1;
4350 ret
= btrfs_search_slot(trans
, root
, &key
, path
, 0, 1);
4351 path
->search_for_split
= 0;
4358 leaf
= path
->nodes
[0];
4359 /* if our item isn't there, return now */
4360 if (item_size
!= btrfs_item_size_nr(leaf
, path
->slots
[0]))
4363 /* the leaf has changed, it now has room. return now */
4364 if (btrfs_leaf_free_space(fs_info
, path
->nodes
[0]) >= ins_len
)
4367 if (key
.type
== BTRFS_EXTENT_DATA_KEY
) {
4368 fi
= btrfs_item_ptr(leaf
, path
->slots
[0],
4369 struct btrfs_file_extent_item
);
4370 if (extent_len
!= btrfs_file_extent_num_bytes(leaf
, fi
))
4374 btrfs_set_path_blocking(path
);
4375 ret
= split_leaf(trans
, root
, &key
, path
, ins_len
, 1);
4379 path
->keep_locks
= 0;
4380 btrfs_unlock_up_safe(path
, 1);
4383 path
->keep_locks
= 0;
4387 static noinline
int split_item(struct btrfs_fs_info
*fs_info
,
4388 struct btrfs_path
*path
,
4389 const struct btrfs_key
*new_key
,
4390 unsigned long split_offset
)
4392 struct extent_buffer
*leaf
;
4393 struct btrfs_item
*item
;
4394 struct btrfs_item
*new_item
;
4400 struct btrfs_disk_key disk_key
;
4402 leaf
= path
->nodes
[0];
4403 BUG_ON(btrfs_leaf_free_space(fs_info
, leaf
) < sizeof(struct btrfs_item
));
4405 btrfs_set_path_blocking(path
);
4407 item
= btrfs_item_nr(path
->slots
[0]);
4408 orig_offset
= btrfs_item_offset(leaf
, item
);
4409 item_size
= btrfs_item_size(leaf
, item
);
4411 buf
= kmalloc(item_size
, GFP_NOFS
);
4415 read_extent_buffer(leaf
, buf
, btrfs_item_ptr_offset(leaf
,
4416 path
->slots
[0]), item_size
);
4418 slot
= path
->slots
[0] + 1;
4419 nritems
= btrfs_header_nritems(leaf
);
4420 if (slot
!= nritems
) {
4421 /* shift the items */
4422 memmove_extent_buffer(leaf
, btrfs_item_nr_offset(slot
+ 1),
4423 btrfs_item_nr_offset(slot
),
4424 (nritems
- slot
) * sizeof(struct btrfs_item
));
4427 btrfs_cpu_key_to_disk(&disk_key
, new_key
);
4428 btrfs_set_item_key(leaf
, &disk_key
, slot
);
4430 new_item
= btrfs_item_nr(slot
);
4432 btrfs_set_item_offset(leaf
, new_item
, orig_offset
);
4433 btrfs_set_item_size(leaf
, new_item
, item_size
- split_offset
);
4435 btrfs_set_item_offset(leaf
, item
,
4436 orig_offset
+ item_size
- split_offset
);
4437 btrfs_set_item_size(leaf
, item
, split_offset
);
4439 btrfs_set_header_nritems(leaf
, nritems
+ 1);
4441 /* write the data for the start of the original item */
4442 write_extent_buffer(leaf
, buf
,
4443 btrfs_item_ptr_offset(leaf
, path
->slots
[0]),
4446 /* write the data for the new item */
4447 write_extent_buffer(leaf
, buf
+ split_offset
,
4448 btrfs_item_ptr_offset(leaf
, slot
),
4449 item_size
- split_offset
);
4450 btrfs_mark_buffer_dirty(leaf
);
4452 BUG_ON(btrfs_leaf_free_space(fs_info
, leaf
) < 0);
/*
 * This function splits a single item into two items,
 * giving 'new_key' to the new item and splitting the
 * old one at split_offset (from the start of the item).
 *
 * The path may be released by this operation.  After
 * the split, the path is pointing to the old item.  The
 * new item is going to be in the same node as the old one.
 *
 * Note, the item being split must be small enough to live alone on
 * a tree block with room for one extra struct btrfs_item
 *
 * This allows us to split the item in place, keeping a lock on the
 * leaf the entire time.
 */
int btrfs_split_item(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_path *path,
		     const struct btrfs_key *new_key,
		     unsigned long split_offset)
{
	int ret;
	ret = setup_leaf_for_split(trans, root, path,
				   sizeof(struct btrfs_item));
	if (ret)
		return ret;

	ret = split_item(root->fs_info, path, new_key, split_offset);
	return ret;
}
/*
 * This function duplicates an item, giving 'new_key' to the new item.
 * It guarantees both items live in the same tree leaf and the new item
 * is contiguous with the original item.
 *
 * This allows us to split a file extent in place, keeping a lock on the
 * leaf the entire time.
 */
int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 struct btrfs_path *path,
			 const struct btrfs_key *new_key)
{
	struct extent_buffer *leaf;
	int ret;
	u32 item_size;

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	ret = setup_leaf_for_split(trans, root, path,
				   item_size + sizeof(struct btrfs_item));
	if (ret)
		return ret;

	path->slots[0]++;
	setup_items_for_insert(root, path, new_key, &item_size,
			       item_size, item_size +
			       sizeof(struct btrfs_item), 1);
	leaf = path->nodes[0];
	memcpy_extent_buffer(leaf,
			     btrfs_item_ptr_offset(leaf, path->slots[0]),
			     btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
			     item_size);
	return 0;
}
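/*
 * Example (added sketch, not from the original source): splitting a file
 * extent in place typically duplicates the item first and then adjusts both
 * halves; "second_half_key" is a placeholder for the key of the new copy:
 *
 *	ret = btrfs_duplicate_item(trans, root, path, &second_half_key);
 *	if (ret)
 *		return ret;
 *	(path->slots[0] now points at the new copy, contiguous with the old)
 */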
/*
 * make the item pointed to by the path smaller.  new_size indicates
 * how small to make it, and from_end tells us if we just chop bytes
 * off the end of the item or if we shift the item to chop bytes off
 * the front.
 */
4530 void btrfs_truncate_item(struct btrfs_fs_info
*fs_info
,
4531 struct btrfs_path
*path
, u32 new_size
, int from_end
)
4534 struct extent_buffer
*leaf
;
4535 struct btrfs_item
*item
;
4537 unsigned int data_end
;
4538 unsigned int old_data_start
;
4539 unsigned int old_size
;
4540 unsigned int size_diff
;
4542 struct btrfs_map_token token
;
4544 btrfs_init_map_token(&token
);
4546 leaf
= path
->nodes
[0];
4547 slot
= path
->slots
[0];
4549 old_size
= btrfs_item_size_nr(leaf
, slot
);
4550 if (old_size
== new_size
)
4553 nritems
= btrfs_header_nritems(leaf
);
4554 data_end
= leaf_data_end(fs_info
, leaf
);
4556 old_data_start
= btrfs_item_offset_nr(leaf
, slot
);
4558 size_diff
= old_size
- new_size
;
4561 BUG_ON(slot
>= nritems
);
4564 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4566 /* first correct the data pointers */
4567 for (i
= slot
; i
< nritems
; i
++) {
4569 item
= btrfs_item_nr(i
);
4571 ioff
= btrfs_token_item_offset(leaf
, item
, &token
);
4572 btrfs_set_token_item_offset(leaf
, item
,
4573 ioff
+ size_diff
, &token
);
4576 /* shift the data */
4578 memmove_extent_buffer(leaf
, BTRFS_LEAF_DATA_OFFSET
+
4579 data_end
+ size_diff
, BTRFS_LEAF_DATA_OFFSET
+
4580 data_end
, old_data_start
+ new_size
- data_end
);
4582 struct btrfs_disk_key disk_key
;
4585 btrfs_item_key(leaf
, &disk_key
, slot
);
4587 if (btrfs_disk_key_type(&disk_key
) == BTRFS_EXTENT_DATA_KEY
) {
4589 struct btrfs_file_extent_item
*fi
;
4591 fi
= btrfs_item_ptr(leaf
, slot
,
4592 struct btrfs_file_extent_item
);
4593 fi
= (struct btrfs_file_extent_item
*)(
4594 (unsigned long)fi
- size_diff
);
4596 if (btrfs_file_extent_type(leaf
, fi
) ==
4597 BTRFS_FILE_EXTENT_INLINE
) {
4598 ptr
= btrfs_item_ptr_offset(leaf
, slot
);
4599 memmove_extent_buffer(leaf
, ptr
,
4601 BTRFS_FILE_EXTENT_INLINE_DATA_START
);
4605 memmove_extent_buffer(leaf
, BTRFS_LEAF_DATA_OFFSET
+
4606 data_end
+ size_diff
, BTRFS_LEAF_DATA_OFFSET
+
4607 data_end
, old_data_start
- data_end
);
4609 offset
= btrfs_disk_key_offset(&disk_key
);
4610 btrfs_set_disk_key_offset(&disk_key
, offset
+ size_diff
);
4611 btrfs_set_item_key(leaf
, &disk_key
, slot
);
4613 fixup_low_keys(path
, &disk_key
, 1);
4616 item
= btrfs_item_nr(slot
);
4617 btrfs_set_item_size(leaf
, item
, new_size
);
4618 btrfs_mark_buffer_dirty(leaf
);
4620 if (btrfs_leaf_free_space(fs_info
, leaf
) < 0) {
4621 btrfs_print_leaf(leaf
);
/*
 * make the item pointed to by the path bigger, data_size is the added size.
 */
4629 void btrfs_extend_item(struct btrfs_fs_info
*fs_info
, struct btrfs_path
*path
,
4633 struct extent_buffer
*leaf
;
4634 struct btrfs_item
*item
;
4636 unsigned int data_end
;
4637 unsigned int old_data
;
4638 unsigned int old_size
;
4640 struct btrfs_map_token token
;
4642 btrfs_init_map_token(&token
);
4644 leaf
= path
->nodes
[0];
4646 nritems
= btrfs_header_nritems(leaf
);
4647 data_end
= leaf_data_end(fs_info
, leaf
);
4649 if (btrfs_leaf_free_space(fs_info
, leaf
) < data_size
) {
4650 btrfs_print_leaf(leaf
);
4653 slot
= path
->slots
[0];
4654 old_data
= btrfs_item_end_nr(leaf
, slot
);
4657 if (slot
>= nritems
) {
4658 btrfs_print_leaf(leaf
);
4659 btrfs_crit(fs_info
, "slot %d too large, nritems %d",
4665 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4667 /* first correct the data pointers */
4668 for (i
= slot
; i
< nritems
; i
++) {
4670 item
= btrfs_item_nr(i
);
4672 ioff
= btrfs_token_item_offset(leaf
, item
, &token
);
4673 btrfs_set_token_item_offset(leaf
, item
,
4674 ioff
- data_size
, &token
);
4677 /* shift the data */
4678 memmove_extent_buffer(leaf
, BTRFS_LEAF_DATA_OFFSET
+
4679 data_end
- data_size
, BTRFS_LEAF_DATA_OFFSET
+
4680 data_end
, old_data
- data_end
);
4682 data_end
= old_data
;
4683 old_size
= btrfs_item_size_nr(leaf
, slot
);
4684 item
= btrfs_item_nr(slot
);
4685 btrfs_set_item_size(leaf
, item
, old_size
+ data_size
);
4686 btrfs_mark_buffer_dirty(leaf
);
4688 if (btrfs_leaf_free_space(fs_info
, leaf
) < 0) {
4689 btrfs_print_leaf(leaf
);
/*
 * this is a helper for btrfs_insert_empty_items, the main goal here is
 * to save stack depth by doing the bulk of the work in a function
 * that doesn't call btrfs_search_slot
 */
4699 void setup_items_for_insert(struct btrfs_root
*root
, struct btrfs_path
*path
,
4700 const struct btrfs_key
*cpu_key
, u32
*data_size
,
4701 u32 total_data
, u32 total_size
, int nr
)
4703 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
4704 struct btrfs_item
*item
;
4707 unsigned int data_end
;
4708 struct btrfs_disk_key disk_key
;
4709 struct extent_buffer
*leaf
;
4711 struct btrfs_map_token token
;
4713 if (path
->slots
[0] == 0) {
4714 btrfs_cpu_key_to_disk(&disk_key
, cpu_key
);
4715 fixup_low_keys(path
, &disk_key
, 1);
4717 btrfs_unlock_up_safe(path
, 1);
4719 btrfs_init_map_token(&token
);
4721 leaf
= path
->nodes
[0];
4722 slot
= path
->slots
[0];
4724 nritems
= btrfs_header_nritems(leaf
);
4725 data_end
= leaf_data_end(fs_info
, leaf
);
4727 if (btrfs_leaf_free_space(fs_info
, leaf
) < total_size
) {
4728 btrfs_print_leaf(leaf
);
4729 btrfs_crit(fs_info
, "not enough freespace need %u have %d",
4730 total_size
, btrfs_leaf_free_space(fs_info
, leaf
));
4734 if (slot
!= nritems
) {
4735 unsigned int old_data
= btrfs_item_end_nr(leaf
, slot
);
4737 if (old_data
< data_end
) {
4738 btrfs_print_leaf(leaf
);
4739 btrfs_crit(fs_info
, "slot %d old_data %d data_end %d",
4740 slot
, old_data
, data_end
);
4744 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4746 /* first correct the data pointers */
4747 for (i
= slot
; i
< nritems
; i
++) {
4750 item
= btrfs_item_nr(i
);
4751 ioff
= btrfs_token_item_offset(leaf
, item
, &token
);
4752 btrfs_set_token_item_offset(leaf
, item
,
4753 ioff
- total_data
, &token
);
4755 /* shift the items */
4756 memmove_extent_buffer(leaf
, btrfs_item_nr_offset(slot
+ nr
),
4757 btrfs_item_nr_offset(slot
),
4758 (nritems
- slot
) * sizeof(struct btrfs_item
));
4760 /* shift the data */
4761 memmove_extent_buffer(leaf
, BTRFS_LEAF_DATA_OFFSET
+
4762 data_end
- total_data
, BTRFS_LEAF_DATA_OFFSET
+
4763 data_end
, old_data
- data_end
);
4764 data_end
= old_data
;
4767 /* setup the item for the new data */
4768 for (i
= 0; i
< nr
; i
++) {
4769 btrfs_cpu_key_to_disk(&disk_key
, cpu_key
+ i
);
4770 btrfs_set_item_key(leaf
, &disk_key
, slot
+ i
);
4771 item
= btrfs_item_nr(slot
+ i
);
4772 btrfs_set_token_item_offset(leaf
, item
,
4773 data_end
- data_size
[i
], &token
);
4774 data_end
-= data_size
[i
];
4775 btrfs_set_token_item_size(leaf
, item
, data_size
[i
], &token
);
4778 btrfs_set_header_nritems(leaf
, nritems
+ nr
);
4779 btrfs_mark_buffer_dirty(leaf
);
4781 if (btrfs_leaf_free_space(fs_info
, leaf
) < 0) {
4782 btrfs_print_leaf(leaf
);
/*
 * Given a key and some data, insert items into the tree.
 * This does all the path init required, making room in the tree if needed.
 */
int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path,
			     const struct btrfs_key *cpu_key, u32 *data_size,
			     int nr)
{
	int ret = 0;
	int slot;
	int i;
	u32 total_size = 0;
	u32 total_data = 0;

	for (i = 0; i < nr; i++)
		total_data += data_size[i];

	total_size = total_data + (nr * sizeof(struct btrfs_item));
	ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
	if (ret == 0)
		return -EEXIST;
	if (ret < 0)
		return ret;

	slot = path->slots[0];
	BUG_ON(slot < 0);

	setup_items_for_insert(root, path, cpu_key, data_size,
			       total_data, total_size, nr);
	return 0;
}
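/*
 * Illustrative sketch, not part of the original file: inserting two adjacent
 * empty items with one leaf operation and then filling them in.  The keys,
 * sizes and payload buffers are hypothetical.
 */
static int __maybe_unused example_insert_two_items(struct btrfs_trans_handle *trans,
						   struct btrfs_root *root,
						   struct btrfs_key *keys,
						   void **bufs, u32 *sizes)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	unsigned long ptr;
	int i;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* reserves room and leaves path->slots[0] at the first new item */
	ret = btrfs_insert_empty_items(trans, root, path, keys, sizes, 2);
	if (ret)
		goto out;

	leaf = path->nodes[0];
	for (i = 0; i < 2; i++) {
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0] + i);
		write_extent_buffer(leaf, bufs[i], ptr, sizes[i]);
	}
	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}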
/*
 * Given a key and some data, insert an item into the tree.
 * This does all the path init required, making room in the tree if needed.
 */
int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      const struct btrfs_key *cpu_key, void *data,
		      u32 data_size)
{
	int ret = 0;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	unsigned long ptr;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
	if (!ret) {
		leaf = path->nodes[0];
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		write_extent_buffer(leaf, data, ptr, data_size);
		btrfs_mark_buffer_dirty(leaf);
	}
	btrfs_free_path(path);
	return ret;
}
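/*
 * Illustrative sketch, not part of the original file: the one-shot insert
 * used when the caller does not need to keep a path to the new item.  The
 * key fields and payload are hypothetical.
 */
static int __maybe_unused example_insert_blob(struct btrfs_trans_handle *trans,
					      struct btrfs_root *root,
					      u64 objectid, u8 type, u64 offset,
					      void *blob, u32 blob_len)
{
	struct btrfs_key key;

	key.objectid = objectid;
	key.type = type;
	key.offset = offset;

	/* btrfs_insert_item() allocates and frees its own path internally */
	return btrfs_insert_item(trans, root, &key, blob, blob_len);
}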
/*
 * delete the pointer from a given node.
 *
 * the tree should have been previously balanced so the deletion does not
 * empty a node.
 */
static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
		    int level, int slot)
{
	struct extent_buffer *parent = path->nodes[level];
	u32 nritems;
	int ret;

	nritems = btrfs_header_nritems(parent);
	if (slot != nritems - 1) {
		if (level) {
			ret = tree_mod_log_insert_move(parent, slot, slot + 1,
					nritems - slot - 1);
			BUG_ON(ret < 0);
		}
		memmove_extent_buffer(parent,
			      btrfs_node_key_ptr_offset(slot),
			      btrfs_node_key_ptr_offset(slot + 1),
			      sizeof(struct btrfs_key_ptr) *
			      (nritems - slot - 1));
	} else if (level) {
		ret = tree_mod_log_insert_key(parent, slot, MOD_LOG_KEY_REMOVE,
				GFP_NOFS);
		BUG_ON(ret < 0);
	}

	nritems--;
	btrfs_set_header_nritems(parent, nritems);
	if (nritems == 0 && parent == root->node) {
		BUG_ON(btrfs_header_level(root->node) != 1);
		/* just turn the root into a leaf and break */
		btrfs_set_header_level(root->node, 0);
	} else if (slot == 0) {
		struct btrfs_disk_key disk_key;

		btrfs_node_key(parent, &disk_key, 0);
		fixup_low_keys(path, &disk_key, level + 1);
	}
	btrfs_mark_buffer_dirty(parent);
}
/*
 * a helper function to delete the leaf pointed to by path->slots[1] and
 * path->nodes[1].
 *
 * This deletes the pointer in path->nodes[1] and frees the leaf
 * block extent.  zero is returned if it all worked out, < 0 otherwise.
 *
 * The path must have already been setup for deleting the leaf, including
 * all the proper balancing. path->nodes[1] must be locked.
 */
static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct extent_buffer *leaf)
{
	WARN_ON(btrfs_header_generation(leaf) != trans->transid);
	del_ptr(root, path, 1, path->slots[1]);

	/*
	 * btrfs_free_extent is expensive, we want to make sure we
	 * aren't holding any locks when we call it
	 */
	btrfs_unlock_up_safe(path, 0);

	root_sub_used(root, leaf->len);

	extent_buffer_get(leaf);
	btrfs_free_tree_block(trans, root, leaf, 0, 1);
	free_extent_buffer_stale(leaf);
}
/*
 * delete the item at the leaf level in path.  If that empties
 * the leaf, remove it from the tree
 */
int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct btrfs_path *path, int slot, int nr)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	u32 last_off;
	u32 dsize = 0;
	int ret = 0;
	int wret;
	int i;
	u32 nritems;
	struct btrfs_map_token token;

	btrfs_init_map_token(&token);

	leaf = path->nodes[0];
	last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);

	for (i = 0; i < nr; i++)
		dsize += btrfs_item_size_nr(leaf, slot + i);

	nritems = btrfs_header_nritems(leaf);

	if (slot + nr != nritems) {
		int data_end = leaf_data_end(fs_info, leaf);

		memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
			      data_end + dsize,
			      BTRFS_LEAF_DATA_OFFSET + data_end,
			      last_off - data_end);

		for (i = slot + nr; i < nritems; i++) {
			u32 ioff;

			item = btrfs_item_nr(i);
			ioff = btrfs_token_item_offset(leaf, item, &token);
			btrfs_set_token_item_offset(leaf, item,
						    ioff + dsize, &token);
		}

		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
			      btrfs_item_nr_offset(slot + nr),
			      sizeof(struct btrfs_item) *
			      (nritems - slot - nr));
	}
	btrfs_set_header_nritems(leaf, nritems - nr);
	nritems -= nr;

	/* delete the leaf if we've emptied it */
	if (nritems == 0) {
		if (leaf == root->node) {
			btrfs_set_header_level(leaf, 0);
		} else {
			btrfs_set_path_blocking(path);
			clean_tree_block(fs_info, leaf);
			btrfs_del_leaf(trans, root, path, leaf);
		}
	} else {
		int used = leaf_space_used(leaf, 0, nritems);
		if (slot == 0) {
			struct btrfs_disk_key disk_key;

			btrfs_item_key(leaf, &disk_key, 0);
			fixup_low_keys(path, &disk_key, 1);
		}

		/* delete the leaf if it is mostly empty */
		if (used < BTRFS_LEAF_DATA_SIZE(fs_info) / 3) {
			/* push_leaf_left fixes the path.
			 * make sure the path still points to our leaf
			 * for possible call to del_ptr below
			 */
			slot = path->slots[1];
			extent_buffer_get(leaf);

			btrfs_set_path_blocking(path);
			wret = push_leaf_left(trans, root, path, 1, 1,
					      1, (u32)-1);
			if (wret < 0 && wret != -ENOSPC)
				ret = wret;

			if (path->nodes[0] == leaf &&
			    btrfs_header_nritems(leaf)) {
				wret = push_leaf_right(trans, root, path, 1,
						       1, 1, 0);
				if (wret < 0 && wret != -ENOSPC)
					ret = wret;
			}

			if (btrfs_header_nritems(leaf) == 0) {
				path->slots[1] = slot;
				btrfs_del_leaf(trans, root, path, leaf);
				free_extent_buffer(leaf);
				ret = 0;
			} else {
				/* if we're still in the path, make sure
				 * we're dirty.  Otherwise, one of the
				 * push_leaf functions must have already
				 * dirtied this buffer
				 */
				if (path->nodes[0] == leaf)
					btrfs_mark_buffer_dirty(leaf);
				free_extent_buffer(leaf);
			}
		} else {
			btrfs_mark_buffer_dirty(leaf);
		}
	}
	return ret;
}
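/*
 * Illustrative sketch, not part of the original file: looking up one item
 * and removing it with btrfs_del_items().  Passing ins_len == -1 together
 * with cow == 1 to btrfs_search_slot() signals an intended deletion, so the
 * leaf is cowed and may later be merged with a neighbour.  The key is
 * hypothetical.
 */
static int __maybe_unused example_delete_item(struct btrfs_trans_handle *trans,
					      struct btrfs_root *root,
					      const struct btrfs_key *key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, root, key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
	if (ret < 0)
		goto out;

	/* delete a single item at the slot the search left us on */
	ret = btrfs_del_items(trans, root, path, path->slots[0], 1);
out:
	btrfs_free_path(path);
	return ret;
}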
/*
 * search the tree again to find a leaf with lesser keys
 * returns 0 if it found something or 1 if there are no lesser leaves.
 * returns < 0 on io errors.
 *
 * This may release the path, and so you may lose any locks held at the
 * time you call it.
 */
int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
{
	struct btrfs_key key;
	struct btrfs_disk_key found_key;
	int ret;

	btrfs_item_key_to_cpu(path->nodes[0], &key, 0);

	if (key.offset > 0) {
		key.offset--;
	} else if (key.type > 0) {
		key.type--;
		key.offset = (u64)-1;
	} else if (key.objectid > 0) {
		key.objectid--;
		key.type = (u8)-1;
		key.offset = (u64)-1;
	} else {
		return 1;
	}

	btrfs_release_path(path);
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	btrfs_item_key(path->nodes[0], &found_key, 0);
	ret = comp_keys(&found_key, &key);
	/*
	 * We might have had an item with the previous key in the tree right
	 * before we released our path. And after we released our path, that
	 * item might have been pushed to the first slot (0) of the leaf we
	 * were holding due to a tree balance. Alternatively, an item with the
	 * previous key can exist as the only element of a leaf (big fat item).
	 * Therefore account for these 2 cases, so that our callers (like
	 * btrfs_previous_item) don't miss an existing item with a key matching
	 * the previous key we computed above.
	 */
	if (ret <= 0)
		return 0;
	return 1;
}
/*
 * A helper function to walk down the tree starting at min_key, and looking
 * for nodes or leaves that have a minimum transaction id.
 * This is used by the btree defrag code, and tree logging
 *
 * This does not cow, but it does stuff the starting key it finds back
 * into min_key, so you can call btrfs_search_slot with cow=1 on the
 * key and get a writable path.
 *
 * This honors path->lowest_level to prevent descent past a given level
 * of the tree.
 *
 * min_trans indicates the oldest transaction that you are interested
 * in walking through.  Any nodes or leaves older than min_trans are
 * skipped over (without reading them).
 *
 * returns zero if something useful was found, < 0 on error and 1 if there
 * was nothing in the tree that matched the search criteria.
 */
int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
			 struct btrfs_path *path,
			 u64 min_trans)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *cur;
	struct btrfs_key found_key;
	int slot;
	int sret;
	u32 nritems;
	int level;
	int ret = 1;
	int keep_locks = path->keep_locks;

	path->keep_locks = 1;
again:
	cur = btrfs_read_lock_root_node(root);
	level = btrfs_header_level(cur);
	WARN_ON(path->nodes[level]);
	path->nodes[level] = cur;
	path->locks[level] = BTRFS_READ_LOCK;

	if (btrfs_header_generation(cur) < min_trans) {
		ret = 1;
		goto out;
	}
	while (1) {
		nritems = btrfs_header_nritems(cur);
		level = btrfs_header_level(cur);
		sret = btrfs_bin_search(cur, min_key, level, &slot);

		/* at the lowest level, we're done, setup the path and exit */
		if (level == path->lowest_level) {
			if (slot >= nritems)
				goto find_next_key;
			ret = 0;
			path->slots[level] = slot;
			btrfs_item_key_to_cpu(cur, &found_key, slot);
			goto out;
		}
		if (sret && slot > 0)
			slot--;
		/*
		 * check this node pointer against the min_trans parameters.
		 * If it is too old, skip to the next one.
		 */
		while (slot < nritems) {
			u64 gen;

			gen = btrfs_node_ptr_generation(cur, slot);
			if (gen < min_trans) {
				slot++;
				continue;
			}
			break;
		}
find_next_key:
		/*
		 * we didn't find a candidate key in this node, walk forward
		 * and find another one
		 */
		if (slot >= nritems) {
			path->slots[level] = slot;
			btrfs_set_path_blocking(path);
			sret = btrfs_find_next_key(root, path, min_key, level,
						  min_trans);
			if (sret == 0) {
				btrfs_release_path(path);
				goto again;
			} else {
				goto out;
			}
		}
		/* save our key for returning back */
		btrfs_node_key_to_cpu(cur, &found_key, slot);
		path->slots[level] = slot;
		if (level == path->lowest_level) {
			ret = 0;
			goto out;
		}
		btrfs_set_path_blocking(path);
		cur = read_node_slot(fs_info, cur, slot);
		if (IS_ERR(cur)) {
			ret = PTR_ERR(cur);
			goto out;
		}

		btrfs_tree_read_lock(cur);

		path->locks[level - 1] = BTRFS_READ_LOCK;
		path->nodes[level - 1] = cur;
		unlock_up(path, level, 1, 0, NULL);
		btrfs_clear_path_blocking(path, NULL, 0);
	}
out:
	path->keep_locks = keep_locks;
	if (ret == 0) {
		btrfs_unlock_up_safe(path, path->lowest_level + 1);
		btrfs_set_path_blocking(path);
		memcpy(min_key, &found_key, sizeof(found_key));
	}
	return ret;
}
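/*
 * Illustrative sketch, not part of the original file: the walk pattern that
 * the defrag and tree-log code build on top of btrfs_search_forward(),
 * visiting every key that lives in a block newer than min_trans.  The
 * per-item processing is left as a hypothetical callback.
 */
static int __maybe_unused example_walk_newer_than(struct btrfs_root *root,
						  u64 min_trans,
						  int (*process)(struct btrfs_path *path,
								 void *ctx),
						  void *ctx)
{
	struct btrfs_path *path;
	struct btrfs_key min_key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	min_key.objectid = 0;
	min_key.type = 0;
	min_key.offset = 0;

	while (1) {
		/* stuffs the key it found back into min_key on success */
		ret = btrfs_search_forward(root, &min_key, path, min_trans);
		if (ret) {
			/* 1 means nothing newer was found, < 0 is an error */
			if (ret > 0)
				ret = 0;
			break;
		}

		ret = process(path, ctx);
		btrfs_release_path(path);
		if (ret)
			break;

		/* advance past the key we just visited */
		if (min_key.offset < (u64)-1) {
			min_key.offset++;
		} else if (min_key.type < (u8)-1) {
			min_key.type++;
			min_key.offset = 0;
		} else if (min_key.objectid < (u64)-1) {
			min_key.objectid++;
			min_key.type = 0;
			min_key.offset = 0;
		} else {
			break;
		}
	}

	btrfs_free_path(path);
	return ret;
}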
static int tree_move_down(struct btrfs_fs_info *fs_info,
			   struct btrfs_path *path,
			   int *level)
{
	struct extent_buffer *eb;

	BUG_ON(*level == 0);
	eb = read_node_slot(fs_info, path->nodes[*level], path->slots[*level]);
	if (IS_ERR(eb))
		return PTR_ERR(eb);

	path->nodes[*level - 1] = eb;
	path->slots[*level - 1] = 0;
	(*level)--;
	return 0;
}
static int tree_move_next_or_upnext(struct btrfs_path *path,
				    int *level, int root_level)
{
	int ret = 0;
	int nritems;
	nritems = btrfs_header_nritems(path->nodes[*level]);

	path->slots[*level]++;

	while (path->slots[*level] >= nritems) {
		if (*level == root_level)
			return -1;

		/* move upnext */
		path->slots[*level] = 0;
		free_extent_buffer(path->nodes[*level]);
		path->nodes[*level] = NULL;
		(*level)++;
		path->slots[*level]++;

		nritems = btrfs_header_nritems(path->nodes[*level]);
		ret = 1;
	}
	return ret;
}
/*
 * Returns 1 if it had to move up and next. 0 is returned if it moved only next
 * or down.
 */
static int tree_advance(struct btrfs_fs_info *fs_info,
			struct btrfs_path *path,
			int *level, int root_level,
			int allow_down,
			struct btrfs_key *key)
{
	int ret;

	if (*level == 0 || !allow_down) {
		ret = tree_move_next_or_upnext(path, level, root_level);
	} else {
		ret = tree_move_down(fs_info, path, level);
	}
	if (ret >= 0) {
		if (*level == 0)
			btrfs_item_key_to_cpu(path->nodes[*level], key,
					path->slots[*level]);
		else
			btrfs_node_key_to_cpu(path->nodes[*level], key,
					path->slots[*level]);
	}
	return ret;
}
static int tree_compare_item(struct btrfs_path *left_path,
			     struct btrfs_path *right_path,
			     char *tmp_buf)
{
	int cmp;
	int len1, len2;
	unsigned long off1, off2;

	len1 = btrfs_item_size_nr(left_path->nodes[0], left_path->slots[0]);
	len2 = btrfs_item_size_nr(right_path->nodes[0], right_path->slots[0]);
	if (len1 != len2)
		return 1;

	off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]);
	off2 = btrfs_item_ptr_offset(right_path->nodes[0],
				right_path->slots[0]);

	read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1);

	cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1);
	if (cmp)
		return 1;
	return 0;
}
#define ADVANCE 1
#define ADVANCE_ONLY_NEXT -1

/*
 * This function compares two trees and calls the provided callback for
 * every changed/new/deleted item it finds.
 * If shared tree blocks are encountered, whole subtrees are skipped, making
 * the compare pretty fast on snapshotted subvolumes.
 *
 * This currently works on commit roots only. As commit roots are read only,
 * we don't do any locking. The commit roots are protected with transactions.
 * Transactions are ended and rejoined when a commit is tried in between.
 *
 * This function checks for modifications done to the trees while comparing.
 * If it detects a change, it aborts immediately.
 */
int btrfs_compare_trees(struct btrfs_root *left_root,
			struct btrfs_root *right_root,
			btrfs_changed_cb_t changed_cb, void *ctx)
{
	struct btrfs_fs_info *fs_info = left_root->fs_info;
	int ret;
	int cmp;
	struct btrfs_path *left_path = NULL;
	struct btrfs_path *right_path = NULL;
	struct btrfs_key left_key;
	struct btrfs_key right_key;
	char *tmp_buf = NULL;
	int left_root_level;
	int right_root_level;
	int left_level;
	int right_level;
	int left_end_reached;
	int right_end_reached;
	int advance_left;
	int advance_right;
	u64 left_blockptr;
	u64 right_blockptr;
	u64 left_gen;
	u64 right_gen;

	left_path = btrfs_alloc_path();
	if (!left_path) {
		ret = -ENOMEM;
		goto out;
	}
	right_path = btrfs_alloc_path();
	if (!right_path) {
		ret = -ENOMEM;
		goto out;
	}

	tmp_buf = kvmalloc(fs_info->nodesize, GFP_KERNEL);
	if (!tmp_buf) {
		ret = -ENOMEM;
		goto out;
	}

	left_path->search_commit_root = 1;
	left_path->skip_locking = 1;
	right_path->search_commit_root = 1;
	right_path->skip_locking = 1;

	/*
	 * Strategy: Go to the first items of both trees. Then do
	 *
	 * If both trees are at level 0
	 *   Compare keys of current items
	 *     If left < right treat left item as new, advance left tree
	 *       and repeat
	 *     If left > right treat right item as deleted, advance right tree
	 *       and repeat
	 *     If left == right do deep compare of items, treat as changed if
	 *       needed, advance both trees and repeat
	 * If both trees are at the same level but not at level 0
	 *   Compare keys of current nodes/leafs
	 *     If left < right advance left tree and repeat
	 *     If left > right advance right tree and repeat
	 *     If left == right compare blockptrs of the next nodes/leafs
	 *       If they match advance both trees but stay at the same level
	 *         and repeat
	 *       If they don't match advance both trees while allowing to go
	 *         deeper and repeat
	 * If tree levels are different
	 *   Advance the tree that needs it and repeat
	 *
	 * Advancing a tree means:
	 *   If we are at level 0, try to go to the next slot. If that's not
	 *   possible, go one level up and repeat. Stop when we found a level
	 *   where we could go to the next slot. We may at this point be on a
	 *   node or a leaf.
	 *
	 *   If we are not at level 0 and not on shared tree blocks, go one
	 *   slot to the right if possible or go up and right.
	 *
	 *   If we are not at level 0 and on shared tree blocks, go one slot to
	 *   the right if possible or go up and right.
	 */

	down_read(&fs_info->commit_root_sem);
	left_level = btrfs_header_level(left_root->commit_root);
	left_root_level = left_level;
	left_path->nodes[left_level] =
			btrfs_clone_extent_buffer(left_root->commit_root);
	if (!left_path->nodes[left_level]) {
		up_read(&fs_info->commit_root_sem);
		ret = -ENOMEM;
		goto out;
	}
	extent_buffer_get(left_path->nodes[left_level]);

	right_level = btrfs_header_level(right_root->commit_root);
	right_root_level = right_level;
	right_path->nodes[right_level] =
			btrfs_clone_extent_buffer(right_root->commit_root);
	if (!right_path->nodes[right_level]) {
		up_read(&fs_info->commit_root_sem);
		ret = -ENOMEM;
		goto out;
	}
	extent_buffer_get(right_path->nodes[right_level]);
	up_read(&fs_info->commit_root_sem);

	if (left_level == 0)
		btrfs_item_key_to_cpu(left_path->nodes[left_level],
				&left_key, left_path->slots[left_level]);
	else
		btrfs_node_key_to_cpu(left_path->nodes[left_level],
				&left_key, left_path->slots[left_level]);
	if (right_level == 0)
		btrfs_item_key_to_cpu(right_path->nodes[right_level],
				&right_key, right_path->slots[right_level]);
	else
		btrfs_node_key_to_cpu(right_path->nodes[right_level],
				&right_key, right_path->slots[right_level]);

	left_end_reached = right_end_reached = 0;
	advance_left = advance_right = 0;

	while (1) {
		if (advance_left && !left_end_reached) {
			ret = tree_advance(fs_info, left_path, &left_level,
					left_root_level,
					advance_left != ADVANCE_ONLY_NEXT,
					&left_key);
			if (ret == -1)
				left_end_reached = ADVANCE;
			else if (ret < 0)
				goto out;
			advance_left = 0;
		}
		if (advance_right && !right_end_reached) {
			ret = tree_advance(fs_info, right_path, &right_level,
					right_root_level,
					advance_right != ADVANCE_ONLY_NEXT,
					&right_key);
			if (ret == -1)
				right_end_reached = ADVANCE;
			else if (ret < 0)
				goto out;
			advance_right = 0;
		}

		if (left_end_reached && right_end_reached) {
			ret = 0;
			goto out;
		} else if (left_end_reached) {
			if (right_level == 0) {
				ret = changed_cb(left_path, right_path,
						&right_key,
						BTRFS_COMPARE_TREE_DELETED,
						ctx);
				if (ret < 0)
					goto out;
			}
			advance_right = ADVANCE;
			continue;
		} else if (right_end_reached) {
			if (left_level == 0) {
				ret = changed_cb(left_path, right_path,
						&left_key,
						BTRFS_COMPARE_TREE_NEW,
						ctx);
				if (ret < 0)
					goto out;
			}
			advance_left = ADVANCE;
			continue;
		}

		if (left_level == 0 && right_level == 0) {
			cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
			if (cmp < 0) {
				ret = changed_cb(left_path, right_path,
						&left_key,
						BTRFS_COMPARE_TREE_NEW,
						ctx);
				if (ret < 0)
					goto out;
				advance_left = ADVANCE;
			} else if (cmp > 0) {
				ret = changed_cb(left_path, right_path,
						&right_key,
						BTRFS_COMPARE_TREE_DELETED,
						ctx);
				if (ret < 0)
					goto out;
				advance_right = ADVANCE;
			} else {
				enum btrfs_compare_tree_result result;

				WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
				ret = tree_compare_item(left_path, right_path,
							tmp_buf);
				if (ret)
					result = BTRFS_COMPARE_TREE_CHANGED;
				else
					result = BTRFS_COMPARE_TREE_SAME;
				ret = changed_cb(left_path, right_path,
						 &left_key, result, ctx);
				if (ret < 0)
					goto out;
				advance_left = ADVANCE;
				advance_right = ADVANCE;
			}
		} else if (left_level == right_level) {
			cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
			if (cmp < 0) {
				advance_left = ADVANCE;
			} else if (cmp > 0) {
				advance_right = ADVANCE;
			} else {
				left_blockptr = btrfs_node_blockptr(
						left_path->nodes[left_level],
						left_path->slots[left_level]);
				right_blockptr = btrfs_node_blockptr(
						right_path->nodes[right_level],
						right_path->slots[right_level]);
				left_gen = btrfs_node_ptr_generation(
						left_path->nodes[left_level],
						left_path->slots[left_level]);
				right_gen = btrfs_node_ptr_generation(
						right_path->nodes[right_level],
						right_path->slots[right_level]);
				if (left_blockptr == right_blockptr &&
				    left_gen == right_gen) {
					/*
					 * As we're on a shared block, don't
					 * allow to go deeper.
					 */
					advance_left = ADVANCE_ONLY_NEXT;
					advance_right = ADVANCE_ONLY_NEXT;
				} else {
					advance_left = ADVANCE;
					advance_right = ADVANCE;
				}
			}
		} else if (left_level < right_level) {
			advance_right = ADVANCE;
		} else {
			advance_left = ADVANCE;
		}
	}

out:
	btrfs_free_path(left_path);
	btrfs_free_path(right_path);
	kvfree(tmp_buf);
	return ret;
}
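/*
 * Illustrative sketch, not part of the original file: a minimal callback for
 * btrfs_compare_trees() and how it would be wired up.  The callback signature
 * is assumed to match the five-argument btrfs_changed_cb_t form used by the
 * calls above; the context struct and both function names are hypothetical.
 */
struct example_diff_ctx {
	u64 new_items;
	u64 deleted_items;
	u64 changed_items;
};

static int __maybe_unused example_changed_cb(struct btrfs_path *left_path,
					     struct btrfs_path *right_path,
					     struct btrfs_key *key,
					     enum btrfs_compare_tree_result result,
					     void *ctx)
{
	struct example_diff_ctx *diff = ctx;

	switch (result) {
	case BTRFS_COMPARE_TREE_NEW:
		diff->new_items++;
		break;
	case BTRFS_COMPARE_TREE_DELETED:
		diff->deleted_items++;
		break;
	case BTRFS_COMPARE_TREE_CHANGED:
		diff->changed_items++;
		break;
	case BTRFS_COMPARE_TREE_SAME:
		break;
	}
	return 0;
}

static int __maybe_unused example_diff_roots(struct btrfs_root *left_root,
					     struct btrfs_root *right_root)
{
	struct example_diff_ctx diff = {};

	/* walks both commit roots and counts the differing items */
	return btrfs_compare_trees(left_root, right_root,
				   example_changed_cb, &diff);
}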
/*
 * this is similar to btrfs_next_leaf, but does not try to preserve
 * and fixup the path.  It looks for and returns the next key in the
 * tree based on the current path and the min_trans parameters.
 *
 * 0 is returned if another key is found, < 0 if there are any errors
 * and 1 is returned if there are no higher keys in the tree
 *
 * path->keep_locks should be set to 1 on the search made before
 * calling this function.
 */
int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
			struct btrfs_key *key, int level, u64 min_trans)
{
	int slot;
	struct extent_buffer *c;

	WARN_ON(!path->keep_locks);
	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level])
			return 1;

		slot = path->slots[level] + 1;
		c = path->nodes[level];
next:
		if (slot >= btrfs_header_nritems(c)) {
			int ret;
			int orig_lowest;
			struct btrfs_key cur_key;
			if (level + 1 >= BTRFS_MAX_LEVEL ||
			    !path->nodes[level + 1])
				return 1;

			if (path->locks[level + 1]) {
				level++;
				continue;
			}

			slot = btrfs_header_nritems(c) - 1;
			if (level == 0)
				btrfs_item_key_to_cpu(c, &cur_key, slot);
			else
				btrfs_node_key_to_cpu(c, &cur_key, slot);

			orig_lowest = path->lowest_level;
			btrfs_release_path(path);
			path->lowest_level = level;
			ret = btrfs_search_slot(NULL, root, &cur_key, path,
						0, 0);
			path->lowest_level = orig_lowest;
			if (ret < 0)
				return ret;

			c = path->nodes[level];
			slot = path->slots[level];
			if (ret == 0)
				slot++;
			goto next;
		}

		if (level == 0)
			btrfs_item_key_to_cpu(c, key, slot);
		else {
			u64 gen = btrfs_node_ptr_generation(c, slot);

			if (gen < min_trans) {
				slot++;
				goto next;
			}
			btrfs_node_key_to_cpu(c, key, slot);
		}
		return 0;
	}
	return 1;
}
/*
 * search the tree again to find a leaf with greater keys
 * returns 0 if it found something or 1 if there are no greater leaves.
 * returns < 0 on io errors.
 */
int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
{
	return btrfs_next_old_leaf(root, path, 0);
}

int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
			u64 time_seq)
{
	int slot;
	int level;
	struct extent_buffer *c;
	struct extent_buffer *next;
	struct btrfs_key key;
	u32 nritems;
	int ret;
	int old_spinning = path->leave_spinning;
	int next_rw_lock = 0;

	nritems = btrfs_header_nritems(path->nodes[0]);
	if (nritems == 0)
		return 1;

	btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
again:
	level = 1;
	next = NULL;
	next_rw_lock = 0;
	btrfs_release_path(path);

	path->keep_locks = 1;
	path->leave_spinning = 1;

	if (time_seq)
		ret = btrfs_search_old_slot(root, &key, path, time_seq);
	else
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	path->keep_locks = 0;

	if (ret < 0)
		return ret;

	nritems = btrfs_header_nritems(path->nodes[0]);
	/*
	 * by releasing the path above we dropped all our locks.  A balance
	 * could have added more items next to the key that used to be
	 * at the very end of the block.  So, check again here and
	 * advance the path if there are now more items available.
	 */
	if (nritems > 0 && path->slots[0] < nritems - 1) {
		if (ret == 0)
			path->slots[0]++;
		ret = 0;
		goto done;
	}
	/*
	 * So the above check misses one case:
	 * - after releasing the path above, someone has removed the item that
	 *   used to be at the very end of the block, and balance between leafs
	 *   gets another one with bigger key.offset to replace it.
	 *
	 * This one should be returned as well, or we can get leaf corruption
	 * later(esp. in __btrfs_drop_extents()).
	 *
	 * And a bit more explanation about this check,
	 * with ret > 0, the key isn't found, the path points to the slot
	 * where it should be inserted, so the path->slots[0] item must be the
	 * bigger one.
	 */
	if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) {
		ret = 0;
		goto done;
	}

	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level]) {
			ret = 1;
			goto done;
		}

		slot = path->slots[level] + 1;
		c = path->nodes[level];
		if (slot >= btrfs_header_nritems(c)) {
			level++;
			if (level == BTRFS_MAX_LEVEL) {
				ret = 1;
				goto done;
			}
			continue;
		}

		if (next) {
			btrfs_tree_unlock_rw(next, next_rw_lock);
			free_extent_buffer(next);
		}

		next = c;
		next_rw_lock = path->locks[level];
		ret = read_block_for_search(root, path, &next, level,
					    slot, &key);
		if (ret == -EAGAIN)
			goto again;

		if (ret < 0) {
			btrfs_release_path(path);
			goto done;
		}

		if (!path->skip_locking) {
			ret = btrfs_try_tree_read_lock(next);
			if (!ret && time_seq) {
				/*
				 * If we don't get the lock, we may be racing
				 * with push_leaf_left, holding that lock while
				 * itself waiting for the leaf we've currently
				 * locked. To solve this situation, we give up
				 * on our lock and cycle.
				 */
				free_extent_buffer(next);
				btrfs_release_path(path);
				cond_resched();
				goto again;
			}
			if (!ret) {
				btrfs_set_path_blocking(path);
				btrfs_tree_read_lock(next);
				btrfs_clear_path_blocking(path, next,
							  BTRFS_READ_LOCK);
			}
			next_rw_lock = BTRFS_READ_LOCK;
		}
		break;
	}
	path->slots[level] = slot;
	while (1) {
		level--;
		c = path->nodes[level];
		if (path->locks[level])
			btrfs_tree_unlock_rw(c, path->locks[level]);

		free_extent_buffer(c);
		path->nodes[level] = next;
		path->slots[level] = 0;
		if (!path->skip_locking)
			path->locks[level] = next_rw_lock;
		if (!level)
			break;

		ret = read_block_for_search(root, path, &next, level,
					    0, &key);
		if (ret == -EAGAIN)
			goto again;

		if (ret < 0) {
			btrfs_release_path(path);
			goto done;
		}

		if (!path->skip_locking) {
			ret = btrfs_try_tree_read_lock(next);
			if (!ret) {
				btrfs_set_path_blocking(path);
				btrfs_tree_read_lock(next);
				btrfs_clear_path_blocking(path, next,
							  BTRFS_READ_LOCK);
			}
			next_rw_lock = BTRFS_READ_LOCK;
		}
	}
	ret = 0;
done:
	unlock_up(path, 0, 1, 0, NULL);
	path->leave_spinning = old_spinning;
	if (!old_spinning)
		btrfs_set_path_blocking(path);

	return ret;
}
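/*
 * Illustrative sketch, not part of the original file: the canonical forward
 * iteration loop built on btrfs_search_slot() + btrfs_next_leaf(), visiting
 * every item that belongs to one objectid.  The per-item work is left as a
 * hypothetical callback.
 */
static int __maybe_unused example_for_each_item(struct btrfs_root *root,
						u64 objectid,
						int (*process)(struct extent_buffer *leaf,
							       int slot, void *ctx),
						void *ctx)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = objectid;
	key.type = 0;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	ret = 0;

	while (1) {
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret) {
				/* 1 means we ran off the end of the tree */
				if (ret > 0)
					ret = 0;
				break;
			}
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != objectid)
			break;

		ret = process(leaf, path->slots[0], ctx);
		if (ret)
			break;
		path->slots[0]++;
	}
out:
	btrfs_free_path(path);
	return ret;
}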
/*
 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
 * searching until it gets past min_objectid or finds an item of 'type'
 *
 * returns 0 if something is found, 1 if nothing was found and < 0 on error
 */
int btrfs_previous_item(struct btrfs_root *root,
			struct btrfs_path *path, u64 min_objectid,
			int type)
{
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;

	while (1) {
		if (path->slots[0] == 0) {
			btrfs_set_path_blocking(path);
			ret = btrfs_prev_leaf(root, path);
			if (ret != 0)
				return ret;
		} else {
			path->slots[0]--;
		}
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (nritems == 0)
			return 1;
		if (path->slots[0] == nritems)
			path->slots[0]--;

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid < min_objectid)
			break;
		if (found_key.type == type)
			return 0;
		if (found_key.objectid == min_objectid &&
		    found_key.type < type)
			break;
	}
	return 1;
}
/*
 * search in extent tree to find a previous Metadata/Data extent item with
 * min objectid.
 *
 * returns 0 if something is found, 1 if nothing was found and < 0 on error
 */
int btrfs_previous_extent_item(struct btrfs_root *root,
			struct btrfs_path *path, u64 min_objectid)
{
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;

	while (1) {
		if (path->slots[0] == 0) {
			btrfs_set_path_blocking(path);
			ret = btrfs_prev_leaf(root, path);
			if (ret != 0)
				return ret;
		} else {
			path->slots[0]--;
		}
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (nritems == 0)
			return 1;
		if (path->slots[0] == nritems)
			path->slots[0]--;

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid < min_objectid)
			break;
		if (found_key.type == BTRFS_EXTENT_ITEM_KEY ||
		    found_key.type == BTRFS_METADATA_ITEM_KEY)
			return 0;
		if (found_key.objectid == min_objectid &&
		    found_key.type < BTRFS_EXTENT_ITEM_KEY)
			break;
	}
	return 1;
}