1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2007,2008 Oracle. All rights reserved.
6 #include <linux/sched.h>
7 #include <linux/slab.h>
8 #include <linux/rbtree.h>
12 #include "transaction.h"
13 #include "print-tree.h"
18 static int split_node(struct btrfs_trans_handle
*trans
, struct btrfs_root
19 *root
, struct btrfs_path
*path
, int level
);
20 static int split_leaf(struct btrfs_trans_handle
*trans
, struct btrfs_root
*root
,
21 const struct btrfs_key
*ins_key
, struct btrfs_path
*path
,
22 int data_size
, int extend
);
23 static int push_node_left(struct btrfs_trans_handle
*trans
,
24 struct extent_buffer
*dst
,
25 struct extent_buffer
*src
, int empty
);
26 static int balance_node_right(struct btrfs_trans_handle
*trans
,
27 struct extent_buffer
*dst_buf
,
28 struct extent_buffer
*src_buf
);
29 static void del_ptr(struct btrfs_root
*root
, struct btrfs_path
*path
,
32 static const struct btrfs_csums
{
35 const char driver
[12];
37 [BTRFS_CSUM_TYPE_CRC32
] = { .size
= 4, .name
= "crc32c" },
38 [BTRFS_CSUM_TYPE_XXHASH
] = { .size
= 8, .name
= "xxhash64" },
39 [BTRFS_CSUM_TYPE_SHA256
] = { .size
= 32, .name
= "sha256" },
40 [BTRFS_CSUM_TYPE_BLAKE2
] = { .size
= 32, .name
= "blake2b",
41 .driver
= "blake2b-256" },
44 int btrfs_super_csum_size(const struct btrfs_super_block
*s
)
46 u16 t
= btrfs_super_csum_type(s
);
48 * csum type is validated at mount time
50 return btrfs_csums
[t
].size
;
53 const char *btrfs_super_csum_name(u16 csum_type
)
55 /* csum type is validated at mount time */
56 return btrfs_csums
[csum_type
].name
;
60 * Return driver name if defined, otherwise the name that's also a valid driver
63 const char *btrfs_super_csum_driver(u16 csum_type
)
65 /* csum type is validated at mount time */
66 return btrfs_csums
[csum_type
].driver
[0] ?
67 btrfs_csums
[csum_type
].driver
:
68 btrfs_csums
[csum_type
].name
;
71 size_t __const
btrfs_get_num_csums(void)
73 return ARRAY_SIZE(btrfs_csums
);
76 struct btrfs_path
*btrfs_alloc_path(void)
78 return kmem_cache_zalloc(btrfs_path_cachep
, GFP_NOFS
);
81 /* this also releases the path */
82 void btrfs_free_path(struct btrfs_path
*p
)
86 btrfs_release_path(p
);
87 kmem_cache_free(btrfs_path_cachep
, p
);
91 * path release drops references on the extent buffers in the path
92 * and it drops any locks held by this path
94 * It is safe to call this on paths that no locks or extent buffers held.
96 noinline
void btrfs_release_path(struct btrfs_path
*p
)
100 for (i
= 0; i
< BTRFS_MAX_LEVEL
; i
++) {
105 btrfs_tree_unlock_rw(p
->nodes
[i
], p
->locks
[i
]);
108 free_extent_buffer(p
->nodes
[i
]);
114 * safely gets a reference on the root node of a tree. A lock
115 * is not taken, so a concurrent writer may put a different node
116 * at the root of the tree. See btrfs_lock_root_node for the
119 * The extent buffer returned by this has a reference taken, so
120 * it won't disappear. It may stop being the root of the tree
121 * at any time because there are no locks held.
123 struct extent_buffer
*btrfs_root_node(struct btrfs_root
*root
)
125 struct extent_buffer
*eb
;
129 eb
= rcu_dereference(root
->node
);
132 * RCU really hurts here, we could free up the root node because
133 * it was COWed but we may not get the new root node yet so do
134 * the inc_not_zero dance and if it doesn't work then
135 * synchronize_rcu and try again.
137 if (atomic_inc_not_zero(&eb
->refs
)) {
147 /* cowonly root (everything not a reference counted cow subvolume), just get
148 * put onto a simple dirty list. transaction.c walks this to make sure they
149 * get properly updated on disk.
151 static void add_root_to_dirty_list(struct btrfs_root
*root
)
153 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
155 if (test_bit(BTRFS_ROOT_DIRTY
, &root
->state
) ||
156 !test_bit(BTRFS_ROOT_TRACK_DIRTY
, &root
->state
))
159 spin_lock(&fs_info
->trans_lock
);
160 if (!test_and_set_bit(BTRFS_ROOT_DIRTY
, &root
->state
)) {
161 /* Want the extent tree to be the last on the list */
162 if (root
->root_key
.objectid
== BTRFS_EXTENT_TREE_OBJECTID
)
163 list_move_tail(&root
->dirty_list
,
164 &fs_info
->dirty_cowonly_roots
);
166 list_move(&root
->dirty_list
,
167 &fs_info
->dirty_cowonly_roots
);
169 spin_unlock(&fs_info
->trans_lock
);
173 * used by snapshot creation to make a copy of a root for a tree with
174 * a given objectid. The buffer with the new root node is returned in
175 * cow_ret, and this func returns zero on success or a negative error code.
177 int btrfs_copy_root(struct btrfs_trans_handle
*trans
,
178 struct btrfs_root
*root
,
179 struct extent_buffer
*buf
,
180 struct extent_buffer
**cow_ret
, u64 new_root_objectid
)
182 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
183 struct extent_buffer
*cow
;
186 struct btrfs_disk_key disk_key
;
188 WARN_ON(test_bit(BTRFS_ROOT_REF_COWS
, &root
->state
) &&
189 trans
->transid
!= fs_info
->running_transaction
->transid
);
190 WARN_ON(test_bit(BTRFS_ROOT_REF_COWS
, &root
->state
) &&
191 trans
->transid
!= root
->last_trans
);
193 level
= btrfs_header_level(buf
);
195 btrfs_item_key(buf
, &disk_key
, 0);
197 btrfs_node_key(buf
, &disk_key
, 0);
199 cow
= btrfs_alloc_tree_block(trans
, root
, 0, new_root_objectid
,
200 &disk_key
, level
, buf
->start
, 0);
204 copy_extent_buffer_full(cow
, buf
);
205 btrfs_set_header_bytenr(cow
, cow
->start
);
206 btrfs_set_header_generation(cow
, trans
->transid
);
207 btrfs_set_header_backref_rev(cow
, BTRFS_MIXED_BACKREF_REV
);
208 btrfs_clear_header_flag(cow
, BTRFS_HEADER_FLAG_WRITTEN
|
209 BTRFS_HEADER_FLAG_RELOC
);
210 if (new_root_objectid
== BTRFS_TREE_RELOC_OBJECTID
)
211 btrfs_set_header_flag(cow
, BTRFS_HEADER_FLAG_RELOC
);
213 btrfs_set_header_owner(cow
, new_root_objectid
);
215 write_extent_buffer_fsid(cow
, fs_info
->fs_devices
->metadata_uuid
);
217 WARN_ON(btrfs_header_generation(buf
) > trans
->transid
);
218 if (new_root_objectid
== BTRFS_TREE_RELOC_OBJECTID
)
219 ret
= btrfs_inc_ref(trans
, root
, cow
, 1);
221 ret
= btrfs_inc_ref(trans
, root
, cow
, 0);
226 btrfs_mark_buffer_dirty(cow
);
235 MOD_LOG_KEY_REMOVE_WHILE_FREEING
,
236 MOD_LOG_KEY_REMOVE_WHILE_MOVING
,
238 MOD_LOG_ROOT_REPLACE
,
241 struct tree_mod_root
{
246 struct tree_mod_elem
{
252 /* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
255 /* this is used for MOD_LOG_KEY* and MOD_LOG_ROOT_REPLACE */
258 /* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
259 struct btrfs_disk_key key
;
262 /* this is used for op == MOD_LOG_MOVE_KEYS */
268 /* this is used for op == MOD_LOG_ROOT_REPLACE */
269 struct tree_mod_root old_root
;
273 * Pull a new tree mod seq number for our operation.
275 static inline u64
btrfs_inc_tree_mod_seq(struct btrfs_fs_info
*fs_info
)
277 return atomic64_inc_return(&fs_info
->tree_mod_seq
);
281 * This adds a new blocker to the tree mod log's blocker list if the @elem
282 * passed does not already have a sequence number set. So when a caller expects
283 * to record tree modifications, it should ensure to set elem->seq to zero
284 * before calling btrfs_get_tree_mod_seq.
285 * Returns a fresh, unused tree log modification sequence number, even if no new
288 u64
btrfs_get_tree_mod_seq(struct btrfs_fs_info
*fs_info
,
289 struct seq_list
*elem
)
291 write_lock(&fs_info
->tree_mod_log_lock
);
293 elem
->seq
= btrfs_inc_tree_mod_seq(fs_info
);
294 list_add_tail(&elem
->list
, &fs_info
->tree_mod_seq_list
);
296 write_unlock(&fs_info
->tree_mod_log_lock
);
301 void btrfs_put_tree_mod_seq(struct btrfs_fs_info
*fs_info
,
302 struct seq_list
*elem
)
304 struct rb_root
*tm_root
;
305 struct rb_node
*node
;
306 struct rb_node
*next
;
307 struct tree_mod_elem
*tm
;
308 u64 min_seq
= (u64
)-1;
309 u64 seq_putting
= elem
->seq
;
314 write_lock(&fs_info
->tree_mod_log_lock
);
315 list_del(&elem
->list
);
318 if (!list_empty(&fs_info
->tree_mod_seq_list
)) {
319 struct seq_list
*first
;
321 first
= list_first_entry(&fs_info
->tree_mod_seq_list
,
322 struct seq_list
, list
);
323 if (seq_putting
> first
->seq
) {
325 * Blocker with lower sequence number exists, we
326 * cannot remove anything from the log.
328 write_unlock(&fs_info
->tree_mod_log_lock
);
331 min_seq
= first
->seq
;
335 * anything that's lower than the lowest existing (read: blocked)
336 * sequence number can be removed from the tree.
338 tm_root
= &fs_info
->tree_mod_log
;
339 for (node
= rb_first(tm_root
); node
; node
= next
) {
340 next
= rb_next(node
);
341 tm
= rb_entry(node
, struct tree_mod_elem
, node
);
342 if (tm
->seq
>= min_seq
)
344 rb_erase(node
, tm_root
);
347 write_unlock(&fs_info
->tree_mod_log_lock
);
351 * key order of the log:
352 * node/leaf start address -> sequence
354 * The 'start address' is the logical address of the *new* root node
355 * for root replace operations, or the logical address of the affected
356 * block for all other operations.
359 __tree_mod_log_insert(struct btrfs_fs_info
*fs_info
, struct tree_mod_elem
*tm
)
361 struct rb_root
*tm_root
;
362 struct rb_node
**new;
363 struct rb_node
*parent
= NULL
;
364 struct tree_mod_elem
*cur
;
366 lockdep_assert_held_write(&fs_info
->tree_mod_log_lock
);
368 tm
->seq
= btrfs_inc_tree_mod_seq(fs_info
);
370 tm_root
= &fs_info
->tree_mod_log
;
371 new = &tm_root
->rb_node
;
373 cur
= rb_entry(*new, struct tree_mod_elem
, node
);
375 if (cur
->logical
< tm
->logical
)
376 new = &((*new)->rb_left
);
377 else if (cur
->logical
> tm
->logical
)
378 new = &((*new)->rb_right
);
379 else if (cur
->seq
< tm
->seq
)
380 new = &((*new)->rb_left
);
381 else if (cur
->seq
> tm
->seq
)
382 new = &((*new)->rb_right
);
387 rb_link_node(&tm
->node
, parent
, new);
388 rb_insert_color(&tm
->node
, tm_root
);
393 * Determines if logging can be omitted. Returns 1 if it can. Otherwise, it
394 * returns zero with the tree_mod_log_lock acquired. The caller must hold
395 * this until all tree mod log insertions are recorded in the rb tree and then
396 * write unlock fs_info::tree_mod_log_lock.
398 static inline int tree_mod_dont_log(struct btrfs_fs_info
*fs_info
,
399 struct extent_buffer
*eb
) {
401 if (list_empty(&(fs_info
)->tree_mod_seq_list
))
403 if (eb
&& btrfs_header_level(eb
) == 0)
406 write_lock(&fs_info
->tree_mod_log_lock
);
407 if (list_empty(&(fs_info
)->tree_mod_seq_list
)) {
408 write_unlock(&fs_info
->tree_mod_log_lock
);
415 /* Similar to tree_mod_dont_log, but doesn't acquire any locks. */
416 static inline int tree_mod_need_log(const struct btrfs_fs_info
*fs_info
,
417 struct extent_buffer
*eb
)
420 if (list_empty(&(fs_info
)->tree_mod_seq_list
))
422 if (eb
&& btrfs_header_level(eb
) == 0)
428 static struct tree_mod_elem
*
429 alloc_tree_mod_elem(struct extent_buffer
*eb
, int slot
,
430 enum mod_log_op op
, gfp_t flags
)
432 struct tree_mod_elem
*tm
;
434 tm
= kzalloc(sizeof(*tm
), flags
);
438 tm
->logical
= eb
->start
;
439 if (op
!= MOD_LOG_KEY_ADD
) {
440 btrfs_node_key(eb
, &tm
->key
, slot
);
441 tm
->blockptr
= btrfs_node_blockptr(eb
, slot
);
445 tm
->generation
= btrfs_node_ptr_generation(eb
, slot
);
446 RB_CLEAR_NODE(&tm
->node
);
451 static noinline
int tree_mod_log_insert_key(struct extent_buffer
*eb
, int slot
,
452 enum mod_log_op op
, gfp_t flags
)
454 struct tree_mod_elem
*tm
;
457 if (!tree_mod_need_log(eb
->fs_info
, eb
))
460 tm
= alloc_tree_mod_elem(eb
, slot
, op
, flags
);
464 if (tree_mod_dont_log(eb
->fs_info
, eb
)) {
469 ret
= __tree_mod_log_insert(eb
->fs_info
, tm
);
470 write_unlock(&eb
->fs_info
->tree_mod_log_lock
);
477 static noinline
int tree_mod_log_insert_move(struct extent_buffer
*eb
,
478 int dst_slot
, int src_slot
, int nr_items
)
480 struct tree_mod_elem
*tm
= NULL
;
481 struct tree_mod_elem
**tm_list
= NULL
;
486 if (!tree_mod_need_log(eb
->fs_info
, eb
))
489 tm_list
= kcalloc(nr_items
, sizeof(struct tree_mod_elem
*), GFP_NOFS
);
493 tm
= kzalloc(sizeof(*tm
), GFP_NOFS
);
499 tm
->logical
= eb
->start
;
501 tm
->move
.dst_slot
= dst_slot
;
502 tm
->move
.nr_items
= nr_items
;
503 tm
->op
= MOD_LOG_MOVE_KEYS
;
505 for (i
= 0; i
+ dst_slot
< src_slot
&& i
< nr_items
; i
++) {
506 tm_list
[i
] = alloc_tree_mod_elem(eb
, i
+ dst_slot
,
507 MOD_LOG_KEY_REMOVE_WHILE_MOVING
, GFP_NOFS
);
514 if (tree_mod_dont_log(eb
->fs_info
, eb
))
519 * When we override something during the move, we log these removals.
520 * This can only happen when we move towards the beginning of the
521 * buffer, i.e. dst_slot < src_slot.
523 for (i
= 0; i
+ dst_slot
< src_slot
&& i
< nr_items
; i
++) {
524 ret
= __tree_mod_log_insert(eb
->fs_info
, tm_list
[i
]);
529 ret
= __tree_mod_log_insert(eb
->fs_info
, tm
);
532 write_unlock(&eb
->fs_info
->tree_mod_log_lock
);
537 for (i
= 0; i
< nr_items
; i
++) {
538 if (tm_list
[i
] && !RB_EMPTY_NODE(&tm_list
[i
]->node
))
539 rb_erase(&tm_list
[i
]->node
, &eb
->fs_info
->tree_mod_log
);
543 write_unlock(&eb
->fs_info
->tree_mod_log_lock
);
551 __tree_mod_log_free_eb(struct btrfs_fs_info
*fs_info
,
552 struct tree_mod_elem
**tm_list
,
558 for (i
= nritems
- 1; i
>= 0; i
--) {
559 ret
= __tree_mod_log_insert(fs_info
, tm_list
[i
]);
561 for (j
= nritems
- 1; j
> i
; j
--)
562 rb_erase(&tm_list
[j
]->node
,
563 &fs_info
->tree_mod_log
);
571 static noinline
int tree_mod_log_insert_root(struct extent_buffer
*old_root
,
572 struct extent_buffer
*new_root
, int log_removal
)
574 struct btrfs_fs_info
*fs_info
= old_root
->fs_info
;
575 struct tree_mod_elem
*tm
= NULL
;
576 struct tree_mod_elem
**tm_list
= NULL
;
581 if (!tree_mod_need_log(fs_info
, NULL
))
584 if (log_removal
&& btrfs_header_level(old_root
) > 0) {
585 nritems
= btrfs_header_nritems(old_root
);
586 tm_list
= kcalloc(nritems
, sizeof(struct tree_mod_elem
*),
592 for (i
= 0; i
< nritems
; i
++) {
593 tm_list
[i
] = alloc_tree_mod_elem(old_root
, i
,
594 MOD_LOG_KEY_REMOVE_WHILE_FREEING
, GFP_NOFS
);
602 tm
= kzalloc(sizeof(*tm
), GFP_NOFS
);
608 tm
->logical
= new_root
->start
;
609 tm
->old_root
.logical
= old_root
->start
;
610 tm
->old_root
.level
= btrfs_header_level(old_root
);
611 tm
->generation
= btrfs_header_generation(old_root
);
612 tm
->op
= MOD_LOG_ROOT_REPLACE
;
614 if (tree_mod_dont_log(fs_info
, NULL
))
618 ret
= __tree_mod_log_free_eb(fs_info
, tm_list
, nritems
);
620 ret
= __tree_mod_log_insert(fs_info
, tm
);
622 write_unlock(&fs_info
->tree_mod_log_lock
);
631 for (i
= 0; i
< nritems
; i
++)
640 static struct tree_mod_elem
*
641 __tree_mod_log_search(struct btrfs_fs_info
*fs_info
, u64 start
, u64 min_seq
,
644 struct rb_root
*tm_root
;
645 struct rb_node
*node
;
646 struct tree_mod_elem
*cur
= NULL
;
647 struct tree_mod_elem
*found
= NULL
;
649 read_lock(&fs_info
->tree_mod_log_lock
);
650 tm_root
= &fs_info
->tree_mod_log
;
651 node
= tm_root
->rb_node
;
653 cur
= rb_entry(node
, struct tree_mod_elem
, node
);
654 if (cur
->logical
< start
) {
655 node
= node
->rb_left
;
656 } else if (cur
->logical
> start
) {
657 node
= node
->rb_right
;
658 } else if (cur
->seq
< min_seq
) {
659 node
= node
->rb_left
;
660 } else if (!smallest
) {
661 /* we want the node with the highest seq */
663 BUG_ON(found
->seq
> cur
->seq
);
665 node
= node
->rb_left
;
666 } else if (cur
->seq
> min_seq
) {
667 /* we want the node with the smallest seq */
669 BUG_ON(found
->seq
< cur
->seq
);
671 node
= node
->rb_right
;
677 read_unlock(&fs_info
->tree_mod_log_lock
);
683 * this returns the element from the log with the smallest time sequence
684 * value that's in the log (the oldest log item). any element with a time
685 * sequence lower than min_seq will be ignored.
687 static struct tree_mod_elem
*
688 tree_mod_log_search_oldest(struct btrfs_fs_info
*fs_info
, u64 start
,
691 return __tree_mod_log_search(fs_info
, start
, min_seq
, 1);
695 * this returns the element from the log with the largest time sequence
696 * value that's in the log (the most recent log item). any element with
697 * a time sequence lower than min_seq will be ignored.
699 static struct tree_mod_elem
*
700 tree_mod_log_search(struct btrfs_fs_info
*fs_info
, u64 start
, u64 min_seq
)
702 return __tree_mod_log_search(fs_info
, start
, min_seq
, 0);
705 static noinline
int tree_mod_log_eb_copy(struct extent_buffer
*dst
,
706 struct extent_buffer
*src
, unsigned long dst_offset
,
707 unsigned long src_offset
, int nr_items
)
709 struct btrfs_fs_info
*fs_info
= dst
->fs_info
;
711 struct tree_mod_elem
**tm_list
= NULL
;
712 struct tree_mod_elem
**tm_list_add
, **tm_list_rem
;
716 if (!tree_mod_need_log(fs_info
, NULL
))
719 if (btrfs_header_level(dst
) == 0 && btrfs_header_level(src
) == 0)
722 tm_list
= kcalloc(nr_items
* 2, sizeof(struct tree_mod_elem
*),
727 tm_list_add
= tm_list
;
728 tm_list_rem
= tm_list
+ nr_items
;
729 for (i
= 0; i
< nr_items
; i
++) {
730 tm_list_rem
[i
] = alloc_tree_mod_elem(src
, i
+ src_offset
,
731 MOD_LOG_KEY_REMOVE
, GFP_NOFS
);
732 if (!tm_list_rem
[i
]) {
737 tm_list_add
[i
] = alloc_tree_mod_elem(dst
, i
+ dst_offset
,
738 MOD_LOG_KEY_ADD
, GFP_NOFS
);
739 if (!tm_list_add
[i
]) {
745 if (tree_mod_dont_log(fs_info
, NULL
))
749 for (i
= 0; i
< nr_items
; i
++) {
750 ret
= __tree_mod_log_insert(fs_info
, tm_list_rem
[i
]);
753 ret
= __tree_mod_log_insert(fs_info
, tm_list_add
[i
]);
758 write_unlock(&fs_info
->tree_mod_log_lock
);
764 for (i
= 0; i
< nr_items
* 2; i
++) {
765 if (tm_list
[i
] && !RB_EMPTY_NODE(&tm_list
[i
]->node
))
766 rb_erase(&tm_list
[i
]->node
, &fs_info
->tree_mod_log
);
770 write_unlock(&fs_info
->tree_mod_log_lock
);
776 static noinline
int tree_mod_log_free_eb(struct extent_buffer
*eb
)
778 struct tree_mod_elem
**tm_list
= NULL
;
783 if (btrfs_header_level(eb
) == 0)
786 if (!tree_mod_need_log(eb
->fs_info
, NULL
))
789 nritems
= btrfs_header_nritems(eb
);
790 tm_list
= kcalloc(nritems
, sizeof(struct tree_mod_elem
*), GFP_NOFS
);
794 for (i
= 0; i
< nritems
; i
++) {
795 tm_list
[i
] = alloc_tree_mod_elem(eb
, i
,
796 MOD_LOG_KEY_REMOVE_WHILE_FREEING
, GFP_NOFS
);
803 if (tree_mod_dont_log(eb
->fs_info
, eb
))
806 ret
= __tree_mod_log_free_eb(eb
->fs_info
, tm_list
, nritems
);
807 write_unlock(&eb
->fs_info
->tree_mod_log_lock
);
815 for (i
= 0; i
< nritems
; i
++)
823 * check if the tree block can be shared by multiple trees
825 int btrfs_block_can_be_shared(struct btrfs_root
*root
,
826 struct extent_buffer
*buf
)
829 * Tree blocks not in reference counted trees and tree roots
830 * are never shared. If a block was allocated after the last
831 * snapshot and the block was not allocated by tree relocation,
832 * we know the block is not shared.
834 if (test_bit(BTRFS_ROOT_REF_COWS
, &root
->state
) &&
835 buf
!= root
->node
&& buf
!= root
->commit_root
&&
836 (btrfs_header_generation(buf
) <=
837 btrfs_root_last_snapshot(&root
->root_item
) ||
838 btrfs_header_flag(buf
, BTRFS_HEADER_FLAG_RELOC
)))
844 static noinline
int update_ref_for_cow(struct btrfs_trans_handle
*trans
,
845 struct btrfs_root
*root
,
846 struct extent_buffer
*buf
,
847 struct extent_buffer
*cow
,
850 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
858 * Backrefs update rules:
860 * Always use full backrefs for extent pointers in tree block
861 * allocated by tree relocation.
863 * If a shared tree block is no longer referenced by its owner
864 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
865 * use full backrefs for extent pointers in tree block.
867 * If a tree block is been relocating
868 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
869 * use full backrefs for extent pointers in tree block.
870 * The reason for this is some operations (such as drop tree)
871 * are only allowed for blocks use full backrefs.
874 if (btrfs_block_can_be_shared(root
, buf
)) {
875 ret
= btrfs_lookup_extent_info(trans
, fs_info
, buf
->start
,
876 btrfs_header_level(buf
), 1,
882 btrfs_handle_fs_error(fs_info
, ret
, NULL
);
887 if (root
->root_key
.objectid
== BTRFS_TREE_RELOC_OBJECTID
||
888 btrfs_header_backref_rev(buf
) < BTRFS_MIXED_BACKREF_REV
)
889 flags
= BTRFS_BLOCK_FLAG_FULL_BACKREF
;
894 owner
= btrfs_header_owner(buf
);
895 BUG_ON(owner
== BTRFS_TREE_RELOC_OBJECTID
&&
896 !(flags
& BTRFS_BLOCK_FLAG_FULL_BACKREF
));
899 if ((owner
== root
->root_key
.objectid
||
900 root
->root_key
.objectid
== BTRFS_TREE_RELOC_OBJECTID
) &&
901 !(flags
& BTRFS_BLOCK_FLAG_FULL_BACKREF
)) {
902 ret
= btrfs_inc_ref(trans
, root
, buf
, 1);
906 if (root
->root_key
.objectid
==
907 BTRFS_TREE_RELOC_OBJECTID
) {
908 ret
= btrfs_dec_ref(trans
, root
, buf
, 0);
911 ret
= btrfs_inc_ref(trans
, root
, cow
, 1);
915 new_flags
|= BTRFS_BLOCK_FLAG_FULL_BACKREF
;
918 if (root
->root_key
.objectid
==
919 BTRFS_TREE_RELOC_OBJECTID
)
920 ret
= btrfs_inc_ref(trans
, root
, cow
, 1);
922 ret
= btrfs_inc_ref(trans
, root
, cow
, 0);
926 if (new_flags
!= 0) {
927 int level
= btrfs_header_level(buf
);
929 ret
= btrfs_set_disk_extent_flags(trans
, buf
,
930 new_flags
, level
, 0);
935 if (flags
& BTRFS_BLOCK_FLAG_FULL_BACKREF
) {
936 if (root
->root_key
.objectid
==
937 BTRFS_TREE_RELOC_OBJECTID
)
938 ret
= btrfs_inc_ref(trans
, root
, cow
, 1);
940 ret
= btrfs_inc_ref(trans
, root
, cow
, 0);
943 ret
= btrfs_dec_ref(trans
, root
, buf
, 1);
947 btrfs_clean_tree_block(buf
);
953 static struct extent_buffer
*alloc_tree_block_no_bg_flush(
954 struct btrfs_trans_handle
*trans
,
955 struct btrfs_root
*root
,
957 const struct btrfs_disk_key
*disk_key
,
962 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
963 struct extent_buffer
*ret
;
966 * If we are COWing a node/leaf from the extent, chunk, device or free
967 * space trees, make sure that we do not finish block group creation of
968 * pending block groups. We do this to avoid a deadlock.
969 * COWing can result in allocation of a new chunk, and flushing pending
970 * block groups (btrfs_create_pending_block_groups()) can be triggered
971 * when finishing allocation of a new chunk. Creation of a pending block
972 * group modifies the extent, chunk, device and free space trees,
973 * therefore we could deadlock with ourselves since we are holding a
974 * lock on an extent buffer that btrfs_create_pending_block_groups() may
976 * For similar reasons, we also need to delay flushing pending block
977 * groups when splitting a leaf or node, from one of those trees, since
978 * we are holding a write lock on it and its parent or when inserting a
979 * new root node for one of those trees.
981 if (root
== fs_info
->extent_root
||
982 root
== fs_info
->chunk_root
||
983 root
== fs_info
->dev_root
||
984 root
== fs_info
->free_space_root
)
985 trans
->can_flush_pending_bgs
= false;
987 ret
= btrfs_alloc_tree_block(trans
, root
, parent_start
,
988 root
->root_key
.objectid
, disk_key
, level
,
990 trans
->can_flush_pending_bgs
= true;
996 * does the dirty work in cow of a single block. The parent block (if
997 * supplied) is updated to point to the new cow copy. The new buffer is marked
998 * dirty and returned locked. If you modify the block it needs to be marked
1001 * search_start -- an allocation hint for the new block
1003 * empty_size -- a hint that you plan on doing more cow. This is the size in
1004 * bytes the allocator should try to find free next to the block it returns.
1005 * This is just a hint and may be ignored by the allocator.
1007 static noinline
int __btrfs_cow_block(struct btrfs_trans_handle
*trans
,
1008 struct btrfs_root
*root
,
1009 struct extent_buffer
*buf
,
1010 struct extent_buffer
*parent
, int parent_slot
,
1011 struct extent_buffer
**cow_ret
,
1012 u64 search_start
, u64 empty_size
)
1014 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
1015 struct btrfs_disk_key disk_key
;
1016 struct extent_buffer
*cow
;
1019 int unlock_orig
= 0;
1020 u64 parent_start
= 0;
1022 if (*cow_ret
== buf
)
1025 btrfs_assert_tree_locked(buf
);
1027 WARN_ON(test_bit(BTRFS_ROOT_REF_COWS
, &root
->state
) &&
1028 trans
->transid
!= fs_info
->running_transaction
->transid
);
1029 WARN_ON(test_bit(BTRFS_ROOT_REF_COWS
, &root
->state
) &&
1030 trans
->transid
!= root
->last_trans
);
1032 level
= btrfs_header_level(buf
);
1035 btrfs_item_key(buf
, &disk_key
, 0);
1037 btrfs_node_key(buf
, &disk_key
, 0);
1039 if ((root
->root_key
.objectid
== BTRFS_TREE_RELOC_OBJECTID
) && parent
)
1040 parent_start
= parent
->start
;
1042 cow
= alloc_tree_block_no_bg_flush(trans
, root
, parent_start
, &disk_key
,
1043 level
, search_start
, empty_size
);
1045 return PTR_ERR(cow
);
1047 /* cow is set to blocking by btrfs_init_new_buffer */
1049 copy_extent_buffer_full(cow
, buf
);
1050 btrfs_set_header_bytenr(cow
, cow
->start
);
1051 btrfs_set_header_generation(cow
, trans
->transid
);
1052 btrfs_set_header_backref_rev(cow
, BTRFS_MIXED_BACKREF_REV
);
1053 btrfs_clear_header_flag(cow
, BTRFS_HEADER_FLAG_WRITTEN
|
1054 BTRFS_HEADER_FLAG_RELOC
);
1055 if (root
->root_key
.objectid
== BTRFS_TREE_RELOC_OBJECTID
)
1056 btrfs_set_header_flag(cow
, BTRFS_HEADER_FLAG_RELOC
);
1058 btrfs_set_header_owner(cow
, root
->root_key
.objectid
);
1060 write_extent_buffer_fsid(cow
, fs_info
->fs_devices
->metadata_uuid
);
1062 ret
= update_ref_for_cow(trans
, root
, buf
, cow
, &last_ref
);
1064 btrfs_abort_transaction(trans
, ret
);
1068 if (test_bit(BTRFS_ROOT_REF_COWS
, &root
->state
)) {
1069 ret
= btrfs_reloc_cow_block(trans
, root
, buf
, cow
);
1071 btrfs_abort_transaction(trans
, ret
);
1076 if (buf
== root
->node
) {
1077 WARN_ON(parent
&& parent
!= buf
);
1078 if (root
->root_key
.objectid
== BTRFS_TREE_RELOC_OBJECTID
||
1079 btrfs_header_backref_rev(buf
) < BTRFS_MIXED_BACKREF_REV
)
1080 parent_start
= buf
->start
;
1082 atomic_inc(&cow
->refs
);
1083 ret
= tree_mod_log_insert_root(root
->node
, cow
, 1);
1085 rcu_assign_pointer(root
->node
, cow
);
1087 btrfs_free_tree_block(trans
, root
, buf
, parent_start
,
1089 free_extent_buffer(buf
);
1090 add_root_to_dirty_list(root
);
1092 WARN_ON(trans
->transid
!= btrfs_header_generation(parent
));
1093 tree_mod_log_insert_key(parent
, parent_slot
,
1094 MOD_LOG_KEY_REPLACE
, GFP_NOFS
);
1095 btrfs_set_node_blockptr(parent
, parent_slot
,
1097 btrfs_set_node_ptr_generation(parent
, parent_slot
,
1099 btrfs_mark_buffer_dirty(parent
);
1101 ret
= tree_mod_log_free_eb(buf
);
1103 btrfs_abort_transaction(trans
, ret
);
1107 btrfs_free_tree_block(trans
, root
, buf
, parent_start
,
1111 btrfs_tree_unlock(buf
);
1112 free_extent_buffer_stale(buf
);
1113 btrfs_mark_buffer_dirty(cow
);
1119 * returns the logical address of the oldest predecessor of the given root.
1120 * entries older than time_seq are ignored.
1122 static struct tree_mod_elem
*__tree_mod_log_oldest_root(
1123 struct extent_buffer
*eb_root
, u64 time_seq
)
1125 struct tree_mod_elem
*tm
;
1126 struct tree_mod_elem
*found
= NULL
;
1127 u64 root_logical
= eb_root
->start
;
1134 * the very last operation that's logged for a root is the
1135 * replacement operation (if it is replaced at all). this has
1136 * the logical address of the *new* root, making it the very
1137 * first operation that's logged for this root.
1140 tm
= tree_mod_log_search_oldest(eb_root
->fs_info
, root_logical
,
1145 * if there are no tree operation for the oldest root, we simply
1146 * return it. this should only happen if that (old) root is at
1153 * if there's an operation that's not a root replacement, we
1154 * found the oldest version of our root. normally, we'll find a
1155 * MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
1157 if (tm
->op
!= MOD_LOG_ROOT_REPLACE
)
1161 root_logical
= tm
->old_root
.logical
;
1165 /* if there's no old root to return, return what we found instead */
1173 * tm is a pointer to the first operation to rewind within eb. then, all
1174 * previous operations will be rewound (until we reach something older than
1178 __tree_mod_log_rewind(struct btrfs_fs_info
*fs_info
, struct extent_buffer
*eb
,
1179 u64 time_seq
, struct tree_mod_elem
*first_tm
)
1182 struct rb_node
*next
;
1183 struct tree_mod_elem
*tm
= first_tm
;
1184 unsigned long o_dst
;
1185 unsigned long o_src
;
1186 unsigned long p_size
= sizeof(struct btrfs_key_ptr
);
1188 n
= btrfs_header_nritems(eb
);
1189 read_lock(&fs_info
->tree_mod_log_lock
);
1190 while (tm
&& tm
->seq
>= time_seq
) {
1192 * all the operations are recorded with the operator used for
1193 * the modification. as we're going backwards, we do the
1194 * opposite of each operation here.
1197 case MOD_LOG_KEY_REMOVE_WHILE_FREEING
:
1198 BUG_ON(tm
->slot
< n
);
1200 case MOD_LOG_KEY_REMOVE_WHILE_MOVING
:
1201 case MOD_LOG_KEY_REMOVE
:
1202 btrfs_set_node_key(eb
, &tm
->key
, tm
->slot
);
1203 btrfs_set_node_blockptr(eb
, tm
->slot
, tm
->blockptr
);
1204 btrfs_set_node_ptr_generation(eb
, tm
->slot
,
1208 case MOD_LOG_KEY_REPLACE
:
1209 BUG_ON(tm
->slot
>= n
);
1210 btrfs_set_node_key(eb
, &tm
->key
, tm
->slot
);
1211 btrfs_set_node_blockptr(eb
, tm
->slot
, tm
->blockptr
);
1212 btrfs_set_node_ptr_generation(eb
, tm
->slot
,
1215 case MOD_LOG_KEY_ADD
:
1216 /* if a move operation is needed it's in the log */
1219 case MOD_LOG_MOVE_KEYS
:
1220 o_dst
= btrfs_node_key_ptr_offset(tm
->slot
);
1221 o_src
= btrfs_node_key_ptr_offset(tm
->move
.dst_slot
);
1222 memmove_extent_buffer(eb
, o_dst
, o_src
,
1223 tm
->move
.nr_items
* p_size
);
1225 case MOD_LOG_ROOT_REPLACE
:
1227 * this operation is special. for roots, this must be
1228 * handled explicitly before rewinding.
1229 * for non-roots, this operation may exist if the node
1230 * was a root: root A -> child B; then A gets empty and
1231 * B is promoted to the new root. in the mod log, we'll
1232 * have a root-replace operation for B, a tree block
1233 * that is no root. we simply ignore that operation.
1237 next
= rb_next(&tm
->node
);
1240 tm
= rb_entry(next
, struct tree_mod_elem
, node
);
1241 if (tm
->logical
!= first_tm
->logical
)
1244 read_unlock(&fs_info
->tree_mod_log_lock
);
1245 btrfs_set_header_nritems(eb
, n
);
1249 * Called with eb read locked. If the buffer cannot be rewound, the same buffer
1250 * is returned. If rewind operations happen, a fresh buffer is returned. The
1251 * returned buffer is always read-locked. If the returned buffer is not the
1252 * input buffer, the lock on the input buffer is released and the input buffer
1253 * is freed (its refcount is decremented).
1255 static struct extent_buffer
*
1256 tree_mod_log_rewind(struct btrfs_fs_info
*fs_info
, struct btrfs_path
*path
,
1257 struct extent_buffer
*eb
, u64 time_seq
)
1259 struct extent_buffer
*eb_rewin
;
1260 struct tree_mod_elem
*tm
;
1265 if (btrfs_header_level(eb
) == 0)
1268 tm
= tree_mod_log_search(fs_info
, eb
->start
, time_seq
);
1272 btrfs_set_path_blocking(path
);
1273 btrfs_set_lock_blocking_read(eb
);
1275 if (tm
->op
== MOD_LOG_KEY_REMOVE_WHILE_FREEING
) {
1276 BUG_ON(tm
->slot
!= 0);
1277 eb_rewin
= alloc_dummy_extent_buffer(fs_info
, eb
->start
);
1279 btrfs_tree_read_unlock_blocking(eb
);
1280 free_extent_buffer(eb
);
1283 btrfs_set_header_bytenr(eb_rewin
, eb
->start
);
1284 btrfs_set_header_backref_rev(eb_rewin
,
1285 btrfs_header_backref_rev(eb
));
1286 btrfs_set_header_owner(eb_rewin
, btrfs_header_owner(eb
));
1287 btrfs_set_header_level(eb_rewin
, btrfs_header_level(eb
));
1289 eb_rewin
= btrfs_clone_extent_buffer(eb
);
1291 btrfs_tree_read_unlock_blocking(eb
);
1292 free_extent_buffer(eb
);
1297 btrfs_tree_read_unlock_blocking(eb
);
1298 free_extent_buffer(eb
);
1300 btrfs_tree_read_lock(eb_rewin
);
1301 __tree_mod_log_rewind(fs_info
, eb_rewin
, time_seq
, tm
);
1302 WARN_ON(btrfs_header_nritems(eb_rewin
) >
1303 BTRFS_NODEPTRS_PER_BLOCK(fs_info
));
1309 * get_old_root() rewinds the state of @root's root node to the given @time_seq
1310 * value. If there are no changes, the current root->root_node is returned. If
1311 * anything changed in between, there's a fresh buffer allocated on which the
1312 * rewind operations are done. In any case, the returned buffer is read locked.
1313 * Returns NULL on error (with no locks held).
1315 static inline struct extent_buffer
*
1316 get_old_root(struct btrfs_root
*root
, u64 time_seq
)
1318 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
1319 struct tree_mod_elem
*tm
;
1320 struct extent_buffer
*eb
= NULL
;
1321 struct extent_buffer
*eb_root
;
1322 u64 eb_root_owner
= 0;
1323 struct extent_buffer
*old
;
1324 struct tree_mod_root
*old_root
= NULL
;
1325 u64 old_generation
= 0;
1329 eb_root
= btrfs_read_lock_root_node(root
);
1330 tm
= __tree_mod_log_oldest_root(eb_root
, time_seq
);
1334 if (tm
->op
== MOD_LOG_ROOT_REPLACE
) {
1335 old_root
= &tm
->old_root
;
1336 old_generation
= tm
->generation
;
1337 logical
= old_root
->logical
;
1338 level
= old_root
->level
;
1340 logical
= eb_root
->start
;
1341 level
= btrfs_header_level(eb_root
);
1344 tm
= tree_mod_log_search(fs_info
, logical
, time_seq
);
1345 if (old_root
&& tm
&& tm
->op
!= MOD_LOG_KEY_REMOVE_WHILE_FREEING
) {
1346 btrfs_tree_read_unlock(eb_root
);
1347 free_extent_buffer(eb_root
);
1348 old
= read_tree_block(fs_info
, logical
, 0, level
, NULL
);
1349 if (WARN_ON(IS_ERR(old
) || !extent_buffer_uptodate(old
))) {
1351 free_extent_buffer(old
);
1353 "failed to read tree block %llu from get_old_root",
1356 eb
= btrfs_clone_extent_buffer(old
);
1357 free_extent_buffer(old
);
1359 } else if (old_root
) {
1360 eb_root_owner
= btrfs_header_owner(eb_root
);
1361 btrfs_tree_read_unlock(eb_root
);
1362 free_extent_buffer(eb_root
);
1363 eb
= alloc_dummy_extent_buffer(fs_info
, logical
);
1365 btrfs_set_lock_blocking_read(eb_root
);
1366 eb
= btrfs_clone_extent_buffer(eb_root
);
1367 btrfs_tree_read_unlock_blocking(eb_root
);
1368 free_extent_buffer(eb_root
);
1373 btrfs_tree_read_lock(eb
);
1375 btrfs_set_header_bytenr(eb
, eb
->start
);
1376 btrfs_set_header_backref_rev(eb
, BTRFS_MIXED_BACKREF_REV
);
1377 btrfs_set_header_owner(eb
, eb_root_owner
);
1378 btrfs_set_header_level(eb
, old_root
->level
);
1379 btrfs_set_header_generation(eb
, old_generation
);
1382 __tree_mod_log_rewind(fs_info
, eb
, time_seq
, tm
);
1384 WARN_ON(btrfs_header_level(eb
) != 0);
1385 WARN_ON(btrfs_header_nritems(eb
) > BTRFS_NODEPTRS_PER_BLOCK(fs_info
));
1390 int btrfs_old_root_level(struct btrfs_root
*root
, u64 time_seq
)
1392 struct tree_mod_elem
*tm
;
1394 struct extent_buffer
*eb_root
= btrfs_root_node(root
);
1396 tm
= __tree_mod_log_oldest_root(eb_root
, time_seq
);
1397 if (tm
&& tm
->op
== MOD_LOG_ROOT_REPLACE
) {
1398 level
= tm
->old_root
.level
;
1400 level
= btrfs_header_level(eb_root
);
1402 free_extent_buffer(eb_root
);
1407 static inline int should_cow_block(struct btrfs_trans_handle
*trans
,
1408 struct btrfs_root
*root
,
1409 struct extent_buffer
*buf
)
1411 if (btrfs_is_testing(root
->fs_info
))
1414 /* Ensure we can see the FORCE_COW bit */
1415 smp_mb__before_atomic();
1418 * We do not need to cow a block if
1419 * 1) this block is not created or changed in this transaction;
1420 * 2) this block does not belong to TREE_RELOC tree;
1421 * 3) the root is not forced COW.
1423 * What is forced COW:
1424 * when we create snapshot during committing the transaction,
1425 * after we've finished copying src root, we must COW the shared
1426 * block to ensure the metadata consistency.
1428 if (btrfs_header_generation(buf
) == trans
->transid
&&
1429 !btrfs_header_flag(buf
, BTRFS_HEADER_FLAG_WRITTEN
) &&
1430 !(root
->root_key
.objectid
!= BTRFS_TREE_RELOC_OBJECTID
&&
1431 btrfs_header_flag(buf
, BTRFS_HEADER_FLAG_RELOC
)) &&
1432 !test_bit(BTRFS_ROOT_FORCE_COW
, &root
->state
))
1438 * cows a single block, see __btrfs_cow_block for the real work.
1439 * This version of it has extra checks so that a block isn't COWed more than
1440 * once per transaction, as long as it hasn't been written yet
1442 noinline
int btrfs_cow_block(struct btrfs_trans_handle
*trans
,
1443 struct btrfs_root
*root
, struct extent_buffer
*buf
,
1444 struct extent_buffer
*parent
, int parent_slot
,
1445 struct extent_buffer
**cow_ret
)
1447 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
1451 if (test_bit(BTRFS_ROOT_DELETING
, &root
->state
))
1453 "COW'ing blocks on a fs root that's being dropped");
1455 if (trans
->transaction
!= fs_info
->running_transaction
)
1456 WARN(1, KERN_CRIT
"trans %llu running %llu\n",
1458 fs_info
->running_transaction
->transid
);
1460 if (trans
->transid
!= fs_info
->generation
)
1461 WARN(1, KERN_CRIT
"trans %llu running %llu\n",
1462 trans
->transid
, fs_info
->generation
);
1464 if (!should_cow_block(trans
, root
, buf
)) {
1465 trans
->dirty
= true;
1470 search_start
= buf
->start
& ~((u64
)SZ_1G
- 1);
1473 btrfs_set_lock_blocking_write(parent
);
1474 btrfs_set_lock_blocking_write(buf
);
1477 * Before CoWing this block for later modification, check if it's
1478 * the subtree root and do the delayed subtree trace if needed.
1480 * Also We don't care about the error, as it's handled internally.
1482 btrfs_qgroup_trace_subtree_after_cow(trans
, root
, buf
);
1483 ret
= __btrfs_cow_block(trans
, root
, buf
, parent
,
1484 parent_slot
, cow_ret
, search_start
, 0);
1486 trace_btrfs_cow_block(root
, buf
, *cow_ret
);
1492 * helper function for defrag to decide if two blocks pointed to by a
1493 * node are actually close by
1495 static int close_blocks(u64 blocknr
, u64 other
, u32 blocksize
)
1497 if (blocknr
< other
&& other
- (blocknr
+ blocksize
) < 32768)
1499 if (blocknr
> other
&& blocknr
- (other
+ blocksize
) < 32768)
1505 * compare two keys in a memcmp fashion
1507 static int comp_keys(const struct btrfs_disk_key
*disk
,
1508 const struct btrfs_key
*k2
)
1510 struct btrfs_key k1
;
1512 btrfs_disk_key_to_cpu(&k1
, disk
);
1514 return btrfs_comp_cpu_keys(&k1
, k2
);
1518 * same as comp_keys only with two btrfs_key's
1520 int __pure
btrfs_comp_cpu_keys(const struct btrfs_key
*k1
, const struct btrfs_key
*k2
)
1522 if (k1
->objectid
> k2
->objectid
)
1524 if (k1
->objectid
< k2
->objectid
)
1526 if (k1
->type
> k2
->type
)
1528 if (k1
->type
< k2
->type
)
1530 if (k1
->offset
> k2
->offset
)
1532 if (k1
->offset
< k2
->offset
)
1538 * this is used by the defrag code to go through all the
1539 * leaves pointed to by a node and reallocate them so that
1540 * disk order is close to key order
1542 int btrfs_realloc_node(struct btrfs_trans_handle
*trans
,
1543 struct btrfs_root
*root
, struct extent_buffer
*parent
,
1544 int start_slot
, u64
*last_ret
,
1545 struct btrfs_key
*progress
)
1547 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
1548 struct extent_buffer
*cur
;
1551 u64 search_start
= *last_ret
;
1561 int progress_passed
= 0;
1562 struct btrfs_disk_key disk_key
;
1564 parent_level
= btrfs_header_level(parent
);
1566 WARN_ON(trans
->transaction
!= fs_info
->running_transaction
);
1567 WARN_ON(trans
->transid
!= fs_info
->generation
);
1569 parent_nritems
= btrfs_header_nritems(parent
);
1570 blocksize
= fs_info
->nodesize
;
1571 end_slot
= parent_nritems
- 1;
1573 if (parent_nritems
<= 1)
1576 btrfs_set_lock_blocking_write(parent
);
1578 for (i
= start_slot
; i
<= end_slot
; i
++) {
1579 struct btrfs_key first_key
;
1582 btrfs_node_key(parent
, &disk_key
, i
);
1583 if (!progress_passed
&& comp_keys(&disk_key
, progress
) < 0)
1586 progress_passed
= 1;
1587 blocknr
= btrfs_node_blockptr(parent
, i
);
1588 gen
= btrfs_node_ptr_generation(parent
, i
);
1589 btrfs_node_key_to_cpu(parent
, &first_key
, i
);
1590 if (last_block
== 0)
1591 last_block
= blocknr
;
1594 other
= btrfs_node_blockptr(parent
, i
- 1);
1595 close
= close_blocks(blocknr
, other
, blocksize
);
1597 if (!close
&& i
< end_slot
) {
1598 other
= btrfs_node_blockptr(parent
, i
+ 1);
1599 close
= close_blocks(blocknr
, other
, blocksize
);
1602 last_block
= blocknr
;
1606 cur
= find_extent_buffer(fs_info
, blocknr
);
1608 uptodate
= btrfs_buffer_uptodate(cur
, gen
, 0);
1611 if (!cur
|| !uptodate
) {
1613 cur
= read_tree_block(fs_info
, blocknr
, gen
,
1617 return PTR_ERR(cur
);
1618 } else if (!extent_buffer_uptodate(cur
)) {
1619 free_extent_buffer(cur
);
1622 } else if (!uptodate
) {
1623 err
= btrfs_read_buffer(cur
, gen
,
1624 parent_level
- 1,&first_key
);
1626 free_extent_buffer(cur
);
1631 if (search_start
== 0)
1632 search_start
= last_block
;
1634 btrfs_tree_lock(cur
);
1635 btrfs_set_lock_blocking_write(cur
);
1636 err
= __btrfs_cow_block(trans
, root
, cur
, parent
, i
,
1639 (end_slot
- i
) * blocksize
));
1641 btrfs_tree_unlock(cur
);
1642 free_extent_buffer(cur
);
1645 search_start
= cur
->start
;
1646 last_block
= cur
->start
;
1647 *last_ret
= search_start
;
1648 btrfs_tree_unlock(cur
);
1649 free_extent_buffer(cur
);
1655 * search for key in the extent_buffer. The items start at offset p,
1656 * and they are item_size apart. There are 'max' items in p.
1658 * the slot in the array is returned via slot, and it points to
1659 * the place where you would insert key if it is not found in
1662 * slot may point to max if the key is bigger than all of the keys
1664 static noinline
int generic_bin_search(struct extent_buffer
*eb
,
1665 unsigned long p
, int item_size
,
1666 const struct btrfs_key
*key
,
1673 struct btrfs_disk_key
*tmp
= NULL
;
1674 struct btrfs_disk_key unaligned
;
1675 unsigned long offset
;
1677 unsigned long map_start
= 0;
1678 unsigned long map_len
= 0;
1682 btrfs_err(eb
->fs_info
,
1683 "%s: low (%d) > high (%d) eb %llu owner %llu level %d",
1684 __func__
, low
, high
, eb
->start
,
1685 btrfs_header_owner(eb
), btrfs_header_level(eb
));
1689 while (low
< high
) {
1690 mid
= (low
+ high
) / 2;
1691 offset
= p
+ mid
* item_size
;
1693 if (!kaddr
|| offset
< map_start
||
1694 (offset
+ sizeof(struct btrfs_disk_key
)) >
1695 map_start
+ map_len
) {
1697 err
= map_private_extent_buffer(eb
, offset
,
1698 sizeof(struct btrfs_disk_key
),
1699 &kaddr
, &map_start
, &map_len
);
1702 tmp
= (struct btrfs_disk_key
*)(kaddr
+ offset
-
1704 } else if (err
== 1) {
1705 read_extent_buffer(eb
, &unaligned
,
1706 offset
, sizeof(unaligned
));
1713 tmp
= (struct btrfs_disk_key
*)(kaddr
+ offset
-
1716 ret
= comp_keys(tmp
, key
);
1732 * simple bin_search frontend that does the right thing for
1735 int btrfs_bin_search(struct extent_buffer
*eb
, const struct btrfs_key
*key
,
1736 int level
, int *slot
)
1739 return generic_bin_search(eb
,
1740 offsetof(struct btrfs_leaf
, items
),
1741 sizeof(struct btrfs_item
),
1742 key
, btrfs_header_nritems(eb
),
1745 return generic_bin_search(eb
,
1746 offsetof(struct btrfs_node
, ptrs
),
1747 sizeof(struct btrfs_key_ptr
),
1748 key
, btrfs_header_nritems(eb
),
1752 static void root_add_used(struct btrfs_root
*root
, u32 size
)
1754 spin_lock(&root
->accounting_lock
);
1755 btrfs_set_root_used(&root
->root_item
,
1756 btrfs_root_used(&root
->root_item
) + size
);
1757 spin_unlock(&root
->accounting_lock
);
1760 static void root_sub_used(struct btrfs_root
*root
, u32 size
)
1762 spin_lock(&root
->accounting_lock
);
1763 btrfs_set_root_used(&root
->root_item
,
1764 btrfs_root_used(&root
->root_item
) - size
);
1765 spin_unlock(&root
->accounting_lock
);
1768 /* given a node and slot number, this reads the blocks it points to. The
1769 * extent buffer is returned with a reference taken (but unlocked).
1771 struct extent_buffer
*btrfs_read_node_slot(struct extent_buffer
*parent
,
1774 int level
= btrfs_header_level(parent
);
1775 struct extent_buffer
*eb
;
1776 struct btrfs_key first_key
;
1778 if (slot
< 0 || slot
>= btrfs_header_nritems(parent
))
1779 return ERR_PTR(-ENOENT
);
1783 btrfs_node_key_to_cpu(parent
, &first_key
, slot
);
1784 eb
= read_tree_block(parent
->fs_info
, btrfs_node_blockptr(parent
, slot
),
1785 btrfs_node_ptr_generation(parent
, slot
),
1786 level
- 1, &first_key
);
1787 if (!IS_ERR(eb
) && !extent_buffer_uptodate(eb
)) {
1788 free_extent_buffer(eb
);
1796 * node level balancing, used to make sure nodes are in proper order for
1797 * item deletion. We balance from the top down, so we have to make sure
1798 * that a deletion won't leave an node completely empty later on.
1800 static noinline
int balance_level(struct btrfs_trans_handle
*trans
,
1801 struct btrfs_root
*root
,
1802 struct btrfs_path
*path
, int level
)
1804 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
1805 struct extent_buffer
*right
= NULL
;
1806 struct extent_buffer
*mid
;
1807 struct extent_buffer
*left
= NULL
;
1808 struct extent_buffer
*parent
= NULL
;
1812 int orig_slot
= path
->slots
[level
];
1817 mid
= path
->nodes
[level
];
1819 WARN_ON(path
->locks
[level
] != BTRFS_WRITE_LOCK
&&
1820 path
->locks
[level
] != BTRFS_WRITE_LOCK_BLOCKING
);
1821 WARN_ON(btrfs_header_generation(mid
) != trans
->transid
);
1823 orig_ptr
= btrfs_node_blockptr(mid
, orig_slot
);
1825 if (level
< BTRFS_MAX_LEVEL
- 1) {
1826 parent
= path
->nodes
[level
+ 1];
1827 pslot
= path
->slots
[level
+ 1];
1831 * deal with the case where there is only one pointer in the root
1832 * by promoting the node below to a root
1835 struct extent_buffer
*child
;
1837 if (btrfs_header_nritems(mid
) != 1)
1840 /* promote the child to a root */
1841 child
= btrfs_read_node_slot(mid
, 0);
1842 if (IS_ERR(child
)) {
1843 ret
= PTR_ERR(child
);
1844 btrfs_handle_fs_error(fs_info
, ret
, NULL
);
1848 btrfs_tree_lock(child
);
1849 btrfs_set_lock_blocking_write(child
);
1850 ret
= btrfs_cow_block(trans
, root
, child
, mid
, 0, &child
);
1852 btrfs_tree_unlock(child
);
1853 free_extent_buffer(child
);
1857 ret
= tree_mod_log_insert_root(root
->node
, child
, 1);
1859 rcu_assign_pointer(root
->node
, child
);
1861 add_root_to_dirty_list(root
);
1862 btrfs_tree_unlock(child
);
1864 path
->locks
[level
] = 0;
1865 path
->nodes
[level
] = NULL
;
1866 btrfs_clean_tree_block(mid
);
1867 btrfs_tree_unlock(mid
);
1868 /* once for the path */
1869 free_extent_buffer(mid
);
1871 root_sub_used(root
, mid
->len
);
1872 btrfs_free_tree_block(trans
, root
, mid
, 0, 1);
1873 /* once for the root ptr */
1874 free_extent_buffer_stale(mid
);
1877 if (btrfs_header_nritems(mid
) >
1878 BTRFS_NODEPTRS_PER_BLOCK(fs_info
) / 4)
1881 left
= btrfs_read_node_slot(parent
, pslot
- 1);
1886 btrfs_tree_lock(left
);
1887 btrfs_set_lock_blocking_write(left
);
1888 wret
= btrfs_cow_block(trans
, root
, left
,
1889 parent
, pslot
- 1, &left
);
1896 right
= btrfs_read_node_slot(parent
, pslot
+ 1);
1901 btrfs_tree_lock(right
);
1902 btrfs_set_lock_blocking_write(right
);
1903 wret
= btrfs_cow_block(trans
, root
, right
,
1904 parent
, pslot
+ 1, &right
);
1911 /* first, try to make some room in the middle buffer */
1913 orig_slot
+= btrfs_header_nritems(left
);
1914 wret
= push_node_left(trans
, left
, mid
, 1);
1920 * then try to empty the right most buffer into the middle
1923 wret
= push_node_left(trans
, mid
, right
, 1);
1924 if (wret
< 0 && wret
!= -ENOSPC
)
1926 if (btrfs_header_nritems(right
) == 0) {
1927 btrfs_clean_tree_block(right
);
1928 btrfs_tree_unlock(right
);
1929 del_ptr(root
, path
, level
+ 1, pslot
+ 1);
1930 root_sub_used(root
, right
->len
);
1931 btrfs_free_tree_block(trans
, root
, right
, 0, 1);
1932 free_extent_buffer_stale(right
);
1935 struct btrfs_disk_key right_key
;
1936 btrfs_node_key(right
, &right_key
, 0);
1937 ret
= tree_mod_log_insert_key(parent
, pslot
+ 1,
1938 MOD_LOG_KEY_REPLACE
, GFP_NOFS
);
1940 btrfs_set_node_key(parent
, &right_key
, pslot
+ 1);
1941 btrfs_mark_buffer_dirty(parent
);
1944 if (btrfs_header_nritems(mid
) == 1) {
1946 * we're not allowed to leave a node with one item in the
1947 * tree during a delete. A deletion from lower in the tree
1948 * could try to delete the only pointer in this node.
1949 * So, pull some keys from the left.
1950 * There has to be a left pointer at this point because
1951 * otherwise we would have pulled some pointers from the
1956 btrfs_handle_fs_error(fs_info
, ret
, NULL
);
1959 wret
= balance_node_right(trans
, mid
, left
);
1965 wret
= push_node_left(trans
, left
, mid
, 1);
1971 if (btrfs_header_nritems(mid
) == 0) {
1972 btrfs_clean_tree_block(mid
);
1973 btrfs_tree_unlock(mid
);
1974 del_ptr(root
, path
, level
+ 1, pslot
);
1975 root_sub_used(root
, mid
->len
);
1976 btrfs_free_tree_block(trans
, root
, mid
, 0, 1);
1977 free_extent_buffer_stale(mid
);
1980 /* update the parent key to reflect our changes */
1981 struct btrfs_disk_key mid_key
;
1982 btrfs_node_key(mid
, &mid_key
, 0);
1983 ret
= tree_mod_log_insert_key(parent
, pslot
,
1984 MOD_LOG_KEY_REPLACE
, GFP_NOFS
);
1986 btrfs_set_node_key(parent
, &mid_key
, pslot
);
1987 btrfs_mark_buffer_dirty(parent
);
1990 /* update the path */
1992 if (btrfs_header_nritems(left
) > orig_slot
) {
1993 atomic_inc(&left
->refs
);
1994 /* left was locked after cow */
1995 path
->nodes
[level
] = left
;
1996 path
->slots
[level
+ 1] -= 1;
1997 path
->slots
[level
] = orig_slot
;
1999 btrfs_tree_unlock(mid
);
2000 free_extent_buffer(mid
);
2003 orig_slot
-= btrfs_header_nritems(left
);
2004 path
->slots
[level
] = orig_slot
;
2007 /* double check we haven't messed things up */
2009 btrfs_node_blockptr(path
->nodes
[level
], path
->slots
[level
]))
2013 btrfs_tree_unlock(right
);
2014 free_extent_buffer(right
);
2017 if (path
->nodes
[level
] != left
)
2018 btrfs_tree_unlock(left
);
2019 free_extent_buffer(left
);
2024 /* Node balancing for insertion. Here we only split or push nodes around
2025 * when they are completely full. This is also done top down, so we
2026 * have to be pessimistic.
2028 static noinline
int push_nodes_for_insert(struct btrfs_trans_handle
*trans
,
2029 struct btrfs_root
*root
,
2030 struct btrfs_path
*path
, int level
)
2032 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
2033 struct extent_buffer
*right
= NULL
;
2034 struct extent_buffer
*mid
;
2035 struct extent_buffer
*left
= NULL
;
2036 struct extent_buffer
*parent
= NULL
;
2040 int orig_slot
= path
->slots
[level
];
2045 mid
= path
->nodes
[level
];
2046 WARN_ON(btrfs_header_generation(mid
) != trans
->transid
);
2048 if (level
< BTRFS_MAX_LEVEL
- 1) {
2049 parent
= path
->nodes
[level
+ 1];
2050 pslot
= path
->slots
[level
+ 1];
2056 left
= btrfs_read_node_slot(parent
, pslot
- 1);
2060 /* first, try to make some room in the middle buffer */
2064 btrfs_tree_lock(left
);
2065 btrfs_set_lock_blocking_write(left
);
2067 left_nr
= btrfs_header_nritems(left
);
2068 if (left_nr
>= BTRFS_NODEPTRS_PER_BLOCK(fs_info
) - 1) {
2071 ret
= btrfs_cow_block(trans
, root
, left
, parent
,
2076 wret
= push_node_left(trans
, left
, mid
, 0);
2082 struct btrfs_disk_key disk_key
;
2083 orig_slot
+= left_nr
;
2084 btrfs_node_key(mid
, &disk_key
, 0);
2085 ret
= tree_mod_log_insert_key(parent
, pslot
,
2086 MOD_LOG_KEY_REPLACE
, GFP_NOFS
);
2088 btrfs_set_node_key(parent
, &disk_key
, pslot
);
2089 btrfs_mark_buffer_dirty(parent
);
2090 if (btrfs_header_nritems(left
) > orig_slot
) {
2091 path
->nodes
[level
] = left
;
2092 path
->slots
[level
+ 1] -= 1;
2093 path
->slots
[level
] = orig_slot
;
2094 btrfs_tree_unlock(mid
);
2095 free_extent_buffer(mid
);
2098 btrfs_header_nritems(left
);
2099 path
->slots
[level
] = orig_slot
;
2100 btrfs_tree_unlock(left
);
2101 free_extent_buffer(left
);
2105 btrfs_tree_unlock(left
);
2106 free_extent_buffer(left
);
2108 right
= btrfs_read_node_slot(parent
, pslot
+ 1);
2113 * then try to empty the right most buffer into the middle
2118 btrfs_tree_lock(right
);
2119 btrfs_set_lock_blocking_write(right
);
2121 right_nr
= btrfs_header_nritems(right
);
2122 if (right_nr
>= BTRFS_NODEPTRS_PER_BLOCK(fs_info
) - 1) {
2125 ret
= btrfs_cow_block(trans
, root
, right
,
2131 wret
= balance_node_right(trans
, right
, mid
);
2137 struct btrfs_disk_key disk_key
;
2139 btrfs_node_key(right
, &disk_key
, 0);
2140 ret
= tree_mod_log_insert_key(parent
, pslot
+ 1,
2141 MOD_LOG_KEY_REPLACE
, GFP_NOFS
);
2143 btrfs_set_node_key(parent
, &disk_key
, pslot
+ 1);
2144 btrfs_mark_buffer_dirty(parent
);
2146 if (btrfs_header_nritems(mid
) <= orig_slot
) {
2147 path
->nodes
[level
] = right
;
2148 path
->slots
[level
+ 1] += 1;
2149 path
->slots
[level
] = orig_slot
-
2150 btrfs_header_nritems(mid
);
2151 btrfs_tree_unlock(mid
);
2152 free_extent_buffer(mid
);
2154 btrfs_tree_unlock(right
);
2155 free_extent_buffer(right
);
2159 btrfs_tree_unlock(right
);
2160 free_extent_buffer(right
);
2166 * readahead one full node of leaves, finding things that are close
2167 * to the block in 'slot', and triggering ra on them.
2169 static void reada_for_search(struct btrfs_fs_info
*fs_info
,
2170 struct btrfs_path
*path
,
2171 int level
, int slot
, u64 objectid
)
2173 struct extent_buffer
*node
;
2174 struct btrfs_disk_key disk_key
;
2179 struct extent_buffer
*eb
;
2187 if (!path
->nodes
[level
])
2190 node
= path
->nodes
[level
];
2192 search
= btrfs_node_blockptr(node
, slot
);
2193 blocksize
= fs_info
->nodesize
;
2194 eb
= find_extent_buffer(fs_info
, search
);
2196 free_extent_buffer(eb
);
2202 nritems
= btrfs_header_nritems(node
);
2206 if (path
->reada
== READA_BACK
) {
2210 } else if (path
->reada
== READA_FORWARD
) {
2215 if (path
->reada
== READA_BACK
&& objectid
) {
2216 btrfs_node_key(node
, &disk_key
, nr
);
2217 if (btrfs_disk_key_objectid(&disk_key
) != objectid
)
2220 search
= btrfs_node_blockptr(node
, nr
);
2221 if ((search
<= target
&& target
- search
<= 65536) ||
2222 (search
> target
&& search
- target
<= 65536)) {
2223 readahead_tree_block(fs_info
, search
);
2227 if ((nread
> 65536 || nscan
> 32))
2232 static noinline
void reada_for_balance(struct btrfs_fs_info
*fs_info
,
2233 struct btrfs_path
*path
, int level
)
2237 struct extent_buffer
*parent
;
2238 struct extent_buffer
*eb
;
2243 parent
= path
->nodes
[level
+ 1];
2247 nritems
= btrfs_header_nritems(parent
);
2248 slot
= path
->slots
[level
+ 1];
2251 block1
= btrfs_node_blockptr(parent
, slot
- 1);
2252 gen
= btrfs_node_ptr_generation(parent
, slot
- 1);
2253 eb
= find_extent_buffer(fs_info
, block1
);
2255 * if we get -eagain from btrfs_buffer_uptodate, we
2256 * don't want to return eagain here. That will loop
2259 if (eb
&& btrfs_buffer_uptodate(eb
, gen
, 1) != 0)
2261 free_extent_buffer(eb
);
2263 if (slot
+ 1 < nritems
) {
2264 block2
= btrfs_node_blockptr(parent
, slot
+ 1);
2265 gen
= btrfs_node_ptr_generation(parent
, slot
+ 1);
2266 eb
= find_extent_buffer(fs_info
, block2
);
2267 if (eb
&& btrfs_buffer_uptodate(eb
, gen
, 1) != 0)
2269 free_extent_buffer(eb
);
2273 readahead_tree_block(fs_info
, block1
);
2275 readahead_tree_block(fs_info
, block2
);
2280 * when we walk down the tree, it is usually safe to unlock the higher layers
2281 * in the tree. The exceptions are when our path goes through slot 0, because
2282 * operations on the tree might require changing key pointers higher up in the
2285 * callers might also have set path->keep_locks, which tells this code to keep
2286 * the lock if the path points to the last slot in the block. This is part of
2287 * walking through the tree, and selecting the next slot in the higher block.
2289 * lowest_unlock sets the lowest level in the tree we're allowed to unlock. so
2290 * if lowest_unlock is 1, level 0 won't be unlocked
2292 static noinline
void unlock_up(struct btrfs_path
*path
, int level
,
2293 int lowest_unlock
, int min_write_lock_level
,
2294 int *write_lock_level
)
2297 int skip_level
= level
;
2299 struct extent_buffer
*t
;
2301 for (i
= level
; i
< BTRFS_MAX_LEVEL
; i
++) {
2302 if (!path
->nodes
[i
])
2304 if (!path
->locks
[i
])
2306 if (!no_skips
&& path
->slots
[i
] == 0) {
2310 if (!no_skips
&& path
->keep_locks
) {
2313 nritems
= btrfs_header_nritems(t
);
2314 if (nritems
< 1 || path
->slots
[i
] >= nritems
- 1) {
2319 if (skip_level
< i
&& i
>= lowest_unlock
)
2323 if (i
>= lowest_unlock
&& i
> skip_level
) {
2324 btrfs_tree_unlock_rw(t
, path
->locks
[i
]);
2326 if (write_lock_level
&&
2327 i
> min_write_lock_level
&&
2328 i
<= *write_lock_level
) {
2329 *write_lock_level
= i
- 1;
/*
 * helper function for btrfs_search_slot.  The goal is to find a block
 * in cache without setting the path to blocking.  If we find the block
 * we return zero and the path is unchanged.
 *
 * If we can't find the block, we set the path blocking and do some
 * reada.  -EAGAIN is returned and the search must be repeated.
 */
static int
read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
		      struct extent_buffer **eb_ret, int level, int slot,
		      const struct btrfs_key *key)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 blocknr;
	u64 gen;
	struct extent_buffer *b = *eb_ret;
	struct extent_buffer *tmp;
	struct btrfs_key first_key;
	int ret;
	int parent_level;

	blocknr = btrfs_node_blockptr(b, slot);
	gen = btrfs_node_ptr_generation(b, slot);
	parent_level = btrfs_header_level(b);
	btrfs_node_key_to_cpu(b, &first_key, slot);

	tmp = find_extent_buffer(fs_info, blocknr);
	if (tmp) {
		/* first we do an atomic uptodate check */
		if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
			/*
			 * Do extra check for first_key, eb can be stale due to
			 * being cached, read from scrub, or have multiple
			 * parents (shared tree blocks).
			 */
			if (btrfs_verify_level_key(tmp,
					parent_level - 1, &first_key, gen)) {
				free_extent_buffer(tmp);
				return -EUCLEAN;
			}
			*eb_ret = tmp;
			return 0;
		}

		/* the pages were up to date, but we failed
		 * the generation number check.  Do a full
		 * read for the generation number that is correct.
		 * We must do this without dropping locks so
		 * we can trust our generation number
		 */
		btrfs_set_path_blocking(p);

		/* now we're allowed to do a blocking uptodate check */
		ret = btrfs_read_buffer(tmp, gen, parent_level - 1, &first_key);
		if (!ret) {
			*eb_ret = tmp;
			return 0;
		}
		free_extent_buffer(tmp);
		btrfs_release_path(p);
		return -EIO;
	}

	/*
	 * reduce lock contention at high levels
	 * of the btree by dropping locks before
	 * we read.  Don't release the lock on the current
	 * level because we need to walk this node to figure
	 * out which blocks to read.
	 */
	btrfs_unlock_up_safe(p, level + 1);
	btrfs_set_path_blocking(p);

	if (p->reada != READA_NONE)
		reada_for_search(fs_info, p, level, slot, key->objectid);

	ret = -EAGAIN;
	tmp = read_tree_block(fs_info, blocknr, gen, parent_level - 1,
			      &first_key);
	if (!IS_ERR(tmp)) {
		/*
		 * If the read above didn't mark this buffer up to date,
		 * it will never end up being up to date.  Set ret to EIO now
		 * and give up so that our caller doesn't loop forever
		 * on our EAGAINs.
		 */
		if (!extent_buffer_uptodate(tmp))
			ret = -EIO;
		free_extent_buffer(tmp);
	} else {
		ret = PTR_ERR(tmp);
	}

	btrfs_release_path(p);
	return ret;
}
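/*
 * Illustrative sketch (not part of the original file): callers treat
 * -EAGAIN from read_block_for_search() as "the path was dropped, restart
 * the walk from the root", which is why the search loop below does:
 *
 *	err = read_block_for_search(root, p, &b, level, slot, key);
 *	if (err == -EAGAIN)
 *		goto again;
 *	if (err) {
 *		ret = err;
 *		goto done;
 *	}
 */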
/*
 * helper function for btrfs_search_slot.  This does all of the checks
 * for node-level blocks and does any balancing required based on
 * the ins_len.
 *
 * If no extra work was required, zero is returned.  If we had to
 * drop the path, -EAGAIN is returned and btrfs_search_slot must
 * start over
 */
static int
setup_nodes_for_search(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_path *p,
		       struct extent_buffer *b, int level, int ins_len,
		       int *write_lock_level)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;

	if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
	    BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3) {
		int sret;

		if (*write_lock_level < level + 1) {
			*write_lock_level = level + 1;
			btrfs_release_path(p);
			goto again;
		}

		btrfs_set_path_blocking(p);
		reada_for_balance(fs_info, p, level);
		sret = split_node(trans, root, p, level);

		BUG_ON(sret > 0);
		if (sret) {
			ret = sret;
			goto done;
		}
		b = p->nodes[level];
	} else if (ins_len < 0 && btrfs_header_nritems(b) <
		   BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 2) {
		int sret;

		if (*write_lock_level < level + 1) {
			*write_lock_level = level + 1;
			btrfs_release_path(p);
			goto again;
		}

		btrfs_set_path_blocking(p);
		reada_for_balance(fs_info, p, level);
		sret = balance_level(trans, root, p, level);

		if (sret) {
			ret = sret;
			goto done;
		}
		b = p->nodes[level];
		if (!b) {
			btrfs_release_path(p);
			goto again;
		}
		BUG_ON(btrfs_header_nritems(b) == 1);
	}
	return 0;

again:
	ret = -EAGAIN;
done:
	return ret;
}
static int key_search(struct extent_buffer *b, const struct btrfs_key *key,
		      int level, int *prev_cmp, int *slot)
{
	if (*prev_cmp != 0) {
		*prev_cmp = btrfs_bin_search(b, key, level, slot);
		return *prev_cmp;
	}

	*slot = 0;

	return 0;
}
int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
		u64 iobjectid, u64 ioff, u8 key_type,
		struct btrfs_key *found_key)
{
	int ret;
	struct btrfs_key key;
	struct extent_buffer *eb;

	ASSERT(path);
	ASSERT(found_key);

	key.type = key_type;
	key.objectid = iobjectid;
	key.offset = ioff;

	ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	eb = path->nodes[0];
	if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
		ret = btrfs_next_leaf(fs_root, path);
		if (ret)
			return ret;
		eb = path->nodes[0];
	}

	btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
	if (found_key->type != key.type ||
			found_key->objectid != key.objectid)
		return 1;

	return 0;
}
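/*
 * Illustrative usage sketch (not part of the original file): a read-only
 * lookup of an inode item by objectid.  The key type and values are made
 * up for the example:
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	struct btrfs_key found;
 *	int ret;
 *
 *	if (!path)
 *		return -ENOMEM;
 *	ret = btrfs_find_item(fs_root, path, ino, 0,
 *			      BTRFS_INODE_ITEM_KEY, &found);
 *	if (ret == 0) {
 *		// item is at path->nodes[0], path->slots[0]
 *	}
 *	btrfs_free_path(path);
 */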
static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root,
							struct btrfs_path *p,
							int write_lock_level)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *b;
	int root_lock;
	int level = 0;

	/* We try very hard to do read locks on the root */
	root_lock = BTRFS_READ_LOCK;

	if (p->search_commit_root) {
		/*
		 * The commit roots are read only so we always do read locks,
		 * and we always must hold the commit_root_sem when doing
		 * searches on them, the only exception is send where we don't
		 * want to block transaction commits for a long time, so
		 * we need to clone the commit root in order to avoid races
		 * with transaction commits that create a snapshot of one of
		 * the roots used by a send operation.
		 */
		if (p->need_commit_sem) {
			down_read(&fs_info->commit_root_sem);
			b = btrfs_clone_extent_buffer(root->commit_root);
			up_read(&fs_info->commit_root_sem);
			if (!b)
				return ERR_PTR(-ENOMEM);

		} else {
			b = root->commit_root;
			atomic_inc(&b->refs);
		}
		level = btrfs_header_level(b);
		/*
		 * Ensure that all callers have set skip_locking when
		 * p->search_commit_root = 1.
		 */
		ASSERT(p->skip_locking == 1);

		goto out;
	}

	if (p->skip_locking) {
		b = btrfs_root_node(root);
		level = btrfs_header_level(b);
		goto out;
	}

	/*
	 * If the level is set to maximum, we can skip trying to get the read
	 * lock.
	 */
	if (write_lock_level < BTRFS_MAX_LEVEL) {
		/*
		 * We don't know the level of the root node until we actually
		 * have it read locked
		 */
		b = btrfs_read_lock_root_node(root);
		level = btrfs_header_level(b);
		if (level > write_lock_level)
			goto out;

		/* Whoops, must trade for write lock */
		btrfs_tree_read_unlock(b);
		free_extent_buffer(b);
	}

	b = btrfs_lock_root_node(root);
	root_lock = BTRFS_WRITE_LOCK;

	/* The level might have changed, check again */
	level = btrfs_header_level(b);

out:
	p->nodes[level] = b;
	if (!p->skip_locking)
		p->locks[level] = root_lock;
	/*
	 * Callers are responsible for dropping b's references.
	 */
	return b;
}
/*
 * btrfs_search_slot - look for a key in a tree and perform necessary
 * modifications to preserve tree invariants.
 *
 * @trans:	Handle of transaction, used when modifying the tree
 * @p:		Holds all btree nodes along the search path
 * @root:	The root node of the tree
 * @key:	The key we are looking for
 * @ins_len:	Indicates purpose of search, for inserts it is 1, for
 *		deletions it's -1. 0 for plain searches
 * @cow:	boolean should CoW operations be performed. Must always be 1
 *		when modifying the tree.
 *
 * If @ins_len > 0, nodes and leaves will be split as we walk down the tree.
 * If @ins_len < 0, nodes will be merged as we walk down the tree (if possible)
 *
 * If @key is found, 0 is returned and you can find the item in the leaf level
 * of the path (level 0)
 *
 * If @key isn't found, 1 is returned and the leaf level of the path (level 0)
 * points to the slot where it should be inserted
 *
 * If an error is encountered while searching the tree a negative error number
 * is returned
 */
int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      const struct btrfs_key *key, struct btrfs_path *p,
		      int ins_len, int cow)
{
	struct extent_buffer *b;
	int slot;
	int ret;
	int err;
	int level;
	int lowest_unlock = 1;
	/* everything at write_lock_level or lower must be write locked */
	int write_lock_level = 0;
	u8 lowest_level = 0;
	int min_write_lock_level;
	int prev_cmp;

	lowest_level = p->lowest_level;
	WARN_ON(lowest_level && ins_len > 0);
	WARN_ON(p->nodes[0] != NULL);
	BUG_ON(!cow && ins_len);

	if (ins_len < 0) {
		lowest_unlock = 2;

		/* when we are removing items, we might have to go up to level
		 * two as we update tree pointers  Make sure we keep write
		 * for those levels as well
		 */
		write_lock_level = 2;
	} else if (ins_len > 0) {
		/*
		 * for inserting items, make sure we have a write lock on
		 * level 1 so we can update keys
		 */
		write_lock_level = 1;
	}

	if (!cow)
		write_lock_level = -1;

	if (cow && (p->keep_locks || p->lowest_level))
		write_lock_level = BTRFS_MAX_LEVEL;

	min_write_lock_level = write_lock_level;

again:
	prev_cmp = -1;
	b = btrfs_search_slot_get_root(root, p, write_lock_level);
	if (IS_ERR(b)) {
		ret = PTR_ERR(b);
		goto done;
	}

	while (b) {
		int dec = 0;

		level = btrfs_header_level(b);

		if (cow) {
			bool last_level = (level == (BTRFS_MAX_LEVEL - 1));

			/*
			 * if we don't really need to cow this block
			 * then we don't want to set the path blocking,
			 * so we test it here
			 */
			if (!should_cow_block(trans, root, b)) {
				trans->dirty = true;
				goto cow_done;
			}

			/*
			 * must have write locks on this node and the
			 * parent
			 */
			if (level > write_lock_level ||
			    (level + 1 > write_lock_level &&
			    level + 1 < BTRFS_MAX_LEVEL &&
			    p->nodes[level + 1])) {
				write_lock_level = level + 1;
				btrfs_release_path(p);
				goto again;
			}

			btrfs_set_path_blocking(p);
			if (last_level)
				err = btrfs_cow_block(trans, root, b, NULL, 0,
						      &b);
			else
				err = btrfs_cow_block(trans, root, b,
						      p->nodes[level + 1],
						      p->slots[level + 1], &b);
			if (err) {
				ret = err;
				goto done;
			}
		}
cow_done:
		p->nodes[level] = b;
		/*
		 * Leave path with blocking locks to avoid massive
		 * lock context switch, this is made on purpose.
		 */

		/*
		 * we have a lock on b and as long as we aren't changing
		 * the tree, there is no way for the items in b to change.
		 * It is safe to drop the lock on our parent before we
		 * go through the expensive btree search on b.
		 *
		 * If we're inserting or deleting (ins_len != 0), then we might
		 * be changing slot zero, which may require changing the parent.
		 * So, we can't drop the lock until after we know which slot
		 * we're operating on.
		 */
		if (!ins_len && !p->keep_locks) {
			int u = level + 1;

			if (u < BTRFS_MAX_LEVEL && p->locks[u]) {
				btrfs_tree_unlock_rw(p->nodes[u], p->locks[u]);
				p->locks[u] = 0;
			}
		}

		ret = key_search(b, key, level, &prev_cmp, &slot);
		if (ret < 0)
			goto done;

		if (level == 0) {
			p->slots[level] = slot;
			if (ins_len > 0 &&
			    btrfs_leaf_free_space(b) < ins_len) {
				if (write_lock_level < 1) {
					write_lock_level = 1;
					btrfs_release_path(p);
					goto again;
				}

				btrfs_set_path_blocking(p);
				err = split_leaf(trans, root, key,
						 p, ins_len, ret == 0);

				BUG_ON(err > 0);
				if (err) {
					ret = err;
					goto done;
				}
			}
			if (!p->search_for_split)
				unlock_up(p, level, lowest_unlock,
					  min_write_lock_level, NULL);
			goto done;
		}
		if (ret && slot > 0) {
			dec = 1;
			slot--;
		}
		p->slots[level] = slot;
		err = setup_nodes_for_search(trans, root, p, b, level, ins_len,
					     &write_lock_level);
		if (err == -EAGAIN)
			goto again;
		if (err) {
			ret = err;
			goto done;
		}
		b = p->nodes[level];
		slot = p->slots[level];

		/*
		 * Slot 0 is special, if we change the key we have to update
		 * the parent pointer which means we must have a write lock on
		 * the parent
		 */
		if (slot == 0 && ins_len && write_lock_level < level + 1) {
			write_lock_level = level + 1;
			btrfs_release_path(p);
			goto again;
		}

		unlock_up(p, level, lowest_unlock, min_write_lock_level,
			  &write_lock_level);

		if (level == lowest_level) {
			if (dec)
				p->slots[level]++;
			goto done;
		}

		err = read_block_for_search(root, p, &b, level, slot, key);
		if (err == -EAGAIN)
			goto again;
		if (err) {
			ret = err;
			goto done;
		}

		if (!p->skip_locking) {
			level = btrfs_header_level(b);
			if (level <= write_lock_level) {
				if (!btrfs_try_tree_write_lock(b)) {
					btrfs_set_path_blocking(p);
					btrfs_tree_lock(b);
				}
				p->locks[level] = BTRFS_WRITE_LOCK;
			} else {
				if (!btrfs_tree_read_lock_atomic(b)) {
					btrfs_set_path_blocking(p);
					btrfs_tree_read_lock(b);
				}
				p->locks[level] = BTRFS_READ_LOCK;
			}
			p->nodes[level] = b;
		}
	}
	ret = 1;
done:
	/*
	 * we don't really know what they plan on doing with the path
	 * from here on, so for now just mark it as blocking
	 */
	if (!p->leave_spinning)
		btrfs_set_path_blocking(p);
	if (ret < 0 && !p->skip_release_on_error)
		btrfs_release_path(p);
	return ret;
}
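/*
 * Illustrative usage sketch (not part of the original file): a plain
 * lookup passes ins_len == 0 and cow == 0 and needs no transaction,
 * while an insert reserves room for the new item plus its header:
 *
 *	// read-only search
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *
 *	// search for an insert of 'data_size' bytes
 *	ret = btrfs_search_slot(trans, root, &key, path,
 *				data_size + sizeof(struct btrfs_item), 1);
 *	if (ret == 0) {
 *		// the key already exists at path->slots[0]
 *	} else if (ret == 1) {
 *		// path points at the slot where the key belongs
 *	}
 */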
/*
 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
 * current state of the tree together with the operations recorded in the tree
 * modification log to search for the key in a previous version of this tree, as
 * denoted by the time_seq parameter.
 *
 * Naturally, there is no support for insert, delete or cow operations.
 *
 * The resulting path and return value will be set up as if we called
 * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
 */
2901 int btrfs_search_old_slot(struct btrfs_root
*root
, const struct btrfs_key
*key
,
2902 struct btrfs_path
*p
, u64 time_seq
)
2904 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
2905 struct extent_buffer
*b
;
2910 int lowest_unlock
= 1;
2911 u8 lowest_level
= 0;
2914 lowest_level
= p
->lowest_level
;
2915 WARN_ON(p
->nodes
[0] != NULL
);
2917 if (p
->search_commit_root
) {
2919 return btrfs_search_slot(NULL
, root
, key
, p
, 0, 0);
2923 b
= get_old_root(root
, time_seq
);
2928 level
= btrfs_header_level(b
);
2929 p
->locks
[level
] = BTRFS_READ_LOCK
;
2934 level
= btrfs_header_level(b
);
2935 p
->nodes
[level
] = b
;
2938 * we have a lock on b and as long as we aren't changing
2939 * the tree, there is no way to for the items in b to change.
2940 * It is safe to drop the lock on our parent before we
2941 * go through the expensive btree search on b.
2943 btrfs_unlock_up_safe(p
, level
+ 1);
2946 * Since we can unwind ebs we want to do a real search every
2950 ret
= key_search(b
, key
, level
, &prev_cmp
, &slot
);
2955 p
->slots
[level
] = slot
;
2956 unlock_up(p
, level
, lowest_unlock
, 0, NULL
);
2960 if (ret
&& slot
> 0) {
2964 p
->slots
[level
] = slot
;
2965 unlock_up(p
, level
, lowest_unlock
, 0, NULL
);
2967 if (level
== lowest_level
) {
2973 err
= read_block_for_search(root
, p
, &b
, level
, slot
, key
);
2981 level
= btrfs_header_level(b
);
2982 if (!btrfs_tree_read_lock_atomic(b
)) {
2983 btrfs_set_path_blocking(p
);
2984 btrfs_tree_read_lock(b
);
2986 b
= tree_mod_log_rewind(fs_info
, p
, b
, time_seq
);
2991 p
->locks
[level
] = BTRFS_READ_LOCK
;
2992 p
->nodes
[level
] = b
;
2996 if (!p
->leave_spinning
)
2997 btrfs_set_path_blocking(p
);
2999 btrfs_release_path(p
);
/*
 * helper to use instead of search slot if no exact match is needed but
 * instead the next or previous item should be returned.
 * When find_higher is true, the next higher item is returned, the next lower
 * otherwise.
 * When return_any and find_higher are both true, and no higher item is found,
 * return the next lower instead.
 * When return_any is true and find_higher is false, and no lower item is
 * found, return the next higher instead.
 * It returns 0 if any item is found, 1 if none is found (tree empty), and
 * < 0 on error
 */
3016 int btrfs_search_slot_for_read(struct btrfs_root
*root
,
3017 const struct btrfs_key
*key
,
3018 struct btrfs_path
*p
, int find_higher
,
3022 struct extent_buffer
*leaf
;
3025 ret
= btrfs_search_slot(NULL
, root
, key
, p
, 0, 0);
3029 * a return value of 1 means the path is at the position where the
3030 * item should be inserted. Normally this is the next bigger item,
3031 * but in case the previous item is the last in a leaf, path points
3032 * to the first free slot in the previous leaf, i.e. at an invalid
3038 if (p
->slots
[0] >= btrfs_header_nritems(leaf
)) {
3039 ret
= btrfs_next_leaf(root
, p
);
3045 * no higher item found, return the next
3050 btrfs_release_path(p
);
3054 if (p
->slots
[0] == 0) {
3055 ret
= btrfs_prev_leaf(root
, p
);
3060 if (p
->slots
[0] == btrfs_header_nritems(leaf
))
3067 * no lower item found, return the next
3072 btrfs_release_path(p
);
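/*
 * Illustrative usage sketch (not part of the original file): finding the
 * first item at or after 'key', falling back to the previous item when
 * nothing higher exists (find_higher = 1, return_any = 1):
 *
 *	ret = btrfs_search_slot_for_read(root, &key, path, 1, 1);
 *	if (ret == 0)
 *		btrfs_item_key_to_cpu(path->nodes[0], &found,
 *				      path->slots[0]);
 */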
/*
 * adjust the pointers going up the tree, starting at level
 * making sure the right key of each node points to 'key'.
 * This is used after shifting pointers to the left, so it stops
 * fixing up pointers when a given leaf/node is not in slot 0 of the
 * higher levels
 *
 */
static void fixup_low_keys(struct btrfs_path *path,
			   struct btrfs_disk_key *key, int level)
{
	int i;
	struct extent_buffer *t;
	int ret;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		int tslot = path->slots[i];

		if (!path->nodes[i])
			break;
		t = path->nodes[i];
		ret = tree_mod_log_insert_key(t, tslot, MOD_LOG_KEY_REPLACE,
				GFP_ATOMIC);
		BUG_ON(ret < 0);
		btrfs_set_node_key(t, key, tslot);
		btrfs_mark_buffer_dirty(path->nodes[i]);
		if (tslot != 0)
			break;
	}
}
/*
 * update item key.
 *
 * This function isn't completely safe. It's the caller's responsibility
 * that the new key won't break the order
 */
void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
			     struct btrfs_path *path,
			     const struct btrfs_key *new_key)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *eb;
	int slot;

	eb = path->nodes[0];
	slot = path->slots[0];
	if (slot > 0) {
		btrfs_item_key(eb, &disk_key, slot - 1);
		if (unlikely(comp_keys(&disk_key, new_key) >= 0)) {
			btrfs_crit(fs_info,
		"slot %u key (%llu %u %llu) new key (%llu %u %llu)",
				   slot, btrfs_disk_key_objectid(&disk_key),
				   btrfs_disk_key_type(&disk_key),
				   btrfs_disk_key_offset(&disk_key),
				   new_key->objectid, new_key->type,
				   new_key->offset);
			btrfs_print_leaf(eb);
			BUG();
		}
	}
	if (slot < btrfs_header_nritems(eb) - 1) {
		btrfs_item_key(eb, &disk_key, slot + 1);
		if (unlikely(comp_keys(&disk_key, new_key) <= 0)) {
			btrfs_crit(fs_info,
		"slot %u key (%llu %u %llu) new key (%llu %u %llu)",
				   slot, btrfs_disk_key_objectid(&disk_key),
				   btrfs_disk_key_type(&disk_key),
				   btrfs_disk_key_offset(&disk_key),
				   new_key->objectid, new_key->type,
				   new_key->offset);
			btrfs_print_leaf(eb);
			BUG();
		}
	}

	btrfs_cpu_key_to_disk(&disk_key, new_key);
	btrfs_set_item_key(eb, &disk_key, slot);
	btrfs_mark_buffer_dirty(eb);
	if (slot == 0)
		fixup_low_keys(path, &disk_key, 1);
}
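/*
 * Illustrative usage sketch (not part of the original file): a typical
 * caller shrinks the range an existing item covers by rewriting its key
 * in place, e.g. moving a file extent key forward.  The new key must
 * still sort between the neighbouring items:
 *
 *	struct btrfs_key new_key = key;
 *
 *	new_key.offset = key.offset + len;
 *	btrfs_set_item_key_safe(fs_info, path, &new_key);
 */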
/*
 * try to push data from one node into the next node left in the
 * tree.
 *
 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
 * error, and > 0 if there was no room in the left hand block.
 */
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct extent_buffer *dst,
			  struct extent_buffer *src, int empty)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int push_items = 0;
	int src_nritems;
	int dst_nritems;
	int ret = 0;

	src_nritems = btrfs_header_nritems(src);
	dst_nritems = btrfs_header_nritems(dst);
	push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
	WARN_ON(btrfs_header_generation(src) != trans->transid);
	WARN_ON(btrfs_header_generation(dst) != trans->transid);

	if (!empty && src_nritems <= 8)
		return 1;

	if (push_items <= 0)
		return 1;

	if (empty) {
		push_items = min(src_nritems, push_items);
		if (push_items < src_nritems) {
			/* leave at least 8 pointers in the node if
			 * we aren't going to empty it
			 */
			if (src_nritems - push_items < 8) {
				if (push_items <= 8)
					return 1;
				push_items -= 8;
			}
		}
	} else
		push_items = min(src_nritems - 8, push_items);

	ret = tree_mod_log_eb_copy(dst, src, dst_nritems, 0, push_items);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		return ret;
	}
	copy_extent_buffer(dst, src,
			   btrfs_node_key_ptr_offset(dst_nritems),
			   btrfs_node_key_ptr_offset(0),
			   push_items * sizeof(struct btrfs_key_ptr));

	if (push_items < src_nritems) {
		/*
		 * Don't call tree_mod_log_insert_move here, key removal was
		 * already fully logged by tree_mod_log_eb_copy above.
		 */
		memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
				      btrfs_node_key_ptr_offset(push_items),
				      (src_nritems - push_items) *
				      sizeof(struct btrfs_key_ptr));
	}
	btrfs_set_header_nritems(src, src_nritems - push_items);
	btrfs_set_header_nritems(dst, dst_nritems + push_items);
	btrfs_mark_buffer_dirty(src);
	btrfs_mark_buffer_dirty(dst);

	return ret;
}
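/*
 * Worked example (illustrative, not from the original file): with the
 * default 16K nodesize, BTRFS_NODEPTRS_PER_BLOCK() is roughly
 * (16384 - sizeof(struct btrfs_header)) / sizeof(struct btrfs_key_ptr),
 * i.e. about 493 pointers.  If dst already holds 480 pointers, at most
 * 13 can be pushed left, and a non-empty src is additionally required
 * to keep at least 8 pointers of its own.
 */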
/*
 * try to push data from one node into the next node right in the
 * tree.
 *
 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
 * error, and > 0 if there was no room in the right hand block.
 *
 * this will only push up to 1/2 the contents of the left node over
 */
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct extent_buffer *dst,
			      struct extent_buffer *src)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int push_items = 0;
	int max_push;
	int src_nritems;
	int dst_nritems;
	int ret = 0;

	WARN_ON(btrfs_header_generation(src) != trans->transid);
	WARN_ON(btrfs_header_generation(dst) != trans->transid);

	src_nritems = btrfs_header_nritems(src);
	dst_nritems = btrfs_header_nritems(dst);
	push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
	if (push_items <= 0)
		return 1;

	if (src_nritems < 4)
		return 1;

	max_push = src_nritems / 2 + 1;
	/* don't try to empty the node */
	if (max_push >= src_nritems)
		return 1;

	if (max_push < push_items)
		push_items = max_push;

	ret = tree_mod_log_insert_move(dst, push_items, 0, dst_nritems);
	BUG_ON(ret < 0);
	memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
				      btrfs_node_key_ptr_offset(0),
				      (dst_nritems) *
				      sizeof(struct btrfs_key_ptr));

	ret = tree_mod_log_eb_copy(dst, src, 0, src_nritems - push_items,
				   push_items);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		return ret;
	}
	copy_extent_buffer(dst, src,
			   btrfs_node_key_ptr_offset(0),
			   btrfs_node_key_ptr_offset(src_nritems - push_items),
			   push_items * sizeof(struct btrfs_key_ptr));

	btrfs_set_header_nritems(src, src_nritems - push_items);
	btrfs_set_header_nritems(dst, dst_nritems + push_items);

	btrfs_mark_buffer_dirty(src);
	btrfs_mark_buffer_dirty(dst);

	return ret;
}
/*
 * helper function to insert a new root level in the tree.
 * A new node is allocated, and a single item is inserted to
 * point to the existing root
 *
 * returns zero on success or < 0 on failure.
 */
3310 static noinline
int insert_new_root(struct btrfs_trans_handle
*trans
,
3311 struct btrfs_root
*root
,
3312 struct btrfs_path
*path
, int level
)
3314 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
3316 struct extent_buffer
*lower
;
3317 struct extent_buffer
*c
;
3318 struct extent_buffer
*old
;
3319 struct btrfs_disk_key lower_key
;
3322 BUG_ON(path
->nodes
[level
]);
3323 BUG_ON(path
->nodes
[level
-1] != root
->node
);
3325 lower
= path
->nodes
[level
-1];
3327 btrfs_item_key(lower
, &lower_key
, 0);
3329 btrfs_node_key(lower
, &lower_key
, 0);
3331 c
= alloc_tree_block_no_bg_flush(trans
, root
, 0, &lower_key
, level
,
3332 root
->node
->start
, 0);
3336 root_add_used(root
, fs_info
->nodesize
);
3338 btrfs_set_header_nritems(c
, 1);
3339 btrfs_set_node_key(c
, &lower_key
, 0);
3340 btrfs_set_node_blockptr(c
, 0, lower
->start
);
3341 lower_gen
= btrfs_header_generation(lower
);
3342 WARN_ON(lower_gen
!= trans
->transid
);
3344 btrfs_set_node_ptr_generation(c
, 0, lower_gen
);
3346 btrfs_mark_buffer_dirty(c
);
3349 ret
= tree_mod_log_insert_root(root
->node
, c
, 0);
3351 rcu_assign_pointer(root
->node
, c
);
3353 /* the super has an extra ref to root->node */
3354 free_extent_buffer(old
);
3356 add_root_to_dirty_list(root
);
3357 atomic_inc(&c
->refs
);
3358 path
->nodes
[level
] = c
;
3359 path
->locks
[level
] = BTRFS_WRITE_LOCK_BLOCKING
;
3360 path
->slots
[level
] = 0;
/*
 * worker function to insert a single pointer in a node.
 * the node should have enough room for the pointer already
 *
 * slot and level indicate where you want the key to go, and
 * blocknr is the block the key points to.
 */
static void insert_ptr(struct btrfs_trans_handle *trans,
		       struct btrfs_path *path,
		       struct btrfs_disk_key *key, u64 bytenr,
		       int slot, int level)
{
	struct extent_buffer *lower;
	int nritems;
	int ret;

	BUG_ON(!path->nodes[level]);
	btrfs_assert_tree_locked(path->nodes[level]);
	lower = path->nodes[level];
	nritems = btrfs_header_nritems(lower);
	BUG_ON(slot > nritems);
	BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(trans->fs_info));
	if (slot != nritems) {
		if (level) {
			ret = tree_mod_log_insert_move(lower, slot + 1, slot,
					nritems - slot);
			BUG_ON(ret < 0);
		}
		memmove_extent_buffer(lower,
			      btrfs_node_key_ptr_offset(slot + 1),
			      btrfs_node_key_ptr_offset(slot),
			      (nritems - slot) * sizeof(struct btrfs_key_ptr));
	}
	if (level) {
		ret = tree_mod_log_insert_key(lower, slot, MOD_LOG_KEY_ADD,
				GFP_NOFS);
		BUG_ON(ret < 0);
	}
	btrfs_set_node_key(lower, key, slot);
	btrfs_set_node_blockptr(lower, slot, bytenr);
	WARN_ON(trans->transid == 0);
	btrfs_set_node_ptr_generation(lower, slot, trans->transid);
	btrfs_set_header_nritems(lower, nritems + 1);
	btrfs_mark_buffer_dirty(lower);
}
/*
 * split the node at the specified level in path in two.
 * The path is corrected to point to the appropriate node after the split
 *
 * Before splitting this tries to make some room in the node by pushing
 * left and right, if either one works, it returns right away.
 *
 * returns 0 on success and < 0 on failure
 */
3419 static noinline
int split_node(struct btrfs_trans_handle
*trans
,
3420 struct btrfs_root
*root
,
3421 struct btrfs_path
*path
, int level
)
3423 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
3424 struct extent_buffer
*c
;
3425 struct extent_buffer
*split
;
3426 struct btrfs_disk_key disk_key
;
3431 c
= path
->nodes
[level
];
3432 WARN_ON(btrfs_header_generation(c
) != trans
->transid
);
3433 if (c
== root
->node
) {
3435 * trying to split the root, lets make a new one
3437 * tree mod log: We don't log_removal old root in
3438 * insert_new_root, because that root buffer will be kept as a
3439 * normal node. We are going to log removal of half of the
3440 * elements below with tree_mod_log_eb_copy. We're holding a
3441 * tree lock on the buffer, which is why we cannot race with
3442 * other tree_mod_log users.
3444 ret
= insert_new_root(trans
, root
, path
, level
+ 1);
3448 ret
= push_nodes_for_insert(trans
, root
, path
, level
);
3449 c
= path
->nodes
[level
];
3450 if (!ret
&& btrfs_header_nritems(c
) <
3451 BTRFS_NODEPTRS_PER_BLOCK(fs_info
) - 3)
3457 c_nritems
= btrfs_header_nritems(c
);
3458 mid
= (c_nritems
+ 1) / 2;
3459 btrfs_node_key(c
, &disk_key
, mid
);
3461 split
= alloc_tree_block_no_bg_flush(trans
, root
, 0, &disk_key
, level
,
3464 return PTR_ERR(split
);
3466 root_add_used(root
, fs_info
->nodesize
);
3467 ASSERT(btrfs_header_level(c
) == level
);
3469 ret
= tree_mod_log_eb_copy(split
, c
, 0, mid
, c_nritems
- mid
);
3471 btrfs_abort_transaction(trans
, ret
);
3474 copy_extent_buffer(split
, c
,
3475 btrfs_node_key_ptr_offset(0),
3476 btrfs_node_key_ptr_offset(mid
),
3477 (c_nritems
- mid
) * sizeof(struct btrfs_key_ptr
));
3478 btrfs_set_header_nritems(split
, c_nritems
- mid
);
3479 btrfs_set_header_nritems(c
, mid
);
3482 btrfs_mark_buffer_dirty(c
);
3483 btrfs_mark_buffer_dirty(split
);
3485 insert_ptr(trans
, path
, &disk_key
, split
->start
,
3486 path
->slots
[level
+ 1] + 1, level
+ 1);
3488 if (path
->slots
[level
] >= mid
) {
3489 path
->slots
[level
] -= mid
;
3490 btrfs_tree_unlock(c
);
3491 free_extent_buffer(c
);
3492 path
->nodes
[level
] = split
;
3493 path
->slots
[level
+ 1] += 1;
3495 btrfs_tree_unlock(split
);
3496 free_extent_buffer(split
);
/*
 * how many bytes are required to store the items in a leaf.  start
 * and nr indicate which items in the leaf to check.  This totals up the
 * space used both by the item structs and the item data
 */
static int leaf_space_used(struct extent_buffer *l, int start, int nr)
{
	struct btrfs_item *start_item;
	struct btrfs_item *end_item;
	struct btrfs_map_token token;
	int data_len;
	int nritems = btrfs_header_nritems(l);
	int end = min(nritems, start + nr) - 1;

	if (!nr)
		return 0;
	btrfs_init_map_token(&token, l);
	start_item = btrfs_item_nr(start);
	end_item = btrfs_item_nr(end);
	data_len = btrfs_token_item_offset(l, start_item, &token) +
		btrfs_token_item_size(l, start_item, &token);
	data_len = data_len - btrfs_token_item_offset(l, end_item, &token);
	data_len += sizeof(struct btrfs_item) * nr;
	WARN_ON(data_len < 0);
	return data_len;
}

/*
 * The space between the end of the leaf items and
 * the start of the leaf data.  IOW, how much room
 * the leaf has left for both items and data
 */
noinline int btrfs_leaf_free_space(struct extent_buffer *leaf)
{
	struct btrfs_fs_info *fs_info = leaf->fs_info;
	int nritems = btrfs_header_nritems(leaf);
	int ret;

	ret = BTRFS_LEAF_DATA_SIZE(fs_info) - leaf_space_used(leaf, 0, nritems);
	if (ret < 0) {
		btrfs_crit(fs_info,
			   "leaf free space ret %d, leaf data size %lu, used %d nritems %d",
			   ret,
			   (unsigned long) BTRFS_LEAF_DATA_SIZE(fs_info),
			   leaf_space_used(leaf, 0, nritems), nritems);
	}
	return ret;
}
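/*
 * Illustrative sketch (not part of the original file): the insert path
 * uses this helper to decide whether a new item fits without splitting.
 * An item costs its data bytes plus one struct btrfs_item header:
 *
 *	if (btrfs_leaf_free_space(leaf) <
 *	    data_size + sizeof(struct btrfs_item)) {
 *		// not enough room, the leaf must be split first
 *	}
 */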
/*
 * min slot controls the lowest index we're willing to push to the
 * right.  We'll push up to and including min_slot, but no lower
 */
3554 static noinline
int __push_leaf_right(struct btrfs_path
*path
,
3555 int data_size
, int empty
,
3556 struct extent_buffer
*right
,
3557 int free_space
, u32 left_nritems
,
3560 struct btrfs_fs_info
*fs_info
= right
->fs_info
;
3561 struct extent_buffer
*left
= path
->nodes
[0];
3562 struct extent_buffer
*upper
= path
->nodes
[1];
3563 struct btrfs_map_token token
;
3564 struct btrfs_disk_key disk_key
;
3569 struct btrfs_item
*item
;
3578 nr
= max_t(u32
, 1, min_slot
);
3580 if (path
->slots
[0] >= left_nritems
)
3581 push_space
+= data_size
;
3583 slot
= path
->slots
[1];
3584 i
= left_nritems
- 1;
3586 item
= btrfs_item_nr(i
);
3588 if (!empty
&& push_items
> 0) {
3589 if (path
->slots
[0] > i
)
3591 if (path
->slots
[0] == i
) {
3592 int space
= btrfs_leaf_free_space(left
);
3594 if (space
+ push_space
* 2 > free_space
)
3599 if (path
->slots
[0] == i
)
3600 push_space
+= data_size
;
3602 this_item_size
= btrfs_item_size(left
, item
);
3603 if (this_item_size
+ sizeof(*item
) + push_space
> free_space
)
3607 push_space
+= this_item_size
+ sizeof(*item
);
3613 if (push_items
== 0)
3616 WARN_ON(!empty
&& push_items
== left_nritems
);
3618 /* push left to right */
3619 right_nritems
= btrfs_header_nritems(right
);
3621 push_space
= btrfs_item_end_nr(left
, left_nritems
- push_items
);
3622 push_space
-= leaf_data_end(left
);
3624 /* make room in the right data area */
3625 data_end
= leaf_data_end(right
);
3626 memmove_extent_buffer(right
,
3627 BTRFS_LEAF_DATA_OFFSET
+ data_end
- push_space
,
3628 BTRFS_LEAF_DATA_OFFSET
+ data_end
,
3629 BTRFS_LEAF_DATA_SIZE(fs_info
) - data_end
);
3631 /* copy from the left data area */
3632 copy_extent_buffer(right
, left
, BTRFS_LEAF_DATA_OFFSET
+
3633 BTRFS_LEAF_DATA_SIZE(fs_info
) - push_space
,
3634 BTRFS_LEAF_DATA_OFFSET
+ leaf_data_end(left
),
3637 memmove_extent_buffer(right
, btrfs_item_nr_offset(push_items
),
3638 btrfs_item_nr_offset(0),
3639 right_nritems
* sizeof(struct btrfs_item
));
3641 /* copy the items from left to right */
3642 copy_extent_buffer(right
, left
, btrfs_item_nr_offset(0),
3643 btrfs_item_nr_offset(left_nritems
- push_items
),
3644 push_items
* sizeof(struct btrfs_item
));
3646 /* update the item pointers */
3647 btrfs_init_map_token(&token
, right
);
3648 right_nritems
+= push_items
;
3649 btrfs_set_header_nritems(right
, right_nritems
);
3650 push_space
= BTRFS_LEAF_DATA_SIZE(fs_info
);
3651 for (i
= 0; i
< right_nritems
; i
++) {
3652 item
= btrfs_item_nr(i
);
3653 push_space
-= btrfs_token_item_size(right
, item
, &token
);
3654 btrfs_set_token_item_offset(right
, item
, push_space
, &token
);
3657 left_nritems
-= push_items
;
3658 btrfs_set_header_nritems(left
, left_nritems
);
3661 btrfs_mark_buffer_dirty(left
);
3663 btrfs_clean_tree_block(left
);
3665 btrfs_mark_buffer_dirty(right
);
3667 btrfs_item_key(right
, &disk_key
, 0);
3668 btrfs_set_node_key(upper
, &disk_key
, slot
+ 1);
3669 btrfs_mark_buffer_dirty(upper
);
3671 /* then fixup the leaf pointer in the path */
3672 if (path
->slots
[0] >= left_nritems
) {
3673 path
->slots
[0] -= left_nritems
;
3674 if (btrfs_header_nritems(path
->nodes
[0]) == 0)
3675 btrfs_clean_tree_block(path
->nodes
[0]);
3676 btrfs_tree_unlock(path
->nodes
[0]);
3677 free_extent_buffer(path
->nodes
[0]);
3678 path
->nodes
[0] = right
;
3679 path
->slots
[1] += 1;
3681 btrfs_tree_unlock(right
);
3682 free_extent_buffer(right
);
3687 btrfs_tree_unlock(right
);
3688 free_extent_buffer(right
);
/*
 * push some data in the path leaf to the right, trying to free up at
 * least data_size bytes.  returns zero if the push worked, nonzero otherwise
 *
 * returns 1 if the push failed because the other node didn't have enough
 * room, 0 if everything worked out and < 0 if there were major errors.
 *
 * this will push starting from min_slot to the end of the leaf.  It won't
 * push any slot lower than min_slot
 */
3702 static int push_leaf_right(struct btrfs_trans_handle
*trans
, struct btrfs_root
3703 *root
, struct btrfs_path
*path
,
3704 int min_data_size
, int data_size
,
3705 int empty
, u32 min_slot
)
3707 struct extent_buffer
*left
= path
->nodes
[0];
3708 struct extent_buffer
*right
;
3709 struct extent_buffer
*upper
;
3715 if (!path
->nodes
[1])
3718 slot
= path
->slots
[1];
3719 upper
= path
->nodes
[1];
3720 if (slot
>= btrfs_header_nritems(upper
) - 1)
3723 btrfs_assert_tree_locked(path
->nodes
[1]);
3725 right
= btrfs_read_node_slot(upper
, slot
+ 1);
3727 * slot + 1 is not valid or we fail to read the right node,
3728 * no big deal, just return.
3733 btrfs_tree_lock(right
);
3734 btrfs_set_lock_blocking_write(right
);
3736 free_space
= btrfs_leaf_free_space(right
);
3737 if (free_space
< data_size
)
3740 /* cow and double check */
3741 ret
= btrfs_cow_block(trans
, root
, right
, upper
,
3746 free_space
= btrfs_leaf_free_space(right
);
3747 if (free_space
< data_size
)
3750 left_nritems
= btrfs_header_nritems(left
);
3751 if (left_nritems
== 0)
3754 if (path
->slots
[0] == left_nritems
&& !empty
) {
3755 /* Key greater than all keys in the leaf, right neighbor has
3756 * enough room for it and we're not emptying our leaf to delete
3757 * it, therefore use right neighbor to insert the new item and
3758 * no need to touch/dirty our left leaf. */
3759 btrfs_tree_unlock(left
);
3760 free_extent_buffer(left
);
3761 path
->nodes
[0] = right
;
3767 return __push_leaf_right(path
, min_data_size
, empty
,
3768 right
, free_space
, left_nritems
, min_slot
);
3770 btrfs_tree_unlock(right
);
3771 free_extent_buffer(right
);
/*
 * push some data in the path leaf to the left, trying to free up at
 * least data_size bytes.  returns zero if the push worked, nonzero otherwise
 *
 * max_slot can put a limit on how far into the leaf we'll push items.  The
 * item at 'max_slot' won't be touched.  Use (u32)-1 to make us do all the
 * items
 */
3783 static noinline
int __push_leaf_left(struct btrfs_path
*path
, int data_size
,
3784 int empty
, struct extent_buffer
*left
,
3785 int free_space
, u32 right_nritems
,
3788 struct btrfs_fs_info
*fs_info
= left
->fs_info
;
3789 struct btrfs_disk_key disk_key
;
3790 struct extent_buffer
*right
= path
->nodes
[0];
3794 struct btrfs_item
*item
;
3795 u32 old_left_nritems
;
3799 u32 old_left_item_size
;
3800 struct btrfs_map_token token
;
3803 nr
= min(right_nritems
, max_slot
);
3805 nr
= min(right_nritems
- 1, max_slot
);
3807 for (i
= 0; i
< nr
; i
++) {
3808 item
= btrfs_item_nr(i
);
3810 if (!empty
&& push_items
> 0) {
3811 if (path
->slots
[0] < i
)
3813 if (path
->slots
[0] == i
) {
3814 int space
= btrfs_leaf_free_space(right
);
3816 if (space
+ push_space
* 2 > free_space
)
3821 if (path
->slots
[0] == i
)
3822 push_space
+= data_size
;
3824 this_item_size
= btrfs_item_size(right
, item
);
3825 if (this_item_size
+ sizeof(*item
) + push_space
> free_space
)
3829 push_space
+= this_item_size
+ sizeof(*item
);
3832 if (push_items
== 0) {
3836 WARN_ON(!empty
&& push_items
== btrfs_header_nritems(right
));
3838 /* push data from right to left */
3839 copy_extent_buffer(left
, right
,
3840 btrfs_item_nr_offset(btrfs_header_nritems(left
)),
3841 btrfs_item_nr_offset(0),
3842 push_items
* sizeof(struct btrfs_item
));
3844 push_space
= BTRFS_LEAF_DATA_SIZE(fs_info
) -
3845 btrfs_item_offset_nr(right
, push_items
- 1);
3847 copy_extent_buffer(left
, right
, BTRFS_LEAF_DATA_OFFSET
+
3848 leaf_data_end(left
) - push_space
,
3849 BTRFS_LEAF_DATA_OFFSET
+
3850 btrfs_item_offset_nr(right
, push_items
- 1),
3852 old_left_nritems
= btrfs_header_nritems(left
);
3853 BUG_ON(old_left_nritems
<= 0);
3855 btrfs_init_map_token(&token
, left
);
3856 old_left_item_size
= btrfs_item_offset_nr(left
, old_left_nritems
- 1);
3857 for (i
= old_left_nritems
; i
< old_left_nritems
+ push_items
; i
++) {
3860 item
= btrfs_item_nr(i
);
3862 ioff
= btrfs_token_item_offset(left
, item
, &token
);
3863 btrfs_set_token_item_offset(left
, item
,
3864 ioff
- (BTRFS_LEAF_DATA_SIZE(fs_info
) - old_left_item_size
),
3867 btrfs_set_header_nritems(left
, old_left_nritems
+ push_items
);
3869 /* fixup right node */
3870 if (push_items
> right_nritems
)
3871 WARN(1, KERN_CRIT
"push items %d nr %u\n", push_items
,
3874 if (push_items
< right_nritems
) {
3875 push_space
= btrfs_item_offset_nr(right
, push_items
- 1) -
3876 leaf_data_end(right
);
3877 memmove_extent_buffer(right
, BTRFS_LEAF_DATA_OFFSET
+
3878 BTRFS_LEAF_DATA_SIZE(fs_info
) - push_space
,
3879 BTRFS_LEAF_DATA_OFFSET
+
3880 leaf_data_end(right
), push_space
);
3882 memmove_extent_buffer(right
, btrfs_item_nr_offset(0),
3883 btrfs_item_nr_offset(push_items
),
3884 (btrfs_header_nritems(right
) - push_items
) *
3885 sizeof(struct btrfs_item
));
3888 btrfs_init_map_token(&token
, right
);
3889 right_nritems
-= push_items
;
3890 btrfs_set_header_nritems(right
, right_nritems
);
3891 push_space
= BTRFS_LEAF_DATA_SIZE(fs_info
);
3892 for (i
= 0; i
< right_nritems
; i
++) {
3893 item
= btrfs_item_nr(i
);
3895 push_space
= push_space
- btrfs_token_item_size(right
,
3897 btrfs_set_token_item_offset(right
, item
, push_space
, &token
);
3900 btrfs_mark_buffer_dirty(left
);
3902 btrfs_mark_buffer_dirty(right
);
3904 btrfs_clean_tree_block(right
);
3906 btrfs_item_key(right
, &disk_key
, 0);
3907 fixup_low_keys(path
, &disk_key
, 1);
3909 /* then fixup the leaf pointer in the path */
3910 if (path
->slots
[0] < push_items
) {
3911 path
->slots
[0] += old_left_nritems
;
3912 btrfs_tree_unlock(path
->nodes
[0]);
3913 free_extent_buffer(path
->nodes
[0]);
3914 path
->nodes
[0] = left
;
3915 path
->slots
[1] -= 1;
3917 btrfs_tree_unlock(left
);
3918 free_extent_buffer(left
);
3919 path
->slots
[0] -= push_items
;
3921 BUG_ON(path
->slots
[0] < 0);
3924 btrfs_tree_unlock(left
);
3925 free_extent_buffer(left
);
/*
 * push some data in the path leaf to the left, trying to free up at
 * least data_size bytes.  returns zero if the push worked, nonzero otherwise
 *
 * max_slot can put a limit on how far into the leaf we'll push items.  The
 * item at 'max_slot' won't be touched.  Use (u32)-1 to make us push all the
 * items
 */
3937 static int push_leaf_left(struct btrfs_trans_handle
*trans
, struct btrfs_root
3938 *root
, struct btrfs_path
*path
, int min_data_size
,
3939 int data_size
, int empty
, u32 max_slot
)
3941 struct extent_buffer
*right
= path
->nodes
[0];
3942 struct extent_buffer
*left
;
3948 slot
= path
->slots
[1];
3951 if (!path
->nodes
[1])
3954 right_nritems
= btrfs_header_nritems(right
);
3955 if (right_nritems
== 0)
3958 btrfs_assert_tree_locked(path
->nodes
[1]);
3960 left
= btrfs_read_node_slot(path
->nodes
[1], slot
- 1);
3962 * slot - 1 is not valid or we fail to read the left node,
3963 * no big deal, just return.
3968 btrfs_tree_lock(left
);
3969 btrfs_set_lock_blocking_write(left
);
3971 free_space
= btrfs_leaf_free_space(left
);
3972 if (free_space
< data_size
) {
3977 /* cow and double check */
3978 ret
= btrfs_cow_block(trans
, root
, left
,
3979 path
->nodes
[1], slot
- 1, &left
);
3981 /* we hit -ENOSPC, but it isn't fatal here */
3987 free_space
= btrfs_leaf_free_space(left
);
3988 if (free_space
< data_size
) {
3993 return __push_leaf_left(path
, min_data_size
,
3994 empty
, left
, free_space
, right_nritems
,
3997 btrfs_tree_unlock(left
);
3998 free_extent_buffer(left
);
/*
 * split the path's leaf in two, making sure there is at least data_size
 * available for the resulting leaf level of the path.
 */
4006 static noinline
void copy_for_split(struct btrfs_trans_handle
*trans
,
4007 struct btrfs_path
*path
,
4008 struct extent_buffer
*l
,
4009 struct extent_buffer
*right
,
4010 int slot
, int mid
, int nritems
)
4012 struct btrfs_fs_info
*fs_info
= trans
->fs_info
;
4016 struct btrfs_disk_key disk_key
;
4017 struct btrfs_map_token token
;
4019 nritems
= nritems
- mid
;
4020 btrfs_set_header_nritems(right
, nritems
);
4021 data_copy_size
= btrfs_item_end_nr(l
, mid
) - leaf_data_end(l
);
4023 copy_extent_buffer(right
, l
, btrfs_item_nr_offset(0),
4024 btrfs_item_nr_offset(mid
),
4025 nritems
* sizeof(struct btrfs_item
));
4027 copy_extent_buffer(right
, l
,
4028 BTRFS_LEAF_DATA_OFFSET
+ BTRFS_LEAF_DATA_SIZE(fs_info
) -
4029 data_copy_size
, BTRFS_LEAF_DATA_OFFSET
+
4030 leaf_data_end(l
), data_copy_size
);
4032 rt_data_off
= BTRFS_LEAF_DATA_SIZE(fs_info
) - btrfs_item_end_nr(l
, mid
);
4034 btrfs_init_map_token(&token
, right
);
4035 for (i
= 0; i
< nritems
; i
++) {
4036 struct btrfs_item
*item
= btrfs_item_nr(i
);
4039 ioff
= btrfs_token_item_offset(right
, item
, &token
);
4040 btrfs_set_token_item_offset(right
, item
,
4041 ioff
+ rt_data_off
, &token
);
4044 btrfs_set_header_nritems(l
, mid
);
4045 btrfs_item_key(right
, &disk_key
, 0);
4046 insert_ptr(trans
, path
, &disk_key
, right
->start
, path
->slots
[1] + 1, 1);
4048 btrfs_mark_buffer_dirty(right
);
4049 btrfs_mark_buffer_dirty(l
);
4050 BUG_ON(path
->slots
[0] != slot
);
4053 btrfs_tree_unlock(path
->nodes
[0]);
4054 free_extent_buffer(path
->nodes
[0]);
4055 path
->nodes
[0] = right
;
4056 path
->slots
[0] -= mid
;
4057 path
->slots
[1] += 1;
4059 btrfs_tree_unlock(right
);
4060 free_extent_buffer(right
);
4063 BUG_ON(path
->slots
[0] < 0);
/*
 * double splits happen when we need to insert a big item in the middle
 * of a leaf.  A double split can leave us with 3 mostly empty leaves:
 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
 *          A                 B                 C
 *
 * We avoid this by trying to push the items on either side of our target
 * into the adjacent leaves.  If all goes well we can avoid the double split
 * completely.
 */
4076 static noinline
int push_for_double_split(struct btrfs_trans_handle
*trans
,
4077 struct btrfs_root
*root
,
4078 struct btrfs_path
*path
,
4085 int space_needed
= data_size
;
4087 slot
= path
->slots
[0];
4088 if (slot
< btrfs_header_nritems(path
->nodes
[0]))
4089 space_needed
-= btrfs_leaf_free_space(path
->nodes
[0]);
4092 * try to push all the items after our slot into the
4095 ret
= push_leaf_right(trans
, root
, path
, 1, space_needed
, 0, slot
);
4102 nritems
= btrfs_header_nritems(path
->nodes
[0]);
4104 * our goal is to get our slot at the start or end of a leaf. If
4105 * we've done so we're done
4107 if (path
->slots
[0] == 0 || path
->slots
[0] == nritems
)
4110 if (btrfs_leaf_free_space(path
->nodes
[0]) >= data_size
)
4113 /* try to push all the items before our slot into the next leaf */
4114 slot
= path
->slots
[0];
4115 space_needed
= data_size
;
4117 space_needed
-= btrfs_leaf_free_space(path
->nodes
[0]);
4118 ret
= push_leaf_left(trans
, root
, path
, 1, space_needed
, 0, slot
);
/*
 * split the path's leaf in two, making sure there is at least data_size
 * available for the resulting leaf level of the path.
 *
 * returns 0 if all went well and < 0 on failure.
 */
4136 static noinline
int split_leaf(struct btrfs_trans_handle
*trans
,
4137 struct btrfs_root
*root
,
4138 const struct btrfs_key
*ins_key
,
4139 struct btrfs_path
*path
, int data_size
,
4142 struct btrfs_disk_key disk_key
;
4143 struct extent_buffer
*l
;
4147 struct extent_buffer
*right
;
4148 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
4152 int num_doubles
= 0;
4153 int tried_avoid_double
= 0;
4156 slot
= path
->slots
[0];
4157 if (extend
&& data_size
+ btrfs_item_size_nr(l
, slot
) +
4158 sizeof(struct btrfs_item
) > BTRFS_LEAF_DATA_SIZE(fs_info
))
4161 /* first try to make some room by pushing left and right */
4162 if (data_size
&& path
->nodes
[1]) {
4163 int space_needed
= data_size
;
4165 if (slot
< btrfs_header_nritems(l
))
4166 space_needed
-= btrfs_leaf_free_space(l
);
4168 wret
= push_leaf_right(trans
, root
, path
, space_needed
,
4169 space_needed
, 0, 0);
4173 space_needed
= data_size
;
4175 space_needed
-= btrfs_leaf_free_space(l
);
4176 wret
= push_leaf_left(trans
, root
, path
, space_needed
,
4177 space_needed
, 0, (u32
)-1);
4183 /* did the pushes work? */
4184 if (btrfs_leaf_free_space(l
) >= data_size
)
4188 if (!path
->nodes
[1]) {
4189 ret
= insert_new_root(trans
, root
, path
, 1);
4196 slot
= path
->slots
[0];
4197 nritems
= btrfs_header_nritems(l
);
4198 mid
= (nritems
+ 1) / 2;
4202 leaf_space_used(l
, mid
, nritems
- mid
) + data_size
>
4203 BTRFS_LEAF_DATA_SIZE(fs_info
)) {
4204 if (slot
>= nritems
) {
4208 if (mid
!= nritems
&&
4209 leaf_space_used(l
, mid
, nritems
- mid
) +
4210 data_size
> BTRFS_LEAF_DATA_SIZE(fs_info
)) {
4211 if (data_size
&& !tried_avoid_double
)
4212 goto push_for_double
;
4218 if (leaf_space_used(l
, 0, mid
) + data_size
>
4219 BTRFS_LEAF_DATA_SIZE(fs_info
)) {
4220 if (!extend
&& data_size
&& slot
== 0) {
4222 } else if ((extend
|| !data_size
) && slot
== 0) {
4226 if (mid
!= nritems
&&
4227 leaf_space_used(l
, mid
, nritems
- mid
) +
4228 data_size
> BTRFS_LEAF_DATA_SIZE(fs_info
)) {
4229 if (data_size
&& !tried_avoid_double
)
4230 goto push_for_double
;
4238 btrfs_cpu_key_to_disk(&disk_key
, ins_key
);
4240 btrfs_item_key(l
, &disk_key
, mid
);
4242 right
= alloc_tree_block_no_bg_flush(trans
, root
, 0, &disk_key
, 0,
4245 return PTR_ERR(right
);
4247 root_add_used(root
, fs_info
->nodesize
);
4251 btrfs_set_header_nritems(right
, 0);
4252 insert_ptr(trans
, path
, &disk_key
,
4253 right
->start
, path
->slots
[1] + 1, 1);
4254 btrfs_tree_unlock(path
->nodes
[0]);
4255 free_extent_buffer(path
->nodes
[0]);
4256 path
->nodes
[0] = right
;
4258 path
->slots
[1] += 1;
4260 btrfs_set_header_nritems(right
, 0);
4261 insert_ptr(trans
, path
, &disk_key
,
4262 right
->start
, path
->slots
[1], 1);
4263 btrfs_tree_unlock(path
->nodes
[0]);
4264 free_extent_buffer(path
->nodes
[0]);
4265 path
->nodes
[0] = right
;
4267 if (path
->slots
[1] == 0)
4268 fixup_low_keys(path
, &disk_key
, 1);
4271 * We create a new leaf 'right' for the required ins_len and
4272 * we'll do btrfs_mark_buffer_dirty() on this leaf after copying
4273 * the content of ins_len to 'right'.
4278 copy_for_split(trans
, path
, l
, right
, slot
, mid
, nritems
);
4281 BUG_ON(num_doubles
!= 0);
4289 push_for_double_split(trans
, root
, path
, data_size
);
4290 tried_avoid_double
= 1;
4291 if (btrfs_leaf_free_space(path
->nodes
[0]) >= data_size
)
4296 static noinline
int setup_leaf_for_split(struct btrfs_trans_handle
*trans
,
4297 struct btrfs_root
*root
,
4298 struct btrfs_path
*path
, int ins_len
)
4300 struct btrfs_key key
;
4301 struct extent_buffer
*leaf
;
4302 struct btrfs_file_extent_item
*fi
;
4307 leaf
= path
->nodes
[0];
4308 btrfs_item_key_to_cpu(leaf
, &key
, path
->slots
[0]);
4310 BUG_ON(key
.type
!= BTRFS_EXTENT_DATA_KEY
&&
4311 key
.type
!= BTRFS_EXTENT_CSUM_KEY
);
4313 if (btrfs_leaf_free_space(leaf
) >= ins_len
)
4316 item_size
= btrfs_item_size_nr(leaf
, path
->slots
[0]);
4317 if (key
.type
== BTRFS_EXTENT_DATA_KEY
) {
4318 fi
= btrfs_item_ptr(leaf
, path
->slots
[0],
4319 struct btrfs_file_extent_item
);
4320 extent_len
= btrfs_file_extent_num_bytes(leaf
, fi
);
4322 btrfs_release_path(path
);
4324 path
->keep_locks
= 1;
4325 path
->search_for_split
= 1;
4326 ret
= btrfs_search_slot(trans
, root
, &key
, path
, 0, 1);
4327 path
->search_for_split
= 0;
4334 leaf
= path
->nodes
[0];
4335 /* if our item isn't there, return now */
4336 if (item_size
!= btrfs_item_size_nr(leaf
, path
->slots
[0]))
4339 /* the leaf has changed, it now has room. return now */
4340 if (btrfs_leaf_free_space(path
->nodes
[0]) >= ins_len
)
4343 if (key
.type
== BTRFS_EXTENT_DATA_KEY
) {
4344 fi
= btrfs_item_ptr(leaf
, path
->slots
[0],
4345 struct btrfs_file_extent_item
);
4346 if (extent_len
!= btrfs_file_extent_num_bytes(leaf
, fi
))
4350 btrfs_set_path_blocking(path
);
4351 ret
= split_leaf(trans
, root
, &key
, path
, ins_len
, 1);
4355 path
->keep_locks
= 0;
4356 btrfs_unlock_up_safe(path
, 1);
4359 path
->keep_locks
= 0;
4363 static noinline
int split_item(struct btrfs_path
*path
,
4364 const struct btrfs_key
*new_key
,
4365 unsigned long split_offset
)
4367 struct extent_buffer
*leaf
;
4368 struct btrfs_item
*item
;
4369 struct btrfs_item
*new_item
;
4375 struct btrfs_disk_key disk_key
;
4377 leaf
= path
->nodes
[0];
4378 BUG_ON(btrfs_leaf_free_space(leaf
) < sizeof(struct btrfs_item
));
4380 btrfs_set_path_blocking(path
);
4382 item
= btrfs_item_nr(path
->slots
[0]);
4383 orig_offset
= btrfs_item_offset(leaf
, item
);
4384 item_size
= btrfs_item_size(leaf
, item
);
4386 buf
= kmalloc(item_size
, GFP_NOFS
);
4390 read_extent_buffer(leaf
, buf
, btrfs_item_ptr_offset(leaf
,
4391 path
->slots
[0]), item_size
);
4393 slot
= path
->slots
[0] + 1;
4394 nritems
= btrfs_header_nritems(leaf
);
4395 if (slot
!= nritems
) {
4396 /* shift the items */
4397 memmove_extent_buffer(leaf
, btrfs_item_nr_offset(slot
+ 1),
4398 btrfs_item_nr_offset(slot
),
4399 (nritems
- slot
) * sizeof(struct btrfs_item
));
4402 btrfs_cpu_key_to_disk(&disk_key
, new_key
);
4403 btrfs_set_item_key(leaf
, &disk_key
, slot
);
4405 new_item
= btrfs_item_nr(slot
);
4407 btrfs_set_item_offset(leaf
, new_item
, orig_offset
);
4408 btrfs_set_item_size(leaf
, new_item
, item_size
- split_offset
);
4410 btrfs_set_item_offset(leaf
, item
,
4411 orig_offset
+ item_size
- split_offset
);
4412 btrfs_set_item_size(leaf
, item
, split_offset
);
4414 btrfs_set_header_nritems(leaf
, nritems
+ 1);
4416 /* write the data for the start of the original item */
4417 write_extent_buffer(leaf
, buf
,
4418 btrfs_item_ptr_offset(leaf
, path
->slots
[0]),
4421 /* write the data for the new item */
4422 write_extent_buffer(leaf
, buf
+ split_offset
,
4423 btrfs_item_ptr_offset(leaf
, slot
),
4424 item_size
- split_offset
);
4425 btrfs_mark_buffer_dirty(leaf
);
4427 BUG_ON(btrfs_leaf_free_space(leaf
) < 0);
/*
 * This function splits a single item into two items,
 * giving 'new_key' to the new item and splitting the
 * old one at split_offset (from the start of the item).
 *
 * The path may be released by this operation.  After
 * the split, the path is pointing to the old item.  The
 * new item is going to be in the same node as the old one.
 *
 * Note, the item being split must be small enough to live alone on
 * a tree block with room for one extra struct btrfs_item
 *
 * This allows us to split the item in place, keeping a lock on the
 * leaf the entire time.
 */
4447 int btrfs_split_item(struct btrfs_trans_handle
*trans
,
4448 struct btrfs_root
*root
,
4449 struct btrfs_path
*path
,
4450 const struct btrfs_key
*new_key
,
4451 unsigned long split_offset
)
4454 ret
= setup_leaf_for_split(trans
, root
, path
,
4455 sizeof(struct btrfs_item
));
4459 ret
= split_item(path
, new_key
, split_offset
);
/*
 * This function duplicates an item, giving 'new_key' to the new item.
 * It guarantees both items live in the same tree leaf and the new item
 * is contiguous with the original item.
 *
 * This allows us to split a file extent in place, keeping a lock on the
 * leaf the entire time.
 */
4471 int btrfs_duplicate_item(struct btrfs_trans_handle
*trans
,
4472 struct btrfs_root
*root
,
4473 struct btrfs_path
*path
,
4474 const struct btrfs_key
*new_key
)
4476 struct extent_buffer
*leaf
;
4480 leaf
= path
->nodes
[0];
4481 item_size
= btrfs_item_size_nr(leaf
, path
->slots
[0]);
4482 ret
= setup_leaf_for_split(trans
, root
, path
,
4483 item_size
+ sizeof(struct btrfs_item
));
4488 setup_items_for_insert(root
, path
, new_key
, &item_size
,
4489 item_size
, item_size
+
4490 sizeof(struct btrfs_item
), 1);
4491 leaf
= path
->nodes
[0];
4492 memcpy_extent_buffer(leaf
,
4493 btrfs_item_ptr_offset(leaf
, path
->slots
[0]),
4494 btrfs_item_ptr_offset(leaf
, path
->slots
[0] - 1),
/*
 * make the item pointed to by the path smaller.  new_size indicates
 * how small to make it, and from_end tells us if we just chop bytes
 * off the end of the item or if we shift the item to chop bytes off
 * the front.
 */
4505 void btrfs_truncate_item(struct btrfs_path
*path
, u32 new_size
, int from_end
)
4508 struct extent_buffer
*leaf
;
4509 struct btrfs_item
*item
;
4511 unsigned int data_end
;
4512 unsigned int old_data_start
;
4513 unsigned int old_size
;
4514 unsigned int size_diff
;
4516 struct btrfs_map_token token
;
4518 leaf
= path
->nodes
[0];
4519 slot
= path
->slots
[0];
4521 old_size
= btrfs_item_size_nr(leaf
, slot
);
4522 if (old_size
== new_size
)
4525 nritems
= btrfs_header_nritems(leaf
);
4526 data_end
= leaf_data_end(leaf
);
4528 old_data_start
= btrfs_item_offset_nr(leaf
, slot
);
4530 size_diff
= old_size
- new_size
;
4533 BUG_ON(slot
>= nritems
);
4536 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4538 /* first correct the data pointers */
4539 btrfs_init_map_token(&token
, leaf
);
4540 for (i
= slot
; i
< nritems
; i
++) {
4542 item
= btrfs_item_nr(i
);
4544 ioff
= btrfs_token_item_offset(leaf
, item
, &token
);
4545 btrfs_set_token_item_offset(leaf
, item
,
4546 ioff
+ size_diff
, &token
);
4549 /* shift the data */
4551 memmove_extent_buffer(leaf
, BTRFS_LEAF_DATA_OFFSET
+
4552 data_end
+ size_diff
, BTRFS_LEAF_DATA_OFFSET
+
4553 data_end
, old_data_start
+ new_size
- data_end
);
4555 struct btrfs_disk_key disk_key
;
4558 btrfs_item_key(leaf
, &disk_key
, slot
);
4560 if (btrfs_disk_key_type(&disk_key
) == BTRFS_EXTENT_DATA_KEY
) {
4562 struct btrfs_file_extent_item
*fi
;
4564 fi
= btrfs_item_ptr(leaf
, slot
,
4565 struct btrfs_file_extent_item
);
4566 fi
= (struct btrfs_file_extent_item
*)(
4567 (unsigned long)fi
- size_diff
);
4569 if (btrfs_file_extent_type(leaf
, fi
) ==
4570 BTRFS_FILE_EXTENT_INLINE
) {
4571 ptr
= btrfs_item_ptr_offset(leaf
, slot
);
4572 memmove_extent_buffer(leaf
, ptr
,
4574 BTRFS_FILE_EXTENT_INLINE_DATA_START
);
4578 memmove_extent_buffer(leaf
, BTRFS_LEAF_DATA_OFFSET
+
4579 data_end
+ size_diff
, BTRFS_LEAF_DATA_OFFSET
+
4580 data_end
, old_data_start
- data_end
);
4582 offset
= btrfs_disk_key_offset(&disk_key
);
4583 btrfs_set_disk_key_offset(&disk_key
, offset
+ size_diff
);
4584 btrfs_set_item_key(leaf
, &disk_key
, slot
);
4586 fixup_low_keys(path
, &disk_key
, 1);
4589 item
= btrfs_item_nr(slot
);
4590 btrfs_set_item_size(leaf
, item
, new_size
);
4591 btrfs_mark_buffer_dirty(leaf
);
4593 if (btrfs_leaf_free_space(leaf
) < 0) {
4594 btrfs_print_leaf(leaf
);
/*
 * make the item pointed to by the path bigger, data_size is the added size.
 */
4602 void btrfs_extend_item(struct btrfs_path
*path
, u32 data_size
)
4605 struct extent_buffer
*leaf
;
4606 struct btrfs_item
*item
;
4608 unsigned int data_end
;
4609 unsigned int old_data
;
4610 unsigned int old_size
;
4612 struct btrfs_map_token token
;
4614 leaf
= path
->nodes
[0];
4616 nritems
= btrfs_header_nritems(leaf
);
4617 data_end
= leaf_data_end(leaf
);
4619 if (btrfs_leaf_free_space(leaf
) < data_size
) {
4620 btrfs_print_leaf(leaf
);
4623 slot
= path
->slots
[0];
4624 old_data
= btrfs_item_end_nr(leaf
, slot
);
4627 if (slot
>= nritems
) {
4628 btrfs_print_leaf(leaf
);
4629 btrfs_crit(leaf
->fs_info
, "slot %d too large, nritems %d",
4635 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4637 /* first correct the data pointers */
4638 btrfs_init_map_token(&token
, leaf
);
4639 for (i
= slot
; i
< nritems
; i
++) {
4641 item
= btrfs_item_nr(i
);
4643 ioff
= btrfs_token_item_offset(leaf
, item
, &token
);
4644 btrfs_set_token_item_offset(leaf
, item
,
4645 ioff
- data_size
, &token
);
4648 /* shift the data */
4649 memmove_extent_buffer(leaf
, BTRFS_LEAF_DATA_OFFSET
+
4650 data_end
- data_size
, BTRFS_LEAF_DATA_OFFSET
+
4651 data_end
, old_data
- data_end
);
4653 data_end
= old_data
;
4654 old_size
= btrfs_item_size_nr(leaf
, slot
);
4655 item
= btrfs_item_nr(slot
);
4656 btrfs_set_item_size(leaf
, item
, old_size
+ data_size
);
4657 btrfs_mark_buffer_dirty(leaf
);
4659 if (btrfs_leaf_free_space(leaf
) < 0) {
4660 btrfs_print_leaf(leaf
);
/*
 * this is a helper for btrfs_insert_empty_items, the main goal here is
 * to save stack depth by doing the bulk of the work in a function
 * that doesn't call btrfs_search_slot
 */
4670 void setup_items_for_insert(struct btrfs_root
*root
, struct btrfs_path
*path
,
4671 const struct btrfs_key
*cpu_key
, u32
*data_size
,
4672 u32 total_data
, u32 total_size
, int nr
)
4674 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
4675 struct btrfs_item
*item
;
4678 unsigned int data_end
;
4679 struct btrfs_disk_key disk_key
;
4680 struct extent_buffer
*leaf
;
4682 struct btrfs_map_token token
;
4684 if (path
->slots
[0] == 0) {
4685 btrfs_cpu_key_to_disk(&disk_key
, cpu_key
);
4686 fixup_low_keys(path
, &disk_key
, 1);
4688 btrfs_unlock_up_safe(path
, 1);
4690 leaf
= path
->nodes
[0];
4691 slot
= path
->slots
[0];
4693 nritems
= btrfs_header_nritems(leaf
);
4694 data_end
= leaf_data_end(leaf
);
4696 if (btrfs_leaf_free_space(leaf
) < total_size
) {
4697 btrfs_print_leaf(leaf
);
4698 btrfs_crit(fs_info
, "not enough freespace need %u have %d",
4699 total_size
, btrfs_leaf_free_space(leaf
));
4703 btrfs_init_map_token(&token
, leaf
);
4704 if (slot
!= nritems
) {
4705 unsigned int old_data
= btrfs_item_end_nr(leaf
, slot
);
4707 if (old_data
< data_end
) {
4708 btrfs_print_leaf(leaf
);
4709 btrfs_crit(fs_info
, "slot %d old_data %d data_end %d",
4710 slot
, old_data
, data_end
);
4714 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4716 /* first correct the data pointers */
4717 for (i
= slot
; i
< nritems
; i
++) {
4720 item
= btrfs_item_nr(i
);
4721 ioff
= btrfs_token_item_offset(leaf
, item
, &token
);
4722 btrfs_set_token_item_offset(leaf
, item
,
4723 ioff
- total_data
, &token
);
4725 /* shift the items */
4726 memmove_extent_buffer(leaf
, btrfs_item_nr_offset(slot
+ nr
),
4727 btrfs_item_nr_offset(slot
),
4728 (nritems
- slot
) * sizeof(struct btrfs_item
));
4730 /* shift the data */
4731 memmove_extent_buffer(leaf
, BTRFS_LEAF_DATA_OFFSET
+
4732 data_end
- total_data
, BTRFS_LEAF_DATA_OFFSET
+
4733 data_end
, old_data
- data_end
);
4734 data_end
= old_data
;
4737 /* setup the item for the new data */
4738 for (i
= 0; i
< nr
; i
++) {
4739 btrfs_cpu_key_to_disk(&disk_key
, cpu_key
+ i
);
4740 btrfs_set_item_key(leaf
, &disk_key
, slot
+ i
);
4741 item
= btrfs_item_nr(slot
+ i
);
4742 btrfs_set_token_item_offset(leaf
, item
,
4743 data_end
- data_size
[i
], &token
);
4744 data_end
-= data_size
[i
];
4745 btrfs_set_token_item_size(leaf
, item
, data_size
[i
], &token
);
4748 btrfs_set_header_nritems(leaf
, nritems
+ nr
);
4749 btrfs_mark_buffer_dirty(leaf
);
4751 if (btrfs_leaf_free_space(leaf
) < 0) {
4752 btrfs_print_leaf(leaf
);
/*
 * Given a key and some data, insert items into the tree.
 * This does all the path init required, making room in the tree if needed.
 */
int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path,
			     const struct btrfs_key *cpu_key, u32 *data_size,
			     int nr)
{
	int ret = 0;
	int slot;
	int i;
	u32 total_size = 0;
	u32 total_data = 0;

	for (i = 0; i < nr; i++)
		total_data += data_size[i];

	total_size = total_data + (nr * sizeof(struct btrfs_item));
	ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
	if (ret == 0)
		return -EEXIST;
	if (ret < 0)
		return ret;

	slot = path->slots[0];
	BUG_ON(slot < 0);

	setup_items_for_insert(root, path, cpu_key, data_size,
			       total_data, total_size, nr);
	return 0;
}
/*
 * Given a key and some data, insert an item into the tree.
 * This does all the path init required, making room in the tree if needed.
 */
int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      const struct btrfs_key *cpu_key, void *data,
		      u32 data_size)
{
	int ret = 0;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	unsigned long ptr;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
	if (!ret) {
		leaf = path->nodes[0];
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		write_extent_buffer(leaf, data, ptr, data_size);
		btrfs_mark_buffer_dirty(leaf);
	}
	btrfs_free_path(path);
	return ret;
}
/*
 * delete the pointer from a given node.
 *
 * the tree should have been previously balanced so the deletion does not
 * empty a node.
 */
static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
		    int level, int slot)
{
	struct extent_buffer *parent = path->nodes[level];
	u32 nritems;
	int ret;

	nritems = btrfs_header_nritems(parent);
	if (slot != nritems - 1) {
		if (level) {
			ret = tree_mod_log_insert_move(parent, slot, slot + 1,
						       nritems - slot - 1);
			BUG_ON(ret < 0);
		}
		memmove_extent_buffer(parent,
				      btrfs_node_key_ptr_offset(slot),
				      btrfs_node_key_ptr_offset(slot + 1),
				      sizeof(struct btrfs_key_ptr) *
				      (nritems - slot - 1));
	} else if (level) {
		ret = tree_mod_log_insert_key(parent, slot, MOD_LOG_KEY_REMOVE,
					      GFP_NOFS);
		BUG_ON(ret < 0);
	}

	nritems--;
	btrfs_set_header_nritems(parent, nritems);
	if (nritems == 0 && parent == root->node) {
		BUG_ON(btrfs_header_level(root->node) != 1);
		/* just turn the root into a leaf and break */
		btrfs_set_header_level(root->node, 0);
	} else if (slot == 0) {
		struct btrfs_disk_key disk_key;

		btrfs_node_key(parent, &disk_key, 0);
		fixup_low_keys(path, &disk_key, level + 1);
	}
	btrfs_mark_buffer_dirty(parent);
}
/*
 * a helper function to delete the leaf pointed to by path->slots[1] and
 * path->nodes[1].
 *
 * This deletes the pointer in path->nodes[1] and frees the leaf
 * block extent.
 *
 * The path must have already been setup for deleting the leaf, including
 * all the proper balancing.  path->nodes[1] must be locked.
 */
static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct extent_buffer *leaf)
{
	WARN_ON(btrfs_header_generation(leaf) != trans->transid);
	del_ptr(root, path, 1, path->slots[1]);

	/*
	 * btrfs_free_extent is expensive, we want to make sure we
	 * aren't holding any locks when we call it
	 */
	btrfs_unlock_up_safe(path, 0);

	root_sub_used(root, leaf->len);

	atomic_inc(&leaf->refs);
	btrfs_free_tree_block(trans, root, leaf, 0, 1);
	free_extent_buffer_stale(leaf);
}
/*
 * delete the item at the leaf level in path.  If that empties
 * the leaf, remove it from the tree
 */
int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct btrfs_path *path, int slot, int nr)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	u32 last_off;
	u32 dsize = 0;
	int ret = 0;
	int wret;
	int i;
	u32 nritems;

	leaf = path->nodes[0];
	last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);

	for (i = 0; i < nr; i++)
		dsize += btrfs_item_size_nr(leaf, slot + i);

	nritems = btrfs_header_nritems(leaf);

	if (slot + nr != nritems) {
		int data_end = leaf_data_end(leaf);
		struct btrfs_map_token token;

		memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
				      data_end + dsize,
				      BTRFS_LEAF_DATA_OFFSET + data_end,
				      last_off - data_end);

		btrfs_init_map_token(&token, leaf);
		for (i = slot + nr; i < nritems; i++) {
			u32 ioff;

			item = btrfs_item_nr(i);
			ioff = btrfs_token_item_offset(leaf, item, &token);
			btrfs_set_token_item_offset(leaf, item,
						    ioff + dsize, &token);
		}

		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
				      btrfs_item_nr_offset(slot + nr),
				      sizeof(struct btrfs_item) *
				      (nritems - slot - nr));
	}
	btrfs_set_header_nritems(leaf, nritems - nr);
	nritems -= nr;

	/* delete the leaf if we've emptied it */
	if (nritems == 0) {
		if (leaf == root->node) {
			btrfs_set_header_level(leaf, 0);
		} else {
			btrfs_set_path_blocking(path);
			btrfs_clean_tree_block(leaf);
			btrfs_del_leaf(trans, root, path, leaf);
		}
	} else {
		int used = leaf_space_used(leaf, 0, nritems);
		if (slot == 0) {
			struct btrfs_disk_key disk_key;

			btrfs_item_key(leaf, &disk_key, 0);
			fixup_low_keys(path, &disk_key, 1);
		}

		/* delete the leaf if it is mostly empty */
		if (used < BTRFS_LEAF_DATA_SIZE(fs_info) / 3) {
			/* push_leaf_left fixes the path.
			 * make sure the path still points to our leaf
			 * for possible call to del_ptr below
			 */
			slot = path->slots[1];
			atomic_inc(&leaf->refs);

			btrfs_set_path_blocking(path);
			wret = push_leaf_left(trans, root, path, 1, 1,
					      1, (u32)-1);
			if (wret < 0 && wret != -ENOSPC)
				ret = wret;

			if (path->nodes[0] == leaf &&
			    btrfs_header_nritems(leaf)) {
				wret = push_leaf_right(trans, root, path, 1,
						       1, 1, 0);
				if (wret < 0 && wret != -ENOSPC)
					ret = wret;
			}

			if (btrfs_header_nritems(leaf) == 0) {
				path->slots[1] = slot;
				btrfs_del_leaf(trans, root, path, leaf);
				free_extent_buffer(leaf);
				ret = 0;
			} else {
				/* if we're still in the path, make sure
				 * we're dirty.  Otherwise, one of the
				 * push_leaf functions must have already
				 * dirtied this buffer
				 */
				if (path->nodes[0] == leaf)
					btrfs_mark_buffer_dirty(leaf);
				free_extent_buffer(leaf);
			}
		} else {
			btrfs_mark_buffer_dirty(leaf);
		}
	}
	return ret;
}
/*
 * search the tree again to find a leaf with lesser keys
 * returns 0 if it found something or 1 if there are no lesser leaves.
 * returns < 0 on io errors.
 *
 * This may release the path, and so you may lose any locks held at the
 * time you call it.
 */
int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
{
	struct btrfs_key key;
	struct btrfs_disk_key found_key;
	int ret;

	btrfs_item_key_to_cpu(path->nodes[0], &key, 0);

	if (key.offset > 0) {
		key.offset--;
	} else if (key.type > 0) {
		key.type--;
		key.offset = (u64)-1;
	} else if (key.objectid > 0) {
		key.objectid--;
		key.type = (u8)-1;
		key.offset = (u64)-1;
	} else {
		return 1;
	}

	btrfs_release_path(path);
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	btrfs_item_key(path->nodes[0], &found_key, 0);
	ret = comp_keys(&found_key, &key);
	/*
	 * We might have had an item with the previous key in the tree right
	 * before we released our path. And after we released our path, that
	 * item might have been pushed to the first slot (0) of the leaf we
	 * were holding due to a tree balance. Alternatively, an item with the
	 * previous key can exist as the only element of a leaf (big fat item).
	 * Therefore account for these 2 cases, so that our callers (like
	 * btrfs_previous_item) don't miss an existing item with a key matching
	 * the previous key we computed above.
	 */
	if (ret <= 0)
		return 0;
	return 1;
}
/*
 * A helper function to walk down the tree starting at min_key, and looking
 * for nodes or leaves that have a minimum transaction id.
 * This is used by the btree defrag code, and tree logging.
 *
 * This does not cow, but it does stuff the starting key it finds back
 * into min_key, so you can call btrfs_search_slot with cow=1 on the
 * key and get a writable path.
 *
 * This honors path->lowest_level to prevent descent past a given level
 * of the tree.
 *
 * min_trans indicates the oldest transaction that you are interested
 * in walking through.  Any nodes or leaves older than min_trans are
 * skipped over (without reading them).
 *
 * returns zero if something useful was found, < 0 on error and 1 if there
 * was nothing in the tree that matched the search criteria.
 */
int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
			 struct btrfs_path *path,
			 u64 min_trans)
{
	struct extent_buffer *cur;
	struct btrfs_key found_key;
	int slot;
	int sret;
	u32 nritems;
	int level;
	int ret = 1;
	int keep_locks = path->keep_locks;

	path->keep_locks = 1;
again:
	cur = btrfs_read_lock_root_node(root);
	level = btrfs_header_level(cur);
	WARN_ON(path->nodes[level]);
	path->nodes[level] = cur;
	path->locks[level] = BTRFS_READ_LOCK;

	if (btrfs_header_generation(cur) < min_trans) {
		ret = 1;
		goto out;
	}
	while (1) {
		nritems = btrfs_header_nritems(cur);
		level = btrfs_header_level(cur);
		sret = btrfs_bin_search(cur, min_key, level, &slot);
		if (sret < 0) {
			ret = sret;
			goto out;
		}

		/* at the lowest level, we're done, setup the path and exit */
		if (level == path->lowest_level) {
			if (slot >= nritems)
				goto find_next_key;
			ret = 0;
			path->slots[level] = slot;
			btrfs_item_key_to_cpu(cur, &found_key, slot);
			goto out;
		}
		if (sret && slot > 0)
			slot--;
		/*
		 * check this node pointer against the min_trans parameter.
		 * If it is too old, skip to the next one.
		 */
		while (slot < nritems) {
			u64 gen;

			gen = btrfs_node_ptr_generation(cur, slot);
			if (gen < min_trans) {
				slot++;
				continue;
			}
			break;
		}
find_next_key:
		/*
		 * we didn't find a candidate key in this node, walk forward
		 * and find another one
		 */
		if (slot >= nritems) {
			path->slots[level] = slot;
			btrfs_set_path_blocking(path);
			sret = btrfs_find_next_key(root, path, min_key, level,
						   min_trans);
			if (sret == 0) {
				btrfs_release_path(path);
				goto again;
			} else {
				goto out;
			}
		}
		/* save our key for returning back */
		btrfs_node_key_to_cpu(cur, &found_key, slot);
		path->slots[level] = slot;
		if (level == path->lowest_level) {
			ret = 0;
			goto out;
		}
		btrfs_set_path_blocking(path);
		cur = btrfs_read_node_slot(cur, slot);
		if (IS_ERR(cur)) {
			ret = PTR_ERR(cur);
			goto out;
		}

		btrfs_tree_read_lock(cur);

		path->locks[level - 1] = BTRFS_READ_LOCK;
		path->nodes[level - 1] = cur;
		unlock_up(path, level, 1, 0, NULL);
	}
out:
	path->keep_locks = keep_locks;
	if (ret == 0) {
		btrfs_unlock_up_safe(path, path->lowest_level + 1);
		btrfs_set_path_blocking(path);
		memcpy(min_key, &found_key, sizeof(found_key));
	}
	return ret;
}
/*
 * this is similar to btrfs_next_leaf, but does not try to preserve
 * and fixup the path.  It looks for and returns the next key in the
 * tree based on the current path and the min_trans parameter.
 *
 * 0 is returned if another key is found, < 0 if there are any errors
 * and 1 is returned if there are no higher keys in the tree
 *
 * path->keep_locks should be set to 1 on the search made before
 * calling this function.
 */
int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
			struct btrfs_key *key, int level, u64 min_trans)
{
	int slot;
	struct extent_buffer *c;

	WARN_ON(!path->keep_locks && !path->skip_locking);
	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level])
			return 1;

		slot = path->slots[level] + 1;
		c = path->nodes[level];
next:
		if (slot >= btrfs_header_nritems(c)) {
			int ret;
			int orig_lowest;
			struct btrfs_key cur_key;
			if (level + 1 >= BTRFS_MAX_LEVEL ||
			    !path->nodes[level + 1])
				return 1;

			if (path->locks[level + 1] || path->skip_locking) {
				level++;
				continue;
			}

			slot = btrfs_header_nritems(c) - 1;
			if (level == 0)
				btrfs_item_key_to_cpu(c, &cur_key, slot);
			else
				btrfs_node_key_to_cpu(c, &cur_key, slot);

			orig_lowest = path->lowest_level;
			btrfs_release_path(path);
			path->lowest_level = level;
			ret = btrfs_search_slot(NULL, root, &cur_key, path,
						0, 0);
			path->lowest_level = orig_lowest;
			if (ret < 0)
				return ret;

			c = path->nodes[level];
			slot = path->slots[level];
			if (ret == 0)
				slot++;
			goto next;
		}

		if (level == 0)
			btrfs_item_key_to_cpu(c, key, slot);
		else {
			u64 gen = btrfs_node_ptr_generation(c, slot);

			if (gen < min_trans) {
				slot++;
				goto next;
			}
			btrfs_node_key_to_cpu(c, key, slot);
		}
		return 0;
	}
	return 1;
}
/*
 * search the tree again to find a leaf with greater keys
 * returns 0 if it found something or 1 if there are no greater leaves.
 * returns < 0 on io errors.
 */
int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
{
	return btrfs_next_old_leaf(root, path, 0);
}
int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
			u64 time_seq)
{
	int slot;
	int level;
	struct extent_buffer *c;
	struct extent_buffer *next;
	struct btrfs_key key;
	u32 nritems;
	int ret;
	int old_spinning = path->leave_spinning;
	int next_rw_lock = 0;

	nritems = btrfs_header_nritems(path->nodes[0]);
	if (nritems == 0)
		return 1;

	btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
again:
	level = 1;
	next = NULL;
	next_rw_lock = 0;
	btrfs_release_path(path);

	path->keep_locks = 1;
	path->leave_spinning = 1;

	if (time_seq)
		ret = btrfs_search_old_slot(root, &key, path, time_seq);
	else
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	path->keep_locks = 0;

	if (ret < 0)
		return ret;

	nritems = btrfs_header_nritems(path->nodes[0]);
	/*
	 * by releasing the path above we dropped all our locks.  A balance
	 * could have added more items next to the key that used to be
	 * at the very end of the block.  So, check again here and
	 * advance the path if there are now more items available.
	 */
	if (nritems > 0 && path->slots[0] < nritems - 1) {
		if (ret == 0)
			path->slots[0]++;
		ret = 0;
		goto done;
	}
	/*
	 * So the above check misses one case:
	 * - after releasing the path above, someone has removed the item that
	 *   used to be at the very end of the block, and balance between leafs
	 *   gets another one with bigger key.offset to replace it.
	 *
	 * This one should be returned as well, or we can get leaf corruption
	 * later (esp. in __btrfs_drop_extents()).
	 *
	 * And a bit more explanation about this check,
	 * with ret > 0, the key isn't found, the path points to the slot
	 * where it should be inserted, so the path->slots[0] item must be the
	 * bigger one.
	 */
	if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) {
		ret = 0;
		goto done;
	}

	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level]) {
			ret = 1;
			goto done;
		}

		slot = path->slots[level] + 1;
		c = path->nodes[level];
		if (slot >= btrfs_header_nritems(c)) {
			level++;
			if (level == BTRFS_MAX_LEVEL) {
				ret = 1;
				goto done;
			}
			continue;
		}

		if (next) {
			btrfs_tree_unlock_rw(next, next_rw_lock);
			free_extent_buffer(next);
		}

		next_rw_lock = path->locks[level];
		ret = read_block_for_search(root, path, &next, level,
					    slot, &key);
		if (ret == -EAGAIN)
			goto again;

		if (ret < 0) {
			btrfs_release_path(path);
			goto done;
		}

		if (!path->skip_locking) {
			ret = btrfs_try_tree_read_lock(next);
			if (!ret && time_seq) {
				/*
				 * If we don't get the lock, we may be racing
				 * with push_leaf_left, holding that lock while
				 * itself waiting for the leaf we've currently
				 * locked. To solve this situation, we give up
				 * on our lock and cycle.
				 */
				free_extent_buffer(next);
				btrfs_release_path(path);
				cond_resched();
				goto again;
			}
			if (!ret) {
				btrfs_set_path_blocking(path);
				btrfs_tree_read_lock(next);
			}
			next_rw_lock = BTRFS_READ_LOCK;
		}
		break;
	}
	path->slots[level] = slot;
	while (1) {
		level--;
		c = path->nodes[level];
		if (path->locks[level])
			btrfs_tree_unlock_rw(c, path->locks[level]);

		free_extent_buffer(c);
		path->nodes[level] = next;
		path->slots[level] = 0;
		if (!path->skip_locking)
			path->locks[level] = next_rw_lock;
		if (!level)
			break;

		ret = read_block_for_search(root, path, &next, level,
					    0, &key);
		if (ret == -EAGAIN)
			goto again;

		if (ret < 0) {
			btrfs_release_path(path);
			goto done;
		}

		if (!path->skip_locking) {
			ret = btrfs_try_tree_read_lock(next);
			if (!ret) {
				btrfs_set_path_blocking(path);
				btrfs_tree_read_lock(next);
			}
			next_rw_lock = BTRFS_READ_LOCK;
		}
	}
	ret = 0;
done:
	unlock_up(path, 0, 1, 0, NULL);
	path->leave_spinning = old_spinning;
	if (!old_spinning)
		btrfs_set_path_blocking(path);

	return ret;
}
/*
 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
 * searching until it gets past min_objectid or finds an item of 'type'
 *
 * returns 0 if something is found, 1 if nothing was found and < 0 on error
 */
int btrfs_previous_item(struct btrfs_root *root,
			struct btrfs_path *path, u64 min_objectid,
			int type)
{
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;

	while (1) {
		if (path->slots[0] == 0) {
			btrfs_set_path_blocking(path);
			ret = btrfs_prev_leaf(root, path);
			if (ret != 0)
				return ret;
		} else {
			path->slots[0]--;
		}
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (nritems == 0)
			return 1;
		if (path->slots[0] == nritems)
			path->slots[0]--;

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid < min_objectid)
			break;
		if (found_key.type == type)
			return 0;
		if (found_key.objectid == min_objectid &&
		    found_key.type < type)
			break;
	}
	return 1;
}
/*
 * search in extent tree to find a previous Metadata/Data extent item with
 * min objectid.
 *
 * returns 0 if something is found, 1 if nothing was found and < 0 on error
 */
int btrfs_previous_extent_item(struct btrfs_root *root,
			struct btrfs_path *path, u64 min_objectid)
{
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;

	while (1) {
		if (path->slots[0] == 0) {
			btrfs_set_path_blocking(path);
			ret = btrfs_prev_leaf(root, path);
			if (ret != 0)
				return ret;
		} else {
			path->slots[0]--;
		}
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (nritems == 0)
			return 1;
		if (path->slots[0] == nritems)
			path->slots[0]--;

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid < min_objectid)
			break;
		if (found_key.type == BTRFS_EXTENT_ITEM_KEY ||
		    found_key.type == BTRFS_METADATA_ITEM_KEY)
			return 0;
		if (found_key.objectid == min_objectid &&
		    found_key.type < BTRFS_EXTENT_ITEM_KEY)
			break;
	}
	return 1;
}