// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007,2008 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/mm.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "locking.h"
#include "volumes.h"
#include "qgroup.h"
static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_path *path, int level);
static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, const struct btrfs_key *ins_key,
		      struct btrfs_path *path, int data_size, int extend);
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct extent_buffer *dst,
			  struct extent_buffer *src, int empty);
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct extent_buffer *dst_buf,
			      struct extent_buffer *src_buf);
static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
		    int level, int slot);
static const struct btrfs_csums {
	u16		size;
	const char	name[10];
	const char	driver[12];
} btrfs_csums[] = {
	[BTRFS_CSUM_TYPE_CRC32]  = { .size = 4,  .name = "crc32c" },
	[BTRFS_CSUM_TYPE_XXHASH] = { .size = 8,  .name = "xxhash64" },
	[BTRFS_CSUM_TYPE_SHA256] = { .size = 32, .name = "sha256" },
	[BTRFS_CSUM_TYPE_BLAKE2] = { .size = 32, .name = "blake2b",
				     .driver = "blake2b-256" },
};
int btrfs_super_csum_size(const struct btrfs_super_block *s)
{
	u16 t = btrfs_super_csum_type(s);

	/* csum type is validated at mount time */
	return btrfs_csums[t].size;
}
const char *btrfs_super_csum_name(u16 csum_type)
{
	/* csum type is validated at mount time */
	return btrfs_csums[csum_type].name;
}
/*
 * Return driver name if defined, otherwise the name that's also a valid driver
 */
const char *btrfs_super_csum_driver(u16 csum_type)
{
	/* csum type is validated at mount time */
	return btrfs_csums[csum_type].driver[0] ?
		btrfs_csums[csum_type].driver :
		btrfs_csums[csum_type].name;
}
size_t __attribute_const__ btrfs_get_num_csums(void)
{
	return ARRAY_SIZE(btrfs_csums);
}
struct btrfs_path *btrfs_alloc_path(void)
{
	return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
}
/* this also releases the path */
void btrfs_free_path(struct btrfs_path *p)
{
	if (!p)
		return;
	btrfs_release_path(p);
	kmem_cache_free(btrfs_path_cachep, p);
}
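/*
 * Illustrative usage (a sketch, not code from this file): callers pair
 * btrfs_alloc_path() with btrfs_free_path(), e.g.
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	if (!path)
 *		return -ENOMEM;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	...
 *	btrfs_free_path(path);
 *
 * btrfs_free_path() first drops any locks and extent buffer references still
 * held by the path (via btrfs_release_path) and then returns the path object
 * to the slab cache.
 */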
/*
 * path release drops references on the extent buffers in the path
 * and it drops any locks held by this path
 *
 * It is safe to call this on paths that have no locks or extent buffers held.
 */
noinline void btrfs_release_path(struct btrfs_path *p)
{
	int i;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		p->slots[i] = 0;
		if (!p->nodes[i])
			continue;
		if (p->locks[i]) {
			btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
			p->locks[i] = 0;
		}
		free_extent_buffer(p->nodes[i]);
		p->nodes[i] = NULL;
	}
}
/*
 * safely gets a reference on the root node of a tree.  A lock
 * is not taken, so a concurrent writer may put a different node
 * at the root of the tree.  See btrfs_lock_root_node for the
 * looping required.
 *
 * The extent buffer returned by this has a reference taken, so
 * it won't disappear.  It may stop being the root of the tree
 * at any time because there are no locks held.
 */
struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		rcu_read_lock();
		eb = rcu_dereference(root->node);

		/*
		 * RCU really hurts here, we could free up the root node because
		 * it was COWed but we may not get the new root node yet so do
		 * the inc_not_zero dance and if it doesn't work then
		 * synchronize_rcu and try again.
		 */
		if (atomic_inc_not_zero(&eb->refs)) {
			rcu_read_unlock();
			break;
		}
		rcu_read_unlock();
		synchronize_rcu();
	}
	return eb;
}
/*
 * Cow-only roots (not-shareable trees, everything not subvolume or reloc
 * roots) just get put onto a simple dirty list.  Transaction commit walks
 * this list to make sure they get properly updated on disk.
 */
static void add_root_to_dirty_list(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (test_bit(BTRFS_ROOT_DIRTY, &root->state) ||
	    !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state))
		return;

	spin_lock(&fs_info->trans_lock);
	if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) {
		/* Want the extent tree to be the last on the list */
		if (root->root_key.objectid == BTRFS_EXTENT_TREE_OBJECTID)
			list_move_tail(&root->dirty_list,
				       &fs_info->dirty_cowonly_roots);
		else
			list_move(&root->dirty_list,
				  &fs_info->dirty_cowonly_roots);
	}
	spin_unlock(&fs_info->trans_lock);
}
/*
 * used by snapshot creation to make a copy of a root for a tree with
 * a given objectid.  The buffer with the new root node is returned in
 * cow_ret, and this func returns zero on success or a negative error code.
 */
int btrfs_copy_root(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root,
		      struct extent_buffer *buf,
		      struct extent_buffer **cow_ret, u64 new_root_objectid)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *cow;
	int ret = 0;
	int level;
	struct btrfs_disk_key disk_key;

	WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
		trans->transid != fs_info->running_transaction->transid);
	WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
		trans->transid != root->last_trans);

	level = btrfs_header_level(buf);
	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	cow = btrfs_alloc_tree_block(trans, root, 0, new_root_objectid,
				     &disk_key, level, buf->start, 0,
				     BTRFS_NESTING_NEW_ROOT);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	copy_extent_buffer_full(cow, buf);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, new_root_objectid);

	write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);

	WARN_ON(btrfs_header_generation(buf) > trans->transid);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		ret = btrfs_inc_ref(trans, root, cow, 1);
	else
		ret = btrfs_inc_ref(trans, root, cow, 0);
	if (ret)
		return ret;

	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}
enum mod_log_op {
	MOD_LOG_KEY_REPLACE,
	MOD_LOG_KEY_ADD,
	MOD_LOG_KEY_REMOVE,
	MOD_LOG_KEY_REMOVE_WHILE_FREEING,
	MOD_LOG_KEY_REMOVE_WHILE_MOVING,
	MOD_LOG_MOVE_KEYS,
	MOD_LOG_ROOT_REPLACE,
};

struct tree_mod_root {
	u64 logical;
	u8 level;
};

struct tree_mod_elem {
	struct rb_node node;
	u64 logical;
	u64 seq;
	enum mod_log_op op;

	/* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
	int slot;

	/* this is used for MOD_LOG_KEY* and MOD_LOG_ROOT_REPLACE */
	u64 generation;

	/* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
	struct btrfs_disk_key key;
	u64 blockptr;

	/* this is used for op == MOD_LOG_MOVE_KEYS */
	struct {
		int dst_slot;
		int nr_items;
	} move;

	/* this is used for op == MOD_LOG_ROOT_REPLACE */
	struct tree_mod_root old_root;
};
/*
 * Pull a new tree mod seq number for our operation.
 */
static inline u64 btrfs_inc_tree_mod_seq(struct btrfs_fs_info *fs_info)
{
	return atomic64_inc_return(&fs_info->tree_mod_seq);
}
/*
 * This adds a new blocker to the tree mod log's blocker list if the @elem
 * passed does not already have a sequence number set. So when a caller expects
 * to record tree modifications, it should ensure to set elem->seq to zero
 * before calling btrfs_get_tree_mod_seq.
 * Returns a fresh, unused tree log modification sequence number, even if no new
 * blocker was added.
 */
u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
			   struct seq_list *elem)
{
	write_lock(&fs_info->tree_mod_log_lock);
	if (!elem->seq) {
		elem->seq = btrfs_inc_tree_mod_seq(fs_info);
		list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
	}
	write_unlock(&fs_info->tree_mod_log_lock);

	return elem->seq;
}
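/*
 * Illustrative usage of the blocker interface (a sketch, not code from this
 * file): a reader that wants a stable view of past tree states typically does
 *
 *	struct seq_list elem = SEQ_LIST_INIT(elem);
 *
 *	btrfs_get_tree_mod_seq(fs_info, &elem);
 *	... use the tree mod log search/rewind helpers with elem.seq ...
 *	btrfs_put_tree_mod_seq(fs_info, &elem);
 *
 * While the element sits on tree_mod_seq_list, log entries with sequence
 * numbers >= elem.seq cannot be pruned by btrfs_put_tree_mod_seq().
 */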
void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
			    struct seq_list *elem)
{
	struct rb_root *tm_root;
	struct rb_node *node;
	struct rb_node *next;
	struct tree_mod_elem *tm;
	u64 min_seq = (u64)-1;
	u64 seq_putting = elem->seq;

	if (!seq_putting)
		return;

	write_lock(&fs_info->tree_mod_log_lock);
	list_del(&elem->list);
	elem->seq = 0;

	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		struct seq_list *first;

		first = list_first_entry(&fs_info->tree_mod_seq_list,
					 struct seq_list, list);
		if (seq_putting > first->seq) {
			/*
			 * Blocker with lower sequence number exists, we
			 * cannot remove anything from the log.
			 */
			write_unlock(&fs_info->tree_mod_log_lock);
			return;
		}
		min_seq = first->seq;
	}

	/*
	 * anything that's lower than the lowest existing (read: blocked)
	 * sequence number can be removed from the tree.
	 */
	tm_root = &fs_info->tree_mod_log;
	for (node = rb_first(tm_root); node; node = next) {
		next = rb_next(node);
		tm = rb_entry(node, struct tree_mod_elem, node);
		if (tm->seq >= min_seq)
			continue;
		rb_erase(node, tm_root);
		kfree(tm);
	}
	write_unlock(&fs_info->tree_mod_log_lock);
}
/*
 * key order of the log:
 *       node/leaf start address -> sequence
 *
 * The 'start address' is the logical address of the *new* root node
 * for root replace operations, or the logical address of the affected
 * block for all other operations.
 */
static noinline int
__tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
{
	struct rb_root *tm_root;
	struct rb_node **new;
	struct rb_node *parent = NULL;
	struct tree_mod_elem *cur;

	lockdep_assert_held_write(&fs_info->tree_mod_log_lock);

	tm->seq = btrfs_inc_tree_mod_seq(fs_info);

	tm_root = &fs_info->tree_mod_log;
	new = &tm_root->rb_node;
	while (*new) {
		cur = rb_entry(*new, struct tree_mod_elem, node);
		parent = *new;
		if (cur->logical < tm->logical)
			new = &((*new)->rb_left);
		else if (cur->logical > tm->logical)
			new = &((*new)->rb_right);
		else if (cur->seq < tm->seq)
			new = &((*new)->rb_left);
		else if (cur->seq > tm->seq)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&tm->node, parent, new);
	rb_insert_color(&tm->node, tm_root);
	return 0;
}
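/*
 * Restating the ordering above (descriptive, no new behaviour): the rb-tree
 * is keyed by (logical address, sequence number), so all recorded
 * modifications of one block are adjacent and can be walked in sequence
 * order when that block is rewound to an older state.
 */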
/*
 * Determines if logging can be omitted. Returns 1 if it can. Otherwise, it
 * returns zero with the tree_mod_log_lock acquired. The caller must hold
 * this until all tree mod log insertions are recorded in the rb tree and then
 * write unlock fs_info::tree_mod_log_lock.
 */
static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
				    struct extent_buffer *eb) {
	smp_mb();
	if (list_empty(&(fs_info)->tree_mod_seq_list))
		return 1;
	if (eb && btrfs_header_level(eb) == 0)
		return 1;

	write_lock(&fs_info->tree_mod_log_lock);
	if (list_empty(&(fs_info)->tree_mod_seq_list)) {
		write_unlock(&fs_info->tree_mod_log_lock);
		return 1;
	}

	return 0;
}
/* Similar to tree_mod_dont_log, but doesn't acquire any locks. */
static inline int tree_mod_need_log(const struct btrfs_fs_info *fs_info,
				    struct extent_buffer *eb)
{
	smp_mb();
	if (list_empty(&(fs_info)->tree_mod_seq_list))
		return 0;
	if (eb && btrfs_header_level(eb) == 0)
		return 0;

	return 1;
}
static struct tree_mod_elem *
alloc_tree_mod_elem(struct extent_buffer *eb, int slot,
		    enum mod_log_op op, gfp_t flags)
{
	struct tree_mod_elem *tm;

	tm = kzalloc(sizeof(*tm), flags);
	if (!tm)
		return NULL;

	tm->logical = eb->start;
	if (op != MOD_LOG_KEY_ADD) {
		btrfs_node_key(eb, &tm->key, slot);
		tm->blockptr = btrfs_node_blockptr(eb, slot);
	}
	tm->op = op;
	tm->slot = slot;
	tm->generation = btrfs_node_ptr_generation(eb, slot);
	RB_CLEAR_NODE(&tm->node);

	return tm;
}
static noinline int tree_mod_log_insert_key(struct extent_buffer *eb, int slot,
		enum mod_log_op op, gfp_t flags)
{
	struct tree_mod_elem *tm;
	int ret;

	if (!tree_mod_need_log(eb->fs_info, eb))
		return 0;

	tm = alloc_tree_mod_elem(eb, slot, op, flags);
	if (!tm)
		return -ENOMEM;

	if (tree_mod_dont_log(eb->fs_info, eb)) {
		kfree(tm);
		return 0;
	}

	ret = __tree_mod_log_insert(eb->fs_info, tm);
	write_unlock(&eb->fs_info->tree_mod_log_lock);
	if (ret)
		kfree(tm);

	return ret;
}
static noinline int tree_mod_log_insert_move(struct extent_buffer *eb,
		int dst_slot, int src_slot, int nr_items)
{
	struct tree_mod_elem *tm = NULL;
	struct tree_mod_elem **tm_list = NULL;
	int ret = 0;
	int i;
	int locked = 0;

	if (!tree_mod_need_log(eb->fs_info, eb))
		return 0;

	tm_list = kcalloc(nr_items, sizeof(struct tree_mod_elem *), GFP_NOFS);
	if (!tm_list)
		return -ENOMEM;

	tm = kzalloc(sizeof(*tm), GFP_NOFS);
	if (!tm) {
		ret = -ENOMEM;
		goto free_tms;
	}

	tm->logical = eb->start;
	tm->slot = src_slot;
	tm->move.dst_slot = dst_slot;
	tm->move.nr_items = nr_items;
	tm->op = MOD_LOG_MOVE_KEYS;

	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
		tm_list[i] = alloc_tree_mod_elem(eb, i + dst_slot,
		    MOD_LOG_KEY_REMOVE_WHILE_MOVING, GFP_NOFS);
		if (!tm_list[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}
	}

	if (tree_mod_dont_log(eb->fs_info, eb))
		goto free_tms;
	locked = 1;

	/*
	 * When we overwrite something during the move, we log these removals.
	 * This can only happen when we move towards the beginning of the
	 * buffer, i.e. dst_slot < src_slot.
	 */
	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
		ret = __tree_mod_log_insert(eb->fs_info, tm_list[i]);
		if (ret)
			goto free_tms;
	}

	ret = __tree_mod_log_insert(eb->fs_info, tm);
	if (ret)
		goto free_tms;
	write_unlock(&eb->fs_info->tree_mod_log_lock);
	kfree(tm_list);

	return 0;

free_tms:
	for (i = 0; i < nr_items; i++) {
		if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
			rb_erase(&tm_list[i]->node, &eb->fs_info->tree_mod_log);
		kfree(tm_list[i]);
	}
	if (locked)
		write_unlock(&eb->fs_info->tree_mod_log_lock);
	kfree(tm_list);
	kfree(tm);

	return ret;
}
static noinline int
__tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
		       struct tree_mod_elem **tm_list,
		       int nritems)
{
	int i, j;
	int ret;

	for (i = nritems - 1; i >= 0; i--) {
		ret = __tree_mod_log_insert(fs_info, tm_list[i]);
		if (ret) {
			for (j = nritems - 1; j > i; j--)
				rb_erase(&tm_list[j]->node,
					 &fs_info->tree_mod_log);
			return ret;
		}
	}

	return 0;
}
static noinline int tree_mod_log_insert_root(struct extent_buffer *old_root,
			 struct extent_buffer *new_root, int log_removal)
{
	struct btrfs_fs_info *fs_info = old_root->fs_info;
	struct tree_mod_elem *tm = NULL;
	struct tree_mod_elem **tm_list = NULL;
	int nritems = 0;
	int ret = 0;
	int i;

	if (!tree_mod_need_log(fs_info, NULL))
		return 0;

	if (log_removal && btrfs_header_level(old_root) > 0) {
		nritems = btrfs_header_nritems(old_root);
		tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *),
				  GFP_NOFS);
		if (!tm_list) {
			ret = -ENOMEM;
			goto free_tms;
		}
		for (i = 0; i < nritems; i++) {
			tm_list[i] = alloc_tree_mod_elem(old_root, i,
			    MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
			if (!tm_list[i]) {
				ret = -ENOMEM;
				goto free_tms;
			}
		}
	}

	tm = kzalloc(sizeof(*tm), GFP_NOFS);
	if (!tm) {
		ret = -ENOMEM;
		goto free_tms;
	}

	tm->logical = new_root->start;
	tm->old_root.logical = old_root->start;
	tm->old_root.level = btrfs_header_level(old_root);
	tm->generation = btrfs_header_generation(old_root);
	tm->op = MOD_LOG_ROOT_REPLACE;

	if (tree_mod_dont_log(fs_info, NULL))
		goto free_tms;

	if (tm_list)
		ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
	if (!ret)
		ret = __tree_mod_log_insert(fs_info, tm);

	write_unlock(&fs_info->tree_mod_log_lock);
	if (ret)
		goto free_tms;
	kfree(tm_list);

	return ret;

free_tms:
	if (tm_list) {
		for (i = 0; i < nritems; i++)
			kfree(tm_list[i]);
		kfree(tm_list);
	}
	kfree(tm);

	return ret;
}
static struct tree_mod_elem *
__tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
		      int smallest)
{
	struct rb_root *tm_root;
	struct rb_node *node;
	struct tree_mod_elem *cur = NULL;
	struct tree_mod_elem *found = NULL;

	read_lock(&fs_info->tree_mod_log_lock);
	tm_root = &fs_info->tree_mod_log;
	node = tm_root->rb_node;
	while (node) {
		cur = rb_entry(node, struct tree_mod_elem, node);
		if (cur->logical < start) {
			node = node->rb_left;
		} else if (cur->logical > start) {
			node = node->rb_right;
		} else if (cur->seq < min_seq) {
			node = node->rb_left;
		} else if (!smallest) {
			/* we want the node with the highest seq */
			if (found)
				BUG_ON(found->seq > cur->seq);
			found = cur;
			node = node->rb_left;
		} else if (cur->seq > min_seq) {
			/* we want the node with the smallest seq */
			if (found)
				BUG_ON(found->seq < cur->seq);
			found = cur;
			node = node->rb_right;
		} else {
			found = cur;
			break;
		}
	}
	read_unlock(&fs_info->tree_mod_log_lock);

	return found;
}
/*
 * this returns the element from the log with the smallest time sequence
 * value that's in the log (the oldest log item). any element with a time
 * sequence lower than min_seq will be ignored.
 */
static struct tree_mod_elem *
tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start,
			   u64 min_seq)
{
	return __tree_mod_log_search(fs_info, start, min_seq, 1);
}
/*
 * this returns the element from the log with the largest time sequence
 * value that's in the log (the most recent log item). any element with
 * a time sequence lower than min_seq will be ignored.
 */
static struct tree_mod_elem *
tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
{
	return __tree_mod_log_search(fs_info, start, min_seq, 0);
}
static noinline int tree_mod_log_eb_copy(struct extent_buffer *dst,
		struct extent_buffer *src, unsigned long dst_offset,
		unsigned long src_offset, int nr_items)
{
	struct btrfs_fs_info *fs_info = dst->fs_info;
	int ret = 0;
	struct tree_mod_elem **tm_list = NULL;
	struct tree_mod_elem **tm_list_add, **tm_list_rem;
	int i;
	int locked = 0;

	if (!tree_mod_need_log(fs_info, NULL))
		return 0;

	if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
		return 0;

	tm_list = kcalloc(nr_items * 2, sizeof(struct tree_mod_elem *),
			  GFP_NOFS);
	if (!tm_list)
		return -ENOMEM;

	tm_list_add = tm_list;
	tm_list_rem = tm_list + nr_items;
	for (i = 0; i < nr_items; i++) {
		tm_list_rem[i] = alloc_tree_mod_elem(src, i + src_offset,
		    MOD_LOG_KEY_REMOVE, GFP_NOFS);
		if (!tm_list_rem[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}

		tm_list_add[i] = alloc_tree_mod_elem(dst, i + dst_offset,
		    MOD_LOG_KEY_ADD, GFP_NOFS);
		if (!tm_list_add[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}
	}

	if (tree_mod_dont_log(fs_info, NULL))
		goto free_tms;
	locked = 1;

	for (i = 0; i < nr_items; i++) {
		ret = __tree_mod_log_insert(fs_info, tm_list_rem[i]);
		if (ret)
			goto free_tms;
		ret = __tree_mod_log_insert(fs_info, tm_list_add[i]);
		if (ret)
			goto free_tms;
	}

	write_unlock(&fs_info->tree_mod_log_lock);
	kfree(tm_list);

	return 0;

free_tms:
	for (i = 0; i < nr_items * 2; i++) {
		if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
			rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
		kfree(tm_list[i]);
	}
	if (locked)
		write_unlock(&fs_info->tree_mod_log_lock);
	kfree(tm_list);

	return ret;
}
static noinline int tree_mod_log_free_eb(struct extent_buffer *eb)
{
	struct tree_mod_elem **tm_list = NULL;
	int nritems = 0;
	int i;
	int ret = 0;

	if (btrfs_header_level(eb) == 0)
		return 0;

	if (!tree_mod_need_log(eb->fs_info, NULL))
		return 0;

	nritems = btrfs_header_nritems(eb);
	tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *), GFP_NOFS);
	if (!tm_list)
		return -ENOMEM;

	for (i = 0; i < nritems; i++) {
		tm_list[i] = alloc_tree_mod_elem(eb, i,
		    MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
		if (!tm_list[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}
	}

	if (tree_mod_dont_log(eb->fs_info, eb))
		goto free_tms;

	ret = __tree_mod_log_free_eb(eb->fs_info, tm_list, nritems);
	write_unlock(&eb->fs_info->tree_mod_log_lock);
	if (ret)
		goto free_tms;
	kfree(tm_list);

	return 0;

free_tms:
	for (i = 0; i < nritems; i++)
		kfree(tm_list[i]);
	kfree(tm_list);

	return ret;
}
/*
 * check if the tree block can be shared by multiple trees
 */
int btrfs_block_can_be_shared(struct btrfs_root *root,
			      struct extent_buffer *buf)
{
	/*
	 * Tree blocks not in shareable trees and tree roots are never shared.
	 * If a block was allocated after the last snapshot and the block was
	 * not allocated by tree relocation, we know the block is not shared.
	 */
	if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
	    buf != root->node && buf != root->commit_root &&
	    (btrfs_header_generation(buf) <=
	     btrfs_root_last_snapshot(&root->root_item) ||
	     btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
		return 1;

	return 0;
}
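/*
 * Illustrative example (not from this file): if a subvolume's last snapshot
 * was taken in generation 100, a node last written in generation 90 may
 * still be referenced by that snapshot and is treated as possibly shared,
 * while a node COWed in generation 120 (and not created by relocation)
 * cannot be shared and can skip the extra backref work done in
 * update_ref_for_cow().
 */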
static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct extent_buffer *buf,
				       struct extent_buffer *cow,
				       int *last_ref)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 refs;
	u64 owner;
	u64 flags;
	u64 new_flags = 0;
	int ret;

	/*
	 * Backrefs update rules:
	 *
	 * Always use full backrefs for extent pointers in tree block
	 * allocated by tree relocation.
	 *
	 * If a shared tree block is no longer referenced by its owner
	 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
	 * use full backrefs for extent pointers in tree block.
	 *
	 * If a tree block is being relocated
	 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
	 * use full backrefs for extent pointers in tree block.
	 * The reason for this is some operations (such as drop tree)
	 * are only allowed for blocks that use full backrefs.
	 */

	if (btrfs_block_can_be_shared(root, buf)) {
		ret = btrfs_lookup_extent_info(trans, fs_info, buf->start,
					       btrfs_header_level(buf), 1,
					       &refs, &flags);
		if (ret)
			return ret;
		if (refs == 0) {
			ret = -EROFS;
			btrfs_handle_fs_error(fs_info, ret, NULL);
			return ret;
		}
	} else {
		refs = 1;
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
		else
			flags = 0;
	}

	owner = btrfs_header_owner(buf);
	BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
	       !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));

	if (refs > 1) {
		if ((owner == root->root_key.objectid ||
		     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
		    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
			ret = btrfs_inc_ref(trans, root, buf, 1);
			if (ret)
				return ret;

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID) {
				ret = btrfs_dec_ref(trans, root, buf, 0);
				if (ret)
					return ret;
				ret = btrfs_inc_ref(trans, root, cow, 1);
				if (ret)
					return ret;
			}
			new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
		} else {
			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			if (ret)
				return ret;
		}
		if (new_flags != 0) {
			int level = btrfs_header_level(buf);

			ret = btrfs_set_disk_extent_flags(trans, buf,
							  new_flags, level, 0);
			if (ret)
				return ret;
		}
	} else {
		if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			if (ret)
				return ret;
			ret = btrfs_dec_ref(trans, root, buf, 1);
			if (ret)
				return ret;
		}
		btrfs_clean_tree_block(buf);
		*last_ref = 1;
	}
	return 0;
}
static struct extent_buffer *alloc_tree_block_no_bg_flush(
					  struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  u64 parent_start,
					  const struct btrfs_disk_key *disk_key,
					  int level,
					  u64 hint,
					  u64 empty_size,
					  enum btrfs_lock_nesting nest)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *ret;

	/*
	 * If we are COWing a node/leaf from the extent, chunk, device or free
	 * space trees, make sure that we do not finish block group creation of
	 * pending block groups. We do this to avoid a deadlock.
	 * COWing can result in allocation of a new chunk, and flushing pending
	 * block groups (btrfs_create_pending_block_groups()) can be triggered
	 * when finishing allocation of a new chunk. Creation of a pending block
	 * group modifies the extent, chunk, device and free space trees,
	 * therefore we could deadlock with ourselves since we are holding a
	 * lock on an extent buffer that btrfs_create_pending_block_groups() may
	 * try to COW later.
	 * For similar reasons, we also need to delay flushing pending block
	 * groups when splitting a leaf or node, from one of those trees, since
	 * we are holding a write lock on it and its parent or when inserting a
	 * new root node for one of those trees.
	 */
	if (root == fs_info->extent_root ||
	    root == fs_info->chunk_root ||
	    root == fs_info->dev_root ||
	    root == fs_info->free_space_root)
		trans->can_flush_pending_bgs = false;

	ret = btrfs_alloc_tree_block(trans, root, parent_start,
				     root->root_key.objectid, disk_key, level,
				     hint, empty_size, nest);
	trans->can_flush_pending_bgs = true;

	return ret;
}
/*
 * does the dirty work in cow of a single block.  The parent block (if
 * supplied) is updated to point to the new cow copy.  The new buffer is marked
 * dirty and returned locked.  If you modify the block it needs to be marked
 * dirty again.
 *
 * search_start -- an allocation hint for the new block
 *
 * empty_size -- a hint that you plan on doing more cow.  This is the size in
 * bytes the allocator should try to find free next to the block it returns.
 * This is just a hint and may be ignored by the allocator.
 */
static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct extent_buffer *buf,
			     struct extent_buffer *parent, int parent_slot,
			     struct extent_buffer **cow_ret,
			     u64 search_start, u64 empty_size,
			     enum btrfs_lock_nesting nest)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_disk_key disk_key;
	struct extent_buffer *cow;
	int level, ret;
	int last_ref = 0;
	int unlock_orig = 0;
	u64 parent_start = 0;

	if (*cow_ret == buf)
		unlock_orig = 1;

	btrfs_assert_tree_locked(buf);

	WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
		trans->transid != fs_info->running_transaction->transid);
	WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
		trans->transid != root->last_trans);

	level = btrfs_header_level(buf);

	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent)
		parent_start = parent->start;

	cow = alloc_tree_block_no_bg_flush(trans, root, parent_start, &disk_key,
					   level, search_start, empty_size, nest);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	/* cow is set to blocking by btrfs_init_new_buffer */

	copy_extent_buffer_full(cow, buf);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, root->root_key.objectid);

	write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);

	ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
	if (ret) {
		btrfs_tree_unlock(cow);
		free_extent_buffer(cow);
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) {
		ret = btrfs_reloc_cow_block(trans, root, buf, cow);
		if (ret) {
			btrfs_tree_unlock(cow);
			free_extent_buffer(cow);
			btrfs_abort_transaction(trans, ret);
			return ret;
		}
	}

	if (buf == root->node) {
		WARN_ON(parent && parent != buf);
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			parent_start = buf->start;

		atomic_inc(&cow->refs);
		ret = tree_mod_log_insert_root(root->node, cow, 1);
		BUG_ON(ret < 0);
		rcu_assign_pointer(root->node, cow);

		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
		free_extent_buffer(buf);
		add_root_to_dirty_list(root);
	} else {
		WARN_ON(trans->transid != btrfs_header_generation(parent));
		tree_mod_log_insert_key(parent, parent_slot,
					MOD_LOG_KEY_REPLACE, GFP_NOFS);
		btrfs_set_node_blockptr(parent, parent_slot,
					cow->start);
		btrfs_set_node_ptr_generation(parent, parent_slot,
					      trans->transid);
		btrfs_mark_buffer_dirty(parent);
		if (last_ref) {
			ret = tree_mod_log_free_eb(buf);
			if (ret) {
				btrfs_tree_unlock(cow);
				free_extent_buffer(cow);
				btrfs_abort_transaction(trans, ret);
				return ret;
			}
		}
		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
	}
	if (unlock_orig)
		btrfs_tree_unlock(buf);
	free_extent_buffer_stale(buf);
	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}
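/*
 * Summary of the contract above (descriptive, no new behaviour): on success
 * *cow_ret is the new copy, write locked and marked dirty; the original
 * buffer has been unlocked, logged in the tree mod log where required, and
 * handed to btrfs_free_tree_block() with the correct last_ref value.
 */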
1128 * returns the logical address of the oldest predecessor of the given root.
1129 * entries older than time_seq are ignored.
1131 static struct tree_mod_elem
*__tree_mod_log_oldest_root(
1132 struct extent_buffer
*eb_root
, u64 time_seq
)
1134 struct tree_mod_elem
*tm
;
1135 struct tree_mod_elem
*found
= NULL
;
1136 u64 root_logical
= eb_root
->start
;
1143 * the very last operation that's logged for a root is the
1144 * replacement operation (if it is replaced at all). this has
1145 * the logical address of the *new* root, making it the very
1146 * first operation that's logged for this root.
1149 tm
= tree_mod_log_search_oldest(eb_root
->fs_info
, root_logical
,
1154 * if there are no tree operation for the oldest root, we simply
1155 * return it. this should only happen if that (old) root is at
1162 * if there's an operation that's not a root replacement, we
1163 * found the oldest version of our root. normally, we'll find a
1164 * MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
1166 if (tm
->op
!= MOD_LOG_ROOT_REPLACE
)
1170 root_logical
= tm
->old_root
.logical
;
1174 /* if there's no old root to return, return what we found instead */
1182 * tm is a pointer to the first operation to rewind within eb. then, all
1183 * previous operations will be rewound (until we reach something older than
1187 __tree_mod_log_rewind(struct btrfs_fs_info
*fs_info
, struct extent_buffer
*eb
,
1188 u64 time_seq
, struct tree_mod_elem
*first_tm
)
1191 struct rb_node
*next
;
1192 struct tree_mod_elem
*tm
= first_tm
;
1193 unsigned long o_dst
;
1194 unsigned long o_src
;
1195 unsigned long p_size
= sizeof(struct btrfs_key_ptr
);
1197 n
= btrfs_header_nritems(eb
);
1198 read_lock(&fs_info
->tree_mod_log_lock
);
1199 while (tm
&& tm
->seq
>= time_seq
) {
1201 * all the operations are recorded with the operator used for
1202 * the modification. as we're going backwards, we do the
1203 * opposite of each operation here.
1206 case MOD_LOG_KEY_REMOVE_WHILE_FREEING
:
1207 BUG_ON(tm
->slot
< n
);
1209 case MOD_LOG_KEY_REMOVE_WHILE_MOVING
:
1210 case MOD_LOG_KEY_REMOVE
:
1211 btrfs_set_node_key(eb
, &tm
->key
, tm
->slot
);
1212 btrfs_set_node_blockptr(eb
, tm
->slot
, tm
->blockptr
);
1213 btrfs_set_node_ptr_generation(eb
, tm
->slot
,
1217 case MOD_LOG_KEY_REPLACE
:
1218 BUG_ON(tm
->slot
>= n
);
1219 btrfs_set_node_key(eb
, &tm
->key
, tm
->slot
);
1220 btrfs_set_node_blockptr(eb
, tm
->slot
, tm
->blockptr
);
1221 btrfs_set_node_ptr_generation(eb
, tm
->slot
,
1224 case MOD_LOG_KEY_ADD
:
1225 /* if a move operation is needed it's in the log */
1228 case MOD_LOG_MOVE_KEYS
:
1229 o_dst
= btrfs_node_key_ptr_offset(tm
->slot
);
1230 o_src
= btrfs_node_key_ptr_offset(tm
->move
.dst_slot
);
1231 memmove_extent_buffer(eb
, o_dst
, o_src
,
1232 tm
->move
.nr_items
* p_size
);
1234 case MOD_LOG_ROOT_REPLACE
:
1236 * this operation is special. for roots, this must be
1237 * handled explicitly before rewinding.
1238 * for non-roots, this operation may exist if the node
1239 * was a root: root A -> child B; then A gets empty and
1240 * B is promoted to the new root. in the mod log, we'll
1241 * have a root-replace operation for B, a tree block
1242 * that is no root. we simply ignore that operation.
1246 next
= rb_next(&tm
->node
);
1249 tm
= rb_entry(next
, struct tree_mod_elem
, node
);
1250 if (tm
->logical
!= first_tm
->logical
)
1253 read_unlock(&fs_info
->tree_mod_log_lock
);
1254 btrfs_set_header_nritems(eb
, n
);
1258 * Called with eb read locked. If the buffer cannot be rewound, the same buffer
1259 * is returned. If rewind operations happen, a fresh buffer is returned. The
1260 * returned buffer is always read-locked. If the returned buffer is not the
1261 * input buffer, the lock on the input buffer is released and the input buffer
1262 * is freed (its refcount is decremented).
1264 static struct extent_buffer
*
1265 tree_mod_log_rewind(struct btrfs_fs_info
*fs_info
, struct btrfs_path
*path
,
1266 struct extent_buffer
*eb
, u64 time_seq
)
1268 struct extent_buffer
*eb_rewin
;
1269 struct tree_mod_elem
*tm
;
1274 if (btrfs_header_level(eb
) == 0)
1277 tm
= tree_mod_log_search(fs_info
, eb
->start
, time_seq
);
1281 if (tm
->op
== MOD_LOG_KEY_REMOVE_WHILE_FREEING
) {
1282 BUG_ON(tm
->slot
!= 0);
1283 eb_rewin
= alloc_dummy_extent_buffer(fs_info
, eb
->start
);
1285 btrfs_tree_read_unlock(eb
);
1286 free_extent_buffer(eb
);
1289 btrfs_set_header_bytenr(eb_rewin
, eb
->start
);
1290 btrfs_set_header_backref_rev(eb_rewin
,
1291 btrfs_header_backref_rev(eb
));
1292 btrfs_set_header_owner(eb_rewin
, btrfs_header_owner(eb
));
1293 btrfs_set_header_level(eb_rewin
, btrfs_header_level(eb
));
1295 eb_rewin
= btrfs_clone_extent_buffer(eb
);
1297 btrfs_tree_read_unlock(eb
);
1298 free_extent_buffer(eb
);
1303 btrfs_tree_read_unlock(eb
);
1304 free_extent_buffer(eb
);
1306 btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb_rewin
),
1307 eb_rewin
, btrfs_header_level(eb_rewin
));
1308 btrfs_tree_read_lock(eb_rewin
);
1309 __tree_mod_log_rewind(fs_info
, eb_rewin
, time_seq
, tm
);
1310 WARN_ON(btrfs_header_nritems(eb_rewin
) >
1311 BTRFS_NODEPTRS_PER_BLOCK(fs_info
));
1317 * get_old_root() rewinds the state of @root's root node to the given @time_seq
1318 * value. If there are no changes, the current root->root_node is returned. If
1319 * anything changed in between, there's a fresh buffer allocated on which the
1320 * rewind operations are done. In any case, the returned buffer is read locked.
1321 * Returns NULL on error (with no locks held).
1323 static inline struct extent_buffer
*
1324 get_old_root(struct btrfs_root
*root
, u64 time_seq
)
1326 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
1327 struct tree_mod_elem
*tm
;
1328 struct extent_buffer
*eb
= NULL
;
1329 struct extent_buffer
*eb_root
;
1330 u64 eb_root_owner
= 0;
1331 struct extent_buffer
*old
;
1332 struct tree_mod_root
*old_root
= NULL
;
1333 u64 old_generation
= 0;
1337 eb_root
= btrfs_read_lock_root_node(root
);
1338 tm
= __tree_mod_log_oldest_root(eb_root
, time_seq
);
1342 if (tm
->op
== MOD_LOG_ROOT_REPLACE
) {
1343 old_root
= &tm
->old_root
;
1344 old_generation
= tm
->generation
;
1345 logical
= old_root
->logical
;
1346 level
= old_root
->level
;
1348 logical
= eb_root
->start
;
1349 level
= btrfs_header_level(eb_root
);
1352 tm
= tree_mod_log_search(fs_info
, logical
, time_seq
);
1353 if (old_root
&& tm
&& tm
->op
!= MOD_LOG_KEY_REMOVE_WHILE_FREEING
) {
1354 btrfs_tree_read_unlock(eb_root
);
1355 free_extent_buffer(eb_root
);
1356 old
= read_tree_block(fs_info
, logical
, root
->root_key
.objectid
,
1358 if (WARN_ON(IS_ERR(old
) || !extent_buffer_uptodate(old
))) {
1360 free_extent_buffer(old
);
1362 "failed to read tree block %llu from get_old_root",
1365 eb
= btrfs_clone_extent_buffer(old
);
1366 free_extent_buffer(old
);
1368 } else if (old_root
) {
1369 eb_root_owner
= btrfs_header_owner(eb_root
);
1370 btrfs_tree_read_unlock(eb_root
);
1371 free_extent_buffer(eb_root
);
1372 eb
= alloc_dummy_extent_buffer(fs_info
, logical
);
1374 eb
= btrfs_clone_extent_buffer(eb_root
);
1375 btrfs_tree_read_unlock(eb_root
);
1376 free_extent_buffer(eb_root
);
1382 btrfs_set_header_bytenr(eb
, eb
->start
);
1383 btrfs_set_header_backref_rev(eb
, BTRFS_MIXED_BACKREF_REV
);
1384 btrfs_set_header_owner(eb
, eb_root_owner
);
1385 btrfs_set_header_level(eb
, old_root
->level
);
1386 btrfs_set_header_generation(eb
, old_generation
);
1388 btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb
), eb
,
1389 btrfs_header_level(eb
));
1390 btrfs_tree_read_lock(eb
);
1392 __tree_mod_log_rewind(fs_info
, eb
, time_seq
, tm
);
1394 WARN_ON(btrfs_header_level(eb
) != 0);
1395 WARN_ON(btrfs_header_nritems(eb
) > BTRFS_NODEPTRS_PER_BLOCK(fs_info
));
int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq)
{
	struct tree_mod_elem *tm;
	int level;
	struct extent_buffer *eb_root = btrfs_root_node(root);

	tm = __tree_mod_log_oldest_root(eb_root, time_seq);
	if (tm && tm->op == MOD_LOG_ROOT_REPLACE)
		level = tm->old_root.level;
	else
		level = btrfs_header_level(eb_root);
	free_extent_buffer(eb_root);

	return level;
}
static inline int should_cow_block(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct extent_buffer *buf)
{
	if (btrfs_is_testing(root->fs_info))
		return 0;

	/* Ensure we can see the FORCE_COW bit */
	smp_mb__before_atomic();

	/*
	 * We do not need to cow a block if
	 * 1) this block is not created or changed in this transaction;
	 * 2) this block does not belong to TREE_RELOC tree;
	 * 3) the root is not forced COW.
	 *
	 * What is forced COW:
	 *    when we create snapshot during committing the transaction,
	 *    after we've finished copying src root, we must COW the shared
	 *    block to ensure the metadata consistency.
	 */
	if (btrfs_header_generation(buf) == trans->transid &&
	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
	    !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
	      btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
	    !test_bit(BTRFS_ROOT_FORCE_COW, &root->state))
		return 0;
	return 1;
}
/*
 * cows a single block, see __btrfs_cow_block for the real work.
 * This version of it has extra checks so that a block isn't COWed more than
 * once per transaction, as long as it hasn't been written yet
 */
noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, struct extent_buffer *buf,
		    struct extent_buffer *parent, int parent_slot,
		    struct extent_buffer **cow_ret,
		    enum btrfs_lock_nesting nest)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 search_start;
	int ret;

	if (test_bit(BTRFS_ROOT_DELETING, &root->state))
		btrfs_err(fs_info,
			"COW'ing blocks on a fs root that's being dropped");

	if (trans->transaction != fs_info->running_transaction)
		WARN(1, KERN_CRIT "trans %llu running %llu\n",
		       trans->transid,
		       fs_info->running_transaction->transid);

	if (trans->transid != fs_info->generation)
		WARN(1, KERN_CRIT "trans %llu running %llu\n",
		       trans->transid, fs_info->generation);

	if (!should_cow_block(trans, root, buf)) {
		trans->dirty = true;
		*cow_ret = buf;
		return 0;
	}

	search_start = buf->start & ~((u64)SZ_1G - 1);

	/*
	 * Before CoWing this block for later modification, check if it's
	 * the subtree root and do the delayed subtree trace if needed.
	 *
	 * Also we don't care about the error, as it's handled internally.
	 */
	btrfs_qgroup_trace_subtree_after_cow(trans, root, buf);
	ret = __btrfs_cow_block(trans, root, buf, parent,
				 parent_slot, cow_ret, search_start, 0, nest);

	trace_btrfs_cow_block(root, buf, *cow_ret);

	return ret;
}
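/*
 * Illustrative caller pattern (a sketch, not code from this file): a caller
 * holding a write lock on a buffer it wants to modify does
 *
 *	ret = btrfs_cow_block(trans, root, b, p->nodes[level + 1],
 *			      p->slots[level + 1], &b, BTRFS_NESTING_COW);
 *
 * and then works on *cow_ret.  If the block was already COWed in this
 * transaction and not yet written, *cow_ret is simply the input buffer.
 */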
/*
 * helper function for defrag to decide if two blocks pointed to by a
 * node are actually close by
 */
static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
{
	if (blocknr < other && other - (blocknr + blocksize) < 32768)
		return 1;
	if (blocknr > other && blocknr - (other + blocksize) < 32768)
		return 1;
	return 0;
}
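/*
 * Worked example (illustrative): with a 16KiB nodesize, blocknr = 1048576
 * and other = 1081344 give other - (blocknr + blocksize) = 16384, which is
 * under the 32768 byte threshold, so the blocks are considered close and
 * defrag leaves them where they are.
 */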
#ifdef __LITTLE_ENDIAN

/*
 * Compare two keys, on little-endian the disk order is same as CPU order and
 * we can avoid the conversion.
 */
static int comp_keys(const struct btrfs_disk_key *disk_key,
		     const struct btrfs_key *k2)
{
	const struct btrfs_key *k1 = (const struct btrfs_key *)disk_key;

	return btrfs_comp_cpu_keys(k1, k2);
}

#else

/*
 * compare two keys in a memcmp fashion
 */
static int comp_keys(const struct btrfs_disk_key *disk,
		     const struct btrfs_key *k2)
{
	struct btrfs_key k1;

	btrfs_disk_key_to_cpu(&k1, disk);

	return btrfs_comp_cpu_keys(&k1, k2);
}
#endif

/*
 * same as comp_keys only with two btrfs_key's
 */
int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2)
{
	if (k1->objectid > k2->objectid)
		return 1;
	if (k1->objectid < k2->objectid)
		return -1;
	if (k1->type > k2->type)
		return 1;
	if (k1->type < k2->type)
		return -1;
	if (k1->offset > k2->offset)
		return 1;
	if (k1->offset < k2->offset)
		return -1;
	return 0;
}
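/*
 * Worked example (illustrative): comparing k1 = (257, BTRFS_EXTENT_DATA_KEY, 0)
 * with k2 = (257, BTRFS_INODE_ITEM_KEY, 0), the objectids tie and the types
 * decide: BTRFS_EXTENT_DATA_KEY (108) > BTRFS_INODE_ITEM_KEY (1), so the
 * result is 1 and the inode item sorts before the file extent items.
 */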
/*
 * this is used by the defrag code to go through all the
 * leaves pointed to by a node and reallocate them so that
 * disk order is close to key order
 */
int btrfs_realloc_node(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct extent_buffer *parent,
		       int start_slot, u64 *last_ret,
		       struct btrfs_key *progress)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *cur;
	u64 blocknr;
	u64 search_start = *last_ret;
	u64 last_block = 0;
	u64 other;
	u32 parent_nritems;
	int end_slot;
	int i;
	int err = 0;
	u32 blocksize;
	int progress_passed = 0;
	struct btrfs_disk_key disk_key;

	WARN_ON(trans->transaction != fs_info->running_transaction);
	WARN_ON(trans->transid != fs_info->generation);

	parent_nritems = btrfs_header_nritems(parent);
	blocksize = fs_info->nodesize;
	end_slot = parent_nritems - 1;

	if (parent_nritems <= 1)
		return 0;

	for (i = start_slot; i <= end_slot; i++) {
		int close = 1;

		btrfs_node_key(parent, &disk_key, i);
		if (!progress_passed && comp_keys(&disk_key, progress) < 0)
			continue;

		progress_passed = 1;
		blocknr = btrfs_node_blockptr(parent, i);
		if (last_block == 0)
			last_block = blocknr;

		if (i > 0) {
			other = btrfs_node_blockptr(parent, i - 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (!close && i < end_slot) {
			other = btrfs_node_blockptr(parent, i + 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (close) {
			last_block = blocknr;
			continue;
		}

		cur = btrfs_read_node_slot(parent, i);
		if (IS_ERR(cur))
			return PTR_ERR(cur);
		if (search_start == 0)
			search_start = last_block;

		btrfs_tree_lock(cur);
		err = __btrfs_cow_block(trans, root, cur, parent, i,
					&cur, search_start,
					min(16 * blocksize,
					    (end_slot - i) * blocksize),
					BTRFS_NESTING_COW);
		if (err) {
			btrfs_tree_unlock(cur);
			free_extent_buffer(cur);
			break;
		}
		search_start = cur->start;
		last_block = cur->start;
		*last_ret = search_start;
		btrfs_tree_unlock(cur);
		free_extent_buffer(cur);
	}
	return err;
}
/*
 * search for key in the extent_buffer.  The items start at offset p,
 * and they are item_size apart.  There are 'max' items in p.
 *
 * the slot in the array is returned via slot, and it points to
 * the place where you would insert key if it is not found in
 * the array.
 *
 * slot may point to max if the key is bigger than all of the keys
 */
static noinline int generic_bin_search(struct extent_buffer *eb,
				       unsigned long p, int item_size,
				       const struct btrfs_key *key,
				       int max, int *slot)
{
	int low = 0;
	int high = max;
	int ret;
	const int key_size = sizeof(struct btrfs_disk_key);

	if (low > high) {
		btrfs_err(eb->fs_info,
		 "%s: low (%d) > high (%d) eb %llu owner %llu level %d",
			  __func__, low, high, eb->start,
			  btrfs_header_owner(eb), btrfs_header_level(eb));
		return -EINVAL;
	}

	while (low < high) {
		unsigned long oip;
		unsigned long offset;
		struct btrfs_disk_key *tmp;
		struct btrfs_disk_key unaligned;
		int mid;

		mid = (low + high) / 2;
		offset = p + mid * item_size;
		oip = offset_in_page(offset);

		if (oip + key_size <= PAGE_SIZE) {
			const unsigned long idx = get_eb_page_index(offset);
			char *kaddr = page_address(eb->pages[idx]);

			oip = get_eb_offset_in_page(eb, offset);
			tmp = (struct btrfs_disk_key *)(kaddr + oip);
		} else {
			read_extent_buffer(eb, &unaligned, offset, key_size);
			tmp = &unaligned;
		}

		ret = comp_keys(tmp, key);

		if (ret < 0)
			low = mid + 1;
		else if (ret > 0)
			high = mid;
		else {
			*slot = mid;
			return 0;
		}
	}
	*slot = low;
	return 1;
}

/*
 * simple bin_search frontend that does the right thing for
 * leaves vs nodes
 */
int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key,
		     int *slot)
{
	if (btrfs_header_level(eb) == 0)
		return generic_bin_search(eb,
					  offsetof(struct btrfs_leaf, items),
					  sizeof(struct btrfs_item),
					  key, btrfs_header_nritems(eb),
					  slot);
	else
		return generic_bin_search(eb,
					  offsetof(struct btrfs_node, ptrs),
					  sizeof(struct btrfs_key_ptr),
					  key, btrfs_header_nritems(eb),
					  slot);
}
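/*
 * Return convention (restating the helpers above): 0 means the key was found
 * and *slot is its index; 1 means it was not found and *slot is where the
 * key would be inserted, which may equal the item count when the key is
 * larger than everything currently in the block.
 */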
static void root_add_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) + size);
	spin_unlock(&root->accounting_lock);
}

static void root_sub_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) - size);
	spin_unlock(&root->accounting_lock);
}
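/*
 * Note (descriptive): these helpers keep root_item.bytes_used in step with
 * tree block allocation and freeing for this root; for example, promoting a
 * child to root during balancing calls root_sub_used(root, mid->len) right
 * before freeing the old root block.
 */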
/* given a node and slot number, this reads the blocks it points to.  The
 * extent buffer is returned with a reference taken (but unlocked).
 */
struct extent_buffer *btrfs_read_node_slot(struct extent_buffer *parent,
					   int slot)
{
	int level = btrfs_header_level(parent);
	struct extent_buffer *eb;
	struct btrfs_key first_key;

	if (slot < 0 || slot >= btrfs_header_nritems(parent))
		return ERR_PTR(-ENOENT);

	BUG_ON(level == 0);

	btrfs_node_key_to_cpu(parent, &first_key, slot);
	eb = read_tree_block(parent->fs_info, btrfs_node_blockptr(parent, slot),
			     btrfs_header_owner(parent),
			     btrfs_node_ptr_generation(parent, slot),
			     level - 1, &first_key);
	if (!IS_ERR(eb) && !extent_buffer_uptodate(eb)) {
		free_extent_buffer(eb);
		eb = ERR_PTR(-EIO);
	}

	return eb;
}
1777 * node level balancing, used to make sure nodes are in proper order for
1778 * item deletion. We balance from the top down, so we have to make sure
1779 * that a deletion won't leave an node completely empty later on.
1781 static noinline
int balance_level(struct btrfs_trans_handle
*trans
,
1782 struct btrfs_root
*root
,
1783 struct btrfs_path
*path
, int level
)
1785 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
1786 struct extent_buffer
*right
= NULL
;
1787 struct extent_buffer
*mid
;
1788 struct extent_buffer
*left
= NULL
;
1789 struct extent_buffer
*parent
= NULL
;
1793 int orig_slot
= path
->slots
[level
];
1798 mid
= path
->nodes
[level
];
1800 WARN_ON(path
->locks
[level
] != BTRFS_WRITE_LOCK
);
1801 WARN_ON(btrfs_header_generation(mid
) != trans
->transid
);
1803 orig_ptr
= btrfs_node_blockptr(mid
, orig_slot
);
1805 if (level
< BTRFS_MAX_LEVEL
- 1) {
1806 parent
= path
->nodes
[level
+ 1];
1807 pslot
= path
->slots
[level
+ 1];
1811 * deal with the case where there is only one pointer in the root
1812 * by promoting the node below to a root
1815 struct extent_buffer
*child
;
1817 if (btrfs_header_nritems(mid
) != 1)
1820 /* promote the child to a root */
1821 child
= btrfs_read_node_slot(mid
, 0);
1822 if (IS_ERR(child
)) {
1823 ret
= PTR_ERR(child
);
1824 btrfs_handle_fs_error(fs_info
, ret
, NULL
);
1828 btrfs_tree_lock(child
);
1829 ret
= btrfs_cow_block(trans
, root
, child
, mid
, 0, &child
,
1832 btrfs_tree_unlock(child
);
1833 free_extent_buffer(child
);
1837 ret
= tree_mod_log_insert_root(root
->node
, child
, 1);
1839 rcu_assign_pointer(root
->node
, child
);
1841 add_root_to_dirty_list(root
);
1842 btrfs_tree_unlock(child
);
1844 path
->locks
[level
] = 0;
1845 path
->nodes
[level
] = NULL
;
1846 btrfs_clean_tree_block(mid
);
1847 btrfs_tree_unlock(mid
);
1848 /* once for the path */
1849 free_extent_buffer(mid
);
1851 root_sub_used(root
, mid
->len
);
1852 btrfs_free_tree_block(trans
, root
, mid
, 0, 1);
1853 /* once for the root ptr */
1854 free_extent_buffer_stale(mid
);
1857 if (btrfs_header_nritems(mid
) >
1858 BTRFS_NODEPTRS_PER_BLOCK(fs_info
) / 4)
1861 left
= btrfs_read_node_slot(parent
, pslot
- 1);
1866 __btrfs_tree_lock(left
, BTRFS_NESTING_LEFT
);
1867 wret
= btrfs_cow_block(trans
, root
, left
,
1868 parent
, pslot
- 1, &left
,
1869 BTRFS_NESTING_LEFT_COW
);
1876 right
= btrfs_read_node_slot(parent
, pslot
+ 1);
1881 __btrfs_tree_lock(right
, BTRFS_NESTING_RIGHT
);
1882 wret
= btrfs_cow_block(trans
, root
, right
,
1883 parent
, pslot
+ 1, &right
,
1884 BTRFS_NESTING_RIGHT_COW
);
1891 /* first, try to make some room in the middle buffer */
1893 orig_slot
+= btrfs_header_nritems(left
);
1894 wret
= push_node_left(trans
, left
, mid
, 1);
1900 * then try to empty the right most buffer into the middle
1903 wret
= push_node_left(trans
, mid
, right
, 1);
1904 if (wret
< 0 && wret
!= -ENOSPC
)
1906 if (btrfs_header_nritems(right
) == 0) {
1907 btrfs_clean_tree_block(right
);
1908 btrfs_tree_unlock(right
);
1909 del_ptr(root
, path
, level
+ 1, pslot
+ 1);
1910 root_sub_used(root
, right
->len
);
1911 btrfs_free_tree_block(trans
, root
, right
, 0, 1);
1912 free_extent_buffer_stale(right
);
1915 struct btrfs_disk_key right_key
;
1916 btrfs_node_key(right
, &right_key
, 0);
1917 ret
= tree_mod_log_insert_key(parent
, pslot
+ 1,
1918 MOD_LOG_KEY_REPLACE
, GFP_NOFS
);
1920 btrfs_set_node_key(parent
, &right_key
, pslot
+ 1);
1921 btrfs_mark_buffer_dirty(parent
);
1924 if (btrfs_header_nritems(mid
) == 1) {
1926 * we're not allowed to leave a node with one item in the
1927 * tree during a delete. A deletion from lower in the tree
1928 * could try to delete the only pointer in this node.
1929 * So, pull some keys from the left.
1930 * There has to be a left pointer at this point because
1931 * otherwise we would have pulled some pointers from the
1936 btrfs_handle_fs_error(fs_info
, ret
, NULL
);
1939 wret
= balance_node_right(trans
, mid
, left
);
1945 wret
= push_node_left(trans
, left
, mid
, 1);
1951 if (btrfs_header_nritems(mid
) == 0) {
1952 btrfs_clean_tree_block(mid
);
1953 btrfs_tree_unlock(mid
);
1954 del_ptr(root
, path
, level
+ 1, pslot
);
1955 root_sub_used(root
, mid
->len
);
1956 btrfs_free_tree_block(trans
, root
, mid
, 0, 1);
1957 free_extent_buffer_stale(mid
);
1960 /* update the parent key to reflect our changes */
1961 struct btrfs_disk_key mid_key
;
1962 btrfs_node_key(mid
, &mid_key
, 0);
1963 ret
= tree_mod_log_insert_key(parent
, pslot
,
1964 MOD_LOG_KEY_REPLACE
, GFP_NOFS
);
1966 btrfs_set_node_key(parent
, &mid_key
, pslot
);
1967 btrfs_mark_buffer_dirty(parent
);
1970 /* update the path */
1972 if (btrfs_header_nritems(left
) > orig_slot
) {
1973 atomic_inc(&left
->refs
);
1974 /* left was locked after cow */
1975 path
->nodes
[level
] = left
;
1976 path
->slots
[level
+ 1] -= 1;
1977 path
->slots
[level
] = orig_slot
;
1979 btrfs_tree_unlock(mid
);
1980 free_extent_buffer(mid
);
1983 orig_slot
-= btrfs_header_nritems(left
);
1984 path
->slots
[level
] = orig_slot
;
1987 /* double check we haven't messed things up */
1989 btrfs_node_blockptr(path
->nodes
[level
], path
->slots
[level
]))
1993 btrfs_tree_unlock(right
);
1994 free_extent_buffer(right
);
1997 if (path
->nodes
[level
] != left
)
1998 btrfs_tree_unlock(left
);
1999 free_extent_buffer(left
);
2004 /* Node balancing for insertion. Here we only split or push nodes around
2005 * when they are completely full. This is also done top down, so we
2006 * have to be pessimistic.
2008 static noinline
int push_nodes_for_insert(struct btrfs_trans_handle
*trans
,
2009 struct btrfs_root
*root
,
2010 struct btrfs_path
*path
, int level
)
2012 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
2013 struct extent_buffer
*right
= NULL
;
2014 struct extent_buffer
*mid
;
2015 struct extent_buffer
*left
= NULL
;
2016 struct extent_buffer
*parent
= NULL
;
2020 int orig_slot
= path
->slots
[level
];
2025 mid
= path
->nodes
[level
];
2026 WARN_ON(btrfs_header_generation(mid
) != trans
->transid
);
2028 if (level
< BTRFS_MAX_LEVEL
- 1) {
2029 parent
= path
->nodes
[level
+ 1];
2030 pslot
= path
->slots
[level
+ 1];
2036 left
= btrfs_read_node_slot(parent
, pslot
- 1);
2040 /* first, try to make some room in the middle buffer */
2044 __btrfs_tree_lock(left
, BTRFS_NESTING_LEFT
);
2046 left_nr
= btrfs_header_nritems(left
);
2047 if (left_nr
>= BTRFS_NODEPTRS_PER_BLOCK(fs_info
) - 1) {
2050 ret
= btrfs_cow_block(trans
, root
, left
, parent
,
2052 BTRFS_NESTING_LEFT_COW
);
2056 wret
= push_node_left(trans
, left
, mid
, 0);
2062 struct btrfs_disk_key disk_key
;
2063 orig_slot
+= left_nr
;
2064 btrfs_node_key(mid
, &disk_key
, 0);
2065 ret
= tree_mod_log_insert_key(parent
, pslot
,
2066 MOD_LOG_KEY_REPLACE
, GFP_NOFS
);
2068 btrfs_set_node_key(parent
, &disk_key
, pslot
);
2069 btrfs_mark_buffer_dirty(parent
);
2070 if (btrfs_header_nritems(left
) > orig_slot
) {
2071 path
->nodes
[level
] = left
;
2072 path
->slots
[level
+ 1] -= 1;
2073 path
->slots
[level
] = orig_slot
;
2074 btrfs_tree_unlock(mid
);
2075 free_extent_buffer(mid
);
2078 btrfs_header_nritems(left
);
2079 path
->slots
[level
] = orig_slot
;
2080 btrfs_tree_unlock(left
);
2081 free_extent_buffer(left
);
2085 btrfs_tree_unlock(left
);
2086 free_extent_buffer(left
);
2088 right
= btrfs_read_node_slot(parent
, pslot
+ 1);
2093 * then try to empty the right most buffer into the middle
2098 __btrfs_tree_lock(right
, BTRFS_NESTING_RIGHT
);
2100 right_nr
= btrfs_header_nritems(right
);
2101 if (right_nr
>= BTRFS_NODEPTRS_PER_BLOCK(fs_info
) - 1) {
2104 ret
= btrfs_cow_block(trans
, root
, right
,
2106 &right
, BTRFS_NESTING_RIGHT_COW
);
2110 wret
= balance_node_right(trans
, right
, mid
);
2116 struct btrfs_disk_key disk_key
;
2118 btrfs_node_key(right
, &disk_key
, 0);
2119 ret
= tree_mod_log_insert_key(parent
, pslot
+ 1,
2120 MOD_LOG_KEY_REPLACE
, GFP_NOFS
);
2122 btrfs_set_node_key(parent
, &disk_key
, pslot
+ 1);
2123 btrfs_mark_buffer_dirty(parent
);
2125 if (btrfs_header_nritems(mid
) <= orig_slot
) {
2126 path
->nodes
[level
] = right
;
2127 path
->slots
[level
+ 1] += 1;
2128 path
->slots
[level
] = orig_slot
-
2129 btrfs_header_nritems(mid
);
2130 btrfs_tree_unlock(mid
);
2131 free_extent_buffer(mid
);
2133 btrfs_tree_unlock(right
);
2134 free_extent_buffer(right
);
2138 btrfs_tree_unlock(right
);
2139 free_extent_buffer(right
);
2145 * readahead one full node of leaves, finding things that are close
2146 * to the block in 'slot', and triggering ra on them.
2148 static void reada_for_search(struct btrfs_fs_info
*fs_info
,
2149 struct btrfs_path
*path
,
2150 int level
, int slot
, u64 objectid
)
2152 struct extent_buffer
*node
;
2153 struct btrfs_disk_key disk_key
;
2158 struct extent_buffer
*eb
;
2166 if (!path
->nodes
[level
])
2169 node
= path
->nodes
[level
];
2171 search
= btrfs_node_blockptr(node
, slot
);
2172 blocksize
= fs_info
->nodesize
;
2173 eb
= find_extent_buffer(fs_info
, search
);
2175 free_extent_buffer(eb
);
2181 nritems
= btrfs_header_nritems(node
);
2185 if (path
->reada
== READA_BACK
) {
2189 } else if (path
->reada
== READA_FORWARD
) {
2194 if (path
->reada
== READA_BACK
&& objectid
) {
2195 btrfs_node_key(node
, &disk_key
, nr
);
2196 if (btrfs_disk_key_objectid(&disk_key
) != objectid
)
2199 search
= btrfs_node_blockptr(node
, nr
);
2200 if ((search
<= target
&& target
- search
<= 65536) ||
2201 (search
> target
&& search
- target
<= 65536)) {
2202 btrfs_readahead_node_child(node
, nr
);
2206 if ((nread
> 65536 || nscan
> 32))
2211 static noinline
void reada_for_balance(struct btrfs_path
*path
, int level
)
2213 struct extent_buffer
*parent
;
2217 parent
= path
->nodes
[level
+ 1];
2221 nritems
= btrfs_header_nritems(parent
);
2222 slot
= path
->slots
[level
+ 1];
2225 btrfs_readahead_node_child(parent
, slot
- 1);
2226 if (slot
+ 1 < nritems
)
2227 btrfs_readahead_node_child(parent
, slot
+ 1);
2232 * when we walk down the tree, it is usually safe to unlock the higher layers
2233 * in the tree. The exceptions are when our path goes through slot 0, because
2234 * operations on the tree might require changing key pointers higher up in the
2237 * callers might also have set path->keep_locks, which tells this code to keep
2238 * the lock if the path points to the last slot in the block. This is part of
2239 * walking through the tree, and selecting the next slot in the higher block.
2241 * lowest_unlock sets the lowest level in the tree we're allowed to unlock. so
2242 * if lowest_unlock is 1, level 0 won't be unlocked
2244 static noinline
void unlock_up(struct btrfs_path
*path
, int level
,
2245 int lowest_unlock
, int min_write_lock_level
,
2246 int *write_lock_level
)
2249 int skip_level
= level
;
2251 struct extent_buffer
*t
;
2253 for (i
= level
; i
< BTRFS_MAX_LEVEL
; i
++) {
2254 if (!path
->nodes
[i
])
2256 if (!path
->locks
[i
])
2258 if (!no_skips
&& path
->slots
[i
] == 0) {
2262 if (!no_skips
&& path
->keep_locks
) {
2265 nritems
= btrfs_header_nritems(t
);
2266 if (nritems
< 1 || path
->slots
[i
] >= nritems
- 1) {
2271 if (skip_level
< i
&& i
>= lowest_unlock
)
2275 if (i
>= lowest_unlock
&& i
> skip_level
) {
2276 btrfs_tree_unlock_rw(t
, path
->locks
[i
]);
2278 if (write_lock_level
&&
2279 i
> min_write_lock_level
&&
2280 i
<= *write_lock_level
) {
2281 *write_lock_level
= i
- 1;
2288 * helper function for btrfs_search_slot. The goal is to find a block
2289 * in cache without setting the path to blocking. If we find the block
2290 * we return zero and the path is unchanged.
2292 * If we can't find the block, we set the path blocking and do some
2293 * reada. -EAGAIN is returned and the search must be repeated.
2296 read_block_for_search(struct btrfs_root
*root
, struct btrfs_path
*p
,
2297 struct extent_buffer
**eb_ret
, int level
, int slot
,
2298 const struct btrfs_key
*key
)
2300 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
2303 struct extent_buffer
*tmp
;
2304 struct btrfs_key first_key
;
2308 blocknr
= btrfs_node_blockptr(*eb_ret
, slot
);
2309 gen
= btrfs_node_ptr_generation(*eb_ret
, slot
);
2310 parent_level
= btrfs_header_level(*eb_ret
);
2311 btrfs_node_key_to_cpu(*eb_ret
, &first_key
, slot
);
2313 tmp
= find_extent_buffer(fs_info
, blocknr
);
2315 /* first we do an atomic uptodate check */
2316 if (btrfs_buffer_uptodate(tmp
, gen
, 1) > 0) {
2318 * Do extra check for first_key, eb can be stale due to
2319 * being cached, read from scrub, or have multiple
2320 * parents (shared tree blocks).
2322 if (btrfs_verify_level_key(tmp
,
2323 parent_level
- 1, &first_key
, gen
)) {
2324 free_extent_buffer(tmp
);
2331 /* now we're allowed to do a blocking uptodate check */
2332 ret
= btrfs_read_buffer(tmp
, gen
, parent_level
- 1, &first_key
);
2337 free_extent_buffer(tmp
);
2338 btrfs_release_path(p
);
2343 * reduce lock contention at high levels
2344 * of the btree by dropping locks before
2345 * we read. Don't release the lock on the current
2346 * level because we need to walk this node to figure
2347 * out which blocks to read.
2349 btrfs_unlock_up_safe(p
, level
+ 1);
2351 if (p
->reada
!= READA_NONE
)
2352 reada_for_search(fs_info
, p
, level
, slot
, key
->objectid
);
2355 tmp
= read_tree_block(fs_info
, blocknr
, root
->root_key
.objectid
,
2356 gen
, parent_level
- 1, &first_key
);
2359 * If the read above didn't mark this buffer up to date,
2360 * it will never end up being up to date. Set ret to EIO now
2361 * and give up so that our caller doesn't loop forever
2364 if (!extent_buffer_uptodate(tmp
))
2366 free_extent_buffer(tmp
);
2371 btrfs_release_path(p
);
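/*
 * Illustrative note (not part of the original source): callers treat a
 * -EAGAIN return from read_block_for_search() as "the path was dropped,
 * restart the search from the root", which is exactly what the main loop
 * in btrfs_search_slot() does.
 */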
/*
 * helper function for btrfs_search_slot. This does all of the checks
 * for node-level blocks and does any balancing required based on
 * the ins_len.
 *
 * If no extra work was required, zero is returned. If we had to
 * drop the path, -EAGAIN is returned and btrfs_search_slot must
 * loop again.
 */
static int
setup_nodes_for_search(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_path *p,
		       struct extent_buffer *b, int level, int ins_len,
		       int *write_lock_level)
{
2390 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
2393 if ((p
->search_for_split
|| ins_len
> 0) && btrfs_header_nritems(b
) >=
2394 BTRFS_NODEPTRS_PER_BLOCK(fs_info
) - 3) {
2396 if (*write_lock_level
< level
+ 1) {
2397 *write_lock_level
= level
+ 1;
2398 btrfs_release_path(p
);
2402 reada_for_balance(p
, level
);
2403 ret
= split_node(trans
, root
, p
, level
);
2405 b
= p
->nodes
[level
];
2406 } else if (ins_len
< 0 && btrfs_header_nritems(b
) <
2407 BTRFS_NODEPTRS_PER_BLOCK(fs_info
) / 2) {
2409 if (*write_lock_level
< level
+ 1) {
2410 *write_lock_level
= level
+ 1;
2411 btrfs_release_path(p
);
2415 reada_for_balance(p
, level
);
2416 ret
= balance_level(trans
, root
, p
, level
);
2420 b
= p
->nodes
[level
];
2422 btrfs_release_path(p
);
2425 BUG_ON(btrfs_header_nritems(b
) == 1);
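/*
 * Illustrative note (not part of the original source): the two thresholds
 * above mirror each other. A node is split once it comes within 3 pointers of
 * BTRFS_NODEPTRS_PER_BLOCK(fs_info), and balanced/merged once it drops below
 * half of that capacity, so inserts and deletes keep interior nodes between
 * roughly 50% and 100% full.
 */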
int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
		    u64 iobjectid, u64 ioff, u8 key_type,
		    struct btrfs_key *found_key)
{
	int ret;
	struct btrfs_key key;
	struct extent_buffer *eb;

	key.type = key_type;
	key.objectid = iobjectid;
	key.offset = ioff;

	ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	eb = path->nodes[0];
	if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
		ret = btrfs_next_leaf(fs_root, path);
		if (ret)
			return ret;
		eb = path->nodes[0];
	}

	btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
	if (found_key->type != key.type ||
	    found_key->objectid != key.objectid)
		return 1;

	return 0;
}
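/*
 * Example usage sketch (not part of the original source): the helper name
 * lookup_example() and the key values are hypothetical, but the calls it
 * makes (btrfs_alloc_path, btrfs_find_item, btrfs_free_path) are the ones
 * defined in this file.
 */
static inline int lookup_example(struct btrfs_root *fs_root, u64 objectid)
{
	struct btrfs_path *path;
	struct btrfs_key found_key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	/* offset 0; the key type is the caller's choice, inode item here */
	ret = btrfs_find_item(fs_root, path, objectid, 0,
			      BTRFS_INODE_ITEM_KEY, &found_key);
	/* ret == 0: found, ret == 1: no matching item, ret < 0: error */
	btrfs_free_path(path);
	return ret;
}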
static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root,
							struct btrfs_path *p,
							int write_lock_level)
{
2469 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
2470 struct extent_buffer
*b
;
2474 /* We try very hard to do read locks on the root */
2475 root_lock
= BTRFS_READ_LOCK
;
2477 if (p
->search_commit_root
) {
2479 * The commit roots are read only so we always do read locks,
2480 * and we always must hold the commit_root_sem when doing
2481 * searches on them, the only exception is send where we don't
2482 * want to block transaction commits for a long time, so
2483 * we need to clone the commit root in order to avoid races
2484 * with transaction commits that create a snapshot of one of
2485 * the roots used by a send operation.
2487 if (p
->need_commit_sem
) {
2488 down_read(&fs_info
->commit_root_sem
);
2489 b
= btrfs_clone_extent_buffer(root
->commit_root
);
2490 up_read(&fs_info
->commit_root_sem
);
2492 return ERR_PTR(-ENOMEM
);
2495 b
= root
->commit_root
;
2496 atomic_inc(&b
->refs
);
2498 level
= btrfs_header_level(b
);
2500 * Ensure that all callers have set skip_locking when
2501 * p->search_commit_root = 1.
2503 ASSERT(p
->skip_locking
== 1);
2508 if (p
->skip_locking
) {
2509 b
= btrfs_root_node(root
);
2510 level
= btrfs_header_level(b
);
2515 * If the level is set to maximum, we can skip trying to get the read
2518 if (write_lock_level
< BTRFS_MAX_LEVEL
) {
2520 * We don't know the level of the root node until we actually
2521 * have it read locked
2523 b
= btrfs_read_lock_root_node(root
);
2524 level
= btrfs_header_level(b
);
2525 if (level
> write_lock_level
)
2528 /* Whoops, must trade for write lock */
2529 btrfs_tree_read_unlock(b
);
2530 free_extent_buffer(b
);
2533 b
= btrfs_lock_root_node(root
);
2534 root_lock
= BTRFS_WRITE_LOCK
;
2536 /* The level might have changed, check again */
2537 level
= btrfs_header_level(b
);
2540 p
->nodes
[level
] = b
;
2541 if (!p
->skip_locking
)
2542 p
->locks
[level
] = root_lock
;
2544 * Callers are responsible for dropping b's references.
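/*
 * Illustrative note (not part of the original source): the function above
 * prefers a read lock on the root and only retries with btrfs_lock_root_node()
 * when the root turns out to sit at or below write_lock_level, so most
 * read-only searches never take the root's write lock.
 */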
/*
 * btrfs_search_slot - look for a key in a tree and perform necessary
 * modifications to preserve tree invariants.
 *
 * @trans:	Handle of transaction, used when modifying the tree
 * @p:		Holds all btree nodes along the search path
 * @root:	The root node of the tree
 * @key:	The key we are looking for
 * @ins_len:	Indicates purpose of search:
 *              >0  for inserts it's size of item inserted (*)
 *              <0  for deletions
 *               0  for plain searches, not modifying the tree
 *
 *              (*) If size of item inserted doesn't include
 *              sizeof(struct btrfs_item), then p->search_for_extension must
 *              be set.
 * @cow:	boolean should CoW operations be performed. Must always be 1
 *		when modifying the tree.
 *
 * If @ins_len > 0, nodes and leaves will be split as we walk down the tree.
 * If @ins_len < 0, nodes will be merged as we walk down the tree (if possible)
 *
 * If @key is found, 0 is returned and you can find the item in the leaf level
 * of the path (level 0)
 *
 * If @key isn't found, 1 is returned and the leaf level of the path (level 0)
 * points to the slot where it should be inserted
 *
 * If an error is encountered while searching the tree a negative error number
 * is returned.
 */
int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      const struct btrfs_key *key, struct btrfs_path *p,
		      int ins_len, int cow)
{
2585 struct extent_buffer
*b
;
2590 int lowest_unlock
= 1;
2591 /* everything at write_lock_level or lower must be write locked */
2592 int write_lock_level
= 0;
2593 u8 lowest_level
= 0;
2594 int min_write_lock_level
;
2597 lowest_level
= p
->lowest_level
;
2598 WARN_ON(lowest_level
&& ins_len
> 0);
2599 WARN_ON(p
->nodes
[0] != NULL
);
2600 BUG_ON(!cow
&& ins_len
);
2605 /* when we are removing items, we might have to go up to level
2606 * two as we update tree pointers Make sure we keep write
2607 * for those levels as well
2609 write_lock_level
= 2;
2610 } else if (ins_len
> 0) {
2612 * for inserting items, make sure we have a write lock on
2613 * level 1 so we can update keys
2615 write_lock_level
= 1;
2619 write_lock_level
= -1;
2621 if (cow
&& (p
->keep_locks
|| p
->lowest_level
))
2622 write_lock_level
= BTRFS_MAX_LEVEL
;
2624 min_write_lock_level
= write_lock_level
;
2628 b
= btrfs_search_slot_get_root(root
, p
, write_lock_level
);
2637 level
= btrfs_header_level(b
);
2640 bool last_level
= (level
== (BTRFS_MAX_LEVEL
- 1));
2643 * if we don't really need to cow this block
2644 * then we don't want to set the path blocking,
2645 * so we test it here
2647 if (!should_cow_block(trans
, root
, b
)) {
2648 trans
->dirty
= true;
2653 * must have write locks on this node and the
2656 if (level
> write_lock_level
||
2657 (level
+ 1 > write_lock_level
&&
2658 level
+ 1 < BTRFS_MAX_LEVEL
&&
2659 p
->nodes
[level
+ 1])) {
2660 write_lock_level
= level
+ 1;
2661 btrfs_release_path(p
);
2666 err
= btrfs_cow_block(trans
, root
, b
, NULL
, 0,
2670 err
= btrfs_cow_block(trans
, root
, b
,
2671 p
->nodes
[level
+ 1],
2672 p
->slots
[level
+ 1], &b
,
2680 p
->nodes
[level
] = b
;
2682 * Leave path with blocking locks to avoid massive
2683 * lock context switch, this is made on purpose.
2687 * we have a lock on b and as long as we aren't changing
2688 * the tree, there is no way to for the items in b to change.
2689 * It is safe to drop the lock on our parent before we
2690 * go through the expensive btree search on b.
2692 * If we're inserting or deleting (ins_len != 0), then we might
2693 * be changing slot zero, which may require changing the parent.
2694 * So, we can't drop the lock until after we know which slot
2695 * we're operating on.
2697 if (!ins_len
&& !p
->keep_locks
) {
2700 if (u
< BTRFS_MAX_LEVEL
&& p
->locks
[u
]) {
2701 btrfs_tree_unlock_rw(p
->nodes
[u
], p
->locks
[u
]);
2707 * If btrfs_bin_search returns an exact match (prev_cmp == 0)
2708 * we can safely assume the target key will always be in slot 0
2709 * on lower levels due to the invariants BTRFS' btree provides,
2710 * namely that a btrfs_key_ptr entry always points to the
2711 * lowest key in the child node, thus we can skip searching
2714 if (prev_cmp
== 0) {
2718 ret
= btrfs_bin_search(b
, key
, &slot
);
2725 p
->slots
[level
] = slot
;
2727 * Item key already exists. In this case, if we are
2728 * allowed to insert the item (for example, in dir_item
2729 * case, item key collision is allowed), it will be
2730 * merged with the original item. Only the item size
2731 * grows, no new btrfs item will be added. If
2732 * search_for_extension is not set, ins_len already
2733 * accounts the size btrfs_item, deduct it here so leaf
2734 * space check will be correct.
2736 if (ret
== 0 && ins_len
> 0 && !p
->search_for_extension
) {
2737 ASSERT(ins_len
>= sizeof(struct btrfs_item
));
2738 ins_len
-= sizeof(struct btrfs_item
);
2741 btrfs_leaf_free_space(b
) < ins_len
) {
2742 if (write_lock_level
< 1) {
2743 write_lock_level
= 1;
2744 btrfs_release_path(p
);
2748 err
= split_leaf(trans
, root
, key
,
2749 p
, ins_len
, ret
== 0);
2757 if (!p
->search_for_split
)
2758 unlock_up(p
, level
, lowest_unlock
,
2759 min_write_lock_level
, NULL
);
2762 if (ret
&& slot
> 0) {
2766 p
->slots
[level
] = slot
;
2767 err
= setup_nodes_for_search(trans
, root
, p
, b
, level
, ins_len
,
2775 b
= p
->nodes
[level
];
2776 slot
= p
->slots
[level
];
2779 * Slot 0 is special, if we change the key we have to update
2780 * the parent pointer which means we must have a write lock on
2783 if (slot
== 0 && ins_len
&& write_lock_level
< level
+ 1) {
2784 write_lock_level
= level
+ 1;
2785 btrfs_release_path(p
);
2789 unlock_up(p
, level
, lowest_unlock
, min_write_lock_level
,
2792 if (level
== lowest_level
) {
2798 err
= read_block_for_search(root
, p
, &b
, level
, slot
, key
);
2806 if (!p
->skip_locking
) {
2807 level
= btrfs_header_level(b
);
2808 if (level
<= write_lock_level
) {
2810 p
->locks
[level
] = BTRFS_WRITE_LOCK
;
2812 btrfs_tree_read_lock(b
);
2813 p
->locks
[level
] = BTRFS_READ_LOCK
;
2815 p
->nodes
[level
] = b
;
2820 if (ret
< 0 && !p
->skip_release_on_error
)
2821 btrfs_release_path(p
);
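/*
 * Example usage sketch (not part of the original source): iterate_items() is a
 * hypothetical helper showing the usual read-only calling pattern around
 * btrfs_search_slot() and btrfs_next_leaf() as used throughout btrfs.
 */
static inline int iterate_items(struct btrfs_root *root,
				const struct btrfs_key *first_key)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	/* read-only search: no transaction handle, ins_len = 0, cow = 0 */
	ret = btrfs_search_slot(NULL, root, first_key, path, 0, 0);
	if (ret < 0)
		goto out;
	while (1) {
		struct extent_buffer *leaf = path->nodes[0];

		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret)	/* 1 means no more leaves, < 0 error */
				break;
			continue;
		}
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		/* ... consume the item at path->slots[0] here ... */
		path->slots[0]++;
	}
out:
	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}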
/*
 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
 * current state of the tree together with the operations recorded in the tree
 * modification log to search for the key in a previous version of this tree, as
 * denoted by the time_seq parameter.
 *
 * Naturally, there is no support for insert, delete or cow operations.
 *
 * The resulting path and return value will be set up as if we called
 * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
 */
int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key,
			  struct btrfs_path *p, u64 time_seq)
{
2839 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
2840 struct extent_buffer
*b
;
2845 int lowest_unlock
= 1;
2846 u8 lowest_level
= 0;
2848 lowest_level
= p
->lowest_level
;
2849 WARN_ON(p
->nodes
[0] != NULL
);
2851 if (p
->search_commit_root
) {
2853 return btrfs_search_slot(NULL
, root
, key
, p
, 0, 0);
2857 b
= get_old_root(root
, time_seq
);
2862 level
= btrfs_header_level(b
);
2863 p
->locks
[level
] = BTRFS_READ_LOCK
;
2868 level
= btrfs_header_level(b
);
2869 p
->nodes
[level
] = b
;
2872 * we have a lock on b and as long as we aren't changing
2873 * the tree, there is no way to for the items in b to change.
2874 * It is safe to drop the lock on our parent before we
2875 * go through the expensive btree search on b.
2877 btrfs_unlock_up_safe(p
, level
+ 1);
2879 ret
= btrfs_bin_search(b
, key
, &slot
);
2884 p
->slots
[level
] = slot
;
2885 unlock_up(p
, level
, lowest_unlock
, 0, NULL
);
2889 if (ret
&& slot
> 0) {
2893 p
->slots
[level
] = slot
;
2894 unlock_up(p
, level
, lowest_unlock
, 0, NULL
);
2896 if (level
== lowest_level
) {
2902 err
= read_block_for_search(root
, p
, &b
, level
, slot
, key
);
2910 level
= btrfs_header_level(b
);
2911 btrfs_tree_read_lock(b
);
2912 b
= tree_mod_log_rewind(fs_info
, p
, b
, time_seq
);
2917 p
->locks
[level
] = BTRFS_READ_LOCK
;
2918 p
->nodes
[level
] = b
;
2923 btrfs_release_path(p
);
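/*
 * Illustrative note (not part of the original source): btrfs_search_old_slot()
 * is the read-only, historical counterpart of btrfs_search_slot(); time_seq is
 * a tree mod log sequence number obtained by the caller (for example during
 * backref walking), and each node read along the way is rewound with
 * tree_mod_log_rewind().
 */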
/*
 * helper to use instead of search slot if no exact match is needed but
 * instead the next or previous item should be returned.
 * When find_higher is true, the next higher item is returned, the next lower
 * otherwise.
 * When return_any and find_higher are both true, and no higher item is found,
 * return the next lower instead.
 * When return_any is true and find_higher is false, and no lower item is found,
 * return the next higher instead.
 * It returns 0 if any item is found, 1 if none is found (tree empty), and
 * < 0 on error.
 */
int btrfs_search_slot_for_read(struct btrfs_root *root,
			       const struct btrfs_key *key,
			       struct btrfs_path *p, int find_higher,
			       int return_any)
{
	int ret;
	struct extent_buffer *leaf;
2949 ret
= btrfs_search_slot(NULL
, root
, key
, p
, 0, 0);
2953 * a return value of 1 means the path is at the position where the
2954 * item should be inserted. Normally this is the next bigger item,
2955 * but in case the previous item is the last in a leaf, path points
2956 * to the first free slot in the previous leaf, i.e. at an invalid
2962 if (p
->slots
[0] >= btrfs_header_nritems(leaf
)) {
2963 ret
= btrfs_next_leaf(root
, p
);
2969 * no higher item found, return the next
2974 btrfs_release_path(p
);
2978 if (p
->slots
[0] == 0) {
2979 ret
= btrfs_prev_leaf(root
, p
);
2984 if (p
->slots
[0] == btrfs_header_nritems(leaf
))
2991 * no lower item found, return the next
2996 btrfs_release_path(p
);
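/*
 * Illustrative note (not part of the original source): a common use of
 * btrfs_search_slot_for_read() is "give me the first item at or after this
 * key", i.e. find_higher = 1 and return_any = 0; the caller then reads the
 * item at path->slots[0] whenever 0 is returned.
 */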
/*
 * adjust the pointers going up the tree, starting at level
 * making sure the right key of each node points to 'key'.
 * This is used after shifting pointers to the left, so it stops
 * fixing up pointers when a given leaf/node is not in slot 0 of the
 * higher levels
 */
static void fixup_low_keys(struct btrfs_path *path,
			   struct btrfs_disk_key *key, int level)
{
	int i;
	struct extent_buffer *t;
	int ret;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		int tslot = path->slots[i];

		if (!path->nodes[i])
			break;
		t = path->nodes[i];
		ret = tree_mod_log_insert_key(t, tslot, MOD_LOG_KEY_REPLACE,
					      GFP_ATOMIC);
		BUG_ON(ret < 0);
		btrfs_set_node_key(t, key, tslot);
		btrfs_mark_buffer_dirty(path->nodes[i]);
		if (tslot != 0)
			break;
	}
}
/*
 * This function isn't completely safe. It's the caller's responsibility
 * that the new key won't break the order
 */
void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
			     struct btrfs_path *path,
			     const struct btrfs_key *new_key)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *eb;
	int slot;
3050 eb
= path
->nodes
[0];
3051 slot
= path
->slots
[0];
3053 btrfs_item_key(eb
, &disk_key
, slot
- 1);
3054 if (unlikely(comp_keys(&disk_key
, new_key
) >= 0)) {
3056 "slot %u key (%llu %u %llu) new key (%llu %u %llu)",
3057 slot
, btrfs_disk_key_objectid(&disk_key
),
3058 btrfs_disk_key_type(&disk_key
),
3059 btrfs_disk_key_offset(&disk_key
),
3060 new_key
->objectid
, new_key
->type
,
3062 btrfs_print_leaf(eb
);
3066 if (slot
< btrfs_header_nritems(eb
) - 1) {
3067 btrfs_item_key(eb
, &disk_key
, slot
+ 1);
3068 if (unlikely(comp_keys(&disk_key
, new_key
) <= 0)) {
3070 "slot %u key (%llu %u %llu) new key (%llu %u %llu)",
3071 slot
, btrfs_disk_key_objectid(&disk_key
),
3072 btrfs_disk_key_type(&disk_key
),
3073 btrfs_disk_key_offset(&disk_key
),
3074 new_key
->objectid
, new_key
->type
,
3076 btrfs_print_leaf(eb
);
3081 btrfs_cpu_key_to_disk(&disk_key
, new_key
);
3082 btrfs_set_item_key(eb
, &disk_key
, slot
);
3083 btrfs_mark_buffer_dirty(eb
);
3085 fixup_low_keys(path
, &disk_key
, 1);
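/*
 * Illustrative note (not part of the original source): the two comp_keys()
 * checks above enforce prev_key < new_key < next_key, so replacing the key in
 * place cannot break the sorted order of the leaf; slot 0 additionally needs
 * fixup_low_keys() because the parent's key pointer mirrors the leaf's first
 * key.
 */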
/*
 * Check key order of two sibling extent buffers.
 *
 * Return true if something is wrong.
 * Return false if everything is fine.
 *
 * Tree-checker only works inside one tree block, thus the following
 * corruption can not be detected by tree-checker:
 *
 * Leaf @left			| Leaf @right
 * --------------------------------------------------------------
 * | 1 | 2 | 3 | 4 | 5 | f6 |   | 7 | 8 |
 *
 * Key f6 in leaf @left itself is valid, but not valid when the next
 * key in leaf @right is 7.
 * This can only be checked at tree block merge time.
 * And since tree checker has ensured all key order in each tree block
 * is correct, we only need to bother the last key of @left and the first
 * key of @right.
 */
static bool check_sibling_keys(struct extent_buffer *left,
			       struct extent_buffer *right)
{
	struct btrfs_key left_last;
	struct btrfs_key right_first;
	int level = btrfs_header_level(left);
	int nr_left = btrfs_header_nritems(left);
	int nr_right = btrfs_header_nritems(right);
3117 /* No key to check in one of the tree blocks */
3118 if (!nr_left
|| !nr_right
)
3122 btrfs_node_key_to_cpu(left
, &left_last
, nr_left
- 1);
3123 btrfs_node_key_to_cpu(right
, &right_first
, 0);
3125 btrfs_item_key_to_cpu(left
, &left_last
, nr_left
- 1);
3126 btrfs_item_key_to_cpu(right
, &right_first
, 0);
3129 if (btrfs_comp_cpu_keys(&left_last
, &right_first
) >= 0) {
3130 btrfs_crit(left
->fs_info
,
3131 "bad key order, sibling blocks, left last (%llu %u %llu) right first (%llu %u %llu)",
3132 left_last
.objectid
, left_last
.type
,
3133 left_last
.offset
, right_first
.objectid
,
3134 right_first
.type
, right_first
.offset
);
/*
 * try to push data from one node into the next node left in the
 * tree.
 *
 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
 * error, and > 0 if there was no room in the left hand block.
 */
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct extent_buffer *dst,
			  struct extent_buffer *src, int empty)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
3157 src_nritems
= btrfs_header_nritems(src
);
3158 dst_nritems
= btrfs_header_nritems(dst
);
3159 push_items
= BTRFS_NODEPTRS_PER_BLOCK(fs_info
) - dst_nritems
;
3160 WARN_ON(btrfs_header_generation(src
) != trans
->transid
);
3161 WARN_ON(btrfs_header_generation(dst
) != trans
->transid
);
3163 if (!empty
&& src_nritems
<= 8)
3166 if (push_items
<= 0)
3170 push_items
= min(src_nritems
, push_items
);
3171 if (push_items
< src_nritems
) {
3172 /* leave at least 8 pointers in the node if
3173 * we aren't going to empty it
3175 if (src_nritems
- push_items
< 8) {
3176 if (push_items
<= 8)
3182 push_items
= min(src_nritems
- 8, push_items
);
3184 /* dst is the left eb, src is the middle eb */
3185 if (check_sibling_keys(dst
, src
)) {
3187 btrfs_abort_transaction(trans
, ret
);
3190 ret
= tree_mod_log_eb_copy(dst
, src
, dst_nritems
, 0, push_items
);
3192 btrfs_abort_transaction(trans
, ret
);
3195 copy_extent_buffer(dst
, src
,
3196 btrfs_node_key_ptr_offset(dst_nritems
),
3197 btrfs_node_key_ptr_offset(0),
3198 push_items
* sizeof(struct btrfs_key_ptr
));
3200 if (push_items
< src_nritems
) {
3202 * Don't call tree_mod_log_insert_move here, key removal was
3203 * already fully logged by tree_mod_log_eb_copy above.
3205 memmove_extent_buffer(src
, btrfs_node_key_ptr_offset(0),
3206 btrfs_node_key_ptr_offset(push_items
),
3207 (src_nritems
- push_items
) *
3208 sizeof(struct btrfs_key_ptr
));
3210 btrfs_set_header_nritems(src
, src_nritems
- push_items
);
3211 btrfs_set_header_nritems(dst
, dst_nritems
+ push_items
);
3212 btrfs_mark_buffer_dirty(src
);
3213 btrfs_mark_buffer_dirty(dst
);
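/*
 * Illustrative note (not part of the original source): the copy above is
 * recorded with tree_mod_log_eb_copy() before the extent buffers are modified,
 * which is what lets btrfs_search_old_slot() replay the node contents for an
 * older time_seq.
 */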
/*
 * try to push data from one node into the next node right in the
 * tree.
 *
 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
 * error, and > 0 if there was no room in the right hand block.
 *
 * this will only push up to 1/2 the contents of the left node over
 */
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct extent_buffer *dst,
			      struct extent_buffer *src)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
3238 WARN_ON(btrfs_header_generation(src
) != trans
->transid
);
3239 WARN_ON(btrfs_header_generation(dst
) != trans
->transid
);
3241 src_nritems
= btrfs_header_nritems(src
);
3242 dst_nritems
= btrfs_header_nritems(dst
);
3243 push_items
= BTRFS_NODEPTRS_PER_BLOCK(fs_info
) - dst_nritems
;
3244 if (push_items
<= 0)
3247 if (src_nritems
< 4)
3250 max_push
= src_nritems
/ 2 + 1;
3251 /* don't try to empty the node */
3252 if (max_push
>= src_nritems
)
3255 if (max_push
< push_items
)
3256 push_items
= max_push
;
3258 /* dst is the right eb, src is the middle eb */
3259 if (check_sibling_keys(src
, dst
)) {
3261 btrfs_abort_transaction(trans
, ret
);
3264 ret
= tree_mod_log_insert_move(dst
, push_items
, 0, dst_nritems
);
3266 memmove_extent_buffer(dst
, btrfs_node_key_ptr_offset(push_items
),
3267 btrfs_node_key_ptr_offset(0),
3269 sizeof(struct btrfs_key_ptr
));
3271 ret
= tree_mod_log_eb_copy(dst
, src
, 0, src_nritems
- push_items
,
3274 btrfs_abort_transaction(trans
, ret
);
3277 copy_extent_buffer(dst
, src
,
3278 btrfs_node_key_ptr_offset(0),
3279 btrfs_node_key_ptr_offset(src_nritems
- push_items
),
3280 push_items
* sizeof(struct btrfs_key_ptr
));
3282 btrfs_set_header_nritems(src
, src_nritems
- push_items
);
3283 btrfs_set_header_nritems(dst
, dst_nritems
+ push_items
);
3285 btrfs_mark_buffer_dirty(src
);
3286 btrfs_mark_buffer_dirty(dst
);
/*
 * helper function to insert a new root level in the tree.
 * A new node is allocated, and a single item is inserted to
 * point to the existing root
 *
 * returns zero on success or < 0 on failure.
 */
static noinline int insert_new_root(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path, int level)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
3304 struct extent_buffer
*lower
;
3305 struct extent_buffer
*c
;
3306 struct extent_buffer
*old
;
3307 struct btrfs_disk_key lower_key
;
3310 BUG_ON(path
->nodes
[level
]);
3311 BUG_ON(path
->nodes
[level
-1] != root
->node
);
3313 lower
= path
->nodes
[level
-1];
3315 btrfs_item_key(lower
, &lower_key
, 0);
3317 btrfs_node_key(lower
, &lower_key
, 0);
3319 c
= alloc_tree_block_no_bg_flush(trans
, root
, 0, &lower_key
, level
,
3320 root
->node
->start
, 0,
3321 BTRFS_NESTING_NEW_ROOT
);
3325 root_add_used(root
, fs_info
->nodesize
);
3327 btrfs_set_header_nritems(c
, 1);
3328 btrfs_set_node_key(c
, &lower_key
, 0);
3329 btrfs_set_node_blockptr(c
, 0, lower
->start
);
3330 lower_gen
= btrfs_header_generation(lower
);
3331 WARN_ON(lower_gen
!= trans
->transid
);
3333 btrfs_set_node_ptr_generation(c
, 0, lower_gen
);
3335 btrfs_mark_buffer_dirty(c
);
3338 ret
= tree_mod_log_insert_root(root
->node
, c
, 0);
3340 rcu_assign_pointer(root
->node
, c
);
3342 /* the super has an extra ref to root->node */
3343 free_extent_buffer(old
);
3345 add_root_to_dirty_list(root
);
3346 atomic_inc(&c
->refs
);
3347 path
->nodes
[level
] = c
;
3348 path
->locks
[level
] = BTRFS_WRITE_LOCK
;
3349 path
->slots
[level
] = 0;
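/*
 * Illustrative note (not part of the original source): after insert_new_root()
 * the tree is one level taller: the new node 'c' has exactly one key pointer,
 * pointing at the old root, and path->nodes[level] / path->locks[level] are
 * already set up so the caller can continue splitting downwards.
 */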
/*
 * worker function to insert a single pointer in a node.
 * the node should have enough room for the pointer already
 *
 * slot and level indicate where you want the key to go, and
 * blocknr is the block the key points to.
 */
static void insert_ptr(struct btrfs_trans_handle *trans,
		       struct btrfs_path *path,
		       struct btrfs_disk_key *key, u64 bytenr,
		       int slot, int level)
{
	struct extent_buffer *lower;
	int nritems;
	int ret;
3369 BUG_ON(!path
->nodes
[level
]);
3370 btrfs_assert_tree_locked(path
->nodes
[level
]);
3371 lower
= path
->nodes
[level
];
3372 nritems
= btrfs_header_nritems(lower
);
3373 BUG_ON(slot
> nritems
);
3374 BUG_ON(nritems
== BTRFS_NODEPTRS_PER_BLOCK(trans
->fs_info
));
3375 if (slot
!= nritems
) {
3377 ret
= tree_mod_log_insert_move(lower
, slot
+ 1, slot
,
3381 memmove_extent_buffer(lower
,
3382 btrfs_node_key_ptr_offset(slot
+ 1),
3383 btrfs_node_key_ptr_offset(slot
),
3384 (nritems
- slot
) * sizeof(struct btrfs_key_ptr
));
3387 ret
= tree_mod_log_insert_key(lower
, slot
, MOD_LOG_KEY_ADD
,
3391 btrfs_set_node_key(lower
, key
, slot
);
3392 btrfs_set_node_blockptr(lower
, slot
, bytenr
);
3393 WARN_ON(trans
->transid
== 0);
3394 btrfs_set_node_ptr_generation(lower
, slot
, trans
->transid
);
3395 btrfs_set_header_nritems(lower
, nritems
+ 1);
3396 btrfs_mark_buffer_dirty(lower
);
/*
 * split the node at the specified level in path in two.
 * The path is corrected to point to the appropriate node after the split
 *
 * Before splitting this tries to make some room in the node by pushing
 * left and right, if either one works, it returns right away.
 *
 * returns 0 on success and < 0 on failure
 */
static noinline int split_node(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_path *path, int level)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
3413 struct extent_buffer
*c
;
3414 struct extent_buffer
*split
;
3415 struct btrfs_disk_key disk_key
;
3420 c
= path
->nodes
[level
];
3421 WARN_ON(btrfs_header_generation(c
) != trans
->transid
);
3422 if (c
== root
->node
) {
3424 * trying to split the root, lets make a new one
3426 * tree mod log: We don't log_removal old root in
3427 * insert_new_root, because that root buffer will be kept as a
3428 * normal node. We are going to log removal of half of the
3429 * elements below with tree_mod_log_eb_copy. We're holding a
3430 * tree lock on the buffer, which is why we cannot race with
3431 * other tree_mod_log users.
3433 ret
= insert_new_root(trans
, root
, path
, level
+ 1);
3437 ret
= push_nodes_for_insert(trans
, root
, path
, level
);
3438 c
= path
->nodes
[level
];
3439 if (!ret
&& btrfs_header_nritems(c
) <
3440 BTRFS_NODEPTRS_PER_BLOCK(fs_info
) - 3)
3446 c_nritems
= btrfs_header_nritems(c
);
3447 mid
= (c_nritems
+ 1) / 2;
3448 btrfs_node_key(c
, &disk_key
, mid
);
3450 split
= alloc_tree_block_no_bg_flush(trans
, root
, 0, &disk_key
, level
,
3451 c
->start
, 0, BTRFS_NESTING_SPLIT
);
3453 return PTR_ERR(split
);
3455 root_add_used(root
, fs_info
->nodesize
);
3456 ASSERT(btrfs_header_level(c
) == level
);
3458 ret
= tree_mod_log_eb_copy(split
, c
, 0, mid
, c_nritems
- mid
);
3460 btrfs_abort_transaction(trans
, ret
);
3463 copy_extent_buffer(split
, c
,
3464 btrfs_node_key_ptr_offset(0),
3465 btrfs_node_key_ptr_offset(mid
),
3466 (c_nritems
- mid
) * sizeof(struct btrfs_key_ptr
));
3467 btrfs_set_header_nritems(split
, c_nritems
- mid
);
3468 btrfs_set_header_nritems(c
, mid
);
3470 btrfs_mark_buffer_dirty(c
);
3471 btrfs_mark_buffer_dirty(split
);
3473 insert_ptr(trans
, path
, &disk_key
, split
->start
,
3474 path
->slots
[level
+ 1] + 1, level
+ 1);
3476 if (path
->slots
[level
] >= mid
) {
3477 path
->slots
[level
] -= mid
;
3478 btrfs_tree_unlock(c
);
3479 free_extent_buffer(c
);
3480 path
->nodes
[level
] = split
;
3481 path
->slots
[level
+ 1] += 1;
3483 btrfs_tree_unlock(split
);
3484 free_extent_buffer(split
);
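/*
 * Illustrative note (not part of the original source): after split_node() the
 * middle key of the old node becomes the first key of 'split', the parent
 * gains one more key pointer via insert_ptr(), and path->slots[level] is
 * adjusted so it still refers to the same child as before the split.
 */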
/*
 * how many bytes are required to store the items in a leaf. start
 * and nr indicate which items in the leaf to check. This totals up the
 * space used both by the item structs and the item data
 */
static int leaf_space_used(struct extent_buffer *l, int start, int nr)
{
	struct btrfs_item *start_item;
	struct btrfs_item *end_item;
	int data_len;
	int nritems = btrfs_header_nritems(l);
	int end = min(nritems, start + nr) - 1;

	if (!nr)
		return 0;
	start_item = btrfs_item_nr(start);
	end_item = btrfs_item_nr(end);
	data_len = btrfs_item_offset(l, start_item) +
		   btrfs_item_size(l, start_item);
	data_len = data_len - btrfs_item_offset(l, end_item);
	data_len += sizeof(struct btrfs_item) * nr;
	WARN_ON(data_len < 0);
	return data_len;
}
/*
 * The space between the end of the leaf items and
 * the start of the leaf data. IOW, how much room
 * the leaf has left for both items and data
 */
noinline int btrfs_leaf_free_space(struct extent_buffer *leaf)
{
	struct btrfs_fs_info *fs_info = leaf->fs_info;
	int nritems = btrfs_header_nritems(leaf);
	int ret;

	ret = BTRFS_LEAF_DATA_SIZE(fs_info) - leaf_space_used(leaf, 0, nritems);
	if (ret < 0) {
		btrfs_crit(fs_info,
			   "leaf free space ret %d, leaf data size %lu, used %d nritems %d",
			   ret,
			   (unsigned long) BTRFS_LEAF_DATA_SIZE(fs_info),
			   leaf_space_used(leaf, 0, nritems), nritems);
	}
	return ret;
}
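/*
 * Example sketch (not part of the original source): leaf_has_room_for() is a
 * hypothetical helper showing how callers in this file combine
 * btrfs_leaf_free_space() with the size of a new item; inserting an item whose
 * body is data_size bytes also consumes sizeof(struct btrfs_item) for its
 * header in the item array.
 */
static inline bool leaf_has_room_for(struct extent_buffer *leaf, u32 data_size)
{
	return btrfs_leaf_free_space(leaf) >=
	       (int)(data_size + sizeof(struct btrfs_item));
}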
/*
 * min slot controls the lowest index we're willing to push to the
 * right. We'll push up to and including min_slot, but no lower
 */
static noinline int __push_leaf_right(struct btrfs_path *path,
				      int data_size, int empty,
				      struct extent_buffer *right,
				      int free_space, u32 left_nritems,
				      u32 min_slot)
{
3546 struct btrfs_fs_info
*fs_info
= right
->fs_info
;
3547 struct extent_buffer
*left
= path
->nodes
[0];
3548 struct extent_buffer
*upper
= path
->nodes
[1];
3549 struct btrfs_map_token token
;
3550 struct btrfs_disk_key disk_key
;
3555 struct btrfs_item
*item
;
3564 nr
= max_t(u32
, 1, min_slot
);
3566 if (path
->slots
[0] >= left_nritems
)
3567 push_space
+= data_size
;
3569 slot
= path
->slots
[1];
3570 i
= left_nritems
- 1;
3572 item
= btrfs_item_nr(i
);
3574 if (!empty
&& push_items
> 0) {
3575 if (path
->slots
[0] > i
)
3577 if (path
->slots
[0] == i
) {
3578 int space
= btrfs_leaf_free_space(left
);
3580 if (space
+ push_space
* 2 > free_space
)
3585 if (path
->slots
[0] == i
)
3586 push_space
+= data_size
;
3588 this_item_size
= btrfs_item_size(left
, item
);
3589 if (this_item_size
+ sizeof(*item
) + push_space
> free_space
)
3593 push_space
+= this_item_size
+ sizeof(*item
);
3599 if (push_items
== 0)
3602 WARN_ON(!empty
&& push_items
== left_nritems
);
3604 /* push left to right */
3605 right_nritems
= btrfs_header_nritems(right
);
3607 push_space
= btrfs_item_end_nr(left
, left_nritems
- push_items
);
3608 push_space
-= leaf_data_end(left
);
3610 /* make room in the right data area */
3611 data_end
= leaf_data_end(right
);
3612 memmove_extent_buffer(right
,
3613 BTRFS_LEAF_DATA_OFFSET
+ data_end
- push_space
,
3614 BTRFS_LEAF_DATA_OFFSET
+ data_end
,
3615 BTRFS_LEAF_DATA_SIZE(fs_info
) - data_end
);
3617 /* copy from the left data area */
3618 copy_extent_buffer(right
, left
, BTRFS_LEAF_DATA_OFFSET
+
3619 BTRFS_LEAF_DATA_SIZE(fs_info
) - push_space
,
3620 BTRFS_LEAF_DATA_OFFSET
+ leaf_data_end(left
),
3623 memmove_extent_buffer(right
, btrfs_item_nr_offset(push_items
),
3624 btrfs_item_nr_offset(0),
3625 right_nritems
* sizeof(struct btrfs_item
));
3627 /* copy the items from left to right */
3628 copy_extent_buffer(right
, left
, btrfs_item_nr_offset(0),
3629 btrfs_item_nr_offset(left_nritems
- push_items
),
3630 push_items
* sizeof(struct btrfs_item
));
3632 /* update the item pointers */
3633 btrfs_init_map_token(&token
, right
);
3634 right_nritems
+= push_items
;
3635 btrfs_set_header_nritems(right
, right_nritems
);
3636 push_space
= BTRFS_LEAF_DATA_SIZE(fs_info
);
3637 for (i
= 0; i
< right_nritems
; i
++) {
3638 item
= btrfs_item_nr(i
);
3639 push_space
-= btrfs_token_item_size(&token
, item
);
3640 btrfs_set_token_item_offset(&token
, item
, push_space
);
3643 left_nritems
-= push_items
;
3644 btrfs_set_header_nritems(left
, left_nritems
);
3647 btrfs_mark_buffer_dirty(left
);
3649 btrfs_clean_tree_block(left
);
3651 btrfs_mark_buffer_dirty(right
);
3653 btrfs_item_key(right
, &disk_key
, 0);
3654 btrfs_set_node_key(upper
, &disk_key
, slot
+ 1);
3655 btrfs_mark_buffer_dirty(upper
);
3657 /* then fixup the leaf pointer in the path */
3658 if (path
->slots
[0] >= left_nritems
) {
3659 path
->slots
[0] -= left_nritems
;
3660 if (btrfs_header_nritems(path
->nodes
[0]) == 0)
3661 btrfs_clean_tree_block(path
->nodes
[0]);
3662 btrfs_tree_unlock(path
->nodes
[0]);
3663 free_extent_buffer(path
->nodes
[0]);
3664 path
->nodes
[0] = right
;
3665 path
->slots
[1] += 1;
3667 btrfs_tree_unlock(right
);
3668 free_extent_buffer(right
);
3673 btrfs_tree_unlock(right
);
3674 free_extent_buffer(right
);
/*
 * push some data in the path leaf to the right, trying to free up at
 * least data_size bytes. returns zero if the push worked, nonzero otherwise
 *
 * returns 1 if the push failed because the other node didn't have enough
 * room, 0 if everything worked out and < 0 if there were major errors.
 *
 * this will push starting from min_slot to the end of the leaf. It won't
 * push any slot lower than min_slot
 */
static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
			   *root, struct btrfs_path *path,
			   int min_data_size, int data_size,
			   int empty, u32 min_slot)
{
3693 struct extent_buffer
*left
= path
->nodes
[0];
3694 struct extent_buffer
*right
;
3695 struct extent_buffer
*upper
;
3701 if (!path
->nodes
[1])
3704 slot
= path
->slots
[1];
3705 upper
= path
->nodes
[1];
3706 if (slot
>= btrfs_header_nritems(upper
) - 1)
3709 btrfs_assert_tree_locked(path
->nodes
[1]);
3711 right
= btrfs_read_node_slot(upper
, slot
+ 1);
3713 * slot + 1 is not valid or we fail to read the right node,
3714 * no big deal, just return.
3719 __btrfs_tree_lock(right
, BTRFS_NESTING_RIGHT
);
3721 free_space
= btrfs_leaf_free_space(right
);
3722 if (free_space
< data_size
)
3725 /* cow and double check */
3726 ret
= btrfs_cow_block(trans
, root
, right
, upper
,
3727 slot
+ 1, &right
, BTRFS_NESTING_RIGHT_COW
);
3731 free_space
= btrfs_leaf_free_space(right
);
3732 if (free_space
< data_size
)
3735 left_nritems
= btrfs_header_nritems(left
);
3736 if (left_nritems
== 0)
3739 if (check_sibling_keys(left
, right
)) {
3741 btrfs_tree_unlock(right
);
3742 free_extent_buffer(right
);
3745 if (path
->slots
[0] == left_nritems
&& !empty
) {
3746 /* Key greater than all keys in the leaf, right neighbor has
3747 * enough room for it and we're not emptying our leaf to delete
3748 * it, therefore use right neighbor to insert the new item and
3749 * no need to touch/dirty our left leaf. */
3750 btrfs_tree_unlock(left
);
3751 free_extent_buffer(left
);
3752 path
->nodes
[0] = right
;
3758 return __push_leaf_right(path
, min_data_size
, empty
,
3759 right
, free_space
, left_nritems
, min_slot
);
3761 btrfs_tree_unlock(right
);
3762 free_extent_buffer(right
);
/*
 * push some data in the path leaf to the left, trying to free up at
 * least data_size bytes. returns zero if the push worked, nonzero otherwise
 *
 * max_slot can put a limit on how far into the leaf we'll push items. The
 * item at 'max_slot' won't be touched. Use (u32)-1 to make us do all the
 * items
 */
static noinline int __push_leaf_left(struct btrfs_path *path, int data_size,
				     int empty, struct extent_buffer *left,
				     int free_space, u32 right_nritems,
				     u32 max_slot)
{
3779 struct btrfs_fs_info
*fs_info
= left
->fs_info
;
3780 struct btrfs_disk_key disk_key
;
3781 struct extent_buffer
*right
= path
->nodes
[0];
3785 struct btrfs_item
*item
;
3786 u32 old_left_nritems
;
3790 u32 old_left_item_size
;
3791 struct btrfs_map_token token
;
3794 nr
= min(right_nritems
, max_slot
);
3796 nr
= min(right_nritems
- 1, max_slot
);
3798 for (i
= 0; i
< nr
; i
++) {
3799 item
= btrfs_item_nr(i
);
3801 if (!empty
&& push_items
> 0) {
3802 if (path
->slots
[0] < i
)
3804 if (path
->slots
[0] == i
) {
3805 int space
= btrfs_leaf_free_space(right
);
3807 if (space
+ push_space
* 2 > free_space
)
3812 if (path
->slots
[0] == i
)
3813 push_space
+= data_size
;
3815 this_item_size
= btrfs_item_size(right
, item
);
3816 if (this_item_size
+ sizeof(*item
) + push_space
> free_space
)
3820 push_space
+= this_item_size
+ sizeof(*item
);
3823 if (push_items
== 0) {
3827 WARN_ON(!empty
&& push_items
== btrfs_header_nritems(right
));
3829 /* push data from right to left */
3830 copy_extent_buffer(left
, right
,
3831 btrfs_item_nr_offset(btrfs_header_nritems(left
)),
3832 btrfs_item_nr_offset(0),
3833 push_items
* sizeof(struct btrfs_item
));
3835 push_space
= BTRFS_LEAF_DATA_SIZE(fs_info
) -
3836 btrfs_item_offset_nr(right
, push_items
- 1);
3838 copy_extent_buffer(left
, right
, BTRFS_LEAF_DATA_OFFSET
+
3839 leaf_data_end(left
) - push_space
,
3840 BTRFS_LEAF_DATA_OFFSET
+
3841 btrfs_item_offset_nr(right
, push_items
- 1),
3843 old_left_nritems
= btrfs_header_nritems(left
);
3844 BUG_ON(old_left_nritems
<= 0);
3846 btrfs_init_map_token(&token
, left
);
3847 old_left_item_size
= btrfs_item_offset_nr(left
, old_left_nritems
- 1);
3848 for (i
= old_left_nritems
; i
< old_left_nritems
+ push_items
; i
++) {
3851 item
= btrfs_item_nr(i
);
3853 ioff
= btrfs_token_item_offset(&token
, item
);
3854 btrfs_set_token_item_offset(&token
, item
,
3855 ioff
- (BTRFS_LEAF_DATA_SIZE(fs_info
) - old_left_item_size
));
3857 btrfs_set_header_nritems(left
, old_left_nritems
+ push_items
);
3859 /* fixup right node */
3860 if (push_items
> right_nritems
)
3861 WARN(1, KERN_CRIT
"push items %d nr %u\n", push_items
,
3864 if (push_items
< right_nritems
) {
3865 push_space
= btrfs_item_offset_nr(right
, push_items
- 1) -
3866 leaf_data_end(right
);
3867 memmove_extent_buffer(right
, BTRFS_LEAF_DATA_OFFSET
+
3868 BTRFS_LEAF_DATA_SIZE(fs_info
) - push_space
,
3869 BTRFS_LEAF_DATA_OFFSET
+
3870 leaf_data_end(right
), push_space
);
3872 memmove_extent_buffer(right
, btrfs_item_nr_offset(0),
3873 btrfs_item_nr_offset(push_items
),
3874 (btrfs_header_nritems(right
) - push_items
) *
3875 sizeof(struct btrfs_item
));
3878 btrfs_init_map_token(&token
, right
);
3879 right_nritems
-= push_items
;
3880 btrfs_set_header_nritems(right
, right_nritems
);
3881 push_space
= BTRFS_LEAF_DATA_SIZE(fs_info
);
3882 for (i
= 0; i
< right_nritems
; i
++) {
3883 item
= btrfs_item_nr(i
);
3885 push_space
= push_space
- btrfs_token_item_size(&token
, item
);
3886 btrfs_set_token_item_offset(&token
, item
, push_space
);
3889 btrfs_mark_buffer_dirty(left
);
3891 btrfs_mark_buffer_dirty(right
);
3893 btrfs_clean_tree_block(right
);
3895 btrfs_item_key(right
, &disk_key
, 0);
3896 fixup_low_keys(path
, &disk_key
, 1);
3898 /* then fixup the leaf pointer in the path */
3899 if (path
->slots
[0] < push_items
) {
3900 path
->slots
[0] += old_left_nritems
;
3901 btrfs_tree_unlock(path
->nodes
[0]);
3902 free_extent_buffer(path
->nodes
[0]);
3903 path
->nodes
[0] = left
;
3904 path
->slots
[1] -= 1;
3906 btrfs_tree_unlock(left
);
3907 free_extent_buffer(left
);
3908 path
->slots
[0] -= push_items
;
3910 BUG_ON(path
->slots
[0] < 0);
3913 btrfs_tree_unlock(left
);
3914 free_extent_buffer(left
);
/*
 * push some data in the path leaf to the left, trying to free up at
 * least data_size bytes. returns zero if the push worked, nonzero otherwise
 *
 * max_slot can put a limit on how far into the leaf we'll push items. The
 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the
 * items
 */
static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
			  *root, struct btrfs_path *path, int min_data_size,
			  int data_size, int empty, u32 max_slot)
{
	struct extent_buffer *right = path->nodes[0];
	struct extent_buffer *left;
3937 slot
= path
->slots
[1];
3940 if (!path
->nodes
[1])
3943 right_nritems
= btrfs_header_nritems(right
);
3944 if (right_nritems
== 0)
3947 btrfs_assert_tree_locked(path
->nodes
[1]);
3949 left
= btrfs_read_node_slot(path
->nodes
[1], slot
- 1);
3951 * slot - 1 is not valid or we fail to read the left node,
3952 * no big deal, just return.
3957 __btrfs_tree_lock(left
, BTRFS_NESTING_LEFT
);
3959 free_space
= btrfs_leaf_free_space(left
);
3960 if (free_space
< data_size
) {
3965 /* cow and double check */
3966 ret
= btrfs_cow_block(trans
, root
, left
,
3967 path
->nodes
[1], slot
- 1, &left
,
3968 BTRFS_NESTING_LEFT_COW
);
3970 /* we hit -ENOSPC, but it isn't fatal here */
3976 free_space
= btrfs_leaf_free_space(left
);
3977 if (free_space
< data_size
) {
3982 if (check_sibling_keys(left
, right
)) {
3986 return __push_leaf_left(path
, min_data_size
,
3987 empty
, left
, free_space
, right_nritems
,
3990 btrfs_tree_unlock(left
);
3991 free_extent_buffer(left
);
/*
 * split the path's leaf in two, making sure there is at least data_size
 * available for the resulting leaf level of the path.
 */
static noinline void copy_for_split(struct btrfs_trans_handle *trans,
				    struct btrfs_path *path,
				    struct extent_buffer *l,
				    struct extent_buffer *right,
				    int slot, int mid, int nritems)
{
4005 struct btrfs_fs_info
*fs_info
= trans
->fs_info
;
4009 struct btrfs_disk_key disk_key
;
4010 struct btrfs_map_token token
;
4012 nritems
= nritems
- mid
;
4013 btrfs_set_header_nritems(right
, nritems
);
4014 data_copy_size
= btrfs_item_end_nr(l
, mid
) - leaf_data_end(l
);
4016 copy_extent_buffer(right
, l
, btrfs_item_nr_offset(0),
4017 btrfs_item_nr_offset(mid
),
4018 nritems
* sizeof(struct btrfs_item
));
4020 copy_extent_buffer(right
, l
,
4021 BTRFS_LEAF_DATA_OFFSET
+ BTRFS_LEAF_DATA_SIZE(fs_info
) -
4022 data_copy_size
, BTRFS_LEAF_DATA_OFFSET
+
4023 leaf_data_end(l
), data_copy_size
);
4025 rt_data_off
= BTRFS_LEAF_DATA_SIZE(fs_info
) - btrfs_item_end_nr(l
, mid
);
4027 btrfs_init_map_token(&token
, right
);
4028 for (i
= 0; i
< nritems
; i
++) {
4029 struct btrfs_item
*item
= btrfs_item_nr(i
);
4032 ioff
= btrfs_token_item_offset(&token
, item
);
4033 btrfs_set_token_item_offset(&token
, item
, ioff
+ rt_data_off
);
4036 btrfs_set_header_nritems(l
, mid
);
4037 btrfs_item_key(right
, &disk_key
, 0);
4038 insert_ptr(trans
, path
, &disk_key
, right
->start
, path
->slots
[1] + 1, 1);
4040 btrfs_mark_buffer_dirty(right
);
4041 btrfs_mark_buffer_dirty(l
);
4042 BUG_ON(path
->slots
[0] != slot
);
4045 btrfs_tree_unlock(path
->nodes
[0]);
4046 free_extent_buffer(path
->nodes
[0]);
4047 path
->nodes
[0] = right
;
4048 path
->slots
[0] -= mid
;
4049 path
->slots
[1] += 1;
4051 btrfs_tree_unlock(right
);
4052 free_extent_buffer(right
);
4055 BUG_ON(path
->slots
[0] < 0);
/*
 * double splits happen when we need to insert a big item in the middle
 * of a leaf. A double split can leave us with 3 mostly empty leaves:
 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
 *
 * We avoid this by trying to push the items on either side of our target
 * into the adjacent leaves. If all goes well we can avoid the double split
 * completely.
 */
static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  int data_size)
{
4077 int space_needed
= data_size
;
4079 slot
= path
->slots
[0];
4080 if (slot
< btrfs_header_nritems(path
->nodes
[0]))
4081 space_needed
-= btrfs_leaf_free_space(path
->nodes
[0]);
4084 * try to push all the items after our slot into the
4087 ret
= push_leaf_right(trans
, root
, path
, 1, space_needed
, 0, slot
);
4094 nritems
= btrfs_header_nritems(path
->nodes
[0]);
4096 * our goal is to get our slot at the start or end of a leaf. If
4097 * we've done so we're done
4099 if (path
->slots
[0] == 0 || path
->slots
[0] == nritems
)
4102 if (btrfs_leaf_free_space(path
->nodes
[0]) >= data_size
)
4105 /* try to push all the items before our slot into the next leaf */
4106 slot
= path
->slots
[0];
4107 space_needed
= data_size
;
4109 space_needed
-= btrfs_leaf_free_space(path
->nodes
[0]);
4110 ret
= push_leaf_left(trans
, root
, path
, 1, space_needed
, 0, slot
);
/*
 * split the path's leaf in two, making sure there is at least data_size
 * available for the resulting leaf level of the path.
 *
 * returns 0 if all went well and < 0 on failure.
 */
static noinline int split_leaf(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       const struct btrfs_key *ins_key,
			       struct btrfs_path *path, int data_size,
			       int extend)
{
4134 struct btrfs_disk_key disk_key
;
4135 struct extent_buffer
*l
;
4139 struct extent_buffer
*right
;
4140 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
4144 int num_doubles
= 0;
4145 int tried_avoid_double
= 0;
4148 slot
= path
->slots
[0];
4149 if (extend
&& data_size
+ btrfs_item_size_nr(l
, slot
) +
4150 sizeof(struct btrfs_item
) > BTRFS_LEAF_DATA_SIZE(fs_info
))
4153 /* first try to make some room by pushing left and right */
4154 if (data_size
&& path
->nodes
[1]) {
4155 int space_needed
= data_size
;
4157 if (slot
< btrfs_header_nritems(l
))
4158 space_needed
-= btrfs_leaf_free_space(l
);
4160 wret
= push_leaf_right(trans
, root
, path
, space_needed
,
4161 space_needed
, 0, 0);
4165 space_needed
= data_size
;
4167 space_needed
-= btrfs_leaf_free_space(l
);
4168 wret
= push_leaf_left(trans
, root
, path
, space_needed
,
4169 space_needed
, 0, (u32
)-1);
4175 /* did the pushes work? */
4176 if (btrfs_leaf_free_space(l
) >= data_size
)
4180 if (!path
->nodes
[1]) {
4181 ret
= insert_new_root(trans
, root
, path
, 1);
4188 slot
= path
->slots
[0];
4189 nritems
= btrfs_header_nritems(l
);
4190 mid
= (nritems
+ 1) / 2;
4194 leaf_space_used(l
, mid
, nritems
- mid
) + data_size
>
4195 BTRFS_LEAF_DATA_SIZE(fs_info
)) {
4196 if (slot
>= nritems
) {
4200 if (mid
!= nritems
&&
4201 leaf_space_used(l
, mid
, nritems
- mid
) +
4202 data_size
> BTRFS_LEAF_DATA_SIZE(fs_info
)) {
4203 if (data_size
&& !tried_avoid_double
)
4204 goto push_for_double
;
4210 if (leaf_space_used(l
, 0, mid
) + data_size
>
4211 BTRFS_LEAF_DATA_SIZE(fs_info
)) {
4212 if (!extend
&& data_size
&& slot
== 0) {
4214 } else if ((extend
|| !data_size
) && slot
== 0) {
4218 if (mid
!= nritems
&&
4219 leaf_space_used(l
, mid
, nritems
- mid
) +
4220 data_size
> BTRFS_LEAF_DATA_SIZE(fs_info
)) {
4221 if (data_size
&& !tried_avoid_double
)
4222 goto push_for_double
;
4230 btrfs_cpu_key_to_disk(&disk_key
, ins_key
);
4232 btrfs_item_key(l
, &disk_key
, mid
);
4235 * We have to about BTRFS_NESTING_NEW_ROOT here if we've done a double
4236 * split, because we're only allowed to have MAX_LOCKDEP_SUBCLASSES
4237 * subclasses, which is 8 at the time of this patch, and we've maxed it
4238 * out. In the future we could add a
4239 * BTRFS_NESTING_SPLIT_THE_SPLITTENING if we need to, but for now just
4240 * use BTRFS_NESTING_NEW_ROOT.
4242 right
= alloc_tree_block_no_bg_flush(trans
, root
, 0, &disk_key
, 0,
4243 l
->start
, 0, num_doubles
?
4244 BTRFS_NESTING_NEW_ROOT
:
4245 BTRFS_NESTING_SPLIT
);
4247 return PTR_ERR(right
);
4249 root_add_used(root
, fs_info
->nodesize
);
4253 btrfs_set_header_nritems(right
, 0);
4254 insert_ptr(trans
, path
, &disk_key
,
4255 right
->start
, path
->slots
[1] + 1, 1);
4256 btrfs_tree_unlock(path
->nodes
[0]);
4257 free_extent_buffer(path
->nodes
[0]);
4258 path
->nodes
[0] = right
;
4260 path
->slots
[1] += 1;
4262 btrfs_set_header_nritems(right
, 0);
4263 insert_ptr(trans
, path
, &disk_key
,
4264 right
->start
, path
->slots
[1], 1);
4265 btrfs_tree_unlock(path
->nodes
[0]);
4266 free_extent_buffer(path
->nodes
[0]);
4267 path
->nodes
[0] = right
;
4269 if (path
->slots
[1] == 0)
4270 fixup_low_keys(path
, &disk_key
, 1);
4273 * We create a new leaf 'right' for the required ins_len and
4274 * we'll do btrfs_mark_buffer_dirty() on this leaf after copying
4275 * the content of ins_len to 'right'.
4280 copy_for_split(trans
, path
, l
, right
, slot
, mid
, nritems
);
4283 BUG_ON(num_doubles
!= 0);
4291 push_for_double_split(trans
, root
, path
, data_size
);
4292 tried_avoid_double
= 1;
4293 if (btrfs_leaf_free_space(path
->nodes
[0]) >= data_size
)
static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root,
					 struct btrfs_path *path, int ins_len)
{
4302 struct btrfs_key key
;
4303 struct extent_buffer
*leaf
;
4304 struct btrfs_file_extent_item
*fi
;
4309 leaf
= path
->nodes
[0];
4310 btrfs_item_key_to_cpu(leaf
, &key
, path
->slots
[0]);
4312 BUG_ON(key
.type
!= BTRFS_EXTENT_DATA_KEY
&&
4313 key
.type
!= BTRFS_EXTENT_CSUM_KEY
);
4315 if (btrfs_leaf_free_space(leaf
) >= ins_len
)
4318 item_size
= btrfs_item_size_nr(leaf
, path
->slots
[0]);
4319 if (key
.type
== BTRFS_EXTENT_DATA_KEY
) {
4320 fi
= btrfs_item_ptr(leaf
, path
->slots
[0],
4321 struct btrfs_file_extent_item
);
4322 extent_len
= btrfs_file_extent_num_bytes(leaf
, fi
);
4324 btrfs_release_path(path
);
4326 path
->keep_locks
= 1;
4327 path
->search_for_split
= 1;
4328 ret
= btrfs_search_slot(trans
, root
, &key
, path
, 0, 1);
4329 path
->search_for_split
= 0;
4336 leaf
= path
->nodes
[0];
4337 /* if our item isn't there, return now */
4338 if (item_size
!= btrfs_item_size_nr(leaf
, path
->slots
[0]))
4341 /* the leaf has changed, it now has room. return now */
4342 if (btrfs_leaf_free_space(path
->nodes
[0]) >= ins_len
)
4345 if (key
.type
== BTRFS_EXTENT_DATA_KEY
) {
4346 fi
= btrfs_item_ptr(leaf
, path
->slots
[0],
4347 struct btrfs_file_extent_item
);
4348 if (extent_len
!= btrfs_file_extent_num_bytes(leaf
, fi
))
4352 ret
= split_leaf(trans
, root
, &key
, path
, ins_len
, 1);
4356 path
->keep_locks
= 0;
4357 btrfs_unlock_up_safe(path
, 1);
4360 path
->keep_locks
= 0;
static noinline int split_item(struct btrfs_path *path,
			       const struct btrfs_key *new_key,
			       unsigned long split_offset)
{
4368 struct extent_buffer
*leaf
;
4369 struct btrfs_item
*item
;
4370 struct btrfs_item
*new_item
;
4376 struct btrfs_disk_key disk_key
;
4378 leaf
= path
->nodes
[0];
4379 BUG_ON(btrfs_leaf_free_space(leaf
) < sizeof(struct btrfs_item
));
4381 item
= btrfs_item_nr(path
->slots
[0]);
4382 orig_offset
= btrfs_item_offset(leaf
, item
);
4383 item_size
= btrfs_item_size(leaf
, item
);
4385 buf
= kmalloc(item_size
, GFP_NOFS
);
4389 read_extent_buffer(leaf
, buf
, btrfs_item_ptr_offset(leaf
,
4390 path
->slots
[0]), item_size
);
4392 slot
= path
->slots
[0] + 1;
4393 nritems
= btrfs_header_nritems(leaf
);
4394 if (slot
!= nritems
) {
4395 /* shift the items */
4396 memmove_extent_buffer(leaf
, btrfs_item_nr_offset(slot
+ 1),
4397 btrfs_item_nr_offset(slot
),
4398 (nritems
- slot
) * sizeof(struct btrfs_item
));
4401 btrfs_cpu_key_to_disk(&disk_key
, new_key
);
4402 btrfs_set_item_key(leaf
, &disk_key
, slot
);
4404 new_item
= btrfs_item_nr(slot
);
4406 btrfs_set_item_offset(leaf
, new_item
, orig_offset
);
4407 btrfs_set_item_size(leaf
, new_item
, item_size
- split_offset
);
4409 btrfs_set_item_offset(leaf
, item
,
4410 orig_offset
+ item_size
- split_offset
);
4411 btrfs_set_item_size(leaf
, item
, split_offset
);
4413 btrfs_set_header_nritems(leaf
, nritems
+ 1);
4415 /* write the data for the start of the original item */
4416 write_extent_buffer(leaf
, buf
,
4417 btrfs_item_ptr_offset(leaf
, path
->slots
[0]),
4420 /* write the data for the new item */
4421 write_extent_buffer(leaf
, buf
+ split_offset
,
4422 btrfs_item_ptr_offset(leaf
, slot
),
4423 item_size
- split_offset
);
4424 btrfs_mark_buffer_dirty(leaf
);
4426 BUG_ON(btrfs_leaf_free_space(leaf
) < 0);
/*
 * This function splits a single item into two items,
 * giving 'new_key' to the new item and splitting the
 * old one at split_offset (from the start of the item).
 *
 * The path may be released by this operation. After
 * the split, the path is pointing to the old item. The
 * new item is going to be in the same node as the old one.
 *
 * Note, the item being split must be small enough to live alone on
 * a tree block with room for one extra struct btrfs_item
 *
 * This allows us to split the item in place, keeping a lock on the
 * leaf the entire time.
 */
int btrfs_split_item(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_path *path,
		     const struct btrfs_key *new_key,
		     unsigned long split_offset)
{
4453 ret
= setup_leaf_for_split(trans
, root
, path
,
4454 sizeof(struct btrfs_item
));
4458 ret
= split_item(path
, new_key
, split_offset
);
/*
 * This function duplicates an item, giving 'new_key' to the new item.
 * It guarantees both items live in the same tree leaf and the new item
 * is contiguous with the original item.
 *
 * This allows us to split a file extent in place, keeping a lock on the
 * leaf the entire time.
 */
int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 struct btrfs_path *path,
			 const struct btrfs_key *new_key)
{
4475 struct extent_buffer
*leaf
;
4479 leaf
= path
->nodes
[0];
4480 item_size
= btrfs_item_size_nr(leaf
, path
->slots
[0]);
4481 ret
= setup_leaf_for_split(trans
, root
, path
,
4482 item_size
+ sizeof(struct btrfs_item
));
4487 setup_items_for_insert(root
, path
, new_key
, &item_size
, 1);
4488 leaf
= path
->nodes
[0];
4489 memcpy_extent_buffer(leaf
,
4490 btrfs_item_ptr_offset(leaf
, path
->slots
[0]),
4491 btrfs_item_ptr_offset(leaf
, path
->slots
[0] - 1),
/*
 * make the item pointed to by the path smaller. new_size indicates
 * how small to make it, and from_end tells us if we just chop bytes
 * off the end of the item or if we shift the item to chop bytes off
 * the front.
 */
void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end)
{
4505 struct extent_buffer
*leaf
;
4506 struct btrfs_item
*item
;
4508 unsigned int data_end
;
4509 unsigned int old_data_start
;
4510 unsigned int old_size
;
4511 unsigned int size_diff
;
4513 struct btrfs_map_token token
;
4515 leaf
= path
->nodes
[0];
4516 slot
= path
->slots
[0];
4518 old_size
= btrfs_item_size_nr(leaf
, slot
);
4519 if (old_size
== new_size
)
4522 nritems
= btrfs_header_nritems(leaf
);
4523 data_end
= leaf_data_end(leaf
);
4525 old_data_start
= btrfs_item_offset_nr(leaf
, slot
);
4527 size_diff
= old_size
- new_size
;
4530 BUG_ON(slot
>= nritems
);
4533 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4535 /* first correct the data pointers */
4536 btrfs_init_map_token(&token
, leaf
);
4537 for (i
= slot
; i
< nritems
; i
++) {
4539 item
= btrfs_item_nr(i
);
4541 ioff
= btrfs_token_item_offset(&token
, item
);
4542 btrfs_set_token_item_offset(&token
, item
, ioff
+ size_diff
);
4545 /* shift the data */
4547 memmove_extent_buffer(leaf
, BTRFS_LEAF_DATA_OFFSET
+
4548 data_end
+ size_diff
, BTRFS_LEAF_DATA_OFFSET
+
4549 data_end
, old_data_start
+ new_size
- data_end
);
4551 struct btrfs_disk_key disk_key
;
4554 btrfs_item_key(leaf
, &disk_key
, slot
);
4556 if (btrfs_disk_key_type(&disk_key
) == BTRFS_EXTENT_DATA_KEY
) {
4558 struct btrfs_file_extent_item
*fi
;
4560 fi
= btrfs_item_ptr(leaf
, slot
,
4561 struct btrfs_file_extent_item
);
4562 fi
= (struct btrfs_file_extent_item
*)(
4563 (unsigned long)fi
- size_diff
);
4565 if (btrfs_file_extent_type(leaf
, fi
) ==
4566 BTRFS_FILE_EXTENT_INLINE
) {
4567 ptr
= btrfs_item_ptr_offset(leaf
, slot
);
4568 memmove_extent_buffer(leaf
, ptr
,
4570 BTRFS_FILE_EXTENT_INLINE_DATA_START
);
4574 memmove_extent_buffer(leaf
, BTRFS_LEAF_DATA_OFFSET
+
4575 data_end
+ size_diff
, BTRFS_LEAF_DATA_OFFSET
+
4576 data_end
, old_data_start
- data_end
);
4578 offset
= btrfs_disk_key_offset(&disk_key
);
4579 btrfs_set_disk_key_offset(&disk_key
, offset
+ size_diff
);
4580 btrfs_set_item_key(leaf
, &disk_key
, slot
);
4582 fixup_low_keys(path
, &disk_key
, 1);
4585 item
= btrfs_item_nr(slot
);
4586 btrfs_set_item_size(leaf
, item
, new_size
);
4587 btrfs_mark_buffer_dirty(leaf
);
4589 if (btrfs_leaf_free_space(leaf
) < 0) {
4590 btrfs_print_leaf(leaf
);
/*
 * make the item pointed to by the path bigger, data_size is the added size.
 */
void btrfs_extend_item(struct btrfs_path *path, u32 data_size)
{
4601 struct extent_buffer
*leaf
;
4602 struct btrfs_item
*item
;
4604 unsigned int data_end
;
4605 unsigned int old_data
;
4606 unsigned int old_size
;
4608 struct btrfs_map_token token
;
4610 leaf
= path
->nodes
[0];
4612 nritems
= btrfs_header_nritems(leaf
);
4613 data_end
= leaf_data_end(leaf
);
4615 if (btrfs_leaf_free_space(leaf
) < data_size
) {
4616 btrfs_print_leaf(leaf
);
4619 slot
= path
->slots
[0];
4620 old_data
= btrfs_item_end_nr(leaf
, slot
);
4623 if (slot
>= nritems
) {
4624 btrfs_print_leaf(leaf
);
4625 btrfs_crit(leaf
->fs_info
, "slot %d too large, nritems %d",
4631 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4633 /* first correct the data pointers */
4634 btrfs_init_map_token(&token
, leaf
);
4635 for (i
= slot
; i
< nritems
; i
++) {
4637 item
= btrfs_item_nr(i
);
4639 ioff
= btrfs_token_item_offset(&token
, item
);
4640 btrfs_set_token_item_offset(&token
, item
, ioff
- data_size
);
4643 /* shift the data */
4644 memmove_extent_buffer(leaf
, BTRFS_LEAF_DATA_OFFSET
+
4645 data_end
- data_size
, BTRFS_LEAF_DATA_OFFSET
+
4646 data_end
, old_data
- data_end
);
4648 data_end
= old_data
;
4649 old_size
= btrfs_item_size_nr(leaf
, slot
);
4650 item
= btrfs_item_nr(slot
);
4651 btrfs_set_item_size(leaf
, item
, old_size
+ data_size
);
4652 btrfs_mark_buffer_dirty(leaf
);
4654 if (btrfs_leaf_free_space(leaf
) < 0) {
4655 btrfs_print_leaf(leaf
);
/*
 * setup_items_for_insert - Helper called before inserting one or more items
 * to a leaf. Main purpose is to save stack depth by doing the bulk of the work
 * in a function that doesn't call btrfs_search_slot
 *
 * @root:	root we are inserting items to
 * @path:	points to the leaf/slot where we are going to insert new items
 * @cpu_key:	array of keys for items to be inserted
 * @data_size:	size of the body of each item we are going to insert
 * @nr:		size of @cpu_key/@data_size arrays
 */
void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
			    const struct btrfs_key *cpu_key, u32 *data_size,
			    int nr)
{
4675 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
4676 struct btrfs_item
*item
;
4679 unsigned int data_end
;
4680 struct btrfs_disk_key disk_key
;
4681 struct extent_buffer
*leaf
;
4683 struct btrfs_map_token token
;
4687 for (i
= 0; i
< nr
; i
++)
4688 total_data
+= data_size
[i
];
4689 total_size
= total_data
+ (nr
* sizeof(struct btrfs_item
));
4691 if (path
->slots
[0] == 0) {
4692 btrfs_cpu_key_to_disk(&disk_key
, cpu_key
);
4693 fixup_low_keys(path
, &disk_key
, 1);
4695 btrfs_unlock_up_safe(path
, 1);
4697 leaf
= path
->nodes
[0];
4698 slot
= path
->slots
[0];
4700 nritems
= btrfs_header_nritems(leaf
);
4701 data_end
= leaf_data_end(leaf
);
4703 if (btrfs_leaf_free_space(leaf
) < total_size
) {
4704 btrfs_print_leaf(leaf
);
4705 btrfs_crit(fs_info
, "not enough freespace need %u have %d",
4706 total_size
, btrfs_leaf_free_space(leaf
));
4710 btrfs_init_map_token(&token
, leaf
);
4711 if (slot
!= nritems
) {
4712 unsigned int old_data
= btrfs_item_end_nr(leaf
, slot
);
4714 if (old_data
< data_end
) {
4715 btrfs_print_leaf(leaf
);
4717 "item at slot %d with data offset %u beyond data end of leaf %u",
4718 slot
, old_data
, data_end
);
4722 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4724 /* first correct the data pointers */
4725 for (i
= slot
; i
< nritems
; i
++) {
4728 item
= btrfs_item_nr(i
);
4729 ioff
= btrfs_token_item_offset(&token
, item
);
4730 btrfs_set_token_item_offset(&token
, item
,
4733 /* shift the items */
4734 memmove_extent_buffer(leaf
, btrfs_item_nr_offset(slot
+ nr
),
4735 btrfs_item_nr_offset(slot
),
4736 (nritems
- slot
) * sizeof(struct btrfs_item
));
4738 /* shift the data */
4739 memmove_extent_buffer(leaf
, BTRFS_LEAF_DATA_OFFSET
+
4740 data_end
- total_data
, BTRFS_LEAF_DATA_OFFSET
+
4741 data_end
, old_data
- data_end
);
4742 data_end
= old_data
;
4745 /* setup the item for the new data */
4746 for (i
= 0; i
< nr
; i
++) {
4747 btrfs_cpu_key_to_disk(&disk_key
, cpu_key
+ i
);
4748 btrfs_set_item_key(leaf
, &disk_key
, slot
+ i
);
4749 item
= btrfs_item_nr(slot
+ i
);
4750 data_end
-= data_size
[i
];
4751 btrfs_set_token_item_offset(&token
, item
, data_end
);
4752 btrfs_set_token_item_size(&token
, item
, data_size
[i
]);
4755 btrfs_set_header_nritems(leaf
, nritems
+ nr
);
4756 btrfs_mark_buffer_dirty(leaf
);
4758 if (btrfs_leaf_free_space(leaf
) < 0) {
4759 btrfs_print_leaf(leaf
);
/*
 * Given a key and some data, insert items into the tree.
 * This does all the path init required, making room in the tree if needed.
 */
int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path,
			     const struct btrfs_key *cpu_key, u32 *data_size,
			     int nr)
{
	int ret = 0;
	int slot;
	int i;
	u32 total_size = 0;
	u32 total_data = 0;

	for (i = 0; i < nr; i++)
		total_data += data_size[i];

	total_size = total_data + (nr * sizeof(struct btrfs_item));
	ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
	if (ret == 0)
		return -EEXIST;
	if (ret < 0)
		return ret;

	slot = path->slots[0];
	BUG_ON(slot < 0);

	setup_items_for_insert(root, path, cpu_key, data_size, nr);
	return 0;
}
/*
 * Given a key and some data, insert an item into the tree.
 * This does all the path init required, making room in the tree if needed.
 */
int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      const struct btrfs_key *cpu_key, void *data,
		      u32 data_size)
{
	int ret = 0;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	unsigned long ptr;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
	if (!ret) {
		leaf = path->nodes[0];
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		write_extent_buffer(leaf, data, ptr, data_size);
		btrfs_mark_buffer_dirty(leaf);
	}
	btrfs_free_path(path);
	return ret;
}
/*
 * delete the pointer from a given node.
 *
 * the tree should have been previously balanced so the deletion does not
 * empty a node.
 */
static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
		    int level, int slot)
{
	struct extent_buffer *parent = path->nodes[level];
	u32 nritems;
	int ret;

	nritems = btrfs_header_nritems(parent);
	if (slot != nritems - 1) {
		if (level) {
			ret = tree_mod_log_insert_move(parent, slot, slot + 1,
					nritems - slot - 1);
			BUG_ON(ret < 0);
		}
		memmove_extent_buffer(parent,
			      btrfs_node_key_ptr_offset(slot),
			      btrfs_node_key_ptr_offset(slot + 1),
			      sizeof(struct btrfs_key_ptr) *
			      (nritems - slot - 1));
	} else if (level) {
		ret = tree_mod_log_insert_key(parent, slot, MOD_LOG_KEY_REMOVE,
				GFP_NOFS);
		BUG_ON(ret < 0);
	}

	nritems--;
	btrfs_set_header_nritems(parent, nritems);
	if (nritems == 0 && parent == root->node) {
		BUG_ON(btrfs_header_level(root->node) != 1);
		/* just turn the root into a leaf and break */
		btrfs_set_header_level(root->node, 0);
	} else if (slot == 0) {
		struct btrfs_disk_key disk_key;

		btrfs_node_key(parent, &disk_key, 0);
		fixup_low_keys(path, &disk_key, level + 1);
	}
	btrfs_mark_buffer_dirty(parent);
}
/*
 * a helper function to delete the leaf pointed to by path->slots[1] and
 * path->nodes[1].
 *
 * This deletes the pointer in path->nodes[1] and frees the leaf
 * block extent.
 *
 * The path must have already been setup for deleting the leaf, including
 * all the proper balancing. path->nodes[1] must be locked.
 */
static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct extent_buffer *leaf)
{
	WARN_ON(btrfs_header_generation(leaf) != trans->transid);
	del_ptr(root, path, 1, path->slots[1]);

	/*
	 * btrfs_free_extent is expensive, we want to make sure we
	 * aren't holding any locks when we call it
	 */
	btrfs_unlock_up_safe(path, 0);

	root_sub_used(root, leaf->len);

	atomic_inc(&leaf->refs);
	btrfs_free_tree_block(trans, root, leaf, 0, 1);
	free_extent_buffer_stale(leaf);
}
/*
 * delete the item at the leaf level in path.  If that empties
 * the leaf, remove it from the tree
 */
int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct btrfs_path *path, int slot, int nr)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	u32 last_off;
	u32 dsize = 0;
	int ret = 0;
	int wret;
	int i;
	u32 nritems;

	leaf = path->nodes[0];
	last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);

	for (i = 0; i < nr; i++)
		dsize += btrfs_item_size_nr(leaf, slot + i);

	nritems = btrfs_header_nritems(leaf);

	if (slot + nr != nritems) {
		int data_end = leaf_data_end(leaf);
		struct btrfs_map_token token;

		memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
			      data_end + dsize,
			      BTRFS_LEAF_DATA_OFFSET + data_end,
			      last_off - data_end);

		btrfs_init_map_token(&token, leaf);
		for (i = slot + nr; i < nritems; i++) {
			u32 ioff;

			item = btrfs_item_nr(i);
			ioff = btrfs_token_item_offset(&token, item);
			btrfs_set_token_item_offset(&token, item, ioff + dsize);
		}

		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
			      btrfs_item_nr_offset(slot + nr),
			      sizeof(struct btrfs_item) *
			      (nritems - slot - nr));
	}
	btrfs_set_header_nritems(leaf, nritems - nr);
	nritems -= nr;

	/* delete the leaf if we've emptied it */
	if (nritems == 0) {
		if (leaf == root->node) {
			btrfs_set_header_level(leaf, 0);
		} else {
			btrfs_clean_tree_block(leaf);
			btrfs_del_leaf(trans, root, path, leaf);
		}
	} else {
		int used = leaf_space_used(leaf, 0, nritems);
		if (slot == 0) {
			struct btrfs_disk_key disk_key;

			btrfs_item_key(leaf, &disk_key, 0);
			fixup_low_keys(path, &disk_key, 1);
		}

		/* delete the leaf if it is mostly empty */
		if (used < BTRFS_LEAF_DATA_SIZE(fs_info) / 3) {
			/* push_leaf_left fixes the path.
			 * make sure the path still points to our leaf
			 * for possible call to del_ptr below
			 */
			slot = path->slots[1];
			atomic_inc(&leaf->refs);

			wret = push_leaf_left(trans, root, path, 1, 1,
					      1, (u32)-1);
			if (wret < 0 && wret != -ENOSPC)
				ret = wret;

			if (path->nodes[0] == leaf &&
			    btrfs_header_nritems(leaf)) {
				wret = push_leaf_right(trans, root, path, 1,
						       1, 1, 0);
				if (wret < 0 && wret != -ENOSPC)
					ret = wret;
			}

			if (btrfs_header_nritems(leaf) == 0) {
				path->slots[1] = slot;
				btrfs_del_leaf(trans, root, path, leaf);
				free_extent_buffer(leaf);
				ret = 0;
			} else {
				/* if we're still in the path, make sure
				 * we're dirty.  Otherwise, one of the
				 * push_leaf functions must have already
				 * dirtied this buffer
				 */
				if (path->nodes[0] == leaf)
					btrfs_mark_buffer_dirty(leaf);
				free_extent_buffer(leaf);
			}
		} else {
			btrfs_mark_buffer_dirty(leaf);
		}
	}
	return ret;
}
/*
 * search the tree again to find a leaf with lesser keys
 * returns 0 if it found something or 1 if there are no lesser leaves.
 * returns < 0 on io errors.
 *
 * This may release the path, and so you may lose any locks held at the
 * time you call it.
 */
int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
{
	struct btrfs_key key;
	struct btrfs_disk_key found_key;
	int ret;

	btrfs_item_key_to_cpu(path->nodes[0], &key, 0);

	if (key.offset > 0) {
		key.offset--;
	} else if (key.type > 0) {
		key.type--;
		key.offset = (u64)-1;
	} else if (key.objectid > 0) {
		key.objectid--;
		key.type = (u8)-1;
		key.offset = (u64)-1;
	} else {
		return 1;
	}

	btrfs_release_path(path);
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	btrfs_item_key(path->nodes[0], &found_key, 0);
	ret = comp_keys(&found_key, &key);
	/*
	 * We might have had an item with the previous key in the tree right
	 * before we released our path. And after we released our path, that
	 * item might have been pushed to the first slot (0) of the leaf we
	 * were holding due to a tree balance. Alternatively, an item with the
	 * previous key can exist as the only element of a leaf (big fat item).
	 * Therefore account for these 2 cases, so that our callers (like
	 * btrfs_previous_item) don't miss an existing item with a key matching
	 * the previous key we computed above.
	 */
	if (ret <= 0)
		return 0;
	return 1;
}
/*
 * A helper function to walk down the tree starting at min_key, and looking
 * for nodes or leaves that have a minimum transaction id.
 * This is used by the btree defrag code, and tree logging
 *
 * This does not cow, but it does stuff the starting key it finds back
 * into min_key, so you can call btrfs_search_slot with cow=1 on the
 * key and get a writable path.
 *
 * This honors path->lowest_level to prevent descent past a given level
 * of the tree.
 *
 * min_trans indicates the oldest transaction that you are interested
 * in walking through.  Any nodes or leaves older than min_trans are
 * skipped over (without reading them).
 *
 * returns zero if something useful was found, < 0 on error and 1 if there
 * was nothing in the tree that matched the search criteria.
 */
int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
			 struct btrfs_path *path,
			 u64 min_trans)
{
	struct extent_buffer *cur;
	struct btrfs_key found_key;
	int slot;
	int sret;
	u32 nritems;
	int level;
	int ret = 1;
	int keep_locks = path->keep_locks;

	path->keep_locks = 1;
again:
	cur = btrfs_read_lock_root_node(root);
	level = btrfs_header_level(cur);
	WARN_ON(path->nodes[level]);
	path->nodes[level] = cur;
	path->locks[level] = BTRFS_READ_LOCK;

	if (btrfs_header_generation(cur) < min_trans) {
		ret = 1;
		goto out;
	}
	while (1) {
		nritems = btrfs_header_nritems(cur);
		level = btrfs_header_level(cur);
		sret = btrfs_bin_search(cur, min_key, &slot);
		if (sret < 0) {
			ret = sret;
			goto out;
		}

		/* at the lowest level, we're done, setup the path and exit */
		if (level == path->lowest_level) {
			if (slot >= nritems)
				goto find_next_key;
			ret = 0;
			path->slots[level] = slot;
			btrfs_item_key_to_cpu(cur, &found_key, slot);
			goto out;
		}
		if (sret && slot > 0)
			slot--;
		/*
		 * check this node pointer against the min_trans parameters.
		 * If it is too old, skip to the next one.
		 */
		while (slot < nritems) {
			u64 gen;

			gen = btrfs_node_ptr_generation(cur, slot);
			if (gen < min_trans) {
				slot++;
				continue;
			}
			break;
		}
find_next_key:
		/*
		 * we didn't find a candidate key in this node, walk forward
		 * and find another one
		 */
		if (slot >= nritems) {
			path->slots[level] = slot;
			sret = btrfs_find_next_key(root, path, min_key, level,
						  min_trans);
			if (sret == 0) {
				btrfs_release_path(path);
				goto again;
			} else {
				goto out;
			}
		}
		/* save our key for returning back */
		btrfs_node_key_to_cpu(cur, &found_key, slot);
		path->slots[level] = slot;
		if (level == path->lowest_level) {
			ret = 0;
			goto out;
		}
		cur = btrfs_read_node_slot(cur, slot);
		if (IS_ERR(cur)) {
			ret = PTR_ERR(cur);
			goto out;
		}

		btrfs_tree_read_lock(cur);

		path->locks[level - 1] = BTRFS_READ_LOCK;
		path->nodes[level - 1] = cur;
		unlock_up(path, level, 1, 0, NULL);
	}
out:
	path->keep_locks = keep_locks;
	if (ret == 0) {
		btrfs_unlock_up_safe(path, path->lowest_level + 1);
		memcpy(min_key, &found_key, sizeof(found_key));
	}
	return ret;
}
/*
 * this is similar to btrfs_next_leaf, but does not try to preserve
 * and fixup the path.  It looks for and returns the next key in the
 * tree based on the current path and the min_trans parameters.
 *
 * 0 is returned if another key is found, < 0 if there are any errors
 * and 1 is returned if there are no higher keys in the tree
 *
 * path->keep_locks should be set to 1 on the search made before
 * calling this function.
 */
int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
			struct btrfs_key *key, int level, u64 min_trans)
{
	int slot;
	struct extent_buffer *c;

	WARN_ON(!path->keep_locks && !path->skip_locking);
	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level])
			return 1;

		slot = path->slots[level] + 1;
		c = path->nodes[level];
next:
		if (slot >= btrfs_header_nritems(c)) {
			int ret;
			int orig_lowest;
			struct btrfs_key cur_key;
			if (level + 1 >= BTRFS_MAX_LEVEL ||
			    !path->nodes[level + 1])
				return 1;

			if (path->locks[level + 1] || path->skip_locking) {
				level++;
				continue;
			}

			slot = btrfs_header_nritems(c) - 1;
			if (level == 0)
				btrfs_item_key_to_cpu(c, &cur_key, slot);
			else
				btrfs_node_key_to_cpu(c, &cur_key, slot);

			orig_lowest = path->lowest_level;
			btrfs_release_path(path);
			path->lowest_level = level;
			ret = btrfs_search_slot(NULL, root, &cur_key, path,
						0, 0);
			path->lowest_level = orig_lowest;
			if (ret < 0)
				return ret;

			c = path->nodes[level];
			slot = path->slots[level];
			if (ret == 0)
				slot++;
			goto next;
		}

		if (level == 0)
			btrfs_item_key_to_cpu(c, key, slot);
		else {
			u64 gen = btrfs_node_ptr_generation(c, slot);

			if (gen < min_trans) {
				slot++;
				goto next;
			}
			btrfs_node_key_to_cpu(c, key, slot);
		}
		return 0;
	}
	return 1;
}
/*
 * search the tree again to find a leaf with greater keys
 * returns 0 if it found something or 1 if there are no greater leaves.
 * returns < 0 on io errors.
 */
int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
{
	return btrfs_next_old_leaf(root, path, 0);
}

int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
			u64 time_seq)
{
	int slot;
	int level;
	struct extent_buffer *c;
	struct extent_buffer *next;
	struct btrfs_key key;
	u32 nritems;
	int ret;
	int i;

	nritems = btrfs_header_nritems(path->nodes[0]);
	if (nritems == 0)
		return 1;

	btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
again:
	level = 1;
	next = NULL;
	btrfs_release_path(path);

	path->keep_locks = 1;

	if (time_seq)
		ret = btrfs_search_old_slot(root, &key, path, time_seq);
	else
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	path->keep_locks = 0;

	if (ret < 0)
		return ret;

	nritems = btrfs_header_nritems(path->nodes[0]);
	/*
	 * by releasing the path above we dropped all our locks.  A balance
	 * could have added more items next to the key that used to be
	 * at the very end of the block.  So, check again here and
	 * advance the path if there are now more items available.
	 */
	if (nritems > 0 && path->slots[0] < nritems - 1) {
		if (ret == 0)
			path->slots[0]++;
		ret = 0;
		goto done;
	}
	/*
	 * So the above check misses one case:
	 * - after releasing the path above, someone has removed the item that
	 *   used to be at the very end of the block, and balance between leafs
	 *   gets another one with bigger key.offset to replace it.
	 *
	 * This one should be returned as well, or we can get leaf corruption
	 * later(esp. in __btrfs_drop_extents()).
	 *
	 * And a bit more explanation about this check,
	 * with ret > 0, the key isn't found, the path points to the slot
	 * where it should be inserted, so the path->slots[0] item must be the
	 * bigger one.
	 */
	if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) {
		ret = 0;
		goto done;
	}

	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level]) {
			ret = 1;
			goto done;
		}

		slot = path->slots[level] + 1;
		c = path->nodes[level];
		if (slot >= btrfs_header_nritems(c)) {
			level++;
			if (level == BTRFS_MAX_LEVEL) {
				ret = 1;
				goto done;
			}
			continue;
		}

		/*
		 * Our current level is where we're going to start from, and to
		 * make sure lockdep doesn't complain we need to drop our locks
		 * and nodes from 0 to our current level.
		 */
		for (i = 0; i < level; i++) {
			if (path->locks[level]) {
				btrfs_tree_read_unlock(path->nodes[i]);
				path->locks[i] = 0;
			}

			free_extent_buffer(path->nodes[i]);
			path->nodes[i] = NULL;
		}

		ret = read_block_for_search(root, path, &next, level,
					    slot, &key);
		if (ret == -EAGAIN)
			goto again;

		if (ret < 0) {
			btrfs_release_path(path);
			goto done;
		}

		if (!path->skip_locking) {
			ret = btrfs_try_tree_read_lock(next);
			if (!ret && time_seq) {
				/*
				 * If we don't get the lock, we may be racing
				 * with push_leaf_left, holding that lock while
				 * itself waiting for the leaf we've currently
				 * locked. To solve this situation, we give up
				 * on our lock and cycle.
				 */
				free_extent_buffer(next);
				btrfs_release_path(path);
				cond_resched();
				goto again;
			}
			if (!ret)
				btrfs_tree_read_lock(next);
		}
		break;
	}
	path->slots[level] = slot;
	while (1) {
		level--;
		path->nodes[level] = next;
		path->slots[level] = 0;
		if (!path->skip_locking)
			path->locks[level] = BTRFS_READ_LOCK;
		if (!level)
			break;

		ret = read_block_for_search(root, path, &next, level,
					    0, &key);
		if (ret == -EAGAIN)
			goto again;

		if (ret < 0) {
			btrfs_release_path(path);
			goto done;
		}

		if (!path->skip_locking)
			btrfs_tree_read_lock(next);
	}
	ret = 0;
done:
	unlock_up(path, 0, 1, 0, NULL);

	return ret;
}
/*
 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
 * searching until it gets past min_objectid or finds an item of 'type'
 *
 * returns 0 if something is found, 1 if nothing was found and < 0 on error
 */
int btrfs_previous_item(struct btrfs_root *root,
			struct btrfs_path *path, u64 min_objectid,
			int type)
{
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;

	while (1) {
		if (path->slots[0] == 0) {
			ret = btrfs_prev_leaf(root, path);
			if (ret != 0)
				return ret;
		} else {
			path->slots[0]--;
		}
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (nritems == 0)
			return 1;
		if (path->slots[0] == nritems)
			path->slots[0]--;

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid < min_objectid)
			break;
		if (found_key.type == type)
			return 0;
		if (found_key.objectid == min_objectid &&
		    found_key.type < type)
			break;
	}
	return 1;
}
/*
 * search in extent tree to find a previous Metadata/Data extent item with
 * min objectid.
 *
 * returns 0 if something is found, 1 if nothing was found and < 0 on error
 */
int btrfs_previous_extent_item(struct btrfs_root *root,
			struct btrfs_path *path, u64 min_objectid)
{
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;

	while (1) {
		if (path->slots[0] == 0) {
			ret = btrfs_prev_leaf(root, path);
			if (ret != 0)
				return ret;
		} else {
			path->slots[0]--;
		}
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (nritems == 0)
			return 1;
		if (path->slots[0] == nritems)
			path->slots[0]--;

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid < min_objectid)
			break;
		if (found_key.type == BTRFS_EXTENT_ITEM_KEY ||
		    found_key.type == BTRFS_METADATA_ITEM_KEY)
			return 0;
		if (found_key.objectid == min_objectid &&
		    found_key.type < BTRFS_EXTENT_ITEM_KEY)
			break;
	}
	return 1;
}