// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2014 Facebook.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/stacktrace.h>
#include "messages.h"
#include "ctree.h"
#include "disk-io.h"
#include "locking.h"
#include "delayed-ref.h"
#include "ref-verify.h"
#include "fs.h"
#include "accessors.h"
/*
 * Used to keep track of the roots and the number of refs each root has for a
 * given bytenr.  This just tracks the number of direct references, no shared
 * references.
 */
struct root_entry {
	u64 root_objectid;
	u64 num_refs;
	struct rb_node node;
};

/*
 * These are meant to represent what should exist in the extent tree; they can
 * be used to verify that the extent tree is consistent, as they should all
 * match what the extent tree says.
 */
struct ref_entry {
	u64 root_objectid;
	u64 parent;
	u64 owner;
	u64 offset;
	u64 num_refs;
	struct rb_node node;
};

#define MAX_TRACE	16
/*
 * Whenever we add/remove a reference we record the action.  The action maps
 * back to the delayed ref action.  We hold the ref we are changing in the
 * action so we can account for the history properly, and we record the root we
 * were called with since it could be different from ref_root.  We also store
 * stack traces because that's how I roll.
 */
struct ref_action {
	int action;
	u64 root;
	struct ref_entry ref;
	struct list_head list;
	unsigned long trace[MAX_TRACE];
	unsigned int trace_len;
};
/*
 * One of these for every block we reference, it holds the roots and references
 * to it as well as all of the ref actions that have occurred to it.  We never
 * free it until we unmount the file system in order to make sure re-allocations
 * are happening properly.
 */
struct block_entry {
	u64 bytenr;
	u64 len;
	u64 num_refs;
	int metadata;
	int from_disk;
	struct rb_root roots;
	struct rb_root refs;
	struct rb_node node;
	struct list_head actions;
};
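/*
 * Insert a block entry into the tree of all tracked blocks, keyed by bytenr.
 * Returns the existing entry if one was already present, NULL on success.
 */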
static struct block_entry *insert_block_entry(struct rb_root *root,
					      struct block_entry *be)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct block_entry *entry;

	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct block_entry, node);
		if (entry->bytenr > be->bytenr)
			p = &(*p)->rb_left;
		else if (entry->bytenr < be->bytenr)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(&be->node, parent_node, p);
	rb_insert_color(&be->node, root);
	return NULL;
}
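/* Find the block entry for the given bytenr, or NULL if we aren't tracking it. */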
static struct block_entry *lookup_block_entry(struct rb_root *root, u64 bytenr)
{
	struct rb_node *n;
	struct block_entry *entry = NULL;

	n = root->rb_node;
	while (n) {
		entry = rb_entry(n, struct block_entry, node);
		if (entry->bytenr < bytenr)
			n = n->rb_right;
		else if (entry->bytenr > bytenr)
			n = n->rb_left;
		else
			return entry;
	}
	return NULL;
}
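/*
 * Insert a root entry into a block entry's roots tree, keyed by objectid.
 * Returns the existing entry if the root was already present, NULL on success.
 */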
static struct root_entry *insert_root_entry(struct rb_root *root,
					    struct root_entry *re)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct root_entry *entry;

	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct root_entry, node);
		if (entry->root_objectid > re->root_objectid)
			p = &(*p)->rb_left;
		else if (entry->root_objectid < re->root_objectid)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(&re->node, parent_node, p);
	rb_insert_color(&re->node, root);
	return NULL;
}
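/*
 * Total ordering for ref entries: compare the root objectid, then the parent,
 * then the owner and finally the offset.
 */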
static int comp_refs(struct ref_entry *ref1, struct ref_entry *ref2)
{
	if (ref1->root_objectid < ref2->root_objectid)
		return -1;
	if (ref1->root_objectid > ref2->root_objectid)
		return 1;
	if (ref1->parent < ref2->parent)
		return -1;
	if (ref1->parent > ref2->parent)
		return 1;
	if (ref1->owner < ref2->owner)
		return -1;
	if (ref1->owner > ref2->owner)
		return 1;
	if (ref1->offset < ref2->offset)
		return -1;
	if (ref1->offset > ref2->offset)
		return 1;
	return 0;
}
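/*
 * Insert a ref entry into a block entry's refs tree using comp_refs() for the
 * ordering.  Returns the existing entry on collision, NULL on success.
 */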
static struct ref_entry *insert_ref_entry(struct rb_root *root,
					  struct ref_entry *ref)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct ref_entry *entry;
	int cmp;

	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct ref_entry, node);
		cmp = comp_refs(entry, ref);
		if (cmp > 0)
			p = &(*p)->rb_left;
		else if (cmp < 0)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(&ref->node, parent_node, p);
	rb_insert_color(&ref->node, root);
	return NULL;
}
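/* Find the root entry for the given objectid, or NULL if it isn't tracked. */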
static struct root_entry *lookup_root_entry(struct rb_root *root, u64 objectid)
{
	struct rb_node *n;
	struct root_entry *entry = NULL;

	n = root->rb_node;
	while (n) {
		entry = rb_entry(n, struct root_entry, node);
		if (entry->root_objectid < objectid)
			n = n->rb_right;
		else if (entry->root_objectid > objectid)
			n = n->rb_left;
		else
			return entry;
	}
	return NULL;
}
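/*
 * With CONFIG_STACKTRACE we record the call chain of every ref modification so
 * it can be printed when a problem is found; without it we can only note that
 * no stack traces are available.
 */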
#ifdef CONFIG_STACKTRACE
static void __save_stack_trace(struct ref_action *ra)
{
	ra->trace_len = stack_trace_save(ra->trace, MAX_TRACE, 2);
}

static void __print_stack_trace(struct btrfs_fs_info *fs_info,
				struct ref_action *ra)
{
	if (ra->trace_len == 0) {
		btrfs_err(fs_info, "  ref-verify: no stacktrace");
		return;
	}
	stack_trace_print(ra->trace, ra->trace_len, 2);
}
#else
static inline void __save_stack_trace(struct ref_action *ra)
{
}

static inline void __print_stack_trace(struct btrfs_fs_info *fs_info,
				       struct ref_action *ra)
{
	btrfs_err(fs_info, "  ref-verify: no stacktrace support");
}
#endif
static void free_block_entry(struct block_entry *be)
{
	struct root_entry *re;
	struct ref_entry *ref;
	struct ref_action *ra;
	struct rb_node *n;

	while ((n = rb_first(&be->roots))) {
		re = rb_entry(n, struct root_entry, node);
		rb_erase(&re->node, &be->roots);
		kfree(re);
	}

	while ((n = rb_first(&be->refs))) {
		ref = rb_entry(n, struct ref_entry, node);
		rb_erase(&ref->node, &be->refs);
		kfree(ref);
	}

	while (!list_empty(&be->actions)) {
		ra = list_first_entry(&be->actions, struct ref_action,
				      list);
		list_del(&ra->list);
		kfree(ra);
	}
	kfree(be);
}
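/*
 * Allocate (or find) the block entry for @bytenr and make sure it has a root
 * entry for @root_objectid.  Unless the allocations fail, this returns with
 * fs_info->ref_verify_lock held and the caller must drop it.
 */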
static struct block_entry *add_block_entry(struct btrfs_fs_info *fs_info,
					   u64 bytenr, u64 len,
					   u64 root_objectid)
{
	struct block_entry *be = NULL, *exist;
	struct root_entry *re = NULL;

	re = kzalloc(sizeof(struct root_entry), GFP_NOFS);
	be = kzalloc(sizeof(struct block_entry), GFP_NOFS);
	if (!be || !re) {
		kfree(re);
		kfree(be);
		return ERR_PTR(-ENOMEM);
	}
	be->bytenr = bytenr;
	be->len = len;

	re->root_objectid = root_objectid;
	re->num_refs = 0;

	spin_lock(&fs_info->ref_verify_lock);
	exist = insert_block_entry(&fs_info->block_tree, be);
	if (exist) {
		if (root_objectid) {
			struct root_entry *exist_re;

			exist_re = insert_root_entry(&exist->roots, re);
			if (exist_re)
				kfree(re);
		} else {
			kfree(re);
		}
		kfree(be);
		return exist;
	}

	be->num_refs = 0;
	be->metadata = 0;
	be->from_disk = 0;
	be->roots = RB_ROOT;
	be->refs = RB_ROOT;
	INIT_LIST_HEAD(&be->actions);
	if (root_objectid)
		insert_root_entry(&be->roots, re);
	else
		kfree(re);
	return be;
}
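/*
 * Record a tree block ref found on disk, keyed either by root (ref_root) or by
 * parent for shared block refs.
 */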
static int add_tree_block(struct btrfs_fs_info *fs_info, u64 ref_root,
			  u64 parent, u64 bytenr, int level)
{
	struct block_entry *be;
	struct root_entry *re;
	struct ref_entry *ref = NULL, *exist;

	ref = kmalloc(sizeof(struct ref_entry), GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	if (parent)
		ref->root_objectid = 0;
	else
		ref->root_objectid = ref_root;
	ref->parent = parent;
	ref->owner = level;
	ref->offset = 0;
	ref->num_refs = 1;

	be = add_block_entry(fs_info, bytenr, fs_info->nodesize, ref_root);
	if (IS_ERR(be)) {
		kfree(ref);
		return PTR_ERR(be);
	}
	be->num_refs++;
	be->from_disk = 1;
	be->metadata = 1;

	if (!parent) {
		ASSERT(ref_root);
		re = lookup_root_entry(&be->roots, ref_root);
		ASSERT(re);
		re->num_refs++;
	}
	exist = insert_ref_entry(&be->refs, ref);
	if (exist) {
		exist->num_refs++;
		kfree(ref);
	}
	spin_unlock(&fs_info->ref_verify_lock);

	return 0;
}
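/* Record an on-disk BTRFS_SHARED_DATA_REF_KEY ref for @bytenr. */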
static int add_shared_data_ref(struct btrfs_fs_info *fs_info,
			       u64 parent, u32 num_refs, u64 bytenr,
			       u64 num_bytes)
{
	struct block_entry *be;
	struct ref_entry *ref;

	ref = kzalloc(sizeof(struct ref_entry), GFP_NOFS);
	if (!ref)
		return -ENOMEM;
	be = add_block_entry(fs_info, bytenr, num_bytes, 0);
	if (IS_ERR(be)) {
		kfree(ref);
		return PTR_ERR(be);
	}
	be->num_refs += num_refs;

	ref->parent = parent;
	ref->num_refs = num_refs;
	if (insert_ref_entry(&be->refs, ref)) {
		spin_unlock(&fs_info->ref_verify_lock);
		btrfs_err(fs_info, "existing shared ref when reading from disk?");
		kfree(ref);
		return -EINVAL;
	}
	spin_unlock(&fs_info->ref_verify_lock);

	return 0;
}
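/* Record an on-disk BTRFS_EXTENT_DATA_REF_KEY ref for @bytenr. */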
static int add_extent_data_ref(struct btrfs_fs_info *fs_info,
			       struct extent_buffer *leaf,
			       struct btrfs_extent_data_ref *dref,
			       u64 bytenr, u64 num_bytes)
{
	struct block_entry *be;
	struct ref_entry *ref;
	struct root_entry *re;
	u64 ref_root = btrfs_extent_data_ref_root(leaf, dref);
	u64 owner = btrfs_extent_data_ref_objectid(leaf, dref);
	u64 offset = btrfs_extent_data_ref_offset(leaf, dref);
	u32 num_refs = btrfs_extent_data_ref_count(leaf, dref);

	ref = kzalloc(sizeof(struct ref_entry), GFP_NOFS);
	if (!ref)
		return -ENOMEM;
	be = add_block_entry(fs_info, bytenr, num_bytes, ref_root);
	if (IS_ERR(be)) {
		kfree(ref);
		return PTR_ERR(be);
	}
	be->num_refs += num_refs;

	ref->parent = 0;
	ref->owner = owner;
	ref->root_objectid = ref_root;
	ref->offset = offset;
	ref->num_refs = num_refs;
	if (insert_ref_entry(&be->refs, ref)) {
		spin_unlock(&fs_info->ref_verify_lock);
		btrfs_err(fs_info, "existing ref when reading from disk?");
		kfree(ref);
		return -EINVAL;
	}

	re = lookup_root_entry(&be->roots, ref_root);
	if (!re) {
		spin_unlock(&fs_info->ref_verify_lock);
		btrfs_err(fs_info, "missing root in new block entry?");
		return -EINVAL;
	}
	re->num_refs += num_refs;
	spin_unlock(&fs_info->ref_verify_lock);
	return 0;
}
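/*
 * Walk the inline refs of a single extent item and record each of them.  For
 * EXTENT_ITEM/METADATA_ITEM keys this also picks up the tree block level so
 * that later keyed block refs know which level they are for.
 */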
static int process_extent_item(struct btrfs_fs_info *fs_info,
			       struct btrfs_path *path, struct btrfs_key *key,
			       int slot, int *tree_block_level)
{
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	struct btrfs_extent_data_ref *dref;
	struct btrfs_shared_data_ref *sref;
	struct extent_buffer *leaf = path->nodes[0];
	u32 item_size = btrfs_item_size(leaf, slot);
	unsigned long end, ptr;
	u64 offset, flags, count;
	int type;
	int ret = 0;

	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);

	if ((key->type == BTRFS_EXTENT_ITEM_KEY) &&
	    flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		struct btrfs_tree_block_info *info;

		info = (struct btrfs_tree_block_info *)(ei + 1);
		*tree_block_level = btrfs_tree_block_level(leaf, info);
		iref = (struct btrfs_extent_inline_ref *)(info + 1);
	} else {
		if (key->type == BTRFS_METADATA_ITEM_KEY)
			*tree_block_level = key->offset;
		iref = (struct btrfs_extent_inline_ref *)(ei + 1);
	}

	ptr = (unsigned long)iref;
	end = (unsigned long)ei + item_size;
	while (ptr < end) {
		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_extent_inline_ref_type(leaf, iref);
		offset = btrfs_extent_inline_ref_offset(leaf, iref);
		switch (type) {
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = add_tree_block(fs_info, offset, 0, key->objectid,
					     *tree_block_level);
			break;
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = add_tree_block(fs_info, 0, offset, key->objectid,
					     *tree_block_level);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY:
			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			ret = add_extent_data_ref(fs_info, leaf, dref,
						  key->objectid, key->offset);
			break;
		case BTRFS_SHARED_DATA_REF_KEY:
			sref = (struct btrfs_shared_data_ref *)(iref + 1);
			count = btrfs_shared_data_ref_count(leaf, sref);
			ret = add_shared_data_ref(fs_info, offset, count,
						  key->objectid, key->offset);
			break;
		case BTRFS_EXTENT_OWNER_REF_KEY:
			if (!btrfs_fs_incompat(fs_info, SIMPLE_QUOTA)) {
				btrfs_err(fs_info,
		  "found extent owner ref without simple quotas enabled");
				ret = -EINVAL;
			}
			break;
		default:
			btrfs_err(fs_info, "invalid key type in iref");
			ret = -EINVAL;
			break;
		}
		if (ret)
			break;
		ptr += btrfs_extent_inline_ref_size(type);
	}
	return ret;
}
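/*
 * Process every item in an extent tree leaf, dispatching to the helpers above
 * based on the key type.
 */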
static int process_leaf(struct btrfs_root *root,
			struct btrfs_path *path, u64 *bytenr, u64 *num_bytes,
			int *tree_block_level)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *leaf = path->nodes[0];
	struct btrfs_extent_data_ref *dref;
	struct btrfs_shared_data_ref *sref;
	u32 count;
	int i = 0, ret = 0;
	struct btrfs_key key;
	int nritems = btrfs_header_nritems(leaf);

	for (i = 0; i < nritems; i++) {
		btrfs_item_key_to_cpu(leaf, &key, i);
		switch (key.type) {
		case BTRFS_EXTENT_ITEM_KEY:
			*num_bytes = key.offset;
			fallthrough;
		case BTRFS_METADATA_ITEM_KEY:
			*bytenr = key.objectid;
			ret = process_extent_item(fs_info, path, &key, i,
						  tree_block_level);
			break;
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = add_tree_block(fs_info, key.offset, 0,
					     key.objectid, *tree_block_level);
			break;
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = add_tree_block(fs_info, 0, key.offset,
					     key.objectid, *tree_block_level);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY:
			dref = btrfs_item_ptr(leaf, i,
					      struct btrfs_extent_data_ref);
			ret = add_extent_data_ref(fs_info, leaf, dref, *bytenr,
						  *num_bytes);
			break;
		case BTRFS_SHARED_DATA_REF_KEY:
			sref = btrfs_item_ptr(leaf, i,
					      struct btrfs_shared_data_ref);
			count = btrfs_shared_data_ref_count(leaf, sref);
			ret = add_shared_data_ref(fs_info, key.offset, count,
						  *bytenr, *num_bytes);
			break;
		default:
			break;
		}
		if (ret)
			break;
	}
	return ret;
}
/* Walk down to the leaf from the given level */
static int walk_down_tree(struct btrfs_root *root, struct btrfs_path *path,
			  int level, u64 *bytenr, u64 *num_bytes,
			  int *tree_block_level)
{
	struct extent_buffer *eb;
	int ret = 0;

	while (level >= 0) {
		if (level) {
			eb = btrfs_read_node_slot(path->nodes[level],
						  path->slots[level]);
			if (IS_ERR(eb))
				return PTR_ERR(eb);
			btrfs_tree_read_lock(eb);
			path->nodes[level-1] = eb;
			path->slots[level-1] = 0;
			path->locks[level-1] = BTRFS_READ_LOCK;
		} else {
			ret = process_leaf(root, path, bytenr, num_bytes,
					   tree_block_level);
			if (ret)
				break;
		}
		level--;
	}
	return ret;
}
/* Walk up to the next node that needs to be processed */
static int walk_up_tree(struct btrfs_path *path, int *level)
{
	int l;

	for (l = 0; l < BTRFS_MAX_LEVEL; l++) {
		if (!path->nodes[l])
			continue;
		if (l) {
			path->slots[l]++;
			if (path->slots[l] <
			    btrfs_header_nritems(path->nodes[l])) {
				*level = l;
				return 0;
			}
		}
		btrfs_tree_unlock_rw(path->nodes[l], path->locks[l]);
		free_extent_buffer(path->nodes[l]);
		path->nodes[l] = NULL;
		path->slots[l] = 0;
		path->locks[l] = 0;
	}

	return 1;
}
static void dump_ref_action(struct btrfs_fs_info *fs_info,
			    struct ref_action *ra)
{
	btrfs_err(fs_info,
"  Ref action %d, root %llu, ref_root %llu, parent %llu, owner %llu, offset %llu, num_refs %llu",
		  ra->action, ra->root, ra->ref.root_objectid, ra->ref.parent,
		  ra->ref.owner, ra->ref.offset, ra->ref.num_refs);
	__print_stack_trace(fs_info, ra);
}
/*
 * Dumps all the information from the block entry to printk, it's going to be
 * awesome.
 */
static void dump_block_entry(struct btrfs_fs_info *fs_info,
			     struct block_entry *be)
{
	struct ref_entry *ref;
	struct root_entry *re;
	struct ref_action *ra;
	struct rb_node *n;

	btrfs_err(fs_info,
"dumping block entry [%llu %llu], num_refs %llu, metadata %d, from disk %d",
		  be->bytenr, be->len, be->num_refs, be->metadata,
		  be->from_disk);

	for (n = rb_first(&be->refs); n; n = rb_next(n)) {
		ref = rb_entry(n, struct ref_entry, node);
		btrfs_err(fs_info,
"  ref root %llu, parent %llu, owner %llu, offset %llu, num_refs %llu",
			  ref->root_objectid, ref->parent, ref->owner,
			  ref->offset, ref->num_refs);
	}

	for (n = rb_first(&be->roots); n; n = rb_next(n)) {
		re = rb_entry(n, struct root_entry, node);
		btrfs_err(fs_info, "  root entry %llu, num_refs %llu",
			  re->root_objectid, re->num_refs);
	}

	list_for_each_entry(ra, &be->actions, list)
		dump_ref_action(fs_info, ra);
}
/*
 * Called when we modify a ref for a bytenr.
 *
 * This will add an action item to the given bytenr and do sanity checks to
 * make sure we haven't messed something up.  If we are making a new allocation
 * and this block entry has history we will delete all previous actions as long
 * as our sanity checks pass, as they are no longer needed.
 */
int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
		       struct btrfs_ref *generic_ref)
{
	struct ref_entry *ref = NULL, *exist;
	struct ref_action *ra = NULL;
	struct block_entry *be = NULL;
	struct root_entry *re = NULL;
	int action = generic_ref->action;
	int ret = 0;
	bool metadata;
	u64 bytenr = generic_ref->bytenr;
	u64 num_bytes = generic_ref->num_bytes;
	u64 parent = generic_ref->parent;
	u64 ref_root = 0;
	u64 owner = 0;
	u64 offset = 0;

	if (!btrfs_test_opt(fs_info, REF_VERIFY))
		return 0;

	if (generic_ref->type == BTRFS_REF_METADATA) {
		if (!parent)
			ref_root = generic_ref->ref_root;
		owner = generic_ref->tree_ref.level;
	} else if (!parent) {
		ref_root = generic_ref->ref_root;
		owner = generic_ref->data_ref.objectid;
		offset = generic_ref->data_ref.offset;
	}
	metadata = owner < BTRFS_FIRST_FREE_OBJECTID;

	ref = kzalloc(sizeof(struct ref_entry), GFP_NOFS);
	ra = kmalloc(sizeof(struct ref_action), GFP_NOFS);
	if (!ra || !ref) {
		kfree(ref);
		kfree(ra);
		ret = -ENOMEM;
		goto out;
	}

	ref->parent = parent;
	ref->owner = owner;
	ref->root_objectid = ref_root;
	ref->offset = offset;
	ref->num_refs = (action == BTRFS_DROP_DELAYED_REF) ? -1 : 1;

	memcpy(&ra->ref, ref, sizeof(struct ref_entry));
	/*
	 * Save the extra info from the delayed ref in the ref action to make it
	 * easier to figure out what is happening.  The real refs we add to the
	 * ref tree need to reflect what we save on disk so it matches any
	 * on-disk refs we pre-loaded.
	 */
	ra->ref.owner = owner;
	ra->ref.offset = offset;
	ra->ref.root_objectid = ref_root;
	__save_stack_trace(ra);

	INIT_LIST_HEAD(&ra->list);
	ra->action = action;
	ra->root = generic_ref->real_root;

	/*
	 * This is an allocation, preallocate the block_entry in case we haven't
	 * used it before.
	 */
	if (action == BTRFS_ADD_DELAYED_EXTENT) {
		/*
		 * For subvol_create we'll just pass in whatever the parent root
		 * is and the new root objectid, so let's not treat the passed
		 * in root as if it really has a ref for this bytenr.
		 */
		be = add_block_entry(fs_info, bytenr, num_bytes, ref_root);
		if (IS_ERR(be)) {
			kfree(ref);
			kfree(ra);
			ret = PTR_ERR(be);
			goto out;
		}
		be->num_refs++;
		if (metadata)
			be->metadata = 1;

		if (be->num_refs != 1) {
			btrfs_err(fs_info,
			"re-allocated a block that still has references to it!");
			dump_block_entry(fs_info, be);
			dump_ref_action(fs_info, ra);
			kfree(ref);
			kfree(ra);
			ret = -EINVAL;
			goto out_unlock;
		}

		while (!list_empty(&be->actions)) {
			struct ref_action *tmp;

			tmp = list_first_entry(&be->actions, struct ref_action,
					       list);
			list_del(&tmp->list);
			kfree(tmp);
		}
	} else {
		struct root_entry *tmp;

		if (!parent) {
			re = kmalloc(sizeof(struct root_entry), GFP_NOFS);
			if (!re) {
				kfree(ref);
				kfree(ra);
				ret = -ENOMEM;
				goto out;
			}
			/*
			 * This is the root that is modifying us, so it's the
			 * one we want to lookup below when we modify the
			 * re->num_refs.
			 */
			ref_root = generic_ref->real_root;
			re->root_objectid = generic_ref->real_root;
			re->num_refs = 0;
		}

		spin_lock(&fs_info->ref_verify_lock);
		be = lookup_block_entry(&fs_info->block_tree, bytenr);
		if (!be) {
			btrfs_err(fs_info,
"trying to do action %d to bytenr %llu num_bytes %llu but there is no existing entry!",
				  action, bytenr, num_bytes);
			dump_ref_action(fs_info, ra);
			kfree(ref);
			kfree(ra);
			ret = -EINVAL;
			goto out_unlock;
		} else if (be->num_refs == 0) {
			btrfs_err(fs_info,
		"trying to do action %d for a bytenr that has 0 total references",
				action);
			dump_block_entry(fs_info, be);
			dump_ref_action(fs_info, ra);
			kfree(ref);
			kfree(ra);
			ret = -EINVAL;
			goto out_unlock;
		}

		if (!parent) {
			tmp = insert_root_entry(&be->roots, re);
			if (tmp) {
				kfree(re);
				re = tmp;
			}
		}
	}

	exist = insert_ref_entry(&be->refs, ref);
	if (exist) {
		if (action == BTRFS_DROP_DELAYED_REF) {
			if (exist->num_refs == 0) {
				btrfs_err(fs_info,
"dropping a ref for an existing root that doesn't have a ref on the block");
				dump_block_entry(fs_info, be);
				dump_ref_action(fs_info, ra);
				kfree(ref);
				kfree(ra);
				ret = -EINVAL;
				goto out_unlock;
			}
			exist->num_refs--;
			if (exist->num_refs == 0) {
				rb_erase(&exist->node, &be->refs);
				kfree(exist);
			}
		} else if (!be->metadata) {
			exist->num_refs++;
		} else {
			btrfs_err(fs_info,
"attempting to add another ref for an existing ref on a tree block");
			dump_block_entry(fs_info, be);
			dump_ref_action(fs_info, ra);
			kfree(ref);
			kfree(ra);
			ret = -EINVAL;
			goto out_unlock;
		}
		kfree(ref);
	} else {
		if (action == BTRFS_DROP_DELAYED_REF) {
			btrfs_err(fs_info,
"dropping a ref for a root that doesn't have a ref on the block");
			dump_block_entry(fs_info, be);
			dump_ref_action(fs_info, ra);
			rb_erase(&ref->node, &be->refs);
			kfree(ref);
			kfree(ra);
			ret = -EINVAL;
			goto out_unlock;
		}
	}

	if (!parent && !re) {
		re = lookup_root_entry(&be->roots, ref_root);
		if (!re) {
			/*
			 * This shouldn't happen because we will add our re
			 * above when we lookup the be with !parent, but just in
			 * case catch this case so we don't panic because I
			 * didn't think of some other corner case.
			 */
			btrfs_err(fs_info, "failed to find root %llu for %llu",
				  generic_ref->real_root, be->bytenr);
			dump_block_entry(fs_info, be);
			dump_ref_action(fs_info, ra);
			kfree(ra);
			ret = -EINVAL;
			goto out_unlock;
		}
	}
	if (action == BTRFS_DROP_DELAYED_REF) {
		if (re)
			re->num_refs--;
		be->num_refs--;
	} else if (action == BTRFS_ADD_DELAYED_REF) {
		be->num_refs++;
		if (re)
			re->num_refs++;
	}
	list_add_tail(&ra->list, &be->actions);
	ret = 0;
out_unlock:
	spin_unlock(&fs_info->ref_verify_lock);
out:
	if (ret) {
		btrfs_free_ref_cache(fs_info);
		btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY);
	}
	return ret;
}
/* Free up the ref cache */
void btrfs_free_ref_cache(struct btrfs_fs_info *fs_info)
{
	struct block_entry *be;
	struct rb_node *n;

	if (!btrfs_test_opt(fs_info, REF_VERIFY))
		return;

	spin_lock(&fs_info->ref_verify_lock);
	while ((n = rb_first(&fs_info->block_tree))) {
		be = rb_entry(n, struct block_entry, node);
		rb_erase(&be->node, &fs_info->block_tree);
		free_block_entry(be);
		cond_resched_lock(&fs_info->ref_verify_lock);
	}
	spin_unlock(&fs_info->ref_verify_lock);
}
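/*
 * Drop all block entries that fall inside the range [start, start + len),
 * complaining about any entry that crosses a boundary of the range.
 */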
void btrfs_free_ref_tree_range(struct btrfs_fs_info *fs_info, u64 start,
			       u64 len)
{
	struct block_entry *be = NULL, *entry;
	struct rb_node *n;

	if (!btrfs_test_opt(fs_info, REF_VERIFY))
		return;

	spin_lock(&fs_info->ref_verify_lock);
	n = fs_info->block_tree.rb_node;
	while (n) {
		entry = rb_entry(n, struct block_entry, node);
		if (entry->bytenr < start) {
			n = n->rb_right;
		} else if (entry->bytenr > start) {
			n = n->rb_left;
		} else {
			be = entry;
			break;
		}
		/* We want to get as close to start as possible */
		if (be == NULL ||
		    (entry->bytenr < start && be->bytenr > start) ||
		    (entry->bytenr < start && entry->bytenr > be->bytenr))
			be = entry;
	}

	/*
	 * Could have an empty block group, maybe have something to check for
	 * this case to verify we were actually empty?
	 */
	if (!be) {
		spin_unlock(&fs_info->ref_verify_lock);
		return;
	}

	n = &be->node;
	while (n) {
		be = rb_entry(n, struct block_entry, node);
		n = rb_next(n);
		if (be->bytenr < start && be->bytenr + be->len > start) {
			btrfs_err(fs_info,
				"block entry overlaps a block group [%llu,%llu]!",
				start, len);
			dump_block_entry(fs_info, be);
			continue;
		}
		if (be->bytenr < start)
			continue;
		if (be->bytenr >= start + len)
			break;
		if (be->bytenr + be->len > start + len) {
			btrfs_err(fs_info,
				"block entry overlaps a block group [%llu,%llu]!",
				start, len);
			dump_block_entry(fs_info, be);
		}
		rb_erase(&be->node, &fs_info->block_tree);
		free_block_entry(be);
	}
	spin_unlock(&fs_info->ref_verify_lock);
}
/* Walk down all roots and build the ref tree, meant to be called at mount */
int btrfs_build_ref_tree(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *extent_root;
	struct btrfs_path *path;
	struct extent_buffer *eb;
	int tree_block_level = 0;
	u64 bytenr = 0, num_bytes = 0;
	int ret, level;

	if (!btrfs_test_opt(fs_info, REF_VERIFY))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	extent_root = btrfs_extent_root(fs_info, 0);
	eb = btrfs_read_lock_root_node(extent_root);
	level = btrfs_header_level(eb);
	path->nodes[level] = eb;
	path->slots[level] = 0;
	path->locks[level] = BTRFS_READ_LOCK;

	while (1) {
		/*
		 * We have to keep track of the bytenr/num_bytes we last hit
		 * because we could have run out of space for an inline ref, and
		 * would have had to add a ref key item which may appear on a
		 * different leaf from the original extent item.
		 */
		ret = walk_down_tree(extent_root, path, level,
				     &bytenr, &num_bytes, &tree_block_level);
		if (ret)
			break;
		ret = walk_up_tree(path, &level);
		if (ret < 0)
			break;
		if (ret > 0) {
			ret = 0;
			break;
		}
	}
	if (ret) {
		btrfs_free_ref_cache(fs_info);
		btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY);
	}
	btrfs_free_path(path);
	return ret;
}