// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2014 Facebook.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/stacktrace.h>
#include "ctree.h"
#include "disk-io.h"
#include "locking.h"
#include "delayed-ref.h"
#include "ref-verify.h"

/*
 * Used to keep track of the roots and the number of refs each root has for a
 * given bytenr.  This only tracks the number of direct references, not shared
 * references.
 */
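/*
 * Sketch of the root_entry layout, reconstructed from how it is used below
 * (keyed by root_objectid in an rbtree, with a per-root reference count).
 */
struct root_entry {
	u64 root_objectid;
	u64 num_refs;
	struct rb_node node;
};
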
/*
 * These are meant to represent what should exist in the extent tree; they can
 * be used to verify that the extent tree is consistent, as they should all
 * match what the extent tree says.
 */
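/*
 * Sketch of the ref_entry layout, reconstructed from comp_refs() and the ref
 * bookkeeping below; a keyed ref uses root_objectid/owner/offset, a shared
 * ref uses parent.
 */
struct ref_entry {
	u64 root_objectid;
	u64 parent;
	u64 owner;
	u64 offset;
	u64 num_refs;
	struct rb_node node;
};

/* Assumed depth of the saved stack traces; ref_action::trace is sized by it. */
#define MAX_TRACE	16
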
/*
 * Whenever we add/remove a reference we record the action.  The action maps
 * back to the delayed ref action.  We hold the ref we are changing in the
 * action so we can account for the history properly, and we record the root we
 * were called with since it could be different from ref_root.  We also store
 * stack traces because that's how I roll.
 */
struct ref_action {
	int action;
	u64 root;
	struct ref_entry ref;
	struct list_head list;
	unsigned long trace[MAX_TRACE];
	unsigned int trace_len;
};

/*
 * One of these for every block we reference, it holds the roots and references
 * to it as well as all of the ref actions that have occurred to it.  We never
 * free it until we unmount the file system in order to make sure re-allocations
 * are happening properly.
 */
struct block_entry {
	u64 bytenr;
	u64 len;
	u64 num_refs;
	int metadata;
	int from_disk;
	struct rb_root roots;
	struct rb_root refs;
	struct rb_node node;
	struct list_head actions;
};

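/*
 * Overall picture, as implied by the code below: fs_info->block_tree is an
 * rbtree of block_entry keyed by bytenr; each block_entry carries an rbtree
 * of root_entry, an rbtree of ref_entry and a list of ref_action history.
 */
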
static struct block_entry *insert_block_entry(struct rb_root *root,
					      struct block_entry *be)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct block_entry *entry;

	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct block_entry, node);
		if (entry->bytenr > be->bytenr)
			p = &(*p)->rb_left;
		else if (entry->bytenr < be->bytenr)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(&be->node, parent_node, p);
	rb_insert_color(&be->node, root);
	return NULL;
}

static struct block_entry *lookup_block_entry(struct rb_root *root, u64 bytenr)
{
	struct rb_node *n;
	struct block_entry *entry = NULL;

	n = root->rb_node;
	while (n) {
		entry = rb_entry(n, struct block_entry, node);
		if (entry->bytenr < bytenr)
			n = n->rb_right;
		else if (entry->bytenr > bytenr)
			n = n->rb_left;
		else
			return entry;
	}
	return NULL;
}

static struct root_entry *insert_root_entry(struct rb_root *root,
					    struct root_entry *re)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct root_entry *entry;

	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct root_entry, node);
		if (entry->root_objectid > re->root_objectid)
			p = &(*p)->rb_left;
		else if (entry->root_objectid < re->root_objectid)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(&re->node, parent_node, p);
	rb_insert_color(&re->node, root);
	return NULL;
}

static int comp_refs(struct ref_entry *ref1, struct ref_entry *ref2)
{
	if (ref1->root_objectid < ref2->root_objectid)
		return -1;
	if (ref1->root_objectid > ref2->root_objectid)
		return 1;
	if (ref1->parent < ref2->parent)
		return -1;
	if (ref1->parent > ref2->parent)
		return 1;
	if (ref1->owner < ref2->owner)
		return -1;
	if (ref1->owner > ref2->owner)
		return 1;
	if (ref1->offset < ref2->offset)
		return -1;
	if (ref1->offset > ref2->offset)
		return 1;
	return 0;
}

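/*
 * Refs are kept sorted by (root_objectid, parent, owner, offset) via
 * comp_refs(), so insert_ref_entry() can detect an already existing ref.
 */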
static struct ref_entry *insert_ref_entry(struct rb_root *root,
					  struct ref_entry *ref)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct ref_entry *entry;
	int cmp;

	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct ref_entry, node);
		cmp = comp_refs(entry, ref);
		if (cmp > 0)
			p = &(*p)->rb_left;
		else if (cmp < 0)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(&ref->node, parent_node, p);
	rb_insert_color(&ref->node, root);
	return NULL;
}

static struct root_entry *lookup_root_entry(struct rb_root *root, u64 objectid)
{
	struct rb_node *n;
	struct root_entry *entry = NULL;

	n = root->rb_node;
	while (n) {
		entry = rb_entry(n, struct root_entry, node);
		if (entry->root_objectid < objectid)
			n = n->rb_right;
		else if (entry->root_objectid > objectid)
			n = n->rb_left;
		else
			return entry;
	}
	return NULL;
}

#ifdef CONFIG_STACKTRACE
static void __save_stack_trace(struct ref_action *ra)
{
	ra->trace_len = stack_trace_save(ra->trace, MAX_TRACE, 2);
}

static void __print_stack_trace(struct btrfs_fs_info *fs_info,
				struct ref_action *ra)
{
	if (ra->trace_len == 0) {
		btrfs_err(fs_info, "  ref-verify: no stacktrace");
		return;
	}
	stack_trace_print(ra->trace, ra->trace_len, 2);
}
#else
static inline void __save_stack_trace(struct ref_action *ra)
{
}

static inline void __print_stack_trace(struct btrfs_fs_info *fs_info,
				       struct ref_action *ra)
{
	btrfs_err(fs_info, "  ref-verify: no stacktrace support");
}
#endif

/*
 * Free one block entry and everything hanging off of it: the root entries,
 * the ref entries and the recorded ref actions.
 */
static void free_block_entry(struct block_entry *be)
{
	struct root_entry *re;
	struct ref_entry *ref;
	struct ref_action *ra;
	struct rb_node *n;

	while ((n = rb_first(&be->roots))) {
		re = rb_entry(n, struct root_entry, node);
		rb_erase(&re->node, &be->roots);
		kfree(re);
	}

	while ((n = rb_first(&be->refs))) {
		ref = rb_entry(n, struct ref_entry, node);
		rb_erase(&ref->node, &be->refs);
		kfree(ref);
	}

	while (!list_empty(&be->actions)) {
		ra = list_first_entry(&be->actions, struct ref_action,
				      list);
		list_del(&ra->list);
		kfree(ra);
	}
	kfree(be);
}

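/*
 * Find or create the block entry for @bytenr.  Note: this returns with
 * fs_info->ref_verify_lock held; the callers below are responsible for
 * dropping it.
 */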
static struct block_entry *add_block_entry(struct btrfs_fs_info *fs_info,
					   u64 bytenr, u64 len,
					   u64 root_objectid)
{
	struct block_entry *be = NULL, *exist;
	struct root_entry *re = NULL;

	re = kzalloc(sizeof(struct root_entry), GFP_KERNEL);
	be = kzalloc(sizeof(struct block_entry), GFP_KERNEL);
	if (!be || !re) {
		kfree(re);
		kfree(be);
		return ERR_PTR(-ENOMEM);
	}
	be->bytenr = bytenr;
	be->len = len;

	re->root_objectid = root_objectid;
	re->num_refs = 0;

	spin_lock(&fs_info->ref_verify_lock);
	exist = insert_block_entry(&fs_info->block_tree, be);
	if (exist) {
		if (root_objectid) {
			struct root_entry *exist_re;

			exist_re = insert_root_entry(&exist->roots, re);
			if (exist_re)
				kfree(re);
		} else {
			kfree(re);
		}
		kfree(be);
		return exist;
	}

	INIT_LIST_HEAD(&be->actions);
	if (root_objectid)
		insert_root_entry(&be->roots, re);
	else
		kfree(re);
	return be;
}

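/*
 * Record one tree block ref found while reading from disk: keyed by ref_root
 * when parent == 0, shared (parent only) otherwise.
 */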
static int add_tree_block(struct btrfs_fs_info *fs_info, u64 ref_root,
			  u64 parent, u64 bytenr, int level)
{
	struct block_entry *be;
	struct root_entry *re;
	struct ref_entry *ref = NULL, *exist;

	ref = kmalloc(sizeof(struct ref_entry), GFP_KERNEL);
	if (!ref)
		return -ENOMEM;

	if (parent)
		ref->root_objectid = 0;
	else
		ref->root_objectid = ref_root;
	ref->parent = parent;
	ref->owner = level;
	ref->offset = 0;
	ref->num_refs = 1;

	be = add_block_entry(fs_info, bytenr, fs_info->nodesize, ref_root);
	if (IS_ERR(be)) {
		kfree(ref);
		return PTR_ERR(be);
	}
	be->num_refs++;
	be->from_disk = 1;
	be->metadata = 1;

	if (!parent) {
		ASSERT(ref_root);
		re = lookup_root_entry(&be->roots, ref_root);
		ASSERT(re);
		re->num_refs++;
	}
	exist = insert_ref_entry(&be->refs, ref);
	if (exist) {
		exist->num_refs++;
		kfree(ref);
	}
	spin_unlock(&fs_info->ref_verify_lock);

	return 0;
}

static int add_shared_data_ref(struct btrfs_fs_info *fs_info,
			       u64 parent, u32 num_refs, u64 bytenr,
			       u64 num_bytes)
{
	struct block_entry *be;
	struct ref_entry *ref;

	ref = kzalloc(sizeof(struct ref_entry), GFP_KERNEL);
	if (!ref)
		return -ENOMEM;
	be = add_block_entry(fs_info, bytenr, num_bytes, 0);
	if (IS_ERR(be)) {
		kfree(ref);
		return PTR_ERR(be);
	}
	be->num_refs += num_refs;

	ref->parent = parent;
	ref->num_refs = num_refs;
	if (insert_ref_entry(&be->refs, ref)) {
		spin_unlock(&fs_info->ref_verify_lock);
		btrfs_err(fs_info, "existing shared ref when reading from disk?");
		kfree(ref);
		return -EINVAL;
	}
	spin_unlock(&fs_info->ref_verify_lock);
	return 0;
}

static int add_extent_data_ref(struct btrfs_fs_info *fs_info,
			       struct extent_buffer *leaf,
			       struct btrfs_extent_data_ref *dref,
			       u64 bytenr, u64 num_bytes)
{
	struct block_entry *be;
	struct ref_entry *ref;
	struct root_entry *re;
	u64 ref_root = btrfs_extent_data_ref_root(leaf, dref);
	u64 owner = btrfs_extent_data_ref_objectid(leaf, dref);
	u64 offset = btrfs_extent_data_ref_offset(leaf, dref);
	u32 num_refs = btrfs_extent_data_ref_count(leaf, dref);

	ref = kzalloc(sizeof(struct ref_entry), GFP_KERNEL);
	if (!ref)
		return -ENOMEM;
	be = add_block_entry(fs_info, bytenr, num_bytes, ref_root);
	if (IS_ERR(be)) {
		kfree(ref);
		return PTR_ERR(be);
	}
	be->num_refs += num_refs;

	ref->owner = owner;
	ref->root_objectid = ref_root;
	ref->offset = offset;
	ref->num_refs = num_refs;
	if (insert_ref_entry(&be->refs, ref)) {
		spin_unlock(&fs_info->ref_verify_lock);
		btrfs_err(fs_info, "existing ref when reading from disk?");
		kfree(ref);
		return -EINVAL;
	}

	re = lookup_root_entry(&be->roots, ref_root);
	if (!re) {
		spin_unlock(&fs_info->ref_verify_lock);
		btrfs_err(fs_info, "missing root in new block entry?");
		return -EINVAL;
	}
	re->num_refs += num_refs;
	spin_unlock(&fs_info->ref_verify_lock);
	return 0;
}

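/*
 * Walk every inline ref of one EXTENT_ITEM/METADATA_ITEM and feed it into the
 * helpers above.
 */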
static int process_extent_item(struct btrfs_fs_info *fs_info,
			       struct btrfs_path *path, struct btrfs_key *key,
			       int slot, int *tree_block_level)
{
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	struct btrfs_extent_data_ref *dref;
	struct btrfs_shared_data_ref *sref;
	struct extent_buffer *leaf = path->nodes[0];
	u32 item_size = btrfs_item_size_nr(leaf, slot);
	unsigned long end, ptr;
	u64 offset, flags, count;
	int type;
	int ret = 0;

	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);

	if ((key->type == BTRFS_EXTENT_ITEM_KEY) &&
	    flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		struct btrfs_tree_block_info *info;

		info = (struct btrfs_tree_block_info *)(ei + 1);
		*tree_block_level = btrfs_tree_block_level(leaf, info);
		iref = (struct btrfs_extent_inline_ref *)(info + 1);
	} else {
		if (key->type == BTRFS_METADATA_ITEM_KEY)
			*tree_block_level = key->offset;
		iref = (struct btrfs_extent_inline_ref *)(ei + 1);
	}

	ptr = (unsigned long)iref;
	end = (unsigned long)ei + item_size;
	while (ptr < end) {
		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_extent_inline_ref_type(leaf, iref);
		offset = btrfs_extent_inline_ref_offset(leaf, iref);
		switch (type) {
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = add_tree_block(fs_info, offset, 0, key->objectid,
					     *tree_block_level);
			break;
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = add_tree_block(fs_info, 0, offset, key->objectid,
					     *tree_block_level);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY:
			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			ret = add_extent_data_ref(fs_info, leaf, dref,
						  key->objectid, key->offset);
			break;
		case BTRFS_SHARED_DATA_REF_KEY:
			sref = (struct btrfs_shared_data_ref *)(iref + 1);
			count = btrfs_shared_data_ref_count(leaf, sref);
			ret = add_shared_data_ref(fs_info, offset, count,
						  key->objectid, key->offset);
			break;
		default:
			btrfs_err(fs_info, "invalid key type in iref");
			ret = -EINVAL;
			break;
		}
		if (ret)
			break;
		ptr += btrfs_extent_inline_ref_size(type);
	}
	return ret;
}

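/* Dispatch every extent tree item in one leaf to the helpers above. */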
static int process_leaf(struct btrfs_root *root,
			struct btrfs_path *path, u64 *bytenr, u64 *num_bytes)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *leaf = path->nodes[0];
	struct btrfs_extent_data_ref *dref;
	struct btrfs_shared_data_ref *sref;
	u32 count;
	int i = 0, tree_block_level = 0, ret = 0;
	struct btrfs_key key;
	int nritems = btrfs_header_nritems(leaf);

	for (i = 0; i < nritems; i++) {
		btrfs_item_key_to_cpu(leaf, &key, i);
		switch (key.type) {
		case BTRFS_EXTENT_ITEM_KEY:
			*num_bytes = key.offset;
			/* fall through */
		case BTRFS_METADATA_ITEM_KEY:
			*bytenr = key.objectid;
			ret = process_extent_item(fs_info, path, &key, i,
						  &tree_block_level);
			break;
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = add_tree_block(fs_info, key.offset, 0,
					     key.objectid, tree_block_level);
			break;
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = add_tree_block(fs_info, 0, key.offset,
					     key.objectid, tree_block_level);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY:
			dref = btrfs_item_ptr(leaf, i,
					      struct btrfs_extent_data_ref);
			ret = add_extent_data_ref(fs_info, leaf, dref, *bytenr,
						  *num_bytes);
			break;
		case BTRFS_SHARED_DATA_REF_KEY:
			sref = btrfs_item_ptr(leaf, i,
					      struct btrfs_shared_data_ref);
			count = btrfs_shared_data_ref_count(leaf, sref);
			ret = add_shared_data_ref(fs_info, key.offset, count,
						  *bytenr, *num_bytes);
			break;
		default:
			break;
		}
		if (ret)
			break;
	}
	return ret;
}

/* Walk down to the leaf from the given level */
static int walk_down_tree(struct btrfs_root *root, struct btrfs_path *path,
			  int level, u64 *bytenr, u64 *num_bytes)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *eb;
	u64 block_bytenr, gen;
	int ret = 0;

	while (level >= 0) {
		if (level) {
			struct btrfs_key first_key;

			block_bytenr = btrfs_node_blockptr(path->nodes[level],
							   path->slots[level]);
			gen = btrfs_node_ptr_generation(path->nodes[level],
							path->slots[level]);
			btrfs_node_key_to_cpu(path->nodes[level], &first_key,
					      path->slots[level]);
			eb = read_tree_block(fs_info, block_bytenr, gen,
					     level - 1, &first_key);
			if (IS_ERR(eb))
				return PTR_ERR(eb);
			if (!extent_buffer_uptodate(eb)) {
				free_extent_buffer(eb);
				return -EIO;
			}
			btrfs_tree_read_lock(eb);
			btrfs_set_lock_blocking_read(eb);
			path->nodes[level-1] = eb;
			path->slots[level-1] = 0;
			path->locks[level-1] = BTRFS_READ_LOCK_BLOCKING;
		} else {
			ret = process_leaf(root, path, bytenr, num_bytes);
			if (ret)
				break;
		}
		level--;
	}
	return ret;
}

/* Walk up to the next node that needs to be processed */
static int walk_up_tree(struct btrfs_path *path, int *level)
{
	int l;

	for (l = 0; l < BTRFS_MAX_LEVEL; l++) {
		if (!path->nodes[l])
			continue;
		if (l) {
			path->slots[l]++;
			if (path->slots[l] <
			    btrfs_header_nritems(path->nodes[l])) {
				*level = l;
				return 0;
			}
		}
		btrfs_tree_unlock_rw(path->nodes[l], path->locks[l]);
		free_extent_buffer(path->nodes[l]);
		path->nodes[l] = NULL;
		path->slots[l] = 0;
		path->locks[l] = 0;
	}

	return 1;
}

static void dump_ref_action(struct btrfs_fs_info *fs_info,
			    struct ref_action *ra)
{
	btrfs_err(fs_info,
"  Ref action %d, root %llu, ref_root %llu, parent %llu, owner %llu, offset %llu, num_refs %llu",
		  ra->action, ra->root, ra->ref.root_objectid, ra->ref.parent,
		  ra->ref.owner, ra->ref.offset, ra->ref.num_refs);
	__print_stack_trace(fs_info, ra);
}

/*
 * Dumps all the information from the block entry to printk, it's going to be
 * awesome.
 */
static void dump_block_entry(struct btrfs_fs_info *fs_info,
			     struct block_entry *be)
{
	struct ref_entry *ref;
	struct root_entry *re;
	struct ref_action *ra;
	struct rb_node *n;

	btrfs_err(fs_info,
"dumping block entry [%llu %llu], num_refs %llu, metadata %d, from disk %d",
		  be->bytenr, be->len, be->num_refs, be->metadata,
		  be->from_disk);

	for (n = rb_first(&be->refs); n; n = rb_next(n)) {
		ref = rb_entry(n, struct ref_entry, node);
		btrfs_err(fs_info,
"  ref root %llu, parent %llu, owner %llu, offset %llu, num_refs %llu",
			  ref->root_objectid, ref->parent, ref->owner,
			  ref->offset, ref->num_refs);
	}

	for (n = rb_first(&be->roots); n; n = rb_next(n)) {
		re = rb_entry(n, struct root_entry, node);
		btrfs_err(fs_info, "  root entry %llu, num_refs %llu",
			  re->root_objectid, re->num_refs);
	}

	list_for_each_entry(ra, &be->actions, list)
		dump_ref_action(fs_info, ra);
}

/*
 * btrfs_ref_tree_mod: called when we modify a ref for a bytenr
 *
 * This will add an action item to the given bytenr and do sanity checks to
 * make sure we haven't messed something up.  If we are making a new allocation
 * and this block entry has history we will delete all previous actions as long
 * as our sanity checks pass as they are no longer needed.
 */
int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
		       struct btrfs_ref *generic_ref)
{
	struct ref_entry *ref = NULL, *exist;
	struct ref_action *ra = NULL;
	struct block_entry *be = NULL;
	struct root_entry *re = NULL;
	int action = generic_ref->action;
	int ret = 0;
	bool metadata;
	u64 bytenr = generic_ref->bytenr;
	u64 num_bytes = generic_ref->len;
	u64 parent = generic_ref->parent;
	u64 ref_root;
	u64 owner;
	u64 offset;

	if (!btrfs_test_opt(fs_info, REF_VERIFY))
		return 0;

	if (generic_ref->type == BTRFS_REF_METADATA) {
		ref_root = generic_ref->tree_ref.root;
		owner = generic_ref->tree_ref.level;
		offset = 0;
	} else {
		ref_root = generic_ref->data_ref.ref_root;
		owner = generic_ref->data_ref.ino;
		offset = generic_ref->data_ref.offset;
	}
	metadata = owner < BTRFS_FIRST_FREE_OBJECTID;

	ref = kzalloc(sizeof(struct ref_entry), GFP_NOFS);
	ra = kmalloc(sizeof(struct ref_action), GFP_NOFS);
	if (!ra || !ref) {
		kfree(ref);
		kfree(ra);
		ret = -ENOMEM;
		goto out;
	}

	if (parent) {
		ref->parent = parent;
	} else {
		ref->root_objectid = ref_root;
		ref->owner = owner;
		ref->offset = offset;
	}
	ref->num_refs = (action == BTRFS_DROP_DELAYED_REF) ? -1 : 1;

	memcpy(&ra->ref, ref, sizeof(struct ref_entry));
	/*
	 * Save the extra info from the delayed ref in the ref action to make it
	 * easier to figure out what is happening.  The real refs we add to the
	 * ref tree need to reflect what we save on disk so it matches any
	 * on-disk refs we pre-loaded.
	 */
	ra->ref.owner = owner;
	ra->ref.offset = offset;
	ra->ref.root_objectid = ref_root;
	__save_stack_trace(ra);

	INIT_LIST_HEAD(&ra->list);
	ra->action = action;
	ra->root = generic_ref->real_root;

	/*
	 * This is an allocation, preallocate the block_entry in case we haven't
	 * seen this block before.
	 */
	if (action == BTRFS_ADD_DELAYED_EXTENT) {
		/*
		 * For subvol_create we'll just pass in whatever the parent root
		 * is and the new root objectid, so let's not treat the passed
		 * in root as if it really has a ref for this bytenr.
		 */
		be = add_block_entry(fs_info, bytenr, num_bytes, ref_root);
		if (IS_ERR(be)) {
			kfree(ref);
			kfree(ra);
			ret = PTR_ERR(be);
			goto out;
		}
		be->num_refs++;
		if (metadata)
			be->metadata = 1;

		if (be->num_refs != 1) {
			btrfs_err(fs_info,
		"re-allocated a block that still has references to it!");
			dump_block_entry(fs_info, be);
			dump_ref_action(fs_info, ra);
			kfree(ref);
			kfree(ra);
			goto out_unlock;
		}

		while (!list_empty(&be->actions)) {
			struct ref_action *tmp;

			tmp = list_first_entry(&be->actions, struct ref_action,
					       list);
			list_del(&tmp->list);
			kfree(tmp);
		}
	} else {
		struct root_entry *tmp;

		if (!parent) {
			re = kmalloc(sizeof(struct root_entry), GFP_NOFS);
			if (!re) {
				kfree(ref);
				kfree(ra);
				ret = -ENOMEM;
				goto out;
			}
			/*
			 * This is the root that is modifying us, so it's the
			 * one we want to lookup below when we modify the
			 * re->num_refs.
			 */
			ref_root = generic_ref->real_root;
			re->root_objectid = generic_ref->real_root;
			re->num_refs = 0;
		}

		spin_lock(&fs_info->ref_verify_lock);
		be = lookup_block_entry(&fs_info->block_tree, bytenr);
		if (!be) {
			btrfs_err(fs_info,
"trying to do action %d to bytenr %llu num_bytes %llu but there is no existing entry!",
				  action, (unsigned long long)bytenr,
				  (unsigned long long)num_bytes);
			dump_ref_action(fs_info, ra);
			kfree(re);
			kfree(ref);
			kfree(ra);
			goto out_unlock;
		}

		if (!parent) {
			tmp = insert_root_entry(&be->roots, re);
			if (tmp) {
				kfree(re);
				re = tmp;
			}
		}
	}

	exist = insert_ref_entry(&be->refs, ref);
	if (exist) {
		if (action == BTRFS_DROP_DELAYED_REF) {
			if (exist->num_refs == 0) {
				btrfs_err(fs_info,
"dropping a ref for an existing root that doesn't have a ref on the block");
				dump_block_entry(fs_info, be);
				dump_ref_action(fs_info, ra);
				kfree(ref);
				kfree(ra);
				goto out_unlock;
			}
			exist->num_refs--;
			if (exist->num_refs == 0) {
				rb_erase(&exist->node, &be->refs);
				kfree(exist);
			}
		} else if (!be->metadata) {
			exist->num_refs++;
		} else {
			btrfs_err(fs_info,
"attempting to add another ref for an existing ref on a tree block");
			dump_block_entry(fs_info, be);
			dump_ref_action(fs_info, ra);
			kfree(ref);
			kfree(ra);
			goto out_unlock;
		}
		kfree(ref);
	} else {
		if (action == BTRFS_DROP_DELAYED_REF) {
			btrfs_err(fs_info,
"dropping a ref for a root that doesn't have a ref on the block");
			dump_block_entry(fs_info, be);
			dump_ref_action(fs_info, ra);
			kfree(ra);
			goto out_unlock;
		}
	}

	if (!parent && !re) {
		re = lookup_root_entry(&be->roots, ref_root);
		if (!re) {
			/*
			 * This shouldn't happen because we will add our re
			 * above when we lookup the be with !parent, but just in
			 * case catch this case so we don't panic because I
			 * didn't think of some other corner case.
			 */
			btrfs_err(fs_info, "failed to find root %llu for %llu",
				  generic_ref->real_root, be->bytenr);
			dump_block_entry(fs_info, be);
			dump_ref_action(fs_info, ra);
			kfree(ra);
			goto out_unlock;
		}
	}
	if (action == BTRFS_DROP_DELAYED_REF) {
		if (re)
			re->num_refs--;
		be->num_refs--;
	} else if (action == BTRFS_ADD_DELAYED_REF) {
		be->num_refs++;
		if (re)
			re->num_refs++;
	}
	list_add_tail(&ra->list, &be->actions);
out_unlock:
	spin_unlock(&fs_info->ref_verify_lock);
out:
	if (ret)
		btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY);
	return ret;
}

/* Free up the ref cache */
void btrfs_free_ref_cache(struct btrfs_fs_info *fs_info)
{
	struct block_entry *be;
	struct rb_node *n;

	if (!btrfs_test_opt(fs_info, REF_VERIFY))
		return;

	spin_lock(&fs_info->ref_verify_lock);
	while ((n = rb_first(&fs_info->block_tree))) {
		be = rb_entry(n, struct block_entry, node);
		rb_erase(&be->node, &fs_info->block_tree);
		free_block_entry(be);
		cond_resched_lock(&fs_info->ref_verify_lock);
	}
	spin_unlock(&fs_info->ref_verify_lock);
}

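/*
 * Drop all cached block entries inside [start, start + len), e.g. when a
 * block group is removed; entries that straddle the range boundaries are
 * reported and dumped.
 */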
void btrfs_free_ref_tree_range(struct btrfs_fs_info *fs_info, u64 start,
			       u64 len)
{
	struct block_entry *be = NULL, *entry;
	struct rb_node *n;

	if (!btrfs_test_opt(fs_info, REF_VERIFY))
		return;

	spin_lock(&fs_info->ref_verify_lock);
	n = fs_info->block_tree.rb_node;
	while (n) {
		entry = rb_entry(n, struct block_entry, node);
		if (entry->bytenr < start) {
			n = n->rb_right;
		} else if (entry->bytenr > start) {
			n = n->rb_left;
		} else {
			be = entry;
			break;
		}
		/* We want to get as close to start as possible */
		if (be == NULL ||
		    (entry->bytenr < start && be->bytenr > start) ||
		    (entry->bytenr < start && entry->bytenr > be->bytenr))
			be = entry;
	}

	/*
	 * Could have an empty block group, maybe have something to check for
	 * this case to verify we were actually empty?
	 */
	if (!be) {
		spin_unlock(&fs_info->ref_verify_lock);
		return;
	}

	n = &be->node;
	while (n) {
		be = rb_entry(n, struct block_entry, node);
		n = rb_next(n);
		if (be->bytenr < start && be->bytenr + be->len > start) {
			btrfs_err(fs_info,
				"block entry overlaps a block group [%llu,%llu]!",
				start, len);
			dump_block_entry(fs_info, be);
		}
		if (be->bytenr < start)
			continue;
		if (be->bytenr >= start + len)
			break;
		if (be->bytenr + be->len > start + len) {
			btrfs_err(fs_info,
				"block entry overlaps a block group [%llu,%llu]!",
				start, len);
			dump_block_entry(fs_info, be);
		}
		rb_erase(&be->node, &fs_info->block_tree);
		free_block_entry(be);
	}
	spin_unlock(&fs_info->ref_verify_lock);
}

/* Walk down all roots and build the ref tree, meant to be called at mount */
int btrfs_build_ref_tree(struct btrfs_fs_info *fs_info)
{
	struct btrfs_path *path;
	struct extent_buffer *eb;
	u64 bytenr = 0, num_bytes = 0;
	int ret, level;

	if (!btrfs_test_opt(fs_info, REF_VERIFY))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	eb = btrfs_read_lock_root_node(fs_info->extent_root);
	btrfs_set_lock_blocking_read(eb);
	level = btrfs_header_level(eb);
	path->nodes[level] = eb;
	path->slots[level] = 0;
	path->locks[level] = BTRFS_READ_LOCK_BLOCKING;

	while (1) {
		/*
		 * We have to keep track of the bytenr/num_bytes we last hit
		 * because we could have run out of space for an inline ref, and
		 * would have had to add a ref key item which may appear on a
		 * different leaf from the original extent item.
		 */
		ret = walk_down_tree(fs_info->extent_root, path, level,
				     &bytenr, &num_bytes);
		if (ret)
			break;
		ret = walk_up_tree(path, &level);
		if (ret < 0)
			break;
		if (ret > 0) {
			ret = 0;
			break;
		}
	}
	if (ret) {
		btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY);
		btrfs_free_ref_cache(fs_info);
	}
	btrfs_free_path(path);
	return ret;
}