/*
 * Copyright (C) 2014 Facebook.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/stacktrace.h>
#include "ctree.h"
#include "disk-io.h"
#include "locking.h"
#include "delayed-ref.h"
#include "ref-verify.h"
/*
 * Used to keep track of the roots and the number of refs each root has for a
 * given bytenr.  This just tracks the number of direct references, not shared
 * references.
 */
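/*
 * A minimal sketch of the structure described above, reconstructed from how
 * the rest of this file uses it (re->root_objectid, re->num_refs, re->node).
 * Treat the exact field layout as an assumption, not the verbatim original.
 */
struct root_entry {
	u64 root_objectid;
	u64 num_refs;
	struct rb_node node;
};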
/*
 * These are meant to represent what should exist in the extent tree; they can
 * be used to verify that the extent tree is consistent, as they should all
 * match what the extent tree says.
 */
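/*
 * A minimal sketch of the ref entry described above, reconstructed from the
 * fields the comparison and dump helpers below rely on (root_objectid,
 * parent, owner, offset, num_refs).  Treat the exact layout as an assumption.
 */
struct ref_entry {
	u64 root_objectid;
	u64 parent;
	u64 owner;
	u64 offset;
	u64 num_refs;
	struct rb_node node;
};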
/*
 * Whenever we add/remove a reference we record the action.  The action maps
 * back to the delayed ref action.  We hold the ref we are changing in the
 * action so we can account for the history properly, and we record the root
 * we were called with since it could be different from ref_root.  We also
 * store stack traces because that's how I roll.
 */
#define MAX_TRACE	16

struct ref_action {
	int action;
	u64 root;
	struct ref_entry ref;
	struct list_head list;
	unsigned long trace[MAX_TRACE];
	unsigned int trace_len;
};
/*
 * One of these exists for every block we reference; it holds the roots and
 * references to it as well as all of the ref actions that have occurred on
 * it.  We never free it until we unmount the file system, in order to make
 * sure re-allocations are happening properly.
 */
struct block_entry {
	u64 bytenr;
	u64 len;
	u64 num_refs;
	int metadata;
	int from_disk;
	struct rb_root roots;
	struct rb_root refs;
	struct rb_node node;
	struct list_head actions;
};
static struct block_entry *insert_block_entry(struct rb_root *root,
					      struct block_entry *be)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct block_entry *entry;

	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct block_entry, node);
		if (entry->bytenr > be->bytenr)
			p = &(*p)->rb_left;
		else if (entry->bytenr < be->bytenr)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(&be->node, parent_node, p);
	rb_insert_color(&be->node, root);
	return NULL;
}
static struct block_entry *lookup_block_entry(struct rb_root *root, u64 bytenr)
{
	struct rb_node *n;
	struct block_entry *entry = NULL;

	n = root->rb_node;
	while (n) {
		entry = rb_entry(n, struct block_entry, node);
		if (entry->bytenr < bytenr)
			n = n->rb_right;
		else if (entry->bytenr > bytenr)
			n = n->rb_left;
		else
			return entry;
	}
	return NULL;
}
static struct root_entry *insert_root_entry(struct rb_root *root,
					    struct root_entry *re)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct root_entry *entry;

	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct root_entry, node);
		if (entry->root_objectid > re->root_objectid)
			p = &(*p)->rb_left;
		else if (entry->root_objectid < re->root_objectid)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(&re->node, parent_node, p);
	rb_insert_color(&re->node, root);
	return NULL;
}
static int comp_refs(struct ref_entry *ref1, struct ref_entry *ref2)
{
	if (ref1->root_objectid < ref2->root_objectid)
		return -1;
	if (ref1->root_objectid > ref2->root_objectid)
		return 1;
	if (ref1->parent < ref2->parent)
		return -1;
	if (ref1->parent > ref2->parent)
		return 1;
	if (ref1->owner < ref2->owner)
		return -1;
	if (ref1->owner > ref2->owner)
		return 1;
	if (ref1->offset < ref2->offset)
		return -1;
	if (ref1->offset > ref2->offset)
		return 1;
	return 0;
}
static struct ref_entry *insert_ref_entry(struct rb_root *root,
					  struct ref_entry *ref)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct ref_entry *entry;
	int cmp;

	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct ref_entry, node);
		cmp = comp_refs(entry, ref);
		if (cmp > 0)
			p = &(*p)->rb_left;
		else if (cmp < 0)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(&ref->node, parent_node, p);
	rb_insert_color(&ref->node, root);
	return NULL;
}
static struct root_entry *lookup_root_entry(struct rb_root *root, u64 objectid)
{
	struct rb_node *n;
	struct root_entry *entry = NULL;

	n = root->rb_node;
	while (n) {
		entry = rb_entry(n, struct root_entry, node);
		if (entry->root_objectid < objectid)
			n = n->rb_right;
		else if (entry->root_objectid > objectid)
			n = n->rb_left;
		else
			return entry;
	}
	return NULL;
}
#ifdef CONFIG_STACKTRACE
static void __save_stack_trace(struct ref_action *ra)
{
	struct stack_trace stack_trace;

	stack_trace.max_entries = MAX_TRACE;
	stack_trace.nr_entries = 0;
	stack_trace.entries = ra->trace;
	stack_trace.skip = 2;
	save_stack_trace(&stack_trace);
	ra->trace_len = stack_trace.nr_entries;
}
static void __print_stack_trace(struct btrfs_fs_info *fs_info,
				struct ref_action *ra)
{
	struct stack_trace trace;

	if (ra->trace_len == 0) {
		btrfs_err(fs_info, "  ref-verify: no stacktrace");
		return;
	}
	trace.nr_entries = ra->trace_len;
	trace.entries = ra->trace;
	print_stack_trace(&trace, 2);
}
#else
static inline void __save_stack_trace(struct ref_action *ra)
{
}

static inline void __print_stack_trace(struct btrfs_fs_info *fs_info,
				       struct ref_action *ra)
{
	btrfs_err(fs_info, "  ref-verify: no stacktrace support");
}
#endif
static void free_block_entry(struct block_entry *be)
{
	struct root_entry *re;
	struct ref_entry *ref;
	struct ref_action *ra;
	struct rb_node *n;

	while ((n = rb_first(&be->roots))) {
		re = rb_entry(n, struct root_entry, node);
		rb_erase(&re->node, &be->roots);
		kfree(re);
	}

	while ((n = rb_first(&be->refs))) {
		ref = rb_entry(n, struct ref_entry, node);
		rb_erase(&ref->node, &be->refs);
		kfree(ref);
	}

	while (!list_empty(&be->actions)) {
		ra = list_first_entry(&be->actions, struct ref_action,
				      list);
		list_del(&ra->list);
		kfree(ra);
	}
	kfree(be);
}
static struct block_entry *add_block_entry(struct btrfs_fs_info *fs_info,
					   u64 bytenr, u64 len,
					   u64 root_objectid)
{
	struct block_entry *be = NULL, *exist;
	struct root_entry *re = NULL;

	re = kzalloc(sizeof(struct root_entry), GFP_KERNEL);
	be = kzalloc(sizeof(struct block_entry), GFP_KERNEL);
	if (!be || !re) {
		kfree(re);
		kfree(be);
		return ERR_PTR(-ENOMEM);
	}
	be->bytenr = bytenr;
	be->len = len;

	re->root_objectid = root_objectid;
	re->num_refs = 0;

	spin_lock(&fs_info->ref_verify_lock);
	exist = insert_block_entry(&fs_info->block_tree, be);
	if (exist) {
		struct root_entry *exist_re;

		exist_re = insert_root_entry(&exist->roots, re);
		if (exist_re)
			kfree(re);
		kfree(be);
		return exist;
	}

	be->roots = RB_ROOT;
	be->refs = RB_ROOT;
	INIT_LIST_HEAD(&be->actions);
	insert_root_entry(&be->roots, re);
	return be;
}
static int add_tree_block(struct btrfs_fs_info *fs_info, u64 ref_root,
			  u64 parent, u64 bytenr, int level)
{
	struct block_entry *be;
	struct root_entry *re;
	struct ref_entry *ref = NULL, *exist;

	ref = kmalloc(sizeof(struct ref_entry), GFP_KERNEL);
	if (!ref)
		return -ENOMEM;

	if (parent)
		ref->root_objectid = 0;
	else
		ref->root_objectid = ref_root;
	ref->parent = parent;
	ref->owner = level;
	ref->offset = 0;
	ref->num_refs = 1;

	be = add_block_entry(fs_info, bytenr, fs_info->nodesize, ref_root);
	if (IS_ERR(be)) {
		kfree(ref);
		return PTR_ERR(be);
	}
	be->num_refs++;
	be->from_disk = 1;
	be->metadata = 1;

	if (!parent) {
		re = lookup_root_entry(&be->roots, ref_root);
		re->num_refs++;
	}
	exist = insert_ref_entry(&be->refs, ref);
	if (exist) {
		exist->num_refs++;
		kfree(ref);
	}
	spin_unlock(&fs_info->ref_verify_lock);

	return 0;
}
static int add_shared_data_ref(struct btrfs_fs_info *fs_info,
			       u64 parent, u32 num_refs, u64 bytenr,
			       u64 num_bytes)
{
	struct block_entry *be;
	struct ref_entry *ref;

	ref = kzalloc(sizeof(struct ref_entry), GFP_KERNEL);
	if (!ref)
		return -ENOMEM;

	be = add_block_entry(fs_info, bytenr, num_bytes, 0);
	if (IS_ERR(be)) {
		kfree(ref);
		return PTR_ERR(be);
	}
	be->num_refs += num_refs;

	ref->parent = parent;
	ref->num_refs = num_refs;
	if (insert_ref_entry(&be->refs, ref)) {
		spin_unlock(&fs_info->ref_verify_lock);
		btrfs_err(fs_info, "existing shared ref when reading from disk?");
		kfree(ref);
		return -EINVAL;
	}
	spin_unlock(&fs_info->ref_verify_lock);

	return 0;
}
static int add_extent_data_ref(struct btrfs_fs_info *fs_info,
			       struct extent_buffer *leaf,
			       struct btrfs_extent_data_ref *dref,
			       u64 bytenr, u64 num_bytes)
{
	struct block_entry *be;
	struct ref_entry *ref;
	struct root_entry *re;
	u64 ref_root = btrfs_extent_data_ref_root(leaf, dref);
	u64 owner = btrfs_extent_data_ref_objectid(leaf, dref);
	u64 offset = btrfs_extent_data_ref_offset(leaf, dref);
	u32 num_refs = btrfs_extent_data_ref_count(leaf, dref);

	ref = kzalloc(sizeof(struct ref_entry), GFP_KERNEL);
	if (!ref)
		return -ENOMEM;

	be = add_block_entry(fs_info, bytenr, num_bytes, ref_root);
	if (IS_ERR(be)) {
		kfree(ref);
		return PTR_ERR(be);
	}
	be->num_refs += num_refs;

	ref->owner = owner;
	ref->root_objectid = ref_root;
	ref->offset = offset;
	ref->num_refs = num_refs;
	if (insert_ref_entry(&be->refs, ref)) {
		spin_unlock(&fs_info->ref_verify_lock);
		btrfs_err(fs_info, "existing ref when reading from disk?");
		kfree(ref);
		return -EINVAL;
	}

	re = lookup_root_entry(&be->roots, ref_root);
	if (!re) {
		spin_unlock(&fs_info->ref_verify_lock);
		btrfs_err(fs_info, "missing root in new block entry?");
		return -EINVAL;
	}
	re->num_refs += num_refs;
	spin_unlock(&fs_info->ref_verify_lock);

	return 0;
}
static int process_extent_item(struct btrfs_fs_info *fs_info,
			       struct btrfs_path *path, struct btrfs_key *key,
			       int slot, int *tree_block_level)
{
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	struct btrfs_extent_data_ref *dref;
	struct btrfs_shared_data_ref *sref;
	struct extent_buffer *leaf = path->nodes[0];
	u32 item_size = btrfs_item_size_nr(leaf, slot);
	unsigned long end, ptr;
	u64 offset, flags, count;
	int type, ret = 0;

	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);

	if ((key->type == BTRFS_EXTENT_ITEM_KEY) &&
	    flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		struct btrfs_tree_block_info *info;

		info = (struct btrfs_tree_block_info *)(ei + 1);
		*tree_block_level = btrfs_tree_block_level(leaf, info);
		iref = (struct btrfs_extent_inline_ref *)(info + 1);
	} else {
		if (key->type == BTRFS_METADATA_ITEM_KEY)
			*tree_block_level = key->offset;
		iref = (struct btrfs_extent_inline_ref *)(ei + 1);
	}

	ptr = (unsigned long)iref;
	end = (unsigned long)ei + item_size;
	while (ptr < end) {
		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_extent_inline_ref_type(leaf, iref);
		offset = btrfs_extent_inline_ref_offset(leaf, iref);
		switch (type) {
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = add_tree_block(fs_info, offset, 0, key->objectid,
					     *tree_block_level);
			break;
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = add_tree_block(fs_info, 0, offset, key->objectid,
					     *tree_block_level);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY:
			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			ret = add_extent_data_ref(fs_info, leaf, dref,
						  key->objectid, key->offset);
			break;
		case BTRFS_SHARED_DATA_REF_KEY:
			sref = (struct btrfs_shared_data_ref *)(iref + 1);
			count = btrfs_shared_data_ref_count(leaf, sref);
			ret = add_shared_data_ref(fs_info, offset, count,
						  key->objectid, key->offset);
			break;
		default:
			btrfs_err(fs_info, "invalid key type in iref");
			ret = -EINVAL;
			break;
		}
		if (ret)
			break;
		ptr += btrfs_extent_inline_ref_size(type);
	}

	return ret;
}
static int process_leaf(struct btrfs_root *root,
			struct btrfs_path *path, u64 *bytenr, u64 *num_bytes)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *leaf = path->nodes[0];
	struct btrfs_extent_data_ref *dref;
	struct btrfs_shared_data_ref *sref;
	u64 count;
	int i = 0, tree_block_level = 0, ret = 0;
	struct btrfs_key key;
	int nritems = btrfs_header_nritems(leaf);

	for (i = 0; i < nritems; i++) {
		btrfs_item_key_to_cpu(leaf, &key, i);
		switch (key.type) {
		case BTRFS_EXTENT_ITEM_KEY:
			*num_bytes = key.offset;
			/* fall through */
		case BTRFS_METADATA_ITEM_KEY:
			*bytenr = key.objectid;
			ret = process_extent_item(fs_info, path, &key, i,
						  &tree_block_level);
			break;
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = add_tree_block(fs_info, key.offset, 0,
					     key.objectid, tree_block_level);
			break;
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = add_tree_block(fs_info, 0, key.offset,
					     key.objectid, tree_block_level);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY:
			dref = btrfs_item_ptr(leaf, i,
					      struct btrfs_extent_data_ref);
			ret = add_extent_data_ref(fs_info, leaf, dref, *bytenr,
						  *num_bytes);
			break;
		case BTRFS_SHARED_DATA_REF_KEY:
			sref = btrfs_item_ptr(leaf, i,
					      struct btrfs_shared_data_ref);
			count = btrfs_shared_data_ref_count(leaf, sref);
			ret = add_shared_data_ref(fs_info, key.offset, count,
						  *bytenr, *num_bytes);
			break;
		default:
			break;
		}
		if (ret)
			return ret;
	}
	return 0;
}
/* Walk down to the leaf from the given level */
static int walk_down_tree(struct btrfs_root *root, struct btrfs_path *path,
			  int level, u64 *bytenr, u64 *num_bytes)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *eb;
	u64 block_bytenr, gen;
	int ret = 0;

	while (level >= 0) {
		if (level) {
			block_bytenr = btrfs_node_blockptr(path->nodes[level],
							   path->slots[level]);
			gen = btrfs_node_ptr_generation(path->nodes[level],
							path->slots[level]);
			eb = read_tree_block(fs_info, block_bytenr, gen);
			if (IS_ERR(eb))
				return PTR_ERR(eb);
			if (!extent_buffer_uptodate(eb)) {
				free_extent_buffer(eb);
				return -EIO;
			}
			btrfs_tree_read_lock(eb);
			btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
			path->nodes[level-1] = eb;
			path->slots[level-1] = 0;
			path->locks[level-1] = BTRFS_READ_LOCK_BLOCKING;
		} else {
			ret = process_leaf(root, path, bytenr, num_bytes);
			if (ret)
				break;
		}
		level--;
	}
	return ret;
}
/* Walk up to the next node that needs to be processed */
static int walk_up_tree(struct btrfs_path *path, int *level)
{
	int l;

	for (l = 0; l < BTRFS_MAX_LEVEL; l++) {
		if (!path->nodes[l])
			continue;
		if (l) {
			path->slots[l]++;
			if (path->slots[l] <
			    btrfs_header_nritems(path->nodes[l])) {
				*level = l;
				return 0;
			}
		}
		btrfs_tree_unlock_rw(path->nodes[l], path->locks[l]);
		free_extent_buffer(path->nodes[l]);
		path->nodes[l] = NULL;
		path->slots[l] = 0;
		path->locks[l] = 0;
	}

	return 1;
}
static void dump_ref_action(struct btrfs_fs_info *fs_info,
			    struct ref_action *ra)
{
	btrfs_err(fs_info,
"  Ref action %d, root %llu, ref_root %llu, parent %llu, owner %llu, offset %llu, num_refs %llu",
		  ra->action, ra->root, ra->ref.root_objectid, ra->ref.parent,
		  ra->ref.owner, ra->ref.offset, ra->ref.num_refs);
	__print_stack_trace(fs_info, ra);
}
/*
 * Dumps all the information from the block entry to printk.
 */
static void dump_block_entry(struct btrfs_fs_info *fs_info,
			     struct block_entry *be)
{
	struct ref_entry *ref;
	struct root_entry *re;
	struct ref_action *ra;
	struct rb_node *n;

	btrfs_err(fs_info,
"dumping block entry [%llu %llu], num_refs %llu, metadata %d, from disk %d",
		  be->bytenr, be->len, be->num_refs, be->metadata,
		  be->from_disk);

	for (n = rb_first(&be->refs); n; n = rb_next(n)) {
		ref = rb_entry(n, struct ref_entry, node);
		btrfs_err(fs_info,
"  ref root %llu, parent %llu, owner %llu, offset %llu, num_refs %llu",
			  ref->root_objectid, ref->parent, ref->owner,
			  ref->offset, ref->num_refs);
	}

	for (n = rb_first(&be->roots); n; n = rb_next(n)) {
		re = rb_entry(n, struct root_entry, node);
		btrfs_err(fs_info, "  root entry %llu, num_refs %llu",
			  re->root_objectid, re->num_refs);
	}

	list_for_each_entry(ra, &be->actions, list)
		dump_ref_action(fs_info, ra);
}
/**
 * btrfs_ref_tree_mod: called when we modify a ref for a bytenr
 * @root: the root we are making this modification from.
 * @bytenr: the bytenr we are modifying.
 * @num_bytes: number of bytes.
 * @parent: the parent bytenr.
 * @ref_root: the original root owner of the bytenr.
 * @owner: level in the case of metadata, inode in the case of data.
 * @offset: 0 for metadata, file offset for data.
 * @action: the action that we are doing, this is the same as the delayed ref
 *	    action.
 *
 * This will add an action item for the given bytenr and do sanity checks to
 * make sure we haven't messed something up.  If we are making a new
 * allocation and this block entry has history, we will delete all previous
 * actions as long as our sanity checks pass, as they are no longer needed.
 */
int btrfs_ref_tree_mod(struct btrfs_root *root, u64 bytenr, u64 num_bytes,
		       u64 parent, u64 ref_root, u64 owner, u64 offset,
		       int action)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct ref_entry *ref = NULL, *exist;
	struct ref_action *ra = NULL;
	struct block_entry *be = NULL;
	struct root_entry *re = NULL;
	int ret = 0;
	bool metadata = owner < BTRFS_FIRST_FREE_OBJECTID;

	if (!btrfs_test_opt(root->fs_info, REF_VERIFY))
		return 0;

	ref = kzalloc(sizeof(struct ref_entry), GFP_NOFS);
	ra = kmalloc(sizeof(struct ref_action), GFP_NOFS);
	if (!ref || !ra) {
		kfree(ref);
		kfree(ra);
		ret = -ENOMEM;
		goto out;
	}

	if (parent) {
		ref->parent = parent;
	} else {
		ref->root_objectid = ref_root;
		ref->owner = owner;
		ref->offset = offset;
	}
	ref->num_refs = (action == BTRFS_DROP_DELAYED_REF) ? -1 : 1;

	memcpy(&ra->ref, ref, sizeof(struct ref_entry));
	/*
	 * Save the extra info from the delayed ref in the ref action to make
	 * it easier to figure out what is happening.  The real refs we add to
	 * the ref tree need to reflect what we save on disk so they match any
	 * on-disk refs we pre-loaded.
	 */
	ra->ref.owner = owner;
	ra->ref.offset = offset;
	ra->ref.root_objectid = ref_root;
	__save_stack_trace(ra);

	INIT_LIST_HEAD(&ra->list);
	ra->action = action;
	ra->root = root->objectid;

	/*
	 * This is an allocation, preallocate the block_entry in case we
	 * haven't seen this bytenr before.
	 */
	if (action == BTRFS_ADD_DELAYED_EXTENT) {
		/*
		 * For subvol_create we'll just pass in whatever the parent
		 * root is and the new root objectid, so let's not treat the
		 * passed in root as if it really has a ref for this bytenr.
		 */
		be = add_block_entry(root->fs_info, bytenr, num_bytes,
				     ref_root);
		if (IS_ERR(be)) {
			kfree(ref);
			kfree(ra);
			ret = PTR_ERR(be);
			goto out;
		}
		be->num_refs++;
		if (metadata)
			be->metadata = 1;

		if (be->num_refs != 1) {
			btrfs_err(fs_info,
		"re-allocated a block that still has references to it!");
			dump_block_entry(fs_info, be);
			dump_ref_action(fs_info, ra);
			kfree(ref);
			kfree(ra);
			goto out_unlock;
		}

		while (!list_empty(&be->actions)) {
			struct ref_action *tmp;

			tmp = list_first_entry(&be->actions, struct ref_action,
					       list);
			list_del(&tmp->list);
			kfree(tmp);
		}
	} else {
		struct root_entry *tmp;

		if (!parent) {
			re = kmalloc(sizeof(struct root_entry), GFP_NOFS);
			if (!re) {
				kfree(ref);
				kfree(ra);
				ret = -ENOMEM;
				goto out;
			}
			/*
			 * This is the root that is modifying us, so it's the
			 * one we want to lookup below when we modify the
			 * ref counts.
			 */
			ref_root = root->objectid;
			re->root_objectid = root->objectid;
			re->num_refs = 0;
		}

		spin_lock(&root->fs_info->ref_verify_lock);
		be = lookup_block_entry(&root->fs_info->block_tree, bytenr);
		if (!be) {
			btrfs_err(fs_info,
"trying to do action %d to bytenr %llu num_bytes %llu but there is no existing entry!",
				  action, (unsigned long long)bytenr,
				  (unsigned long long)num_bytes);
			dump_ref_action(fs_info, ra);
			kfree(ref);
			kfree(ra);
			kfree(re);
			goto out_unlock;
		}

		if (re) {
			tmp = insert_root_entry(&be->roots, re);
			if (tmp) {
				kfree(re);
				re = tmp;
			}
		}
	}

	exist = insert_ref_entry(&be->refs, ref);
	if (exist) {
		if (action == BTRFS_DROP_DELAYED_REF) {
			if (exist->num_refs == 0) {
				btrfs_err(fs_info,
"dropping a ref for an existing root that doesn't have a ref on the block");
				dump_block_entry(fs_info, be);
				dump_ref_action(fs_info, ra);
				kfree(ref);
				kfree(ra);
				goto out_unlock;
			}
			exist->num_refs--;
			if (exist->num_refs == 0) {
				rb_erase(&exist->node, &be->refs);
				kfree(exist);
			}
		} else if (!be->metadata) {
			exist->num_refs++;
		} else {
			btrfs_err(fs_info,
"attempting to add another ref for an existing ref on a tree block");
			dump_block_entry(fs_info, be);
			dump_ref_action(fs_info, ra);
			kfree(ref);
			kfree(ra);
			goto out_unlock;
		}
		kfree(ref);
	} else {
		if (action == BTRFS_DROP_DELAYED_REF) {
			btrfs_err(fs_info,
"dropping a ref for a root that doesn't have a ref on the block");
			dump_block_entry(fs_info, be);
			dump_ref_action(fs_info, ra);
			kfree(ra);
			goto out_unlock;
		}
	}

	if (!parent && !re) {
		re = lookup_root_entry(&be->roots, ref_root);
		if (!re) {
			/*
			 * This shouldn't happen because we will add our re
			 * above when we lookup the be with !parent, but just
			 * in case catch this case so we don't panic because I
			 * didn't think of some other corner case.
			 */
			btrfs_err(fs_info, "failed to find root %llu for %llu",
				  root->objectid, be->bytenr);
			dump_block_entry(fs_info, be);
			dump_ref_action(fs_info, ra);
			kfree(ra);
			goto out_unlock;
		}
	}

	if (action == BTRFS_DROP_DELAYED_REF) {
		if (re)
			re->num_refs--;
		be->num_refs--;
	} else if (action == BTRFS_ADD_DELAYED_REF) {
		be->num_refs++;
		if (re)
			re->num_refs++;
	}
	list_add_tail(&ra->list, &be->actions);
out_unlock:
	spin_unlock(&root->fs_info->ref_verify_lock);
out:
	if (ret)
		btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY);
	return ret;
}
/* Free up the ref cache */
void btrfs_free_ref_cache(struct btrfs_fs_info *fs_info)
{
	struct block_entry *be;
	struct rb_node *n;

	if (!btrfs_test_opt(fs_info, REF_VERIFY))
		return;

	spin_lock(&fs_info->ref_verify_lock);
	while ((n = rb_first(&fs_info->block_tree))) {
		be = rb_entry(n, struct block_entry, node);
		rb_erase(&be->node, &fs_info->block_tree);
		free_block_entry(be);
		cond_resched_lock(&fs_info->ref_verify_lock);
	}
	spin_unlock(&fs_info->ref_verify_lock);
}
void btrfs_free_ref_tree_range(struct btrfs_fs_info *fs_info, u64 start,
			       u64 len)
{
	struct block_entry *be = NULL, *entry;
	struct rb_node *n;

	if (!btrfs_test_opt(fs_info, REF_VERIFY))
		return;

	spin_lock(&fs_info->ref_verify_lock);
	n = fs_info->block_tree.rb_node;
	while (n) {
		entry = rb_entry(n, struct block_entry, node);
		if (entry->bytenr < start) {
			n = n->rb_right;
		} else if (entry->bytenr > start) {
			n = n->rb_left;
		} else {
			be = entry;
			break;
		}
		/* We want to get as close to start as possible */
		if (!be ||
		    (entry->bytenr < start && be->bytenr > start) ||
		    (entry->bytenr < start && entry->bytenr > be->bytenr))
			be = entry;
	}

	/*
	 * Could have an empty block group, maybe have something to check for
	 * this case to verify we were actually empty?
	 */
	if (!be) {
		spin_unlock(&fs_info->ref_verify_lock);
		return;
	}

	n = &be->node;
	while (n) {
		be = rb_entry(n, struct block_entry, node);
		n = rb_next(n);
		if (be->bytenr < start && be->bytenr + be->len > start) {
			btrfs_err(fs_info,
				"block entry overlaps a block group [%llu,%llu]!",
				start, len);
			dump_block_entry(fs_info, be);
		}
		if (be->bytenr < start)
			continue;
		if (be->bytenr >= start + len)
			break;
		if (be->bytenr + be->len > start + len) {
			btrfs_err(fs_info,
				"block entry overlaps a block group [%llu,%llu]!",
				start, len);
			dump_block_entry(fs_info, be);
		}
		rb_erase(&be->node, &fs_info->block_tree);
		free_block_entry(be);
	}
	spin_unlock(&fs_info->ref_verify_lock);
}
/* Walk down all roots and build the ref tree, meant to be called at mount */
int btrfs_build_ref_tree(struct btrfs_fs_info *fs_info)
{
	struct btrfs_path *path;
	struct extent_buffer *eb;
	u64 bytenr = 0, num_bytes = 0;
	int ret = 0, level;

	if (!btrfs_test_opt(fs_info, REF_VERIFY))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	eb = btrfs_read_lock_root_node(fs_info->extent_root);
	btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
	level = btrfs_header_level(eb);
	path->nodes[level] = eb;
	path->slots[level] = 0;
	path->locks[level] = BTRFS_READ_LOCK_BLOCKING;

	while (1) {
		/*
		 * We have to keep track of the bytenr/num_bytes we last hit
		 * because we could have run out of space for an inline ref,
		 * and would have had to add a ref key item which may appear
		 * on a different leaf from the original extent item.
		 */
		ret = walk_down_tree(fs_info->extent_root, path, level,
				     &bytenr, &num_bytes);
		if (ret)
			break;
		ret = walk_up_tree(path, &level);
		if (ret < 0)
			break;
		if (ret > 0) {
			ret = 0;
			break;
		}
	}
	if (ret) {
		btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY);
		btrfs_free_ref_cache(fs_info);
	}
	btrfs_free_path(path);
	return ret;
}