// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 STRATO. All rights reserved.
 */

#include <linux/mm.h>
#include <linux/rbtree.h>
#include <trace/events/btrfs.h>
#include "ctree.h"
#include "disk-io.h"
#include "backref.h"
#include "ulist.h"
#include "transaction.h"
#include "delayed-ref.h"
#include "locking.h"

/* Just an arbitrary number so we can be sure this happened */
#define BACKREF_FOUND_SHARED 6
struct extent_inode_elem {
	u64 inum;
	u64 offset;
	struct extent_inode_elem *next;
};
static int check_extent_in_eb(const struct btrfs_key *key,
			      const struct extent_buffer *eb,
			      const struct btrfs_file_extent_item *fi,
			      u64 extent_item_pos,
			      struct extent_inode_elem **eie,
			      bool ignore_offset)
{
	u64 offset = 0;
	struct extent_inode_elem *e;

	if (!ignore_offset &&
	    !btrfs_file_extent_compression(eb, fi) &&
	    !btrfs_file_extent_encryption(eb, fi) &&
	    !btrfs_file_extent_other_encoding(eb, fi)) {
		u64 data_offset;
		u64 data_len;

		data_offset = btrfs_file_extent_offset(eb, fi);
		data_len = btrfs_file_extent_num_bytes(eb, fi);

		if (extent_item_pos < data_offset ||
		    extent_item_pos >= data_offset + data_len)
			return 1;
		offset = extent_item_pos - data_offset;
	}

	e = kmalloc(sizeof(*e), GFP_NOFS);
	if (!e)
		return -ENOMEM;

	e->next = *eie;
	e->inum = key->objectid;
	e->offset = key->offset + offset;
	*eie = e;

	return 0;
}
static void free_inode_elem_list(struct extent_inode_elem *eie)
{
	struct extent_inode_elem *eie_next;

	for (; eie; eie = eie_next) {
		eie_next = eie->next;
		kfree(eie);
	}
}
static int find_extent_in_eb(const struct extent_buffer *eb,
			     u64 wanted_disk_byte, u64 extent_item_pos,
			     struct extent_inode_elem **eie,
			     bool ignore_offset)
{
	u64 disk_byte;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int slot;
	int nritems;
	int extent_type;
	int ret;

	/*
	 * from the shared data ref, we only have the leaf but we need
	 * the key. thus, we must look into all items and see that we
	 * find one (some) with a reference to our extent item.
	 */
	nritems = btrfs_header_nritems(eb);
	for (slot = 0; slot < nritems; ++slot) {
		btrfs_item_key_to_cpu(eb, &key, slot);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(eb, fi);
		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
			continue;
		/* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		if (disk_byte != wanted_disk_byte)
			continue;

		ret = check_extent_in_eb(&key, eb, fi, extent_item_pos, eie, ignore_offset);
		if (ret < 0)
			return ret;
	}

	return 0;
}
struct preftree {
	struct rb_root root;
	unsigned int count;
};

#define PREFTREE_INIT	{ .root = RB_ROOT, .count = 0 }

struct preftrees {
	struct preftree direct;    /* BTRFS_SHARED_[DATA|BLOCK]_REF_KEY */
	struct preftree indirect;  /* BTRFS_[TREE_BLOCK|EXTENT_DATA]_REF_KEY */
	struct preftree indirect_missing_keys;
};
/*
 * Checks for a shared extent during backref search.
 *
 * The share_count tracks prelim_refs (direct and indirect) having a
 * ref->count >0:
 *  - incremented when a ref->count transitions to >0
 *  - decremented when a ref->count transitions to <1
 */
struct share_check {
	u64 root_objectid;
	u64 inum;
	int share_count;
};

static inline int extent_is_shared(struct share_check *sc)
{
	return (sc && sc->share_count > 1) ? BACKREF_FOUND_SHARED : 0;
}
static struct kmem_cache *btrfs_prelim_ref_cache;

int __init btrfs_prelim_ref_init(void)
{
	btrfs_prelim_ref_cache = kmem_cache_create("btrfs_prelim_ref",
					sizeof(struct prelim_ref),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_prelim_ref_cache)
		return -ENOMEM;
	return 0;
}

void __cold btrfs_prelim_ref_exit(void)
{
	kmem_cache_destroy(btrfs_prelim_ref_cache);
}

static void free_pref(struct prelim_ref *ref)
{
	kmem_cache_free(btrfs_prelim_ref_cache, ref);
}
/*
 * Return 0 when both refs are for the same block (and can be merged).
 * A -1 return indicates ref1 is a 'lower' block than ref2, while 1
 * indicates a 'higher' block.
 */
static int prelim_ref_compare(struct prelim_ref *ref1,
			      struct prelim_ref *ref2)
{
	if (ref1->level < ref2->level)
		return -1;
	if (ref1->level > ref2->level)
		return 1;
	if (ref1->root_id < ref2->root_id)
		return -1;
	if (ref1->root_id > ref2->root_id)
		return 1;
	if (ref1->key_for_search.type < ref2->key_for_search.type)
		return -1;
	if (ref1->key_for_search.type > ref2->key_for_search.type)
		return 1;
	if (ref1->key_for_search.objectid < ref2->key_for_search.objectid)
		return -1;
	if (ref1->key_for_search.objectid > ref2->key_for_search.objectid)
		return 1;
	if (ref1->key_for_search.offset < ref2->key_for_search.offset)
		return -1;
	if (ref1->key_for_search.offset > ref2->key_for_search.offset)
		return 1;
	if (ref1->parent < ref2->parent)
		return -1;
	if (ref1->parent > ref2->parent)
		return 1;

	return 0;
}
static void update_share_count(struct share_check *sc, int oldcount,
			       int newcount)
{
	if ((!sc) || (oldcount == 0 && newcount < 1))
		return;

	if (oldcount > 0 && newcount < 1)
		sc->share_count--;
	else if (oldcount < 1 && newcount > 0)
		sc->share_count++;
}
/*
 * Add @newref to the @root rbtree, merging identical refs.
 *
 * Callers should assume that newref has been freed after calling.
 */
static void prelim_ref_insert(const struct btrfs_fs_info *fs_info,
			      struct preftree *preftree,
			      struct prelim_ref *newref,
			      struct share_check *sc)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct prelim_ref *ref;
	int result;

	root = &preftree->root;
	p = &root->rb_node;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct prelim_ref, rbnode);
		result = prelim_ref_compare(ref, newref);
		if (result < 0) {
			p = &(*p)->rb_left;
		} else if (result > 0) {
			p = &(*p)->rb_right;
		} else {
			/* Identical refs, merge them and free @newref */
			struct extent_inode_elem *eie = ref->inode_list;

			while (eie && eie->next)
				eie = eie->next;

			if (!eie)
				ref->inode_list = newref->inode_list;
			else
				eie->next = newref->inode_list;
			trace_btrfs_prelim_ref_merge(fs_info, ref, newref,
						     preftree->count);
			/*
			 * A delayed ref can have newref->count < 0.
			 * The ref->count is updated to follow any
			 * BTRFS_[ADD|DROP]_DELAYED_REF actions.
			 */
			update_share_count(sc, ref->count,
					   ref->count + newref->count);
			ref->count += newref->count;
			free_pref(newref);
			return;
		}
	}

	update_share_count(sc, 0, newref->count);
	preftree->count++;
	trace_btrfs_prelim_ref_insert(fs_info, newref, NULL, preftree->count);
	rb_link_node(&newref->rbnode, parent, p);
	rb_insert_color(&newref->rbnode, root);
}
/*
 * Release the entire tree. We don't care about internal consistency so
 * just free everything and then reset the tree root.
 */
static void prelim_release(struct preftree *preftree)
{
	struct prelim_ref *ref, *next_ref;

	rbtree_postorder_for_each_entry_safe(ref, next_ref, &preftree->root,
					     rbnode)
		free_pref(ref);

	preftree->root = RB_ROOT;
}
/*
 * the rules for all callers of this function are:
 * - obtaining the parent is the goal
 * - if you add a key, you must know that it is a correct key
 * - if you cannot add the parent or a correct key, then we will look into the
 *   block later to set a correct key
 *
 * delayed refs
 * ============
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    -   |     -
 *      key to resolve |    -   |     y    |    y   |     y
 *  tree block logical |    -   |     -    |    -   |     -
 *  root for resolving |    y   |     y    |    y   |     y
 *
 * - column 1:       we've the parent -> done
 * - column 2, 3, 4: we use the key to find the parent
 *
 * on disk refs (inline or keyed)
 * ==============================
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    y   |     -
 *      key to resolve |    -   |     -    |    -   |     y
 *  tree block logical |    y   |     y    |    y   |     y
 *  root for resolving |    -   |     y    |    y   |     y
 *
 * - column 1, 3: we've the parent -> done
 * - column 2:    we take the first key from the block to find the parent
 *                (see add_missing_keys)
 * - column 4:    we use the key to find the parent
 *
 * additional information that's available but not required to find the parent
 * block might help in merging entries to gain some speed.
 */
static int add_prelim_ref(const struct btrfs_fs_info *fs_info,
			  struct preftree *preftree, u64 root_id,
			  const struct btrfs_key *key, int level, u64 parent,
			  u64 wanted_disk_byte, int count,
			  struct share_check *sc, gfp_t gfp_mask)
{
	struct prelim_ref *ref;

	if (root_id == BTRFS_DATA_RELOC_TREE_OBJECTID)
		return 0;

	ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask);
	if (!ref)
		return -ENOMEM;

	ref->root_id = root_id;
	if (key) {
		ref->key_for_search = *key;
		/*
		 * We can often find data backrefs with an offset that is too
		 * large (>= LLONG_MAX, maximum allowed file offset) due to
		 * underflows when subtracting a file's offset with the data
		 * offset of its corresponding extent data item. This can
		 * happen for example in the clone ioctl.
		 * So if we detect such case we set the search key's offset to
		 * zero to make sure we will find the matching file extent item
		 * at add_all_parents(), otherwise we will miss it because the
		 * offset taken from the backref is much larger than the offset
		 * of the file extent item. This can make us scan a very large
		 * number of file extent items, but at least it will not make
		 * us miss any.
		 * This is an ugly workaround for a behaviour that should have
		 * never existed, but it does and a fix for the clone ioctl
		 * would touch a lot of places, cause backwards incompatibility
		 * and would not fix the problem for extents cloned with older
		 * kernels.
		 */
		if (ref->key_for_search.type == BTRFS_EXTENT_DATA_KEY &&
		    ref->key_for_search.offset >= LLONG_MAX)
			ref->key_for_search.offset = 0;
	} else {
		memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));
	}

	ref->inode_list = NULL;
	ref->level = level;
	ref->count = count;
	ref->parent = parent;
	ref->wanted_disk_byte = wanted_disk_byte;
	prelim_ref_insert(fs_info, preftree, ref, sc);
	return extent_is_shared(sc);
}
/* direct refs use root == 0, key == NULL */
static int add_direct_ref(const struct btrfs_fs_info *fs_info,
			  struct preftrees *preftrees, int level, u64 parent,
			  u64 wanted_disk_byte, int count,
			  struct share_check *sc, gfp_t gfp_mask)
{
	return add_prelim_ref(fs_info, &preftrees->direct, 0, NULL, level,
			      parent, wanted_disk_byte, count, sc, gfp_mask);
}
/* indirect refs use parent == 0 */
static int add_indirect_ref(const struct btrfs_fs_info *fs_info,
			    struct preftrees *preftrees, u64 root_id,
			    const struct btrfs_key *key, int level,
			    u64 wanted_disk_byte, int count,
			    struct share_check *sc, gfp_t gfp_mask)
{
	struct preftree *tree = &preftrees->indirect;

	if (!key)
		tree = &preftrees->indirect_missing_keys;
	return add_prelim_ref(fs_info, tree, root_id, key, level, 0,
			      wanted_disk_byte, count, sc, gfp_mask);
}
static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
			   struct ulist *parents, struct prelim_ref *ref,
			   int level, u64 time_seq, const u64 *extent_item_pos,
			   u64 total_refs, bool ignore_offset)
{
	int ret = 0;
	int slot;
	struct extent_buffer *eb;
	struct btrfs_key key;
	struct btrfs_key *key_for_search = &ref->key_for_search;
	struct btrfs_file_extent_item *fi;
	struct extent_inode_elem *eie = NULL, *old = NULL;
	u64 disk_byte;
	u64 wanted_disk_byte = ref->wanted_disk_byte;
	u64 count = 0;

	if (level != 0) {
		eb = path->nodes[level];
		ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
		if (ret < 0)
			return ret;
		return 0;
	}

	/*
	 * We normally enter this function with the path already pointing to
	 * the first item to check. But sometimes, we may enter it with
	 * slot==nritems. In that case, go to the next leaf before we continue.
	 */
	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
		if (time_seq == SEQ_LAST)
			ret = btrfs_next_leaf(root, path);
		else
			ret = btrfs_next_old_leaf(root, path, time_seq);
	}

	while (!ret && count < total_refs) {
		eb = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(eb, &key, slot);

		if (key.objectid != key_for_search->objectid ||
		    key.type != BTRFS_EXTENT_DATA_KEY)
			break;

		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);

		if (disk_byte == wanted_disk_byte) {
			eie = NULL;
			old = NULL;
			count++;
			if (extent_item_pos) {
				ret = check_extent_in_eb(&key, eb, fi,
						*extent_item_pos,
						&eie, ignore_offset);
				if (ret < 0)
					break;
			}
			if (ret > 0)
				goto next;
			ret = ulist_add_merge_ptr(parents, eb->start,
						  eie, (void **)&old, GFP_NOFS);
			if (ret < 0)
				break;
			if (!ret && extent_item_pos) {
				while (old->next)
					old = old->next;
				old->next = eie;
			}
			eie = NULL;
		}
next:
		if (time_seq == SEQ_LAST)
			ret = btrfs_next_item(root, path);
		else
			ret = btrfs_next_old_item(root, path, time_seq);
	}

	if (ret > 0)
		ret = 0;
	else if (ret < 0)
		free_inode_elem_list(eie);
	return ret;
}
/*
 * resolve an indirect backref in the form (root_id, key, level)
 * to a logical address
 */
static int resolve_indirect_ref(struct btrfs_fs_info *fs_info,
				struct btrfs_path *path, u64 time_seq,
				struct prelim_ref *ref, struct ulist *parents,
				const u64 *extent_item_pos, u64 total_refs,
				bool ignore_offset)
{
	struct btrfs_root *root;
	struct btrfs_key root_key;
	struct extent_buffer *eb;
	int ret = 0;
	int root_level;
	int level = ref->level;
	int index;

	root_key.objectid = ref->root_id;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;

	index = srcu_read_lock(&fs_info->subvol_srcu);

	root = btrfs_get_fs_root(fs_info, &root_key, false);
	if (IS_ERR(root)) {
		srcu_read_unlock(&fs_info->subvol_srcu, index);
		ret = PTR_ERR(root);
		goto out;
	}

	if (btrfs_is_testing(fs_info)) {
		srcu_read_unlock(&fs_info->subvol_srcu, index);
		ret = -ENOENT;
		goto out;
	}

	if (path->search_commit_root)
		root_level = btrfs_header_level(root->commit_root);
	else if (time_seq == SEQ_LAST)
		root_level = btrfs_header_level(root->node);
	else
		root_level = btrfs_old_root_level(root, time_seq);

	if (root_level + 1 == level) {
		srcu_read_unlock(&fs_info->subvol_srcu, index);
		goto out;
	}

	path->lowest_level = level;
	if (time_seq == SEQ_LAST)
		ret = btrfs_search_slot(NULL, root, &ref->key_for_search, path,
					0, 0);
	else
		ret = btrfs_search_old_slot(root, &ref->key_for_search, path,
					    time_seq);

	/* root node has been locked, we can release @subvol_srcu safely here */
	srcu_read_unlock(&fs_info->subvol_srcu, index);

	btrfs_debug(fs_info,
		"search slot in root %llu (level %d, ref count %d) returned %d for key (%llu %u %llu)",
		 ref->root_id, level, ref->count, ret,
		 ref->key_for_search.objectid, ref->key_for_search.type,
		 ref->key_for_search.offset);
	if (ret < 0)
		goto out;

	eb = path->nodes[level];
	while (!eb) {
		if (WARN_ON(!level)) {
			ret = 1;
			goto out;
		}
		level--;
		eb = path->nodes[level];
	}

	ret = add_all_parents(root, path, parents, ref, level, time_seq,
			      extent_item_pos, total_refs, ignore_offset);
out:
	path->lowest_level = 0;
	btrfs_release_path(path);
	return ret;
}
static struct extent_inode_elem *
unode_aux_to_inode_list(struct ulist_node *node)
{
	if (!node)
		return NULL;
	return (struct extent_inode_elem *)(uintptr_t)node->aux;
}
/*
 * We maintain three separate rbtrees: one for direct refs, one for
 * indirect refs which have a key, and one for indirect refs which do not
 * have a key. Each tree does merge on insertion.
 *
 * Once all of the references are located, we iterate over the tree of
 * indirect refs with missing keys. An appropriate key is located and
 * the ref is moved onto the tree for indirect refs. After all missing
 * keys are thus located, we iterate over the indirect ref tree, resolve
 * each reference, and then insert the resolved reference onto the
 * direct tree (merging there too).
 *
 * New backrefs (i.e., for parent nodes) are added to the appropriate
 * rbtree as they are encountered. The new backrefs are subsequently
 * resolved as above.
 */
static int resolve_indirect_refs(struct btrfs_fs_info *fs_info,
				 struct btrfs_path *path, u64 time_seq,
				 struct preftrees *preftrees,
				 const u64 *extent_item_pos, u64 total_refs,
				 struct share_check *sc, bool ignore_offset)
{
	int err;
	int ret = 0;
	struct ulist *parents;
	struct ulist_node *node;
	struct ulist_iterator uiter;
	struct rb_node *rnode;

	parents = ulist_alloc(GFP_NOFS);
	if (!parents)
		return -ENOMEM;

	/*
	 * We could trade memory usage for performance here by iterating
	 * the tree, allocating new refs for each insertion, and then
	 * freeing the entire indirect tree when we're done. In some test
	 * cases, the tree can grow quite large (~200k objects).
	 */
	while ((rnode = rb_first(&preftrees->indirect.root))) {
		struct prelim_ref *ref;

		ref = rb_entry(rnode, struct prelim_ref, rbnode);
		if (WARN(ref->parent,
			 "BUG: direct ref found in indirect tree")) {
			ret = -EINVAL;
			goto out;
		}

		rb_erase(&ref->rbnode, &preftrees->indirect.root);
		preftrees->indirect.count--;

		if (ref->count == 0) {
			free_pref(ref);
			continue;
		}

		if (sc && sc->root_objectid &&
		    ref->root_id != sc->root_objectid) {
			free_pref(ref);
			ret = BACKREF_FOUND_SHARED;
			goto out;
		}
		err = resolve_indirect_ref(fs_info, path, time_seq, ref,
					   parents, extent_item_pos,
					   total_refs, ignore_offset);
		/*
		 * we can only tolerate ENOENT; otherwise we should catch the
		 * error and return directly.
		 */
		if (err == -ENOENT) {
			prelim_ref_insert(fs_info, &preftrees->direct, ref,
					  NULL);
			continue;
		} else if (err) {
			free_pref(ref);
			ret = err;
			goto out;
		}

		/* we put the first parent into the ref at hand */
		ULIST_ITER_INIT(&uiter);
		node = ulist_next(parents, &uiter);
		ref->parent = node ? node->val : 0;
		ref->inode_list = unode_aux_to_inode_list(node);

		/* Add a prelim_ref(s) for any other parent(s). */
		while ((node = ulist_next(parents, &uiter))) {
			struct prelim_ref *new_ref;

			new_ref = kmem_cache_alloc(btrfs_prelim_ref_cache,
						   GFP_NOFS);
			if (!new_ref) {
				free_pref(ref);
				ret = -ENOMEM;
				goto out;
			}
			memcpy(new_ref, ref, sizeof(*ref));
			new_ref->parent = node->val;
			new_ref->inode_list = unode_aux_to_inode_list(node);
			prelim_ref_insert(fs_info, &preftrees->direct,
					  new_ref, NULL);
		}

		/*
		 * Now it's a direct ref, put it in the direct tree. We must
		 * do this last because the ref could be merged/freed here.
		 */
		prelim_ref_insert(fs_info, &preftrees->direct, ref, NULL);

		ulist_reinit(parents);
	}
out:
	ulist_free(parents);
	return ret;
}
/*
 * read tree blocks and add keys where required.
 */
static int add_missing_keys(struct btrfs_fs_info *fs_info,
			    struct preftrees *preftrees)
{
	struct prelim_ref *ref;
	struct extent_buffer *eb;
	struct preftree *tree = &preftrees->indirect_missing_keys;
	struct rb_node *node;

	while ((node = rb_first(&tree->root))) {
		ref = rb_entry(node, struct prelim_ref, rbnode);
		rb_erase(node, &tree->root);

		BUG_ON(ref->parent);	/* should not be a direct ref */
		BUG_ON(ref->key_for_search.type);
		BUG_ON(!ref->wanted_disk_byte);

		eb = read_tree_block(fs_info, ref->wanted_disk_byte, 0,
				     ref->level - 1, NULL);
		if (IS_ERR(eb)) {
			free_pref(ref);
			return PTR_ERR(eb);
		} else if (!extent_buffer_uptodate(eb)) {
			free_pref(ref);
			free_extent_buffer(eb);
			return -EIO;
		}
		btrfs_tree_read_lock(eb);
		if (btrfs_header_level(eb) == 0)
			btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
		else
			btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0);
		btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
		prelim_ref_insert(fs_info, &preftrees->indirect, ref, NULL);
	}
	return 0;
}
/*
 * add all currently queued delayed refs from this head whose seq nr is
 * smaller than or equal to @seq to the list
 */
static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_head *head, u64 seq,
			    struct preftrees *preftrees, u64 *total_refs,
			    struct share_check *sc)
{
	struct btrfs_delayed_ref_node *node;
	struct btrfs_delayed_extent_op *extent_op = head->extent_op;
	struct btrfs_key key;
	struct btrfs_key tmp_op_key;
	struct rb_node *n;
	int count;
	int ret = 0;

	if (extent_op && extent_op->update_key)
		btrfs_disk_key_to_cpu(&tmp_op_key, &extent_op->key);

	spin_lock(&head->lock);
	for (n = rb_first(&head->ref_tree); n; n = rb_next(n)) {
		node = rb_entry(n, struct btrfs_delayed_ref_node,
				ref_node);
		if (node->seq > seq)
			continue;

		switch (node->action) {
		case BTRFS_ADD_DELAYED_EXTENT:
		case BTRFS_UPDATE_DELAYED_HEAD:
			WARN_ON(1);
			continue;
		case BTRFS_ADD_DELAYED_REF:
			count = node->ref_mod;
			break;
		case BTRFS_DROP_DELAYED_REF:
			count = node->ref_mod * -1;
			break;
		default:
			BUG_ON(1);
		}
		*total_refs += count;
		switch (node->type) {
		case BTRFS_TREE_BLOCK_REF_KEY: {
			/* NORMAL INDIRECT METADATA backref */
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);
			ret = add_indirect_ref(fs_info, preftrees, ref->root,
					       &tmp_op_key, ref->level + 1,
					       node->bytenr, count, sc,
					       GFP_ATOMIC);
			break;
		}
		case BTRFS_SHARED_BLOCK_REF_KEY: {
			/* SHARED DIRECT METADATA backref */
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);

			ret = add_direct_ref(fs_info, preftrees, ref->level + 1,
					     ref->parent, node->bytenr, count,
					     sc, GFP_ATOMIC);
			break;
		}
		case BTRFS_EXTENT_DATA_REF_KEY: {
			/* NORMAL INDIRECT DATA backref */
			struct btrfs_delayed_data_ref *ref;
			ref = btrfs_delayed_node_to_data_ref(node);

			key.objectid = ref->objectid;
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = ref->offset;

			/*
			 * Found an inum that doesn't match our known inum, we
			 * know it's shared.
			 */
			if (sc && sc->inum && ref->objectid != sc->inum) {
				ret = BACKREF_FOUND_SHARED;
				goto out;
			}

			ret = add_indirect_ref(fs_info, preftrees, ref->root,
					       &key, 0, node->bytenr, count, sc,
					       GFP_ATOMIC);
			break;
		}
		case BTRFS_SHARED_DATA_REF_KEY: {
			/* SHARED DIRECT FULL backref */
			struct btrfs_delayed_data_ref *ref;

			ref = btrfs_delayed_node_to_data_ref(node);

			ret = add_direct_ref(fs_info, preftrees, 0, ref->parent,
					     node->bytenr, count, sc,
					     GFP_ATOMIC);
			break;
		}
		default:
			WARN_ON(1);
		}
		/*
		 * We must ignore BACKREF_FOUND_SHARED until all delayed
		 * refs have been checked.
		 */
		if (ret && (ret != BACKREF_FOUND_SHARED))
			break;
	}
	if (!ret)
		ret = extent_is_shared(sc);
out:
	spin_unlock(&head->lock);
	return ret;
}
/*
 * add all inline backrefs for bytenr to the list
 *
 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
 */
static int add_inline_refs(const struct btrfs_fs_info *fs_info,
			   struct btrfs_path *path, u64 bytenr,
			   int *info_level, struct preftrees *preftrees,
			   u64 *total_refs, struct share_check *sc)
{
	int ret = 0;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	unsigned long ptr;
	unsigned long end;
	struct btrfs_extent_item *ei;
	u64 flags;
	u64 item_size;

	/*
	 * enumerate all inline refs
	 */
	leaf = path->nodes[0];
	slot = path->slots[0];

	item_size = btrfs_item_size_nr(leaf, slot);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);
	*total_refs += btrfs_extent_refs(leaf, ei);
	btrfs_item_key_to_cpu(leaf, &found_key, slot);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
	    flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		struct btrfs_tree_block_info *info;

		info = (struct btrfs_tree_block_info *)ptr;
		*info_level = btrfs_tree_block_level(leaf, info);
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	} else if (found_key.type == BTRFS_METADATA_ITEM_KEY) {
		*info_level = found_key.offset;
	} else {
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
	}

	while (ptr < end) {
		struct btrfs_extent_inline_ref *iref;
		u64 offset;
		int type;

		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_get_extent_inline_ref_type(leaf, iref,
							BTRFS_REF_TYPE_ANY);
		if (type == BTRFS_REF_TYPE_INVALID)
			return -EINVAL;

		offset = btrfs_extent_inline_ref_offset(leaf, iref);

		switch (type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = add_direct_ref(fs_info, preftrees,
					     *info_level + 1, offset,
					     bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = (struct btrfs_shared_data_ref *)(iref + 1);
			count = btrfs_shared_data_ref_count(leaf, sdref);

			ret = add_direct_ref(fs_info, preftrees, 0, offset,
					     bytenr, count, sc, GFP_NOFS);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = add_indirect_ref(fs_info, preftrees, offset,
					       NULL, *info_level + 1,
					       bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);

			if (sc && sc->inum && key.objectid != sc->inum) {
				ret = BACKREF_FOUND_SHARED;
				break;
			}

			root = btrfs_extent_data_ref_root(leaf, dref);

			ret = add_indirect_ref(fs_info, preftrees, root,
					       &key, 0, bytenr, count,
					       sc, GFP_NOFS);
			break;
		}
		default:
			WARN_ON(1);
		}
		if (ret)
			return ret;
		ptr += btrfs_extent_inline_ref_size(type);
	}

	return 0;
}
/*
 * add all non-inline backrefs for bytenr to the list
 *
 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
 */
static int add_keyed_refs(struct btrfs_fs_info *fs_info,
			  struct btrfs_path *path, u64 bytenr,
			  int info_level, struct preftrees *preftrees,
			  struct share_check *sc)
{
	struct btrfs_root *extent_root = fs_info->extent_root;
	int ret;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	while (1) {
		ret = btrfs_next_item(extent_root, path);
		if (ret < 0)
			break;
		if (ret) {
			ret = 0;
			break;
		}

		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);

		if (key.objectid != bytenr)
			break;
		if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
			continue;
		if (key.type > BTRFS_SHARED_DATA_REF_KEY)
			break;

		switch (key.type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			/* SHARED DIRECT METADATA backref */
			ret = add_direct_ref(fs_info, preftrees,
					     info_level + 1, key.offset,
					     bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			/* SHARED DIRECT FULL backref */
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = btrfs_item_ptr(leaf, slot,
					       struct btrfs_shared_data_ref);
			count = btrfs_shared_data_ref_count(leaf, sdref);
			ret = add_direct_ref(fs_info, preftrees, 0,
					     key.offset, bytenr, count,
					     sc, GFP_NOFS);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			/* NORMAL INDIRECT METADATA backref */
			ret = add_indirect_ref(fs_info, preftrees, key.offset,
					       NULL, info_level + 1, bytenr,
					       1, NULL, GFP_NOFS);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			/* NORMAL INDIRECT DATA backref */
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = btrfs_item_ptr(leaf, slot,
					      struct btrfs_extent_data_ref);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);

			if (sc && sc->inum && key.objectid != sc->inum) {
				ret = BACKREF_FOUND_SHARED;
				break;
			}

			root = btrfs_extent_data_ref_root(leaf, dref);
			ret = add_indirect_ref(fs_info, preftrees, root,
					       &key, 0, bytenr, count,
					       sc, GFP_NOFS);
			break;
		}
		default:
			WARN_ON(1);
		}
		if (ret)
			return ret;
	}

	return ret;
}
/*
 * this adds all existing backrefs (inline backrefs, backrefs and delayed
 * refs) for the given bytenr to the refs list, merges duplicates and resolves
 * indirect refs to their parent bytenr.
 * When roots are found, they're added to the roots list
 *
 * If time_seq is set to SEQ_LAST, it will not search delayed_refs, and behaves
 * much like the trans == NULL case; the only difference is that it will not
 * commit root.
 * The special case is for qgroup to search roots in commit_transaction().
 *
 * @sc - if !NULL, then immediately return BACKREF_FOUND_SHARED when a
 * shared extent is detected.
 *
 * Otherwise this returns 0 for success and <0 for an error.
 *
 * If ignore_offset is set to false, only extent refs whose offsets match
 * extent_item_pos are returned.  If true, every extent ref is returned
 * and extent_item_pos is ignored.
 *
 * FIXME some caching might speed things up
 */
static int find_parent_nodes(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info, u64 bytenr,
			     u64 time_seq, struct ulist *refs,
			     struct ulist *roots, const u64 *extent_item_pos,
			     struct share_check *sc, bool ignore_offset)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_delayed_ref_root *delayed_refs = NULL;
	struct btrfs_delayed_ref_head *head;
	int info_level = 0;
	int ret;
	struct prelim_ref *ref;
	struct rb_node *node;
	struct extent_inode_elem *eie = NULL;
	/* total of both direct AND indirect refs! */
	u64 total_refs = 0;
	struct preftrees preftrees = {
		.direct = PREFTREE_INIT,
		.indirect = PREFTREE_INIT,
		.indirect_missing_keys = PREFTREE_INIT
	};

	key.objectid = bytenr;
	key.offset = (u64)-1;
	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	if (!trans) {
		path->search_commit_root = 1;
		path->skip_locking = 1;
	}

	if (time_seq == SEQ_LAST)
		path->skip_locking = 1;

	/*
	 * grab both a lock on the path and a lock on the delayed ref head.
	 * We need both to get a consistent picture of how the refs look
	 * at a specified point in time
	 */
again:
	head = NULL;

	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0);

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	if (trans && likely(trans->type != __TRANS_DUMMY) &&
	    time_seq != SEQ_LAST) {
#else
	if (trans && time_seq != SEQ_LAST) {
#endif
		/*
		 * look if there are updates for this ref queued and lock the
		 * head
		 */
		delayed_refs = &trans->transaction->delayed_refs;
		spin_lock(&delayed_refs->lock);
		head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
		if (head) {
			if (!mutex_trylock(&head->mutex)) {
				refcount_inc(&head->refs);
				spin_unlock(&delayed_refs->lock);

				btrfs_release_path(path);

				/*
				 * Mutex was contended, block until it's
				 * released and try again
				 */
				mutex_lock(&head->mutex);
				mutex_unlock(&head->mutex);
				btrfs_put_delayed_ref_head(head);
				goto again;
			}
			spin_unlock(&delayed_refs->lock);
			ret = add_delayed_refs(fs_info, head, time_seq,
					       &preftrees, &total_refs, sc);
			mutex_unlock(&head->mutex);
			if (ret)
				goto out;
		} else {
			spin_unlock(&delayed_refs->lock);
		}
	}

	if (path->slots[0]) {
		struct extent_buffer *leaf;
		int slot;

		path->slots[0]--;
		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid == bytenr &&
		    (key.type == BTRFS_EXTENT_ITEM_KEY ||
		     key.type == BTRFS_METADATA_ITEM_KEY)) {
			ret = add_inline_refs(fs_info, path, bytenr,
					      &info_level, &preftrees,
					      &total_refs, sc);
			if (ret)
				goto out;
			ret = add_keyed_refs(fs_info, path, bytenr, info_level,
					     &preftrees, sc);
			if (ret)
				goto out;
		}
	}

	btrfs_release_path(path);

	ret = add_missing_keys(fs_info, &preftrees);
	if (ret)
		goto out;

	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect_missing_keys.root));

	ret = resolve_indirect_refs(fs_info, path, time_seq, &preftrees,
				    extent_item_pos, total_refs, sc, ignore_offset);
	if (ret)
		goto out;

	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect.root));

	/*
	 * This walks the tree of merged and resolved refs. Tree blocks are
	 * read in as needed. Unique entries are added to the ulist, and
	 * the list of found roots is updated.
	 *
	 * We release the entire tree in one go before returning.
	 */
	node = rb_first(&preftrees.direct.root);
	while (node) {
		ref = rb_entry(node, struct prelim_ref, rbnode);
		node = rb_next(&ref->rbnode);
		/*
		 * ref->count < 0 can happen here if there are delayed
		 * refs with a node->action of BTRFS_DROP_DELAYED_REF.
		 * prelim_ref_insert() relies on this when merging
		 * identical refs to keep the overall count correct.
		 * prelim_ref_insert() will merge only those refs
		 * which compare identically.  Any refs having
		 * e.g. different offsets would not be merged,
		 * and would retain their original ref->count < 0.
		 */
		if (roots && ref->count && ref->root_id && ref->parent == 0) {
			if (sc && sc->root_objectid &&
			    ref->root_id != sc->root_objectid) {
				ret = BACKREF_FOUND_SHARED;
				goto out;
			}

			/* no parent == root of tree */
			ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
			if (ret < 0)
				goto out;
		}
		if (ref->count && ref->parent) {
			if (extent_item_pos && !ref->inode_list &&
			    ref->level == 0) {
				struct extent_buffer *eb;

				eb = read_tree_block(fs_info, ref->parent, 0,
						     ref->level, NULL);
				if (IS_ERR(eb)) {
					ret = PTR_ERR(eb);
					goto out;
				} else if (!extent_buffer_uptodate(eb)) {
					free_extent_buffer(eb);
					ret = -EIO;
					goto out;
				}
				btrfs_tree_read_lock(eb);
				btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
				ret = find_extent_in_eb(eb, bytenr,
							*extent_item_pos, &eie, ignore_offset);
				btrfs_tree_read_unlock_blocking(eb);
				free_extent_buffer(eb);
				if (ret < 0)
					goto out;
				ref->inode_list = eie;
			}
			ret = ulist_add_merge_ptr(refs, ref->parent,
						  ref->inode_list,
						  (void **)&eie, GFP_NOFS);
			if (ret < 0)
				goto out;
			if (!ret && extent_item_pos) {
				/*
				 * we've recorded that parent, so we must extend
				 * its inode list here
				 */
				BUG_ON(!eie);
				while (eie->next)
					eie = eie->next;
				eie->next = ref->inode_list;
			}
			eie = NULL;
		}
	}

out:
	btrfs_free_path(path);

	prelim_release(&preftrees.direct);
	prelim_release(&preftrees.indirect);
	prelim_release(&preftrees.indirect_missing_keys);

	if (ret < 0)
		free_inode_elem_list(eie);
	return ret;
}
static void free_leaf_list(struct ulist *blocks)
{
	struct ulist_node *node = NULL;
	struct extent_inode_elem *eie;
	struct ulist_iterator uiter;

	ULIST_ITER_INIT(&uiter);
	while ((node = ulist_next(blocks, &uiter))) {
		if (!node->aux)
			continue;
		eie = unode_aux_to_inode_list(node);
		free_inode_elem_list(eie);
		node->aux = 0;
	}

	ulist_free(blocks);
}
/*
 * Finds all leafs with a reference to the specified combination of bytenr and
 * offset. The leafs will be stored in the leafs ulist, which must be freed
 * with ulist_free.
 *
 * returns 0 on success, <0 on error
 */
static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
				struct btrfs_fs_info *fs_info, u64 bytenr,
				u64 time_seq, struct ulist **leafs,
				const u64 *extent_item_pos, bool ignore_offset)
{
	int ret;

	*leafs = ulist_alloc(GFP_NOFS);
	if (!*leafs)
		return -ENOMEM;

	ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
				*leafs, NULL, extent_item_pos, NULL, ignore_offset);
	if (ret < 0 && ret != -ENOENT) {
		free_leaf_list(*leafs);
		return ret;
	}

	return 0;
}
/*
 * walk all backrefs for a given extent to find all roots that reference this
 * extent. Walking a backref means finding all extents that reference this
 * extent and in turn walk the backrefs of those, too. Naturally this is a
 * recursive process, but here it is implemented in an iterative fashion: We
 * find all referencing extents for the extent in question and put them on a
 * list. In turn, we find all referencing extents for those, further appending
 * to the list. The way we iterate the list allows adding more elements after
 * the current while iterating. The process stops when we reach the end of the
 * list. Found roots are added to the roots list.
 *
 * returns 0 on success, < 0 on error.
 */
static int btrfs_find_all_roots_safe(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info, u64 bytenr,
				     u64 time_seq, struct ulist **roots,
				     bool ignore_offset)
{
	struct ulist *tmp;
	struct ulist_node *node = NULL;
	struct ulist_iterator uiter;
	int ret = 0;

	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp)
		return -ENOMEM;
	*roots = ulist_alloc(GFP_NOFS);
	if (!*roots) {
		ulist_free(tmp);
		return -ENOMEM;
	}

	ULIST_ITER_INIT(&uiter);
	while (1) {
		ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
					tmp, *roots, NULL, NULL, ignore_offset);
		if (ret < 0 && ret != -ENOENT) {
			ulist_free(tmp);
			ulist_free(*roots);
			return ret;
		}
		node = ulist_next(tmp, &uiter);
		if (!node)
			break;
		bytenr = node->val;
		cond_resched();
	}

	ulist_free(tmp);
	return 0;
}
int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
			 struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 time_seq, struct ulist **roots,
			 bool ignore_offset)
{
	int ret;

	if (!trans)
		down_read(&fs_info->commit_root_sem);
	ret = btrfs_find_all_roots_safe(trans, fs_info, bytenr,
					time_seq, roots, ignore_offset);
	if (!trans)
		up_read(&fs_info->commit_root_sem);
	return ret;
}
/**
 * btrfs_check_shared - tell us whether an extent is shared
 *
 * btrfs_check_shared uses the backref walking code but will short
 * circuit as soon as it finds a root or inode that doesn't match the
 * one passed in. This provides a significant performance benefit for
 * callers (such as fiemap) which want to know whether the extent is
 * shared but do not need a ref count.
 *
 * This attempts to allocate a transaction in order to account for
 * delayed refs, but continues on even when the alloc fails.
 *
 * Return: 0 if extent is not shared, 1 if it is shared, < 0 on error.
 */
int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	struct ulist *tmp = NULL;
	struct ulist *roots = NULL;
	struct ulist_iterator uiter;
	struct ulist_node *node;
	struct seq_list elem = SEQ_LIST_INIT(elem);
	int ret = 0;
	struct share_check shared = {
		.root_objectid = root->objectid,
		.inum = inum,
		.share_count = 0,
	};

	tmp = ulist_alloc(GFP_NOFS);
	roots = ulist_alloc(GFP_NOFS);
	if (!tmp || !roots) {
		ret = -ENOMEM;
		goto out;
	}

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		trans = NULL;
		down_read(&fs_info->commit_root_sem);
	} else {
		btrfs_get_tree_mod_seq(fs_info, &elem);
	}

	ULIST_ITER_INIT(&uiter);
	while (1) {
		ret = find_parent_nodes(trans, fs_info, bytenr, elem.seq, tmp,
					roots, NULL, &shared, false);
		if (ret == BACKREF_FOUND_SHARED) {
			/* this is the only condition under which we return 1 */
			ret = 1;
			break;
		}
		if (ret < 0 && ret != -ENOENT)
			break;
		ret = 0;
		node = ulist_next(tmp, &uiter);
		if (!node)
			break;
		bytenr = node->val;
		shared.share_count = 0;
		cond_resched();
	}

	if (trans) {
		btrfs_put_tree_mod_seq(fs_info, &elem);
		btrfs_end_transaction(trans);
	} else {
		up_read(&fs_info->commit_root_sem);
	}
out:
	ulist_free(tmp);
	ulist_free(roots);
	return ret;
}
int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
			  u64 start_off, struct btrfs_path *path,
			  struct btrfs_inode_extref **ret_extref,
			  u64 *found_off)
{
	int ret, slot;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_inode_extref *extref;
	const struct extent_buffer *leaf;
	unsigned long ptr;

	key.objectid = inode_objectid;
	key.type = BTRFS_INODE_EXTREF_KEY;
	key.offset = start_off;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			/*
			 * If the item at offset is not found,
			 * btrfs_search_slot will point us to the slot
			 * where it should be inserted. In our case
			 * that will be the slot directly before the
			 * next INODE_REF_KEY_V2 item. In the case
			 * that we're pointing to the last slot in a
			 * leaf, we must move one leaf over.
			 */
			ret = btrfs_next_leaf(root, path);
			if (ret) {
				if (ret >= 1)
					ret = -ENOENT;
				break;
			}
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		/*
		 * Check that we're still looking at an extended ref key for
		 * this particular objectid. If we have different
		 * objectid or type then there are no more to be found
		 * in the tree and we can exit.
		 */
		ret = -ENOENT;
		if (found_key.objectid != inode_objectid)
			break;
		if (found_key.type != BTRFS_INODE_EXTREF_KEY)
			break;

		ret = 0;
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		extref = (struct btrfs_inode_extref *)ptr;
		*ret_extref = extref;
		if (found_off)
			*found_off = found_key.offset;
		break;
	}

	return ret;
}
/*
 * this iterates to turn a name (from iref/extref) into a full filesystem path.
 * Elements of the path are separated by '/' and the path is guaranteed to be
 * 0-terminated. the path is only given within the current file system.
 * Therefore, it never starts with a '/'. the caller is responsible to provide
 * "size" bytes in "dest". the dest buffer will be filled backwards. finally,
 * the start point of the resulting string is returned. this pointer is within
 * dest, normally.
 * in case the path buffer would overflow, the pointer is decremented further
 * as if output was written to the buffer, though no more output is actually
 * generated. that way, the caller can determine how much space would be
 * required for the path to fit into the buffer. in that case, the returned
 * value will be smaller than dest. callers must check this!
 */
char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
			u32 name_len, unsigned long name_off,
			struct extent_buffer *eb_in, u64 parent,
			char *dest, u32 size)
{
	int slot;
	u64 next_inum;
	int ret;
	s64 bytes_left = ((s64)size) - 1;
	struct extent_buffer *eb = eb_in;
	struct btrfs_key found_key;
	int leave_spinning = path->leave_spinning;
	struct btrfs_inode_ref *iref;

	if (bytes_left >= 0)
		dest[bytes_left] = '\0';

	path->leave_spinning = 1;
	while (1) {
		bytes_left -= name_len;
		if (bytes_left >= 0)
			read_extent_buffer(eb, dest + bytes_left,
					   name_off, name_len);
		if (eb != eb_in) {
			if (!path->skip_locking)
				btrfs_tree_read_unlock_blocking(eb);
			free_extent_buffer(eb);
		}
		ret = btrfs_find_item(fs_root, path, parent, 0,
				BTRFS_INODE_REF_KEY, &found_key);
		if (ret > 0)
			ret = -ENOENT;
		if (ret)
			break;

		next_inum = found_key.offset;

		/* regular exit ahead */
		if (parent == next_inum)
			break;

		slot = path->slots[0];
		eb = path->nodes[0];
		/* make sure we can use eb after releasing the path */
		if (eb != eb_in) {
			if (!path->skip_locking)
				btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
			path->nodes[0] = NULL;
			path->locks[0] = 0;
		}
		btrfs_release_path(path);
		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);

		name_len = btrfs_inode_ref_name_len(eb, iref);
		name_off = (unsigned long)(iref + 1);

		parent = next_inum;
		--bytes_left;
		if (bytes_left >= 0)
			dest[bytes_left] = '/';
	}

	btrfs_release_path(path);
	path->leave_spinning = leave_spinning;

	if (ret)
		return ERR_PTR(ret);

	return dest + bytes_left;
}
/*
 * this makes the path point to (logical EXTENT_ITEM *)
 * returns BTRFS_EXTENT_FLAG_DATA for data, BTRFS_EXTENT_FLAG_TREE_BLOCK for
 * tree blocks and <0 on error.
 */
int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
			struct btrfs_path *path, struct btrfs_key *found_key,
			u64 *flags_ret)
{
	int ret;
	u64 flags;
	u64 size = 0;
	u32 item_size;
	const struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;

	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;
	key.objectid = logical;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	ret = btrfs_previous_extent_item(fs_info->extent_root, path, 0);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		return ret;
	}
	btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
	if (found_key->type == BTRFS_METADATA_ITEM_KEY)
		size = fs_info->nodesize;
	else if (found_key->type == BTRFS_EXTENT_ITEM_KEY)
		size = found_key->offset;

	if (found_key->objectid > logical ||
	    found_key->objectid + size <= logical) {
		btrfs_debug(fs_info,
			"logical %llu is not within any extent", logical);
		return -ENOENT;
	}

	eb = path->nodes[0];
	item_size = btrfs_item_size_nr(eb, path->slots[0]);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(eb, ei);

	btrfs_debug(fs_info,
		"logical %llu is at position %llu within the extent (%llu EXTENT_ITEM %llu) flags %#llx size %u",
		 logical, logical - found_key->objectid, found_key->objectid,
		 found_key->offset, flags, item_size);

	WARN_ON(!flags_ret);
	if (flags_ret) {
		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
			*flags_ret = BTRFS_EXTENT_FLAG_TREE_BLOCK;
		else if (flags & BTRFS_EXTENT_FLAG_DATA)
			*flags_ret = BTRFS_EXTENT_FLAG_DATA;
		else
			BUG_ON(1);
		return 0;
	}
	return -EIO;
}
/*
 * helper function to iterate extent inline refs. ptr must point to a 0 value
 * for the first call and may be modified. it is used to track state.
 * if more refs exist, 0 is returned and the next call to
 * get_extent_inline_ref must pass the modified ptr parameter to get the
 * next ref. after the last ref was processed, 1 is returned.
 * returns <0 on error
 */
static int get_extent_inline_ref(unsigned long *ptr,
				 const struct extent_buffer *eb,
				 const struct btrfs_key *key,
				 const struct btrfs_extent_item *ei,
				 u32 item_size,
				 struct btrfs_extent_inline_ref **out_eiref,
				 int *out_type)
{
	unsigned long end;
	u64 flags;
	struct btrfs_tree_block_info *info;

	if (!*ptr) {
		/* first call */
		flags = btrfs_extent_flags(eb, ei);
		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
			if (key->type == BTRFS_METADATA_ITEM_KEY) {
				/* a skinny metadata extent */
				*out_eiref =
				     (struct btrfs_extent_inline_ref *)(ei + 1);
			} else {
				WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY);
				info = (struct btrfs_tree_block_info *)(ei + 1);
				*out_eiref =
				   (struct btrfs_extent_inline_ref *)(info + 1);
			}
		} else {
			*out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
		}
		*ptr = (unsigned long)*out_eiref;
		if ((unsigned long)(*ptr) >= (unsigned long)ei + item_size)
			return -ENOENT;
	}

	end = (unsigned long)ei + item_size;
	*out_eiref = (struct btrfs_extent_inline_ref *)(*ptr);
	*out_type = btrfs_get_extent_inline_ref_type(eb, *out_eiref,
						     BTRFS_REF_TYPE_ANY);
	if (*out_type == BTRFS_REF_TYPE_INVALID)
		return -EINVAL;

	*ptr += btrfs_extent_inline_ref_size(*out_type);
	WARN_ON(*ptr > end);
	if (*ptr == end)
		return 1; /* last */

	return 0;
}
/*
 * reads the tree block backref for an extent. tree level and root are returned
 * through out_level and out_root. ptr must point to a 0 value for the first
 * call and may be modified (see get_extent_inline_ref comment).
 * returns 0 if data was provided, 1 if there was no more data to provide or
 * <0 on error.
 */
int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
			    struct btrfs_key *key, struct btrfs_extent_item *ei,
			    u32 item_size, u64 *out_root, u8 *out_level)
{
	int ret;
	int type;
	struct btrfs_extent_inline_ref *eiref;

	if (*ptr == (unsigned long)-1)
		return 1;

	while (1) {
		ret = get_extent_inline_ref(ptr, eb, key, ei, item_size,
					    &eiref, &type);
		if (ret < 0)
			return ret;

		if (type == BTRFS_TREE_BLOCK_REF_KEY ||
		    type == BTRFS_SHARED_BLOCK_REF_KEY)
			break;

		if (ret == 1)
			return 1;
	}

	/* we can treat both ref types equally here */
	*out_root = btrfs_extent_inline_ref_offset(eb, eiref);

	if (key->type == BTRFS_EXTENT_ITEM_KEY) {
		struct btrfs_tree_block_info *info;

		info = (struct btrfs_tree_block_info *)(ei + 1);
		*out_level = btrfs_tree_block_level(eb, info);
	} else {
		ASSERT(key->type == BTRFS_METADATA_ITEM_KEY);
		*out_level = (u8)key->offset;
	}

	if (ret == 1)
		*ptr = (unsigned long)-1;

	return 0;
}
static int iterate_leaf_refs(struct btrfs_fs_info *fs_info,
			     struct extent_inode_elem *inode_list,
			     u64 root, u64 extent_item_objectid,
			     iterate_extent_inodes_t *iterate, void *ctx)
{
	struct extent_inode_elem *eie;
	int ret = 0;

	for (eie = inode_list; eie; eie = eie->next) {
		btrfs_debug(fs_info,
			    "ref for %llu resolved, key (%llu EXTENT_DATA %llu), root %llu",
			    extent_item_objectid, eie->inum,
			    eie->offset, root);
		ret = iterate(eie->inum, eie->offset, root, ctx);
		if (ret) {
			btrfs_debug(fs_info,
				    "stopping iteration for %llu due to ret=%d",
				    extent_item_objectid, ret);
			break;
		}
	}

	return ret;
}
/*
 * calls iterate() for every inode that references the extent identified by
 * the given parameters.
 * when the iterator function returns a non-zero value, iteration stops.
 */
int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
				u64 extent_item_objectid, u64 extent_item_pos,
				int search_commit_root,
				iterate_extent_inodes_t *iterate, void *ctx,
				bool ignore_offset)
{
	int ret;
	struct btrfs_trans_handle *trans = NULL;
	struct ulist *refs = NULL;
	struct ulist *roots = NULL;
	struct ulist_node *ref_node = NULL;
	struct ulist_node *root_node = NULL;
	struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem);
	struct ulist_iterator ref_uiter;
	struct ulist_iterator root_uiter;

	btrfs_debug(fs_info, "resolving all inodes for extent %llu",
			extent_item_objectid);

	if (!search_commit_root) {
		trans = btrfs_join_transaction(fs_info->extent_root);
		if (IS_ERR(trans))
			return PTR_ERR(trans);
		btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
	} else {
		down_read(&fs_info->commit_root_sem);
	}

	ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
				   tree_mod_seq_elem.seq, &refs,
				   &extent_item_pos, ignore_offset);
	if (ret)
		goto out;

	ULIST_ITER_INIT(&ref_uiter);
	while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
		ret = btrfs_find_all_roots_safe(trans, fs_info, ref_node->val,
						tree_mod_seq_elem.seq, &roots,
						ignore_offset);
		if (ret)
			break;
		ULIST_ITER_INIT(&root_uiter);
		while (!ret && (root_node = ulist_next(roots, &root_uiter))) {
			btrfs_debug(fs_info,
				    "root %llu references leaf %llu, data list %#llx",
				    root_node->val, ref_node->val,
				    ref_node->aux);
			ret = iterate_leaf_refs(fs_info,
						(struct extent_inode_elem *)
						(uintptr_t)ref_node->aux,
						root_node->val,
						extent_item_objectid,
						iterate, ctx);
		}
		ulist_free(roots);
	}

	free_leaf_list(refs);
out:
	if (!search_commit_root) {
		btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
		btrfs_end_transaction(trans);
	} else {
		up_read(&fs_info->commit_root_sem);
	}

	return ret;
}
int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
				struct btrfs_path *path,
				iterate_extent_inodes_t *iterate, void *ctx,
				bool ignore_offset)
{
	int ret;
	u64 extent_item_pos;
	u64 flags = 0;
	struct btrfs_key found_key;
	int search_commit_root = path->search_commit_root;

	ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
	btrfs_release_path(path);
	if (ret < 0)
		return ret;
	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		return -EINVAL;

	extent_item_pos = logical - found_key.objectid;
	ret = iterate_extent_inodes(fs_info, found_key.objectid,
					extent_item_pos, search_commit_root,
					iterate, ctx, ignore_offset);

	return ret;
}
typedef int (iterate_irefs_t)(u64 parent, u32 name_len, unsigned long name_off,
			      struct extent_buffer *eb, void *ctx);
static int iterate_inode_refs(u64 inum, struct btrfs_root *fs_root,
			      struct btrfs_path *path,
			      iterate_irefs_t *iterate, void *ctx)
{
	int ret = 0;
	int slot;
	u32 cur;
	u32 len;
	u32 name_len;
	u64 parent = 0;
	int found = 0;
	struct extent_buffer *eb;
	struct btrfs_item *item;
	struct btrfs_inode_ref *iref;
	struct btrfs_key found_key;

	while (!ret) {
		ret = btrfs_find_item(fs_root, path, inum,
				parent ? parent + 1 : 0, BTRFS_INODE_REF_KEY,
				&found_key);

		if (ret < 0)
			break;
		if (ret) {
			ret = found ? 0 : -ENOENT;
			break;
		}
		++found;

		parent = found_key.offset;
		slot = path->slots[0];
		eb = btrfs_clone_extent_buffer(path->nodes[0]);
		if (!eb) {
			ret = -ENOMEM;
			break;
		}
		extent_buffer_get(eb);
		btrfs_tree_read_lock(eb);
		btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
		btrfs_release_path(path);

		item = btrfs_item_nr(slot);
		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);

		for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) {
			name_len = btrfs_inode_ref_name_len(eb, iref);
			/* path must be released before calling iterate()! */
			btrfs_debug(fs_root->fs_info,
				"following ref at offset %u for inode %llu in tree %llu",
				cur, found_key.objectid, fs_root->objectid);
			ret = iterate(parent, name_len,
				      (unsigned long)(iref + 1), eb, ctx);
			if (ret)
				break;
			len = sizeof(*iref) + name_len;
			iref = (struct btrfs_inode_ref *)((char *)iref + len);
		}
		btrfs_tree_read_unlock_blocking(eb);
		free_extent_buffer(eb);
	}

	btrfs_release_path(path);

	return ret;
}
static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
				 struct btrfs_path *path,
				 iterate_irefs_t *iterate, void *ctx)
{
	int ret;
	int slot;
	u64 offset = 0;
	u64 parent;
	int found = 0;
	struct extent_buffer *eb;
	struct btrfs_inode_extref *extref;
	u32 item_size;
	u32 cur_offset;
	unsigned long ptr;

	while (1) {
		ret = btrfs_find_one_extref(fs_root, inum, offset, path, &extref,
					    &offset);
		if (ret < 0)
			break;
		if (ret) {
			ret = found ? 0 : -ENOENT;
			break;
		}
		++found;

		slot = path->slots[0];
		eb = btrfs_clone_extent_buffer(path->nodes[0]);
		if (!eb) {
			ret = -ENOMEM;
			break;
		}
		extent_buffer_get(eb);

		btrfs_tree_read_lock(eb);
		btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
		btrfs_release_path(path);

		item_size = btrfs_item_size_nr(eb, slot);
		ptr = btrfs_item_ptr_offset(eb, slot);
		cur_offset = 0;

		while (cur_offset < item_size) {
			u32 name_len;

			extref = (struct btrfs_inode_extref *)(ptr + cur_offset);
			parent = btrfs_inode_extref_parent(eb, extref);
			name_len = btrfs_inode_extref_name_len(eb, extref);
			ret = iterate(parent, name_len,
				      (unsigned long)&extref->name, eb, ctx);
			if (ret)
				break;

			cur_offset += btrfs_inode_extref_name_len(eb, extref);
			cur_offset += sizeof(*extref);
		}
		btrfs_tree_read_unlock_blocking(eb);
		free_extent_buffer(eb);

		offset++;
	}

	btrfs_release_path(path);

	return ret;
}
static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
			 struct btrfs_path *path, iterate_irefs_t *iterate,
			 void *ctx)
{
	int ret;
	int found_refs = 0;

	ret = iterate_inode_refs(inum, fs_root, path, iterate, ctx);
	if (!ret)
		++found_refs;
	else if (ret != -ENOENT)
		return ret;

	ret = iterate_inode_extrefs(inum, fs_root, path, iterate, ctx);
	if (ret == -ENOENT && found_refs)
		return 0;

	return ret;
}
/*
 * returns 0 if the path could be dumped (probably truncated)
 * returns <0 in case of an error
 */
static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
			 struct extent_buffer *eb, void *ctx)
{
	struct inode_fs_paths *ipath = ctx;
	char *fspath;
	char *fspath_min;
	int i = ipath->fspath->elem_cnt;
	const int s_ptr = sizeof(char *);
	u32 bytes_left;

	bytes_left = ipath->fspath->bytes_left > s_ptr ?
					ipath->fspath->bytes_left - s_ptr : 0;

	fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
	fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len,
				   name_off, eb, inum, fspath_min, bytes_left);
	if (IS_ERR(fspath))
		return PTR_ERR(fspath);

	if (fspath > fspath_min) {
		ipath->fspath->val[i] = (u64)(unsigned long)fspath;
		++ipath->fspath->elem_cnt;
		ipath->fspath->bytes_left = fspath - fspath_min;
	} else {
		++ipath->fspath->elem_missed;
		ipath->fspath->bytes_missing += fspath_min - fspath;
		ipath->fspath->bytes_left = 0;
	}

	return 0;
}
/*
 * this dumps all file system paths to the inode into the ipath struct, provided
 * it has been created large enough. each path is zero-terminated and accessed
 * from ipath->fspath->val[i].
 * when it returns, there are ipath->fspath->elem_cnt number of paths available
 * in ipath->fspath->val[]. when the allocated space wasn't sufficient, the
 * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise,
 * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
 * have been needed to return all paths.
 */
int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
{
	return iterate_irefs(inum, ipath->fs_root, ipath->btrfs_path,
			     inode_to_path, ipath);
}
struct btrfs_data_container *init_data_container(u32 total_bytes)
{
	struct btrfs_data_container *data;
	size_t alloc_bytes;

	alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
	data = kvmalloc(alloc_bytes, GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (total_bytes >= sizeof(*data)) {
		data->bytes_left = total_bytes - sizeof(*data);
		data->bytes_missing = 0;
	} else {
		data->bytes_missing = sizeof(*data) - total_bytes;
		data->bytes_left = 0;
	}

	data->elem_cnt = 0;
	data->elem_missed = 0;

	return data;
}
/*
 * allocates space to return multiple file system paths for an inode.
 * total_bytes to allocate are passed, note that space usable for actual path
 * information will be total_bytes - sizeof(struct inode_fs_paths).
 * the returned pointer must be freed with free_ipath() in the end.
 */
struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
					struct btrfs_path *path)
{
	struct inode_fs_paths *ifp;
	struct btrfs_data_container *fspath;

	fspath = init_data_container(total_bytes);
	if (IS_ERR(fspath))
		return (void *)fspath;

	ifp = kmalloc(sizeof(*ifp), GFP_KERNEL);
	if (!ifp) {
		kvfree(fspath);
		return ERR_PTR(-ENOMEM);
	}

	ifp->btrfs_path = path;
	ifp->fspath = fspath;
	ifp->fs_root = fs_root;

	return ifp;
}
void free_ipath(struct inode_fs_paths *ipath)
{
	if (!ipath)
		return;
	kvfree(ipath->fspath);
	kfree(ipath);
}