/*
 * Copyright (C) 2011 STRATO.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <trace/events/btrfs.h>
#include "ctree.h"
#include "disk-io.h"
#include "backref.h"
#include "ulist.h"
#include "transaction.h"
#include "delayed-ref.h"
#include "locking.h"
/* Just an arbitrary number so we can be sure this happened */
#define BACKREF_FOUND_SHARED 6
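/*
 * One extent_inode_elem records a single (inode, file offset) pair that
 * references the extent being resolved; entries are chained through @next
 * and attached to the ulist node of the leaf they were found in.
 */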
struct extent_inode_elem {
	u64 inum;
	u64 offset;
	struct extent_inode_elem *next;
};
static int check_extent_in_eb(const struct btrfs_key *key,
			      const struct extent_buffer *eb,
			      const struct btrfs_file_extent_item *fi,
			      u64 extent_item_pos,
			      struct extent_inode_elem **eie)
{
	u64 offset = 0;
	struct extent_inode_elem *e;

	if (!btrfs_file_extent_compression(eb, fi) &&
	    !btrfs_file_extent_encryption(eb, fi) &&
	    !btrfs_file_extent_other_encoding(eb, fi)) {
		u64 data_offset;
		u64 data_len;

		data_offset = btrfs_file_extent_offset(eb, fi);
		data_len = btrfs_file_extent_num_bytes(eb, fi);

		if (extent_item_pos < data_offset ||
		    extent_item_pos >= data_offset + data_len)
			return 1;
		offset = extent_item_pos - data_offset;
	}

	e = kmalloc(sizeof(*e), GFP_NOFS);
	if (!e)
		return -ENOMEM;

	e->next = *eie;
	e->inum = key->objectid;
	e->offset = key->offset + offset;
	*eie = e;

	return 0;
}
static void free_inode_elem_list(struct extent_inode_elem *eie)
{
	struct extent_inode_elem *eie_next;

	for (; eie; eie = eie_next) {
		eie_next = eie->next;
		kfree(eie);
	}
}
static int find_extent_in_eb(const struct extent_buffer *eb,
			     u64 wanted_disk_byte, u64 extent_item_pos,
			     struct extent_inode_elem **eie)
{
	u64 disk_byte;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int slot;
	int nritems;
	int extent_type;
	int ret;

	/*
	 * from the shared data ref, we only have the leaf but we need
	 * the key. thus, we must look into all items and see that we
	 * find one (some) with a reference to our extent item.
	 */
	nritems = btrfs_header_nritems(eb);
	for (slot = 0; slot < nritems; ++slot) {
		btrfs_item_key_to_cpu(eb, &key, slot);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(eb, fi);
		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
			continue;
		/* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		if (disk_byte != wanted_disk_byte)
			continue;

		ret = check_extent_in_eb(&key, eb, fi, extent_item_pos, eie);
		if (ret < 0)
			return ret;
	}

	return 0;
}
struct preftree {
	struct rb_root root;
	unsigned int count;
};

#define PREFTREE_INIT	{ .root = RB_ROOT, .count = 0 }

struct preftrees {
	struct preftree direct;    /* BTRFS_SHARED_[DATA|BLOCK]_REF_KEY */
	struct preftree indirect;  /* BTRFS_[TREE_BLOCK|EXTENT_DATA]_REF_KEY */
	struct preftree indirect_missing_keys;
};
/*
 * Checks for a shared extent during backref search.
 *
 * The share_count tracks prelim_refs (direct and indirect) having a
 * ref->count >0:
 *  - incremented when a ref->count transitions to >0
 *  - decremented when a ref->count transitions to <1
 */
struct share_check {
	u64 root_objectid;
	u64 inum;
	int share_count;
};

static inline int extent_is_shared(struct share_check *sc)
{
	return (sc && sc->share_count > 1) ? BACKREF_FOUND_SHARED : 0;
}
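/*
 * Note: BACKREF_FOUND_SHARED is deliberately a positive value so callers can
 * tell it apart from the negative errno codes these helpers also return;
 * find_parent_nodes() propagates it up to btrfs_check_shared().
 */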
static struct kmem_cache *btrfs_prelim_ref_cache;

int __init btrfs_prelim_ref_init(void)
{
	btrfs_prelim_ref_cache = kmem_cache_create("btrfs_prelim_ref",
					sizeof(struct prelim_ref),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_prelim_ref_cache)
		return -ENOMEM;
	return 0;
}

void btrfs_prelim_ref_exit(void)
{
	kmem_cache_destroy(btrfs_prelim_ref_cache);
}

static void free_pref(struct prelim_ref *ref)
{
	kmem_cache_free(btrfs_prelim_ref_cache, ref);
}
/*
 * Return 0 when both refs are for the same block (and can be merged).
 * A -1 return indicates ref1 is a 'lower' block than ref2, while 1
 * indicates a 'higher' block.
 */
static int prelim_ref_compare(struct prelim_ref *ref1,
			      struct prelim_ref *ref2)
{
	if (ref1->level < ref2->level)
		return -1;
	if (ref1->level > ref2->level)
		return 1;
	if (ref1->root_id < ref2->root_id)
		return -1;
	if (ref1->root_id > ref2->root_id)
		return 1;
	if (ref1->key_for_search.type < ref2->key_for_search.type)
		return -1;
	if (ref1->key_for_search.type > ref2->key_for_search.type)
		return 1;
	if (ref1->key_for_search.objectid < ref2->key_for_search.objectid)
		return -1;
	if (ref1->key_for_search.objectid > ref2->key_for_search.objectid)
		return 1;
	if (ref1->key_for_search.offset < ref2->key_for_search.offset)
		return -1;
	if (ref1->key_for_search.offset > ref2->key_for_search.offset)
		return 1;
	if (ref1->parent < ref2->parent)
		return -1;
	if (ref1->parent > ref2->parent)
		return 1;

	return 0;
}
static void update_share_count(struct share_check *sc, int oldcount,
			       int newcount)
{
	if ((!sc) || (oldcount == 0 && newcount < 1))
		return;

	if (oldcount > 0 && newcount < 1)
		sc->share_count--;
	else if (oldcount < 1 && newcount > 0)
		sc->share_count++;
}
/*
 * Add @newref to the @root rbtree, merging identical refs.
 *
 * Callers should assume that newref has been freed after calling.
 */
static void prelim_ref_insert(const struct btrfs_fs_info *fs_info,
			      struct preftree *preftree,
			      struct prelim_ref *newref,
			      struct share_check *sc)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct prelim_ref *ref;
	int result;

	root = &preftree->root;
	p = &root->rb_node;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct prelim_ref, rbnode);
		result = prelim_ref_compare(ref, newref);
		if (result < 0) {
			p = &(*p)->rb_left;
		} else if (result > 0) {
			p = &(*p)->rb_right;
		} else {
			/* Identical refs, merge them and free @newref */
			struct extent_inode_elem *eie = ref->inode_list;

			while (eie && eie->next)
				eie = eie->next;

			if (!eie)
				ref->inode_list = newref->inode_list;
			else
				eie->next = newref->inode_list;
			trace_btrfs_prelim_ref_merge(fs_info, ref, newref,
						     preftree->count);
			/*
			 * A delayed ref can have newref->count < 0.
			 * The ref->count is updated to follow any
			 * BTRFS_[ADD|DROP]_DELAYED_REF actions.
			 */
			update_share_count(sc, ref->count,
					   ref->count + newref->count);
			ref->count += newref->count;
			free_pref(newref);
			return;
		}
	}

	update_share_count(sc, 0, newref->count);
	preftree->count++;
	trace_btrfs_prelim_ref_insert(fs_info, newref, NULL, preftree->count);
	rb_link_node(&newref->rbnode, parent, p);
	rb_insert_color(&newref->rbnode, root);
}
/*
 * Release the entire tree.  We don't care about internal consistency so
 * just free everything and then reset the tree root.
 */
static void prelim_release(struct preftree *preftree)
{
	struct prelim_ref *ref, *next_ref;

	rbtree_postorder_for_each_entry_safe(ref, next_ref, &preftree->root,
					     rbnode)
		free_pref(ref);

	preftree->root = RB_ROOT;
}
/*
 * the rules for all callers of this function are:
 * - obtaining the parent is the goal
 * - if you add a key, you must know that it is a correct key
 * - if you cannot add the parent or a correct key, then we will look into the
 *   block later to set a correct key
 *
 * delayed refs
 * ============
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    -   |     -
 *      key to resolve |    -   |     y    |    y   |     y
 *  tree block logical |    -   |     -    |    -   |     -
 *  root for resolving |    y   |     y    |    y   |     y
 *
 * - column 1:       we've the parent -> done
 * - column 2, 3, 4: we use the key to find the parent
 *
 * on disk refs (inline or keyed)
 * ==============================
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    y   |     -
 *      key to resolve |    -   |     -    |    -   |     y
 *  tree block logical |    y   |     y    |    y   |     y
 *  root for resolving |    -   |     y    |    y   |     y
 *
 * - column 1, 3: we've the parent -> done
 * - column 2:    we take the first key from the block to find the parent
 *                (see add_missing_keys)
 * - column 4:    we use the key to find the parent
 *
 * additional information that's available but not required to find the parent
 * block might help in merging entries to gain some speed.
 */
static int add_prelim_ref(const struct btrfs_fs_info *fs_info,
			  struct preftree *preftree, u64 root_id,
			  const struct btrfs_key *key, int level, u64 parent,
			  u64 wanted_disk_byte, int count,
			  struct share_check *sc, gfp_t gfp_mask)
{
	struct prelim_ref *ref;

	if (root_id == BTRFS_DATA_RELOC_TREE_OBJECTID)
		return 0;

	ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask);
	if (!ref)
		return -ENOMEM;

	ref->root_id = root_id;
	if (key) {
		ref->key_for_search = *key;
		/*
		 * We can often find data backrefs with an offset that is too
		 * large (>= LLONG_MAX, maximum allowed file offset) due to
		 * underflows when subtracting a file's offset with the data
		 * offset of its corresponding extent data item. This can
		 * happen for example in the clone ioctl.
		 * So if we detect such case we set the search key's offset to
		 * zero to make sure we will find the matching file extent item
		 * at add_all_parents(), otherwise we will miss it because the
		 * offset taken from the backref is much larger than the offset
		 * of the file extent item. This can make us scan a very large
		 * number of file extent items, but at least it will not make
		 * us miss any.
		 * This is an ugly workaround for a behaviour that should have
		 * never existed, but it does and a fix for the clone ioctl
		 * would touch a lot of places, cause backwards incompatibility
		 * and would not fix the problem for extents cloned with older
		 * kernels.
		 */
		if (ref->key_for_search.type == BTRFS_EXTENT_DATA_KEY &&
		    ref->key_for_search.offset >= LLONG_MAX)
			ref->key_for_search.offset = 0;
	} else {
		memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));
	}

	ref->inode_list = NULL;
	ref->level = level;
	ref->count = count;
	ref->parent = parent;
	ref->wanted_disk_byte = wanted_disk_byte;
	prelim_ref_insert(fs_info, preftree, ref, sc);
	return extent_is_shared(sc);
}
/* direct refs use root == 0, key == NULL */
static int add_direct_ref(const struct btrfs_fs_info *fs_info,
			  struct preftrees *preftrees, int level, u64 parent,
			  u64 wanted_disk_byte, int count,
			  struct share_check *sc, gfp_t gfp_mask)
{
	return add_prelim_ref(fs_info, &preftrees->direct, 0, NULL, level,
			      parent, wanted_disk_byte, count, sc, gfp_mask);
}
/* indirect refs use parent == 0 */
static int add_indirect_ref(const struct btrfs_fs_info *fs_info,
			    struct preftrees *preftrees, u64 root_id,
			    const struct btrfs_key *key, int level,
			    u64 wanted_disk_byte, int count,
			    struct share_check *sc, gfp_t gfp_mask)
{
	struct preftree *tree = &preftrees->indirect;

	if (!key)
		tree = &preftrees->indirect_missing_keys;
	return add_prelim_ref(fs_info, tree, root_id, key, level, 0,
			      wanted_disk_byte, count, sc, gfp_mask);
}
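/*
 * Indirect refs that arrive without a key cannot be resolved to a parent yet;
 * they are parked in preftrees->indirect_missing_keys and get their key filled
 * in by add_missing_keys() before resolve_indirect_refs() runs.
 */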
static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
			   struct ulist *parents, struct prelim_ref *ref,
			   int level, u64 time_seq, const u64 *extent_item_pos,
			   u64 total_refs)
{
	int ret = 0;
	int slot;
	struct extent_buffer *eb;
	struct btrfs_key key;
	struct btrfs_key *key_for_search = &ref->key_for_search;
	struct btrfs_file_extent_item *fi;
	struct extent_inode_elem *eie = NULL, *old = NULL;
	u64 disk_byte;
	u64 wanted_disk_byte = ref->wanted_disk_byte;
	u64 count = 0;

	if (level != 0) {
		eb = path->nodes[level];
		ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
		if (ret < 0)
			return ret;
		return 0;
	}

	/*
	 * We normally enter this function with the path already pointing to
	 * the first item to check. But sometimes, we may enter it with
	 * slot == nritems. In that case, go to the next leaf before we
	 * continue.
	 */
	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
		if (time_seq == SEQ_LAST)
			ret = btrfs_next_leaf(root, path);
		else
			ret = btrfs_next_old_leaf(root, path, time_seq);
	}

	while (!ret && count < total_refs) {
		eb = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(eb, &key, slot);

		if (key.objectid != key_for_search->objectid ||
		    key.type != BTRFS_EXTENT_DATA_KEY)
			break;

		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);

		if (disk_byte == wanted_disk_byte) {
			eie = NULL;
			old = NULL;
			count++;
			if (extent_item_pos) {
				ret = check_extent_in_eb(&key, eb, fi,
						*extent_item_pos,
						&eie);
				if (ret < 0)
					break;
			}
			if (ret > 0)
				goto next;
			ret = ulist_add_merge_ptr(parents, eb->start,
						  eie, (void **)&old, GFP_NOFS);
			if (ret < 0)
				break;
			if (!ret && extent_item_pos) {
				while (old->next)
					old = old->next;
				old->next = eie;
			}
			eie = NULL;
		}
next:
		if (time_seq == SEQ_LAST)
			ret = btrfs_next_item(root, path);
		else
			ret = btrfs_next_old_item(root, path, time_seq);
	}

	if (ret > 0)
		ret = 0;
	else if (ret < 0)
		free_inode_elem_list(eie);
	return ret;
}
/*
 * resolve an indirect backref in the form (root_id, key, level)
 * to a logical address
 */
static int resolve_indirect_ref(struct btrfs_fs_info *fs_info,
				struct btrfs_path *path, u64 time_seq,
				struct prelim_ref *ref, struct ulist *parents,
				const u64 *extent_item_pos, u64 total_refs)
{
	struct btrfs_root *root;
	struct btrfs_key root_key;
	struct extent_buffer *eb;
	int ret = 0;
	int root_level;
	int level = ref->level;
	int index;

	root_key.objectid = ref->root_id;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;

	index = srcu_read_lock(&fs_info->subvol_srcu);

	root = btrfs_get_fs_root(fs_info, &root_key, false);
	if (IS_ERR(root)) {
		srcu_read_unlock(&fs_info->subvol_srcu, index);
		ret = PTR_ERR(root);
		goto out;
	}

	if (btrfs_is_testing(fs_info)) {
		srcu_read_unlock(&fs_info->subvol_srcu, index);
		ret = -ENOENT;
		goto out;
	}

	if (path->search_commit_root)
		root_level = btrfs_header_level(root->commit_root);
	else if (time_seq == SEQ_LAST)
		root_level = btrfs_header_level(root->node);
	else
		root_level = btrfs_old_root_level(root, time_seq);

	if (root_level + 1 == level) {
		srcu_read_unlock(&fs_info->subvol_srcu, index);
		goto out;
	}

	path->lowest_level = level;
	if (time_seq == SEQ_LAST)
		ret = btrfs_search_slot(NULL, root, &ref->key_for_search, path,
					0, 0);
	else
		ret = btrfs_search_old_slot(root, &ref->key_for_search, path,
					    time_seq);

	/* root node has been locked, we can release @subvol_srcu safely here */
	srcu_read_unlock(&fs_info->subvol_srcu, index);

	btrfs_debug(fs_info,
		"search slot in root %llu (level %d, ref count %d) returned %d for key (%llu %u %llu)",
		 ref->root_id, level, ref->count, ret,
		 ref->key_for_search.objectid, ref->key_for_search.type,
		 ref->key_for_search.offset);
	if (ret < 0)
		goto out;

	eb = path->nodes[level];
	while (!eb) {
		if (WARN_ON(!level)) {
			ret = 1;
			goto out;
		}
		level--;
		eb = path->nodes[level];
	}

	ret = add_all_parents(root, path, parents, ref, level, time_seq,
			      extent_item_pos, total_refs);
out:
	path->lowest_level = 0;
	btrfs_release_path(path);
	return ret;
}
static struct extent_inode_elem *
unode_aux_to_inode_list(struct ulist_node *node)
{
	if (!node)
		return NULL;
	return (struct extent_inode_elem *)(uintptr_t)node->aux;
}
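/*
 * The ulist has no dedicated payload besides @aux, so the backref code stashes
 * the extent_inode_elem chain for each parent block in node->aux and casts it
 * back with this helper.
 */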
/*
 * We maintain three separate rbtrees: one for direct refs, one for
 * indirect refs which have a key, and one for indirect refs which do not
 * have a key. Each tree does merge on insertion.
 *
 * Once all of the references are located, we iterate over the tree of
 * indirect refs with missing keys. An appropriate key is located and
 * the ref is moved onto the tree for indirect refs. After all missing
 * keys are thus located, we iterate over the indirect ref tree, resolve
 * each reference, and then insert the resolved reference onto the
 * direct tree (merging there too).
 *
 * New backrefs (i.e., for parent nodes) are added to the appropriate
 * rbtree as they are encountered. The new backrefs are subsequently
 * resolved as above.
 */
static int resolve_indirect_refs(struct btrfs_fs_info *fs_info,
				 struct btrfs_path *path, u64 time_seq,
				 struct preftrees *preftrees,
				 const u64 *extent_item_pos, u64 total_refs,
				 struct share_check *sc)
{
	int err;
	int ret = 0;
	struct ulist *parents;
	struct ulist_node *node;
	struct ulist_iterator uiter;
	struct rb_node *rnode;

	parents = ulist_alloc(GFP_NOFS);
	if (!parents)
		return -ENOMEM;

	/*
	 * We could trade memory usage for performance here by iterating
	 * the tree, allocating new refs for each insertion, and then
	 * freeing the entire indirect tree when we're done. In some test
	 * cases, the tree can grow quite large (~200k objects).
	 */
	while ((rnode = rb_first(&preftrees->indirect.root))) {
		struct prelim_ref *ref;

		ref = rb_entry(rnode, struct prelim_ref, rbnode);
		if (WARN(ref->parent,
			 "BUG: direct ref found in indirect tree")) {
			ret = -EINVAL;
			goto out;
		}

		rb_erase(&ref->rbnode, &preftrees->indirect.root);
		preftrees->indirect.count--;

		if (ref->count == 0) {
			free_pref(ref);
			continue;
		}

		if (sc && sc->root_objectid &&
		    ref->root_id != sc->root_objectid) {
			free_pref(ref);
			ret = BACKREF_FOUND_SHARED;
			goto out;
		}
		err = resolve_indirect_ref(fs_info, path, time_seq, ref,
					   parents, extent_item_pos,
					   total_refs);
		/*
		 * we can only tolerate ENOENT, otherwise, we should catch
		 * error and return directly.
		 */
		if (err == -ENOENT) {
			prelim_ref_insert(fs_info, &preftrees->direct, ref,
					  NULL);
			continue;
		} else if (err) {
			free_pref(ref);
			ret = err;
			goto out;
		}

		/* we put the first parent into the ref at hand */
		ULIST_ITER_INIT(&uiter);
		node = ulist_next(parents, &uiter);
		ref->parent = node ? node->val : 0;
		ref->inode_list = unode_aux_to_inode_list(node);

		/* Add a prelim_ref(s) for any other parent(s). */
		while ((node = ulist_next(parents, &uiter))) {
			struct prelim_ref *new_ref;

			new_ref = kmem_cache_alloc(btrfs_prelim_ref_cache,
						   GFP_NOFS);
			if (!new_ref) {
				free_pref(ref);
				ret = -ENOMEM;
				goto out;
			}
			memcpy(new_ref, ref, sizeof(*ref));
			new_ref->parent = node->val;
			new_ref->inode_list = unode_aux_to_inode_list(node);
			prelim_ref_insert(fs_info, &preftrees->direct,
					  new_ref, NULL);
		}

		/*
		 * Now it's a direct ref, put it in the direct tree. We must
		 * do this last because the ref could be merged/freed here.
		 */
		prelim_ref_insert(fs_info, &preftrees->direct, ref, NULL);

		ulist_reinit(parents);
	}
out:
	ulist_free(parents);
	return ret;
}
/*
 * read tree blocks and add keys where required.
 */
static int add_missing_keys(struct btrfs_fs_info *fs_info,
			    struct preftrees *preftrees, bool lock)
{
	struct prelim_ref *ref;
	struct extent_buffer *eb;
	struct preftree *tree = &preftrees->indirect_missing_keys;
	struct rb_node *node;

	while ((node = rb_first(&tree->root))) {
		ref = rb_entry(node, struct prelim_ref, rbnode);
		rb_erase(node, &tree->root);

		BUG_ON(ref->parent);	/* should not be a direct ref */
		BUG_ON(ref->key_for_search.type);
		BUG_ON(!ref->wanted_disk_byte);

		eb = read_tree_block(fs_info, ref->wanted_disk_byte, 0);
		if (IS_ERR(eb)) {
			free_pref(ref);
			return PTR_ERR(eb);
		} else if (!extent_buffer_uptodate(eb)) {
			free_pref(ref);
			free_extent_buffer(eb);
			return -EIO;
		}
		if (lock)
			btrfs_tree_read_lock(eb);
		if (btrfs_header_level(eb) == 0)
			btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
		else
			btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0);
		if (lock)
			btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
		prelim_ref_insert(fs_info, &preftrees->indirect, ref, NULL);
	}
	return 0;
}
/*
 * add all currently queued delayed refs from this head whose seq nr is
 * smaller than or equal to seq to the list
 */
static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_head *head, u64 seq,
			    struct preftrees *preftrees, u64 *total_refs,
			    struct share_check *sc)
{
	struct btrfs_delayed_ref_node *node;
	struct btrfs_delayed_extent_op *extent_op = head->extent_op;
	struct btrfs_key key;
	struct btrfs_key tmp_op_key;
	struct btrfs_key *op_key = NULL;
	int count;
	int ret = 0;

	if (extent_op && extent_op->update_key) {
		btrfs_disk_key_to_cpu(&tmp_op_key, &extent_op->key);
		op_key = &tmp_op_key;
	}

	spin_lock(&head->lock);
	list_for_each_entry(node, &head->ref_list, list) {
		if (node->seq > seq)
			continue;

		switch (node->action) {
		case BTRFS_ADD_DELAYED_EXTENT:
		case BTRFS_UPDATE_DELAYED_HEAD:
			WARN_ON(1);
			continue;
		case BTRFS_ADD_DELAYED_REF:
			count = node->ref_mod;
			break;
		case BTRFS_DROP_DELAYED_REF:
			count = node->ref_mod * -1;
			break;
		default:
			BUG_ON(1);
		}
		*total_refs += count;
		switch (node->type) {
		case BTRFS_TREE_BLOCK_REF_KEY: {
			/* NORMAL INDIRECT METADATA backref */
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);
			ret = add_indirect_ref(fs_info, preftrees, ref->root,
					       &tmp_op_key, ref->level + 1,
					       node->bytenr, count, sc,
					       GFP_ATOMIC);
			break;
		}
		case BTRFS_SHARED_BLOCK_REF_KEY: {
			/* SHARED DIRECT METADATA backref */
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);

			ret = add_direct_ref(fs_info, preftrees, ref->level + 1,
					     ref->parent, node->bytenr, count,
					     sc, GFP_ATOMIC);
			break;
		}
		case BTRFS_EXTENT_DATA_REF_KEY: {
			/* NORMAL INDIRECT DATA backref */
			struct btrfs_delayed_data_ref *ref;

			ref = btrfs_delayed_node_to_data_ref(node);

			key.objectid = ref->objectid;
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = ref->offset;

			/*
			 * Found an inum that doesn't match our known inum, we
			 * know it's shared.
			 */
			if (sc && sc->inum && ref->objectid != sc->inum) {
				ret = BACKREF_FOUND_SHARED;
				goto out;
			}

			ret = add_indirect_ref(fs_info, preftrees, ref->root,
					       &key, 0, node->bytenr, count, sc,
					       GFP_ATOMIC);
			break;
		}
		case BTRFS_SHARED_DATA_REF_KEY: {
			/* SHARED DIRECT FULL backref */
			struct btrfs_delayed_data_ref *ref;

			ref = btrfs_delayed_node_to_data_ref(node);

			ret = add_direct_ref(fs_info, preftrees, 0, ref->parent,
					     node->bytenr, count, sc,
					     GFP_ATOMIC);
			break;
		}
		default:
			WARN_ON(1);
		}
		/*
		 * We must ignore BACKREF_FOUND_SHARED until all delayed
		 * refs have been checked.
		 */
		if (ret && (ret != BACKREF_FOUND_SHARED))
			break;
	}
	if (!ret)
		ret = extent_is_shared(sc);
out:
	spin_unlock(&head->lock);
	return ret;
}
/*
 * add all inline backrefs for bytenr to the list
 *
 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
 */
static int add_inline_refs(const struct btrfs_fs_info *fs_info,
			   struct btrfs_path *path, u64 bytenr,
			   int *info_level, struct preftrees *preftrees,
			   u64 *total_refs, struct share_check *sc)
{
	int ret = 0;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	unsigned long ptr;
	unsigned long end;
	struct btrfs_extent_item *ei;
	u64 flags;
	u64 item_size;

	/*
	 * enumerate all inline refs
	 */
	leaf = path->nodes[0];
	slot = path->slots[0];

	item_size = btrfs_item_size_nr(leaf, slot);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);
	*total_refs += btrfs_extent_refs(leaf, ei);
	btrfs_item_key_to_cpu(leaf, &found_key, slot);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
	    flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		struct btrfs_tree_block_info *info;

		info = (struct btrfs_tree_block_info *)ptr;
		*info_level = btrfs_tree_block_level(leaf, info);
		ptr += sizeof(struct btrfs_tree_block_info);
	} else if (found_key.type == BTRFS_METADATA_ITEM_KEY) {
		*info_level = found_key.offset;
	} else {
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
	}

	while (ptr < end) {
		struct btrfs_extent_inline_ref *iref;
		u64 offset;
		int type;

		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_get_extent_inline_ref_type(leaf, iref,
							BTRFS_REF_TYPE_ANY);
		if (type == BTRFS_REF_TYPE_INVALID)
			return -EINVAL;

		offset = btrfs_extent_inline_ref_offset(leaf, iref);

		switch (type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = add_direct_ref(fs_info, preftrees,
					     *info_level + 1, offset,
					     bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = (struct btrfs_shared_data_ref *)(iref + 1);
			count = btrfs_shared_data_ref_count(leaf, sdref);

			ret = add_direct_ref(fs_info, preftrees, 0, offset,
					     bytenr, count, sc, GFP_NOFS);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = add_indirect_ref(fs_info, preftrees, offset,
					       NULL, *info_level + 1,
					       bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);

			if (sc && sc->inum && key.objectid != sc->inum) {
				ret = BACKREF_FOUND_SHARED;
				break;
			}

			root = btrfs_extent_data_ref_root(leaf, dref);

			ret = add_indirect_ref(fs_info, preftrees, root,
					       &key, 0, bytenr, count,
					       sc, GFP_NOFS);
			break;
		}
		default:
			WARN_ON(1);
		}
		if (ret)
			return ret;
		ptr += btrfs_extent_inline_ref_size(type);
	}

	return 0;
}
/*
 * add all non-inline backrefs for bytenr to the list
 *
 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
 */
static int add_keyed_refs(struct btrfs_fs_info *fs_info,
			  struct btrfs_path *path, u64 bytenr,
			  int info_level, struct preftrees *preftrees,
			  struct share_check *sc)
{
	struct btrfs_root *extent_root = fs_info->extent_root;
	int ret;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	while (1) {
		ret = btrfs_next_item(extent_root, path);
		if (ret < 0)
			break;
		if (ret) {
			ret = 0;
			break;
		}

		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);

		if (key.objectid != bytenr)
			break;
		if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
			continue;
		if (key.type > BTRFS_SHARED_DATA_REF_KEY)
			break;

		switch (key.type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			/* SHARED DIRECT METADATA backref */
			ret = add_direct_ref(fs_info, preftrees,
					     info_level + 1, key.offset,
					     bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			/* SHARED DIRECT FULL backref */
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = btrfs_item_ptr(leaf, slot,
					       struct btrfs_shared_data_ref);
			count = btrfs_shared_data_ref_count(leaf, sdref);

			ret = add_direct_ref(fs_info, preftrees, 0,
					     key.offset, bytenr, count,
					     sc, GFP_NOFS);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			/* NORMAL INDIRECT METADATA backref */
			ret = add_indirect_ref(fs_info, preftrees, key.offset,
					       NULL, info_level + 1, bytenr,
					       1, NULL, GFP_NOFS);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			/* NORMAL INDIRECT DATA backref */
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = btrfs_item_ptr(leaf, slot,
					      struct btrfs_extent_data_ref);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);

			if (sc && sc->inum && key.objectid != sc->inum) {
				ret = BACKREF_FOUND_SHARED;
				break;
			}

			root = btrfs_extent_data_ref_root(leaf, dref);
			ret = add_indirect_ref(fs_info, preftrees, root,
					       &key, 0, bytenr, count,
					       sc, GFP_NOFS);
			break;
		}
		default:
			WARN_ON(1);
		}
		if (ret)
			return ret;
	}

	return ret;
}
/*
 * this adds all existing backrefs (inline backrefs, backrefs and delayed
 * refs) for the given bytenr to the refs list, merges duplicates and resolves
 * indirect refs to their parent bytenr.
 * When roots are found, they're added to the roots list
 *
 * If time_seq is set to SEQ_LAST, it will not search delayed_refs, and behave
 * much like trans == NULL case, the difference only lies in it will not
 * commit root.
 * The special case is for qgroup to search roots in commit_transaction().
 *
 * @sc - if !NULL, then immediately return BACKREF_FOUND_SHARED when a
 * shared extent is detected.
 *
 * Otherwise this returns 0 for success and <0 for an error.
 *
 * FIXME some caching might speed things up
 */
static int find_parent_nodes(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info, u64 bytenr,
			     u64 time_seq, struct ulist *refs,
			     struct ulist *roots, const u64 *extent_item_pos,
			     struct share_check *sc)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_delayed_ref_root *delayed_refs = NULL;
	struct btrfs_delayed_ref_head *head;
	int info_level = 0;
	int ret;
	struct prelim_ref *ref;
	struct rb_node *node;
	struct extent_inode_elem *eie = NULL;
	/* total of both direct AND indirect refs! */
	u64 total_refs = 0;
	struct preftrees preftrees = {
		.direct = PREFTREE_INIT,
		.indirect = PREFTREE_INIT,
		.indirect_missing_keys = PREFTREE_INIT
	};

	key.objectid = bytenr;
	key.offset = (u64)-1;
	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	if (!trans) {
		path->search_commit_root = 1;
		path->skip_locking = 1;
	}

	if (time_seq == SEQ_LAST)
		path->skip_locking = 1;

	/*
	 * grab both a lock on the path and a lock on the delayed ref head.
	 * We need both to get a consistent picture of how the refs look
	 * at a specified point in time
	 */
again:
	head = NULL;

	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0);

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	if (trans && likely(trans->type != __TRANS_DUMMY) &&
	    time_seq != SEQ_LAST) {
#else
	if (trans && time_seq != SEQ_LAST) {
#endif
		/*
		 * look if there are updates for this ref queued and lock the
		 * head
		 */
		delayed_refs = &trans->transaction->delayed_refs;
		spin_lock(&delayed_refs->lock);
		head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
		if (head) {
			if (!mutex_trylock(&head->mutex)) {
				refcount_inc(&head->node.refs);
				spin_unlock(&delayed_refs->lock);

				btrfs_release_path(path);

				/*
				 * Mutex was contended, block until it's
				 * released and try again
				 */
				mutex_lock(&head->mutex);
				mutex_unlock(&head->mutex);
				btrfs_put_delayed_ref(&head->node);
				goto again;
			}
			spin_unlock(&delayed_refs->lock);
			ret = add_delayed_refs(fs_info, head, time_seq,
					       &preftrees, &total_refs, sc);
			mutex_unlock(&head->mutex);
			if (ret)
				goto out;
		} else {
			spin_unlock(&delayed_refs->lock);
		}
	}

	if (path->slots[0]) {
		struct extent_buffer *leaf;
		int slot;

		path->slots[0]--;
		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid == bytenr &&
		    (key.type == BTRFS_EXTENT_ITEM_KEY ||
		     key.type == BTRFS_METADATA_ITEM_KEY)) {
			ret = add_inline_refs(fs_info, path, bytenr,
					      &info_level, &preftrees,
					      &total_refs, sc);
			if (ret)
				goto out;
			ret = add_keyed_refs(fs_info, path, bytenr, info_level,
					     &preftrees, sc);
			if (ret)
				goto out;
		}
	}

	btrfs_release_path(path);

	ret = add_missing_keys(fs_info, &preftrees, path->skip_locking == 0);
	if (ret)
		goto out;

	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect_missing_keys.root));

	ret = resolve_indirect_refs(fs_info, path, time_seq, &preftrees,
				    extent_item_pos, total_refs, sc);
	if (ret)
		goto out;

	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect.root));

	/*
	 * This walks the tree of merged and resolved refs. Tree blocks are
	 * read in as needed. Unique entries are added to the ulist, and
	 * the list of found roots is updated.
	 *
	 * We release the entire tree in one go before returning.
	 */
	node = rb_first(&preftrees.direct.root);
	while (node) {
		ref = rb_entry(node, struct prelim_ref, rbnode);
		node = rb_next(&ref->rbnode);
		/*
		 * ref->count < 0 can happen here if there are delayed
		 * refs with a node->action of BTRFS_DROP_DELAYED_REF.
		 * prelim_ref_insert() relies on this when merging
		 * identical refs to keep the overall count correct.
		 * prelim_ref_insert() will merge only those refs
		 * which compare identically.  Any refs having
		 * e.g. different offsets would not be merged,
		 * and would retain their original ref->count < 0.
		 */
		if (roots && ref->count && ref->root_id && ref->parent == 0) {
			if (sc && sc->root_objectid &&
			    ref->root_id != sc->root_objectid) {
				ret = BACKREF_FOUND_SHARED;
				goto out;
			}

			/* no parent == root of tree */
			ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
			if (ret < 0)
				goto out;
		}
		if (ref->count && ref->parent) {
			if (extent_item_pos && !ref->inode_list &&
			    ref->level == 0) {
				struct extent_buffer *eb;

				eb = read_tree_block(fs_info, ref->parent, 0);
				if (IS_ERR(eb)) {
					ret = PTR_ERR(eb);
					goto out;
				} else if (!extent_buffer_uptodate(eb)) {
					free_extent_buffer(eb);
					ret = -EIO;
					goto out;
				}
				if (!path->skip_locking) {
					btrfs_tree_read_lock(eb);
					btrfs_set_lock_blocking_rw(eb,
							BTRFS_READ_LOCK);
				}
				ret = find_extent_in_eb(eb, bytenr,
							*extent_item_pos, &eie);
				if (!path->skip_locking)
					btrfs_tree_read_unlock_blocking(eb);
				free_extent_buffer(eb);
				if (ret < 0)
					goto out;
				ref->inode_list = eie;
			}
			ret = ulist_add_merge_ptr(refs, ref->parent,
						  ref->inode_list,
						  (void **)&eie, GFP_NOFS);
			if (ret < 0)
				goto out;
			if (!ret && extent_item_pos) {
				/*
				 * we've recorded that parent, so we must extend
				 * its inode list here
				 */
				BUG_ON(!eie);
				while (eie->next)
					eie = eie->next;
				eie->next = ref->inode_list;
			}
			eie = NULL;
		}
	}

out:
	btrfs_free_path(path);

	prelim_release(&preftrees.direct);
	prelim_release(&preftrees.indirect);
	prelim_release(&preftrees.indirect_missing_keys);

	if (ret < 0)
		free_inode_elem_list(eie);
	return ret;
}
static void free_leaf_list(struct ulist *blocks)
{
	struct ulist_node *node = NULL;
	struct extent_inode_elem *eie;
	struct ulist_iterator uiter;

	ULIST_ITER_INIT(&uiter);
	while ((node = ulist_next(blocks, &uiter))) {
		if (!node->aux)
			continue;
		eie = unode_aux_to_inode_list(node);
		free_inode_elem_list(eie);
		node->aux = 0;
	}

	ulist_free(blocks);
}
/*
 * Finds all leafs with a reference to the specified combination of bytenr and
 * offset. key_list_head will point to a list of corresponding keys (caller must
 * free each list element). The leafs will be stored in the leafs ulist, which
 * must be freed with ulist_free.
 *
 * returns 0 on success, <0 on error
 */
static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
				struct btrfs_fs_info *fs_info, u64 bytenr,
				u64 time_seq, struct ulist **leafs,
				const u64 *extent_item_pos)
{
	int ret;

	*leafs = ulist_alloc(GFP_NOFS);
	if (!*leafs)
		return -ENOMEM;

	ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
				*leafs, NULL, extent_item_pos, NULL);
	if (ret < 0 && ret != -ENOENT) {
		free_leaf_list(*leafs);
		return ret;
	}

	return 0;
}
/*
 * walk all backrefs for a given extent to find all roots that reference this
 * extent. Walking a backref means finding all extents that reference this
 * extent and in turn walk the backrefs of those, too. Naturally this is a
 * recursive process, but here it is implemented in an iterative fashion: We
 * find all referencing extents for the extent in question and put them on a
 * list. In turn, we find all referencing extents for those, further appending
 * to the list. The way we iterate the list allows adding more elements after
 * the current while iterating. The process stops when we reach the end of the
 * list. Found roots are added to the roots list.
 *
 * returns 0 on success, < 0 on error.
 */
static int btrfs_find_all_roots_safe(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info, u64 bytenr,
				     u64 time_seq, struct ulist **roots)
{
	struct ulist *tmp;
	struct ulist_node *node = NULL;
	struct ulist_iterator uiter;
	int ret;

	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp)
		return -ENOMEM;
	*roots = ulist_alloc(GFP_NOFS);
	if (!*roots) {
		ulist_free(tmp);
		return -ENOMEM;
	}

	ULIST_ITER_INIT(&uiter);
	while (1) {
		ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
					tmp, *roots, NULL, NULL);
		if (ret < 0 && ret != -ENOENT) {
			ulist_free(tmp);
			ulist_free(*roots);
			return ret;
		}
		node = ulist_next(tmp, &uiter);
		if (!node)
			break;
		bytenr = node->val;
		cond_resched();
	}

	ulist_free(tmp);
	return 0;
}
int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
			 struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 time_seq, struct ulist **roots)
{
	int ret;

	if (!trans)
		down_read(&fs_info->commit_root_sem);
	ret = btrfs_find_all_roots_safe(trans, fs_info, bytenr,
					time_seq, roots);
	if (!trans)
		up_read(&fs_info->commit_root_sem);
	return ret;
}
/*
 * btrfs_check_shared - tell us whether an extent is shared
 *
 * btrfs_check_shared uses the backref walking code but will short
 * circuit as soon as it finds a root or inode that doesn't match the
 * one passed in. This provides a significant performance benefit for
 * callers (such as fiemap) which want to know whether the extent is
 * shared but do not need a ref count.
 *
 * This attempts to attach to the running transaction in order to account for
 * delayed refs, but continues on even when no running transaction exists.
 *
 * Return: 0 if extent is not shared, 1 if it is shared, < 0 on error.
 */
int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	struct ulist *tmp = NULL;
	struct ulist *roots = NULL;
	struct ulist_iterator uiter;
	struct ulist_node *node;
	struct seq_list elem = SEQ_LIST_INIT(elem);
	int ret = 0;
	struct share_check shared = {
		.root_objectid = root->objectid,
		.inum = inum,
		.share_count = 0,
	};

	tmp = ulist_alloc(GFP_NOFS);
	roots = ulist_alloc(GFP_NOFS);
	if (!tmp || !roots) {
		ret = -ENOMEM;
		goto out;
	}

	trans = btrfs_attach_transaction(root);
	if (IS_ERR(trans)) {
		if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) {
			ret = PTR_ERR(trans);
			goto out;
		}
		trans = NULL;
		down_read(&fs_info->commit_root_sem);
	} else {
		btrfs_get_tree_mod_seq(fs_info, &elem);
	}

	ULIST_ITER_INIT(&uiter);
	while (1) {
		ret = find_parent_nodes(trans, fs_info, bytenr, elem.seq, tmp,
					roots, NULL, &shared);
		if (ret == BACKREF_FOUND_SHARED) {
			/* this is the only condition under which we return 1 */
			ret = 1;
			break;
		}
		if (ret < 0 && ret != -ENOENT)
			break;
		ret = 0;
		node = ulist_next(tmp, &uiter);
		if (!node)
			break;
		bytenr = node->val;
		shared.share_count = 0;
		cond_resched();
	}

	if (trans) {
		btrfs_put_tree_mod_seq(fs_info, &elem);
		btrfs_end_transaction(trans);
	} else {
		up_read(&fs_info->commit_root_sem);
	}
out:
	ulist_free(tmp);
	ulist_free(roots);
	return ret;
}
int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
			  u64 start_off, struct btrfs_path *path,
			  struct btrfs_inode_extref **ret_extref,
			  u64 *found_off)
{
	int ret, slot;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_inode_extref *extref;
	const struct extent_buffer *leaf;
	unsigned long ptr;

	key.objectid = inode_objectid;
	key.type = BTRFS_INODE_EXTREF_KEY;
	key.offset = start_off;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			/*
			 * If the item at offset is not found,
			 * btrfs_search_slot will point us to the slot
			 * where it should be inserted. In our case
			 * that will be the slot directly before the
			 * next INODE_REF_KEY_V2 item. In the case
			 * that we're pointing to the last slot in a
			 * leaf, we must move one leaf over.
			 */
			ret = btrfs_next_leaf(root, path);
			if (ret) {
				if (ret >= 1)
					ret = -ENOENT;
				break;
			}
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		/*
		 * Check that we're still looking at an extended ref key for
		 * this particular objectid. If we have different
		 * objectid or type then there are no more to be found
		 * in the tree and we can exit.
		 */
		ret = -ENOENT;
		if (found_key.objectid != inode_objectid)
			break;
		if (found_key.type != BTRFS_INODE_EXTREF_KEY)
			break;

		ret = 0;
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		extref = (struct btrfs_inode_extref *)ptr;
		*ret_extref = extref;
		if (found_off)
			*found_off = found_key.offset;
		break;
	}

	return ret;
}
/*
 * this iterates to turn a name (from iref/extref) into a full filesystem path.
 * Elements of the path are separated by '/' and the path is guaranteed to be
 * 0-terminated. the path is only given within the current file system.
 * Therefore, it never starts with a '/'. the caller is responsible to provide
 * "size" bytes in "dest". the dest buffer will be filled backwards. finally,
 * the start point of the resulting string is returned. this pointer is within
 * dest, normally.
 * in case the path buffer would overflow, the pointer is decremented further
 * as if output was written to the buffer, though no more output is actually
 * generated. that way, the caller can determine how much space would be
 * required for the path to fit into the buffer. in that case, the returned
 * value will be smaller than dest. callers must check this!
 */
char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
			u32 name_len, unsigned long name_off,
			struct extent_buffer *eb_in, u64 parent,
			char *dest, u32 size)
{
	int slot;
	u64 next_inum;
	int ret;
	s64 bytes_left = ((s64)size) - 1;
	struct extent_buffer *eb = eb_in;
	struct btrfs_key found_key;
	int leave_spinning = path->leave_spinning;
	struct btrfs_inode_ref *iref;

	if (bytes_left >= 0)
		dest[bytes_left] = '\0';

	path->leave_spinning = 1;
	while (1) {
		bytes_left -= name_len;
		if (bytes_left >= 0)
			read_extent_buffer(eb, dest + bytes_left,
					   name_off, name_len);
		if (eb != eb_in) {
			if (!path->skip_locking)
				btrfs_tree_read_unlock_blocking(eb);
			free_extent_buffer(eb);
		}
		ret = btrfs_find_item(fs_root, path, parent, 0,
				BTRFS_INODE_REF_KEY, &found_key);
		if (ret > 0)
			ret = -ENOENT;
		if (ret)
			break;

		next_inum = found_key.offset;

		/* regular exit ahead */
		if (parent == next_inum)
			break;

		slot = path->slots[0];
		eb = path->nodes[0];
		/* make sure we can use eb after releasing the path */
		if (eb != eb_in) {
			if (!path->skip_locking)
				btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
			path->nodes[0] = NULL;
			path->locks[0] = 0;
		}
		btrfs_release_path(path);
		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);

		name_len = btrfs_inode_ref_name_len(eb, iref);
		name_off = (unsigned long)(iref + 1);

		parent = next_inum;
		--bytes_left;
		if (bytes_left >= 0)
			dest[bytes_left] = '/';
	}

	btrfs_release_path(path);
	path->leave_spinning = leave_spinning;

	if (ret)
		return ERR_PTR(ret);

	return dest + bytes_left;
}
/*
 * this makes the path point to (logical EXTENT_ITEM *)
 * returns BTRFS_EXTENT_FLAG_DATA for data, BTRFS_EXTENT_FLAG_TREE_BLOCK for
 * tree blocks and <0 on error.
 */
int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
			struct btrfs_path *path, struct btrfs_key *found_key,
			u64 *flags_ret)
{
	int ret;
	u64 flags;
	u64 size = 0;
	u32 item_size;
	const struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;

	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;
	key.objectid = logical;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	ret = btrfs_previous_extent_item(fs_info->extent_root, path, 0);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		return ret;
	}
	btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
	if (found_key->type == BTRFS_METADATA_ITEM_KEY)
		size = fs_info->nodesize;
	else if (found_key->type == BTRFS_EXTENT_ITEM_KEY)
		size = found_key->offset;

	if (found_key->objectid > logical ||
	    found_key->objectid + size <= logical) {
		btrfs_debug(fs_info,
			"logical %llu is not within any extent", logical);
		return -ENOENT;
	}

	eb = path->nodes[0];
	item_size = btrfs_item_size_nr(eb, path->slots[0]);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(eb, ei);

	btrfs_debug(fs_info,
		"logical %llu is at position %llu within the extent (%llu EXTENT_ITEM %llu) flags %#llx size %u",
		 logical, logical - found_key->objectid, found_key->objectid,
		 found_key->offset, flags, item_size);

	WARN_ON(!flags_ret);
	if (flags_ret) {
		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
			*flags_ret = BTRFS_EXTENT_FLAG_TREE_BLOCK;
		else if (flags & BTRFS_EXTENT_FLAG_DATA)
			*flags_ret = BTRFS_EXTENT_FLAG_DATA;
		else
			BUG_ON(1);
		return 0;
	}

	return -EIO;
}
/*
 * helper function to iterate extent inline refs. ptr must point to a 0 value
 * for the first call and may be modified. it is used to track state.
 * if more refs exist, 0 is returned and the next call to
 * get_extent_inline_ref must pass the modified ptr parameter to get the
 * next ref. after the last ref was processed, 1 is returned.
 * returns <0 on error
 */
static int get_extent_inline_ref(unsigned long *ptr,
				 const struct extent_buffer *eb,
				 const struct btrfs_key *key,
				 const struct btrfs_extent_item *ei,
				 u32 item_size,
				 struct btrfs_extent_inline_ref **out_eiref,
				 int *out_type)
{
	unsigned long end;
	u64 flags;
	struct btrfs_tree_block_info *info;

	if (!*ptr) {
		/* first call */
		flags = btrfs_extent_flags(eb, ei);
		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
			if (key->type == BTRFS_METADATA_ITEM_KEY) {
				/* a skinny metadata extent */
				*out_eiref =
				     (struct btrfs_extent_inline_ref *)(ei + 1);
			} else {
				WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY);
				info = (struct btrfs_tree_block_info *)(ei + 1);
				*out_eiref =
				   (struct btrfs_extent_inline_ref *)(info + 1);
			}
		} else {
			*out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
		}
		*ptr = (unsigned long)*out_eiref;
		if ((unsigned long)(*ptr) >= (unsigned long)ei + item_size)
			return -ENOENT;
	}

	end = (unsigned long)ei + item_size;
	*out_eiref = (struct btrfs_extent_inline_ref *)(*ptr);
	*out_type = btrfs_get_extent_inline_ref_type(eb, *out_eiref,
						     BTRFS_REF_TYPE_ANY);
	if (*out_type == BTRFS_REF_TYPE_INVALID)
		return -EINVAL;

	*ptr += btrfs_extent_inline_ref_size(*out_type);
	WARN_ON(*ptr > end);
	if (*ptr == end)
		return 1; /* last */

	return 0;
}
/*
 * reads the tree block backref for an extent. tree level and root are returned
 * through out_level and out_root. ptr must point to a 0 value for the first
 * call and may be modified (see get_extent_inline_ref comment).
 * returns 0 if data was provided, 1 if there was no more data to provide or
 * <0 on error.
 */
int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
			    struct btrfs_key *key, struct btrfs_extent_item *ei,
			    u32 item_size, u64 *out_root, u8 *out_level)
{
	int ret;
	int type;
	struct btrfs_extent_inline_ref *eiref;

	if (*ptr == (unsigned long)-1)
		return 1;

	while (1) {
		ret = get_extent_inline_ref(ptr, eb, key, ei, item_size,
					    &eiref, &type);
		if (ret < 0)
			return ret;

		if (type == BTRFS_TREE_BLOCK_REF_KEY ||
		    type == BTRFS_SHARED_BLOCK_REF_KEY)
			break;

		if (ret == 1)
			return 1;
	}

	/* we can treat both ref types equally here */
	*out_root = btrfs_extent_inline_ref_offset(eb, eiref);

	if (key->type == BTRFS_EXTENT_ITEM_KEY) {
		struct btrfs_tree_block_info *info;

		info = (struct btrfs_tree_block_info *)(ei + 1);
		*out_level = btrfs_tree_block_level(eb, info);
	} else {
		ASSERT(key->type == BTRFS_METADATA_ITEM_KEY);
		*out_level = (u8)key->offset;
	}

	if (ret == 1)
		*ptr = (unsigned long)-1;

	return 0;
}
static int iterate_leaf_refs(struct btrfs_fs_info *fs_info,
			     struct extent_inode_elem *inode_list,
			     u64 root, u64 extent_item_objectid,
			     iterate_extent_inodes_t *iterate, void *ctx)
{
	struct extent_inode_elem *eie;
	int ret = 0;

	for (eie = inode_list; eie; eie = eie->next) {
		btrfs_debug(fs_info,
			    "ref for %llu resolved, key (%llu EXTEND_DATA %llu), root %llu",
			    extent_item_objectid, eie->inum,
			    eie->offset, root);
		ret = iterate(eie->inum, eie->offset, root, ctx);
		if (ret) {
			btrfs_debug(fs_info,
				    "stopping iteration for %llu due to ret=%d",
				    extent_item_objectid, ret);
			break;
		}
	}

	return ret;
}
/*
 * calls iterate() for every inode that references the extent identified by
 * the given parameters.
 * when the iterator function returns a non-zero value, iteration stops.
 */
int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
			  u64 extent_item_objectid, u64 extent_item_pos,
			  int search_commit_root,
			  iterate_extent_inodes_t *iterate, void *ctx)
{
	int ret;
	struct btrfs_trans_handle *trans = NULL;
	struct ulist *refs = NULL;
	struct ulist *roots = NULL;
	struct ulist_node *ref_node = NULL;
	struct ulist_node *root_node = NULL;
	struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem);
	struct ulist_iterator ref_uiter;
	struct ulist_iterator root_uiter;

	btrfs_debug(fs_info, "resolving all inodes for extent %llu",
			extent_item_objectid);

	if (!search_commit_root) {
		trans = btrfs_attach_transaction(fs_info->extent_root);
		if (IS_ERR(trans)) {
			if (PTR_ERR(trans) != -ENOENT &&
			    PTR_ERR(trans) != -EROFS)
				return PTR_ERR(trans);
			trans = NULL;
		}
	}

	if (trans)
		btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
	else
		down_read(&fs_info->commit_root_sem);

	ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
				   tree_mod_seq_elem.seq, &refs,
				   &extent_item_pos);
	if (ret)
		goto out;

	ULIST_ITER_INIT(&ref_uiter);
	while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
		ret = btrfs_find_all_roots_safe(trans, fs_info, ref_node->val,
						tree_mod_seq_elem.seq, &roots);
		if (ret)
			break;
		ULIST_ITER_INIT(&root_uiter);
		while (!ret && (root_node = ulist_next(roots, &root_uiter))) {
			btrfs_debug(fs_info,
				    "root %llu references leaf %llu, data list %#llx",
				    root_node->val, ref_node->val,
				    ref_node->aux);
			ret = iterate_leaf_refs(fs_info,
						(struct extent_inode_elem *)
						(uintptr_t)ref_node->aux,
						root_node->val,
						extent_item_objectid,
						iterate, ctx);
		}
		ulist_free(roots);
	}

	free_leaf_list(refs);
out:
	if (trans) {
		btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
		btrfs_end_transaction(trans);
	} else {
		up_read(&fs_info->commit_root_sem);
	}

	return ret;
}
int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
				struct btrfs_path *path,
				iterate_extent_inodes_t *iterate, void *ctx)
{
	int ret;
	u64 extent_item_pos;
	u64 flags = 0;
	struct btrfs_key found_key;
	int search_commit_root = path->search_commit_root;

	ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
	btrfs_release_path(path);
	if (ret < 0)
		return ret;
	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		return -EINVAL;

	extent_item_pos = logical - found_key.objectid;
	ret = iterate_extent_inodes(fs_info, found_key.objectid,
					extent_item_pos, search_commit_root,
					iterate, ctx);

	return ret;
}
typedef int (iterate_irefs_t)(u64 parent, u32 name_len, unsigned long name_off,
			      struct extent_buffer *eb, void *ctx);
static int iterate_inode_refs(u64 inum, struct btrfs_root *fs_root,
			      struct btrfs_path *path,
			      iterate_irefs_t *iterate, void *ctx)
{
	int ret = 0;
	int slot;
	u32 cur;
	u32 len;
	u32 name_len;
	u64 parent = 0;
	int found = 0;
	struct extent_buffer *eb;
	struct btrfs_item *item;
	struct btrfs_inode_ref *iref;
	struct btrfs_key found_key;

	while (!ret) {
		ret = btrfs_find_item(fs_root, path, inum,
				parent ? parent + 1 : 0, BTRFS_INODE_REF_KEY,
				&found_key);

		if (ret < 0)
			break;
		if (ret) {
			ret = found ? 0 : -ENOENT;
			break;
		}
		++found;

		parent = found_key.offset;
		slot = path->slots[0];
		eb = btrfs_clone_extent_buffer(path->nodes[0]);
		if (!eb) {
			ret = -ENOMEM;
			break;
		}
		extent_buffer_get(eb);
		btrfs_tree_read_lock(eb);
		btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
		btrfs_release_path(path);

		item = btrfs_item_nr(slot);
		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);

		for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) {
			name_len = btrfs_inode_ref_name_len(eb, iref);
			/* path must be released before calling iterate()! */
			btrfs_debug(fs_root->fs_info,
				"following ref at offset %u for inode %llu in tree %llu",
				cur, found_key.objectid, fs_root->objectid);
			ret = iterate(parent, name_len,
				      (unsigned long)(iref + 1), eb, ctx);
			if (ret)
				break;
			len = sizeof(*iref) + name_len;
			iref = (struct btrfs_inode_ref *)((char *)iref + len);
		}
		btrfs_tree_read_unlock_blocking(eb);
		free_extent_buffer(eb);
	}

	btrfs_release_path(path);

	return ret;
}
static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
				 struct btrfs_path *path,
				 iterate_irefs_t *iterate, void *ctx)
{
	int ret;
	int slot;
	u64 offset = 0;
	u64 parent;
	int found = 0;
	struct extent_buffer *eb;
	struct btrfs_inode_extref *extref;
	u32 item_size;
	u32 cur_offset;
	unsigned long ptr;

	while (1) {
		ret = btrfs_find_one_extref(fs_root, inum, offset, path, &extref,
					    &offset);
		if (ret < 0)
			break;
		if (ret) {
			ret = found ? 0 : -ENOENT;
			break;
		}
		++found;

		slot = path->slots[0];
		eb = btrfs_clone_extent_buffer(path->nodes[0]);
		if (!eb) {
			ret = -ENOMEM;
			break;
		}
		extent_buffer_get(eb);

		btrfs_tree_read_lock(eb);
		btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
		btrfs_release_path(path);

		item_size = btrfs_item_size_nr(eb, slot);
		ptr = btrfs_item_ptr_offset(eb, slot);
		cur_offset = 0;

		while (cur_offset < item_size) {
			u32 name_len;

			extref = (struct btrfs_inode_extref *)(ptr + cur_offset);
			parent = btrfs_inode_extref_parent(eb, extref);
			name_len = btrfs_inode_extref_name_len(eb, extref);
			ret = iterate(parent, name_len,
				      (unsigned long)&extref->name, eb, ctx);
			if (ret)
				break;

			cur_offset += btrfs_inode_extref_name_len(eb, extref);
			cur_offset += sizeof(*extref);
		}
		btrfs_tree_read_unlock_blocking(eb);
		free_extent_buffer(eb);

		offset++;
	}

	btrfs_release_path(path);

	return ret;
}
static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
			 struct btrfs_path *path, iterate_irefs_t *iterate,
			 void *ctx)
{
	int ret;
	int found_refs = 0;

	ret = iterate_inode_refs(inum, fs_root, path, iterate, ctx);
	if (!ret)
		++found_refs;
	else if (ret != -ENOENT)
		return ret;

	ret = iterate_inode_extrefs(inum, fs_root, path, iterate, ctx);
	if (ret == -ENOENT && found_refs)
		return 0;

	return ret;
}
/*
 * returns 0 if the path could be dumped (probably truncated)
 * returns <0 in case of an error
 */
static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
			 struct extent_buffer *eb, void *ctx)
{
	struct inode_fs_paths *ipath = ctx;
	char *fspath;
	char *fspath_min;
	int i = ipath->fspath->elem_cnt;
	const int s_ptr = sizeof(char *);
	u32 bytes_left;

	bytes_left = ipath->fspath->bytes_left > s_ptr ?
					ipath->fspath->bytes_left - s_ptr : 0;

	fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
	fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len,
				   name_off, eb, inum, fspath_min, bytes_left);
	if (IS_ERR(fspath))
		return PTR_ERR(fspath);

	if (fspath > fspath_min) {
		ipath->fspath->val[i] = (u64)(unsigned long)fspath;
		++ipath->fspath->elem_cnt;
		ipath->fspath->bytes_left = fspath - fspath_min;
	} else {
		++ipath->fspath->elem_missed;
		ipath->fspath->bytes_missing += fspath_min - fspath;
		ipath->fspath->bytes_left = 0;
	}

	return 0;
}
/*
 * this dumps all file system paths to the inode into the ipath struct, provided
 * it has been created large enough. each path is zero-terminated and accessed
 * from ipath->fspath->val[i].
 * when it returns, there are ipath->fspath->elem_cnt number of paths available
 * in ipath->fspath->val[]. when the allocated space wasn't sufficient, the
 * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise,
 * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
 * have been needed to return all paths.
 */
int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
{
	return iterate_irefs(inum, ipath->fs_root, ipath->btrfs_path,
			     inode_to_path, ipath);
}
struct btrfs_data_container *init_data_container(u32 total_bytes)
{
	struct btrfs_data_container *data;
	size_t alloc_bytes;

	alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
	data = kvmalloc(alloc_bytes, GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (total_bytes >= sizeof(*data)) {
		data->bytes_left = total_bytes - sizeof(*data);
		data->bytes_missing = 0;
	} else {
		data->bytes_missing = sizeof(*data) - total_bytes;
		data->bytes_left = 0;
	}

	data->elem_cnt = 0;
	data->elem_missed = 0;

	return data;
}
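/*
 * Note: btrfs_data_container is the header of a single allocation; the path
 * strings produced by inode_to_path() are written into the space following it
 * and indexed through data->val[], while bytes_left/bytes_missing track how
 * much room remained (or was missing) in that buffer.
 */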
/*
 * allocates space to return multiple file system paths for an inode.
 * total_bytes to allocate are passed, note that space usable for actual path
 * information will be total_bytes - sizeof(struct inode_fs_paths).
 * the returned pointer must be freed with free_ipath() in the end.
 */
struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
					struct btrfs_path *path)
{
	struct inode_fs_paths *ifp;
	struct btrfs_data_container *fspath;

	fspath = init_data_container(total_bytes);
	if (IS_ERR(fspath))
		return (void *)fspath;

	ifp = kmalloc(sizeof(*ifp), GFP_KERNEL);
	if (!ifp)
		return ERR_PTR(-ENOMEM);

	ifp->btrfs_path = path;
	ifp->fspath = fspath;
	ifp->fs_root = fs_root;

	return ifp;
}
void free_ipath(struct inode_fs_paths *ipath)
{
	if (!ipath)
		return;
	kvfree(ipath->fspath);
	kfree(ipath);
}