/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "btrfs_inode.h"
#include "async-thread.h"
#include "free-space-cache.h"
#include "inode-map.h"
/*
 * backref_node, mapping_node and tree_block start with this
 */
struct tree_entry {
	struct rb_node rb_node;
	u64 bytenr;
};
/*
 * present a tree block in the backref cache
 */
struct backref_node {
	struct rb_node rb_node;
	u64 bytenr;

	u64 new_bytenr;
	/* objectid of tree block owner, can be not uptodate */
	u64 owner;
	/* link to pending, changed or detached list */
	struct list_head list;
	/* list of upper level blocks that reference this block */
	struct list_head upper;
	/* list of child blocks in the cache */
	struct list_head lower;
	/* NULL if this node is not tree root */
	struct btrfs_root *root;
	/* extent buffer got by COWing the block */
	struct extent_buffer *eb;
	/* level of tree block */
	unsigned int level:8;
	/* is the block in a non-reference counted tree */
	unsigned int cowonly:1;
	/* 1 if no child node in the cache */
	unsigned int lowest:1;
	/* is the extent buffer locked */
	unsigned int locked:1;
	/* has the block been processed */
	unsigned int processed:1;
	/* have backrefs of this block been checked */
	unsigned int checked:1;
	/*
	 * 1 if corresponding block has been cowed but some upper
	 * level block pointers may not point to the new location
	 */
	unsigned int pending:1;
	/*
	 * 1 if the backref node isn't connected to any other
	 * backref node.
	 */
	unsigned int detached:1;
};
/*
 * present a block pointer in the backref cache
 */
struct backref_edge {
	struct list_head list[2];
	struct backref_node *node[2];
};
#define LOWER	0
#define UPPER	1
#define RELOCATION_RESERVED_NODES	256
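/*
 * An edge connects two backref nodes one level apart.  A sketch of the
 * indexing convention used throughout this file: edge->node[LOWER] is
 * the child block and edge->node[UPPER] its parent; edge->list[LOWER]
 * is linked into the lower node's "upper" list and edge->list[UPPER]
 * into the upper node's "lower" list.
 */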
struct backref_cache {
	/* red black tree of all backref nodes in the cache */
	struct rb_root rb_root;
	/* for passing backref nodes to btrfs_reloc_cow_block */
	struct backref_node *path[BTRFS_MAX_LEVEL];
	/*
	 * list of blocks that have been cowed but some block
	 * pointers in upper level blocks may not reflect the
	 * new location
	 */
	struct list_head pending[BTRFS_MAX_LEVEL];
	/* list of backref nodes with no child node */
	struct list_head leaves;
	/* list of blocks that have been cowed in current transaction */
	struct list_head changed;
	/* list of detached backref nodes. */
	struct list_head detached;

	u64 last_trans;

	int nr_nodes;
	int nr_edges;
};
/*
 * map address of tree root to tree
 */
struct mapping_node {
	struct rb_node rb_node;
	u64 bytenr;
	void *data;
};

struct mapping_tree {
	struct rb_root rb_root;
	spinlock_t lock;
};
/*
 * present a tree block to process
 */
struct tree_block {
	struct rb_node rb_node;
	u64 bytenr;
	struct btrfs_key key;
	unsigned int level:8;
	unsigned int key_ready:1;
};
#define MAX_EXTENTS 128

struct file_extent_cluster {
	u64 start;
	u64 end;
	u64 boundary[MAX_EXTENTS];
	unsigned int nr;
};
struct reloc_control {
	/* block group to relocate */
	struct btrfs_block_group_cache *block_group;
	/* extent tree */
	struct btrfs_root *extent_root;
	/* inode for moving data */
	struct inode *data_inode;

	struct btrfs_block_rsv *block_rsv;

	struct backref_cache backref_cache;

	struct file_extent_cluster cluster;
	/* tree blocks that have been processed */
	struct extent_io_tree processed_blocks;
	/* map start of tree root to corresponding reloc tree */
	struct mapping_tree reloc_root_tree;
	/* list of reloc trees */
	struct list_head reloc_roots;
	/* size of metadata reservation for merging reloc trees */
	u64 merging_rsv_size;
	/* size of relocated tree nodes */
	u64 nodes_relocated;
	/* reserved size for block group relocation */
	u64 reserved_bytes;

	unsigned int stage:8;
	unsigned int create_reloc_tree:1;
	unsigned int merge_reloc_tree:1;
	unsigned int found_file_extent:1;
};
/* stages of data relocation */
#define MOVE_DATA_EXTENTS	0
#define UPDATE_DATA_PTRS	1
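/*
 * Data relocation runs in two passes over a block group: during
 * MOVE_DATA_EXTENTS the data is copied into rc->data_inode, and during
 * UPDATE_DATA_PTRS the file extent items that still point at the old
 * location are rewritten (see replace_file_extents() below).
 */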
static void remove_backref_node(struct backref_cache *cache,
				struct backref_node *node);
static void __mark_block_processed(struct reloc_control *rc,
				   struct backref_node *node);
static void mapping_tree_init(struct mapping_tree *tree)
{
	tree->rb_root = RB_ROOT;
	spin_lock_init(&tree->lock);
}
static void backref_cache_init(struct backref_cache *cache)
{
	int i;
	cache->rb_root = RB_ROOT;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
		INIT_LIST_HEAD(&cache->pending[i]);
	INIT_LIST_HEAD(&cache->changed);
	INIT_LIST_HEAD(&cache->detached);
	INIT_LIST_HEAD(&cache->leaves);
}
static void backref_cache_cleanup(struct backref_cache *cache)
{
	struct backref_node *node;
	int i;

	while (!list_empty(&cache->detached)) {
		node = list_entry(cache->detached.next,
				  struct backref_node, list);
		remove_backref_node(cache, node);
	}

	while (!list_empty(&cache->leaves)) {
		node = list_entry(cache->leaves.next,
				  struct backref_node, lower);
		remove_backref_node(cache, node);
	}

	cache->last_trans = 0;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
		BUG_ON(!list_empty(&cache->pending[i]));
	BUG_ON(!list_empty(&cache->changed));
	BUG_ON(!list_empty(&cache->detached));
	BUG_ON(!RB_EMPTY_ROOT(&cache->rb_root));
	BUG_ON(cache->nr_nodes);
	BUG_ON(cache->nr_edges);
}
static struct backref_node *alloc_backref_node(struct backref_cache *cache)
{
	struct backref_node *node;

	node = kzalloc(sizeof(*node), GFP_NOFS);
	if (node) {
		INIT_LIST_HEAD(&node->list);
		INIT_LIST_HEAD(&node->upper);
		INIT_LIST_HEAD(&node->lower);
		RB_CLEAR_NODE(&node->rb_node);
		cache->nr_nodes++;
	}
	return node;
}
static void free_backref_node(struct backref_cache *cache,
			      struct backref_node *node)
{
	if (node) {
		cache->nr_nodes--;
		kfree(node);
	}
}
static struct backref_edge *alloc_backref_edge(struct backref_cache *cache)
{
	struct backref_edge *edge;

	edge = kzalloc(sizeof(*edge), GFP_NOFS);
	if (edge)
		cache->nr_edges++;
	return edge;
}
static void free_backref_edge(struct backref_cache *cache,
			      struct backref_edge *edge)
{
	if (edge) {
		cache->nr_edges--;
		kfree(edge);
	}
}
static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct tree_entry *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct tree_entry, rb_node);

		if (bytenr < entry->bytenr)
			p = &(*p)->rb_left;
		else if (bytenr > entry->bytenr)
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}
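/*
 * Note the return convention: tree_insert() returns NULL on success and
 * the conflicting rb_node when an entry with the same bytenr already
 * exists; callers below treat a non-NULL return as cache corruption and
 * call backref_tree_panic().
 */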
static struct rb_node *tree_search(struct rb_root *root, u64 bytenr)
{
	struct rb_node *n = root->rb_node;
	struct tree_entry *entry;

	while (n) {
		entry = rb_entry(n, struct tree_entry, rb_node);

		if (bytenr < entry->bytenr)
			n = n->rb_left;
		else if (bytenr > entry->bytenr)
			n = n->rb_right;
		else
			return n;
	}
	return NULL;
}
static void backref_tree_panic(struct rb_node *rb_node, int errno, u64 bytenr)
{
	struct btrfs_fs_info *fs_info = NULL;
	struct backref_node *bnode = rb_entry(rb_node, struct backref_node,
					      rb_node);
	if (bnode->root)
		fs_info = bnode->root->fs_info;
	btrfs_panic(fs_info, errno, "Inconsistency in backref cache "
		    "found at offset %llu\n", bytenr);
}
/*
 * walk up backref nodes until we reach the node that presents a tree root
 */
static struct backref_node *walk_up_backref(struct backref_node *node,
					    struct backref_edge *edges[],
					    int *index)
{
	struct backref_edge *edge;
	int idx = *index;

	while (!list_empty(&node->upper)) {
		edge = list_entry(node->upper.next,
				  struct backref_edge, list[LOWER]);
		edges[idx++] = edge;
		node = edge->node[UPPER];
	}
	BUG_ON(node->detached);
	*index = idx;
	return node;
}
/*
 * walk down backref nodes to find start of next reference path
 */
static struct backref_node *walk_down_backref(struct backref_edge *edges[],
					      int *index)
{
	struct backref_edge *edge;
	struct backref_node *lower;
	int idx = *index;

	while (idx > 0) {
		edge = edges[idx - 1];
		lower = edge->node[LOWER];
		if (list_is_last(&edge->list[LOWER], &lower->upper)) {
			idx--;
			continue;
		}
		edge = list_entry(edge->list[LOWER].next,
				  struct backref_edge, list[LOWER]);
		edges[idx - 1] = edge;
		*index = idx;
		return edge->node[UPPER];
	}
	*index = 0;
	return NULL;
}
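/*
 * walk_up_backref()/walk_down_backref() together implement an iterative
 * depth-first walk over the backref graph: edges[] acts as an explicit
 * stack of the edges taken on the way up, and *index is the stack depth.
 * A minimal usage sketch (this is the pattern used by select_reloc_root()
 * and calcu_metadata_size() below):
 *
 *	struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
 *	int index = 0;
 *	next = walk_up_backref(node, edges, &index);	// climb to a root
 *	...
 *	next = walk_down_backref(edges, &index);	// next path, if any
 */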
static void unlock_node_buffer(struct backref_node *node)
{
	if (node->locked) {
		btrfs_tree_unlock(node->eb);
		node->locked = 0;
	}
}
static void drop_node_buffer(struct backref_node *node)
{
	if (node->eb) {
		unlock_node_buffer(node);
		free_extent_buffer(node->eb);
		node->eb = NULL;
	}
}
static void drop_backref_node(struct backref_cache *tree,
			      struct backref_node *node)
{
	BUG_ON(!list_empty(&node->upper));

	drop_node_buffer(node);
	list_del(&node->list);
	list_del(&node->lower);
	if (!RB_EMPTY_NODE(&node->rb_node))
		rb_erase(&node->rb_node, &tree->rb_root);
	free_backref_node(tree, node);
}
/*
 * remove a backref node from the backref cache
 */
static void remove_backref_node(struct backref_cache *cache,
				struct backref_node *node)
{
	struct backref_node *upper;
	struct backref_edge *edge;

	if (!node)
		return;

	BUG_ON(!node->lowest && !node->detached);
	while (!list_empty(&node->upper)) {
		edge = list_entry(node->upper.next, struct backref_edge,
				  list[LOWER]);
		upper = edge->node[UPPER];
		list_del(&edge->list[LOWER]);
		list_del(&edge->list[UPPER]);
		free_backref_edge(cache, edge);

		if (RB_EMPTY_NODE(&upper->rb_node)) {
			BUG_ON(!list_empty(&node->upper));
			drop_backref_node(cache, node);
			node = upper;
			node->lowest = 1;
			continue;
		}
		/*
		 * add the node to leaf node list if no other
		 * child block cached.
		 */
		if (list_empty(&upper->lower)) {
			list_add_tail(&upper->lower, &cache->leaves);
			upper->lowest = 1;
		}
	}

	drop_backref_node(cache, node);
}
static void update_backref_node(struct backref_cache *cache,
				struct backref_node *node, u64 bytenr)
{
	struct rb_node *rb_node;
	rb_erase(&node->rb_node, &cache->rb_root);
	node->bytenr = bytenr;
	rb_node = tree_insert(&cache->rb_root, node->bytenr, &node->rb_node);
	if (rb_node)
		backref_tree_panic(rb_node, -EEXIST, bytenr);
}
/*
 * update backref cache after a transaction commit
 */
static int update_backref_cache(struct btrfs_trans_handle *trans,
				struct backref_cache *cache)
{
	struct backref_node *node;
	int level = 0;

	if (cache->last_trans == 0) {
		cache->last_trans = trans->transid;
		return 0;
	}

	if (cache->last_trans == trans->transid)
		return 0;

	/*
	 * detached nodes are used to avoid unnecessary backref
	 * lookup. transaction commit changes the extent tree,
	 * so the detached nodes are no longer useful.
	 */
	while (!list_empty(&cache->detached)) {
		node = list_entry(cache->detached.next,
				  struct backref_node, list);
		remove_backref_node(cache, node);
	}

	while (!list_empty(&cache->changed)) {
		node = list_entry(cache->changed.next,
				  struct backref_node, list);
		list_del_init(&node->list);
		BUG_ON(node->pending);
		update_backref_node(cache, node, node->new_bytenr);
	}

	/*
	 * some nodes can be left in the pending list if there were
	 * errors during processing the pending nodes.
	 */
	for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
		list_for_each_entry(node, &cache->pending[level], list) {
			BUG_ON(!node->pending);
			if (node->bytenr == node->new_bytenr)
				continue;
			update_backref_node(cache, node, node->new_bytenr);
		}
	}

	cache->last_trans = 0;
	return 1;
}
static int should_ignore_root(struct btrfs_root *root)
{
	struct btrfs_root *reloc_root;

	if (!root->ref_cows)
		return 0;

	reloc_root = root->reloc_root;
	if (!reloc_root)
		return 0;

	if (btrfs_root_last_snapshot(&reloc_root->root_item) ==
	    root->fs_info->running_transaction->transid - 1)
		return 0;
	/*
	 * if there is a reloc tree and it was created in the previous
	 * transaction, backref lookup can find the reloc tree,
	 * so the backref node for the fs tree root is useless for
	 * relocation.
	 */
	return 1;
}
/*
 * find reloc tree by address of tree root
 */
static struct btrfs_root *find_reloc_root(struct reloc_control *rc,
					  u64 bytenr)
{
	struct rb_node *rb_node;
	struct mapping_node *node;
	struct btrfs_root *root = NULL;

	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = tree_search(&rc->reloc_root_tree.rb_root, bytenr);
	if (rb_node) {
		node = rb_entry(rb_node, struct mapping_node, rb_node);
		root = (struct btrfs_root *)node->data;
	}
	spin_unlock(&rc->reloc_root_tree.lock);
	return root;
}
static int is_cowonly_root(u64 root_objectid)
{
	if (root_objectid == BTRFS_ROOT_TREE_OBJECTID ||
	    root_objectid == BTRFS_EXTENT_TREE_OBJECTID ||
	    root_objectid == BTRFS_CHUNK_TREE_OBJECTID ||
	    root_objectid == BTRFS_DEV_TREE_OBJECTID ||
	    root_objectid == BTRFS_TREE_LOG_OBJECTID ||
	    root_objectid == BTRFS_CSUM_TREE_OBJECTID ||
	    root_objectid == BTRFS_UUID_TREE_OBJECTID ||
	    root_objectid == BTRFS_QUOTA_TREE_OBJECTID)
		return 1;
	return 0;
}
static struct btrfs_root *read_fs_root(struct btrfs_fs_info *fs_info,
				       u64 root_objectid)
{
	struct btrfs_key key;

	key.objectid = root_objectid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	if (is_cowonly_root(root_objectid))
		key.offset = 0;
	else
		key.offset = (u64)-1;

	return btrfs_get_fs_root(fs_info, &key, false);
}
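/*
 * The key construction above encodes a lookup convention: COW-only
 * roots have exactly one root item at offset 0, while reference
 * counted (fs/subvolume) roots are looked up with offset (u64)-1 to
 * find the latest root item.
 */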
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static noinline_for_stack
struct btrfs_root *find_tree_root(struct reloc_control *rc,
				  struct extent_buffer *leaf,
				  struct btrfs_extent_ref_v0 *ref0)
{
	struct btrfs_root *root;
	u64 root_objectid = btrfs_ref_root_v0(leaf, ref0);
	u64 generation = btrfs_ref_generation_v0(leaf, ref0);

	BUG_ON(root_objectid == BTRFS_TREE_RELOC_OBJECTID);

	root = read_fs_root(rc->extent_root->fs_info, root_objectid);
	BUG_ON(IS_ERR(root));

	if (root->ref_cows &&
	    generation != btrfs_root_generation(&root->root_item))
		return NULL;

	return root;
}
#endif
static noinline_for_stack
int find_inline_backref(struct extent_buffer *leaf, int slot,
			unsigned long *ptr, unsigned long *end)
{
	struct btrfs_key key;
	struct btrfs_extent_item *ei;
	struct btrfs_tree_block_info *bi;
	u32 item_size;

	btrfs_item_key_to_cpu(leaf, &key, slot);

	item_size = btrfs_item_size_nr(leaf, slot);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
		return 1;
	}
#endif
	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
	WARN_ON(!(btrfs_extent_flags(leaf, ei) &
		  BTRFS_EXTENT_FLAG_TREE_BLOCK));

	if (key.type == BTRFS_EXTENT_ITEM_KEY &&
	    item_size <= sizeof(*ei) + sizeof(*bi)) {
		WARN_ON(item_size < sizeof(*ei) + sizeof(*bi));
		return 1;
	}
	if (key.type == BTRFS_METADATA_ITEM_KEY &&
	    item_size <= sizeof(*ei)) {
		WARN_ON(item_size < sizeof(*ei));
		return 1;
	}

	if (key.type == BTRFS_EXTENT_ITEM_KEY) {
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		*ptr = (unsigned long)(bi + 1);
	} else {
		*ptr = (unsigned long)(ei + 1);
	}
	*end = (unsigned long)ei + item_size;
	return 0;
}
/*
 * build backref tree for a given tree block. root of the backref tree
 * corresponds to the tree block, leaves of the backref tree correspond
 * to roots of b-trees that reference the tree block.
 *
 * the basic idea of this function is to check backrefs of a given block
 * to find upper level blocks that reference the block, and then check
 * backrefs of these upper level blocks recursively. the recursion stops
 * when the tree root is reached or backrefs for the block are cached.
 *
 * NOTE: if we find backrefs for a block are cached, we know backrefs
 * for all upper level blocks that directly/indirectly reference the
 * block are also cached.
 */
static noinline_for_stack
struct backref_node *build_backref_tree(struct reloc_control *rc,
					struct btrfs_key *node_key,
					int level, u64 bytenr)
{
	struct backref_cache *cache = &rc->backref_cache;
	struct btrfs_path *path1;
	struct btrfs_path *path2;
	struct extent_buffer *eb;
	struct btrfs_root *root;
	struct backref_node *cur;
	struct backref_node *upper;
	struct backref_node *lower;
	struct backref_node *node = NULL;
	struct backref_node *exist = NULL;
	struct backref_edge *edge;
	struct rb_node *rb_node;
	struct btrfs_key key;
	unsigned long end;
	unsigned long ptr;
	LIST_HEAD(list);
	LIST_HEAD(useless);
	int cowonly;
	int ret;
	int err = 0;
	bool need_check = true;

	path1 = btrfs_alloc_path();
	path2 = btrfs_alloc_path();
	if (!path1 || !path2) {
		err = -ENOMEM;
		goto out;
	}

	node = alloc_backref_node(cache);
	if (!node) {
		err = -ENOMEM;
		goto out;
	}

	node->bytenr = bytenr;
	node->level = level;
	node->lowest = 1;
	cur = node;
again:
	end = 0;
	ptr = 0;
	key.objectid = cur->bytenr;
	key.type = BTRFS_METADATA_ITEM_KEY;
	key.offset = (u64)-1;

	path1->search_commit_root = 1;
	path1->skip_locking = 1;
	ret = btrfs_search_slot(NULL, rc->extent_root, &key, path1,
				0, 0);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	BUG_ON(!ret || !path1->slots[0]);

	path1->slots[0]--;

	WARN_ON(cur->checked);
	if (!list_empty(&cur->upper)) {
		/*
		 * the backref was added previously when processing
		 * backref of type BTRFS_TREE_BLOCK_REF_KEY
		 */
		BUG_ON(!list_is_singular(&cur->upper));
		edge = list_entry(cur->upper.next, struct backref_edge,
				  list[LOWER]);
		BUG_ON(!list_empty(&edge->list[UPPER]));
		exist = edge->node[UPPER];
		/*
		 * add the upper level block to pending list if we need
		 * to check its backrefs
		 */
		if (!exist->checked)
			list_add_tail(&edge->list[UPPER], &list);
	} else {
		exist = NULL;
	}

	while (1) {
		cond_resched();
		eb = path1->nodes[0];

		if (ptr >= end) {
			if (path1->slots[0] >= btrfs_header_nritems(eb)) {
				ret = btrfs_next_leaf(rc->extent_root, path1);
				if (ret < 0) {
					err = ret;
					goto out;
				}
				if (ret > 0)
					break;
				eb = path1->nodes[0];
			}

			btrfs_item_key_to_cpu(eb, &key, path1->slots[0]);
			if (key.objectid != cur->bytenr) {
				WARN_ON(exist);
				break;
			}

			if (key.type == BTRFS_EXTENT_ITEM_KEY ||
			    key.type == BTRFS_METADATA_ITEM_KEY) {
				ret = find_inline_backref(eb,
							  path1->slots[0],
							  &ptr, &end);
				if (ret)
					goto next;
			}
		}

		if (ptr < end) {
			/* update key for inline back ref */
			struct btrfs_extent_inline_ref *iref;
			iref = (struct btrfs_extent_inline_ref *)ptr;
			key.type = btrfs_extent_inline_ref_type(eb, iref);
			key.offset = btrfs_extent_inline_ref_offset(eb, iref);
			WARN_ON(key.type != BTRFS_TREE_BLOCK_REF_KEY &&
				key.type != BTRFS_SHARED_BLOCK_REF_KEY);
		}

		if (exist &&
		    ((key.type == BTRFS_TREE_BLOCK_REF_KEY &&
		      exist->owner == key.offset) ||
		     (key.type == BTRFS_SHARED_BLOCK_REF_KEY &&
		      exist->bytenr == key.offset))) {
			exist = NULL;
			goto next;
		}

#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		if (key.type == BTRFS_SHARED_BLOCK_REF_KEY ||
		    key.type == BTRFS_EXTENT_REF_V0_KEY) {
			if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
				struct btrfs_extent_ref_v0 *ref0;
				ref0 = btrfs_item_ptr(eb, path1->slots[0],
						struct btrfs_extent_ref_v0);
				if (key.objectid == key.offset) {
					root = find_tree_root(rc, eb, ref0);
					if (root && !should_ignore_root(root))
						cur->root = root;
					else
						list_add(&cur->list, &useless);
					break;
				}
				if (is_cowonly_root(btrfs_ref_root_v0(eb,
								      ref0)))
					cur->cowonly = 1;
			}
#else
		BUG_ON(key.type == BTRFS_EXTENT_REF_V0_KEY);
		if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
#endif
			if (key.objectid == key.offset) {
				/*
				 * only root blocks of reloc trees use
				 * backref of this type.
				 */
				root = find_reloc_root(rc, cur->bytenr);
				BUG_ON(!root);
				cur->root = root;
				break;
			}

			edge = alloc_backref_edge(cache);
			if (!edge) {
				err = -ENOMEM;
				goto out;
			}
			rb_node = tree_search(&cache->rb_root, key.offset);
			if (!rb_node) {
				upper = alloc_backref_node(cache);
				if (!upper) {
					free_backref_edge(cache, edge);
					err = -ENOMEM;
					goto out;
				}
				upper->bytenr = key.offset;
				upper->level = cur->level + 1;
				/*
				 * backrefs for the upper level block aren't
				 * cached, add the block to pending list
				 */
				list_add_tail(&edge->list[UPPER], &list);
			} else {
				upper = rb_entry(rb_node, struct backref_node,
						 rb_node);
				BUG_ON(!upper->checked);
				INIT_LIST_HEAD(&edge->list[UPPER]);
			}
			list_add_tail(&edge->list[LOWER], &cur->upper);
			edge->node[LOWER] = cur;
			edge->node[UPPER] = upper;

			goto next;
		} else if (key.type != BTRFS_TREE_BLOCK_REF_KEY) {
			goto next;
		}

		/* key.type == BTRFS_TREE_BLOCK_REF_KEY */
		root = read_fs_root(rc->extent_root->fs_info, key.offset);
		if (IS_ERR(root)) {
			err = PTR_ERR(root);
			goto out;
		}

		if (!root->ref_cows)
			cur->cowonly = 1;

		if (btrfs_root_level(&root->root_item) == cur->level) {
			/* tree root */
			BUG_ON(btrfs_root_bytenr(&root->root_item) !=
			       cur->bytenr);
			if (should_ignore_root(root))
				list_add(&cur->list, &useless);
			else
				cur->root = root;
			break;
		}

		level = cur->level + 1;

		/*
		 * searching the tree to find upper level blocks
		 * that reference the block.
		 */
		path2->search_commit_root = 1;
		path2->skip_locking = 1;
		path2->lowest_level = level;
		ret = btrfs_search_slot(NULL, root, node_key, path2, 0, 0);
		path2->lowest_level = 0;
		if (ret < 0) {
			err = ret;
			goto out;
		}
		if (ret > 0 && path2->slots[level] > 0)
			path2->slots[level]--;

		eb = path2->nodes[level];
		WARN_ON(btrfs_node_blockptr(eb, path2->slots[level]) !=
			cur->bytenr);

		lower = cur;
		need_check = true;
		for (; level < BTRFS_MAX_LEVEL; level++) {
			if (!path2->nodes[level]) {
				BUG_ON(btrfs_root_bytenr(&root->root_item) !=
				       lower->bytenr);
				if (should_ignore_root(root))
					list_add(&lower->list, &useless);
				else
					lower->root = root;
				break;
			}

			edge = alloc_backref_edge(cache);
			if (!edge) {
				err = -ENOMEM;
				goto out;
			}

			eb = path2->nodes[level];
			rb_node = tree_search(&cache->rb_root, eb->start);
			if (!rb_node) {
				upper = alloc_backref_node(cache);
				if (!upper) {
					free_backref_edge(cache, edge);
					err = -ENOMEM;
					goto out;
				}
				upper->bytenr = eb->start;
				upper->owner = btrfs_header_owner(eb);
				upper->level = lower->level + 1;
				if (!root->ref_cows)
					upper->cowonly = 1;

				/*
				 * if we know the block isn't shared
				 * we can avoid checking its backrefs.
				 */
				if (btrfs_block_can_be_shared(root, eb))
					upper->checked = 0;
				else
					upper->checked = 1;

				/*
				 * add the block to pending list if we
				 * need to check its backrefs, we only do this
				 * once while walking up a tree as we will
				 * catch anything else later on.
				 */
				if (!upper->checked && need_check) {
					need_check = false;
					list_add_tail(&edge->list[UPPER],
						      &list);
				} else {
					if (upper->checked)
						need_check = true;
					INIT_LIST_HEAD(&edge->list[UPPER]);
				}
			} else {
				upper = rb_entry(rb_node, struct backref_node,
						 rb_node);
				BUG_ON(!upper->checked);
				INIT_LIST_HEAD(&edge->list[UPPER]);
				if (!upper->owner)
					upper->owner = btrfs_header_owner(eb);
			}
			list_add_tail(&edge->list[LOWER], &lower->upper);
			edge->node[LOWER] = lower;
			edge->node[UPPER] = upper;

			if (rb_node)
				break;
			lower = upper;
		}

		btrfs_release_path(path2);
next:
		if (ptr < end) {
			ptr += btrfs_extent_inline_ref_size(key.type);
			if (ptr >= end) {
				WARN_ON(ptr > end);
				ptr = 0;
				end = 0;
			}
		}
		if (ptr >= end)
			path1->slots[0]++;
	}
	btrfs_release_path(path1);

	cur->checked = 1;
	WARN_ON(exist);

	/* the pending list isn't empty, take the first block to process */
	if (!list_empty(&list)) {
		edge = list_entry(list.next, struct backref_edge, list[UPPER]);
		list_del_init(&edge->list[UPPER]);
		cur = edge->node[UPPER];
		goto again;
	}

	/*
	 * everything goes well, connect backref nodes and insert backref nodes
	 * into the cache.
	 */
	BUG_ON(!node->checked);
	cowonly = node->cowonly;
	if (!cowonly) {
		rb_node = tree_insert(&cache->rb_root, node->bytenr,
				      &node->rb_node);
		if (rb_node)
			backref_tree_panic(rb_node, -EEXIST, node->bytenr);
		list_add_tail(&node->lower, &cache->leaves);
	}

	list_for_each_entry(edge, &node->upper, list[LOWER])
		list_add_tail(&edge->list[UPPER], &list);

	while (!list_empty(&list)) {
		edge = list_entry(list.next, struct backref_edge, list[UPPER]);
		list_del_init(&edge->list[UPPER]);
		upper = edge->node[UPPER];
		if (upper->detached) {
			list_del(&edge->list[LOWER]);
			lower = edge->node[LOWER];
			free_backref_edge(cache, edge);
			if (list_empty(&lower->upper))
				list_add(&lower->list, &useless);
			continue;
		}

		if (!RB_EMPTY_NODE(&upper->rb_node)) {
			if (upper->lowest) {
				list_del_init(&upper->lower);
				upper->lowest = 0;
			}

			list_add_tail(&edge->list[UPPER], &upper->lower);
			continue;
		}

		BUG_ON(!upper->checked);
		BUG_ON(cowonly != upper->cowonly);
		if (!cowonly) {
			rb_node = tree_insert(&cache->rb_root, upper->bytenr,
					      &upper->rb_node);
			if (rb_node)
				backref_tree_panic(rb_node, -EEXIST,
						   upper->bytenr);
		}

		list_add_tail(&edge->list[UPPER], &upper->lower);

		list_for_each_entry(edge, &upper->upper, list[LOWER])
			list_add_tail(&edge->list[UPPER], &list);
	}
	/*
	 * process useless backref nodes. backref nodes for tree leaves
	 * are deleted from the cache. backref nodes for upper level
	 * tree blocks are left in the cache to avoid unnecessary backref
	 * lookup.
	 */
	while (!list_empty(&useless)) {
		upper = list_entry(useless.next, struct backref_node, list);
		list_del_init(&upper->list);
		BUG_ON(!list_empty(&upper->upper));
		if (upper == node)
			node = NULL;
		if (upper->lowest) {
			list_del_init(&upper->lower);
			upper->lowest = 0;
		}
		while (!list_empty(&upper->lower)) {
			edge = list_entry(upper->lower.next,
					  struct backref_edge, list[UPPER]);
			list_del(&edge->list[UPPER]);
			list_del(&edge->list[LOWER]);
			lower = edge->node[LOWER];
			free_backref_edge(cache, edge);

			if (list_empty(&lower->upper))
				list_add(&lower->list, &useless);
		}
		__mark_block_processed(rc, upper);
		if (upper->level > 0) {
			list_add(&upper->list, &cache->detached);
			upper->detached = 1;
		} else {
			rb_erase(&upper->rb_node, &cache->rb_root);
			free_backref_node(cache, upper);
		}
	}
out:
	btrfs_free_path(path1);
	btrfs_free_path(path2);
	if (err) {
		while (!list_empty(&useless)) {
			lower = list_entry(useless.next,
					   struct backref_node, upper);
			list_del_init(&lower->upper);
		}
		upper = node;
		INIT_LIST_HEAD(&list);
		while (upper) {
			if (RB_EMPTY_NODE(&upper->rb_node)) {
				list_splice_tail(&upper->upper, &list);
				free_backref_node(cache, upper);
			}

			if (list_empty(&list))
				break;

			edge = list_entry(list.next, struct backref_edge,
					  list[LOWER]);
			list_del(&edge->list[LOWER]);
			upper = edge->node[UPPER];
			free_backref_edge(cache, edge);
		}
		return ERR_PTR(err);
	}

	BUG_ON(node && node->detached);
	return node;
}
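/*
 * Summary of the contract above: on success build_backref_tree()
 * returns the backref node of the requested block with every reachable
 * upper level node cached and checked; on failure the half-built nodes
 * and edges are torn down and an ERR_PTR() is returned.
 */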
/*
 * helper to add backref node for the newly created snapshot.
 * the backref node is created by cloning backref node that
 * corresponds to root of source tree
 */
static int clone_backref_node(struct btrfs_trans_handle *trans,
			      struct reloc_control *rc,
			      struct btrfs_root *src,
			      struct btrfs_root *dest)
{
	struct btrfs_root *reloc_root = src->reloc_root;
	struct backref_cache *cache = &rc->backref_cache;
	struct backref_node *node = NULL;
	struct backref_node *new_node;
	struct backref_edge *edge;
	struct backref_edge *new_edge;
	struct rb_node *rb_node;

	if (cache->last_trans > 0)
		update_backref_cache(trans, cache);

	rb_node = tree_search(&cache->rb_root, src->commit_root->start);
	if (rb_node) {
		node = rb_entry(rb_node, struct backref_node, rb_node);
		if (node->detached)
			node = NULL;
		else
			BUG_ON(node->new_bytenr != reloc_root->node->start);
	}

	if (!node) {
		rb_node = tree_search(&cache->rb_root,
				      reloc_root->commit_root->start);
		if (rb_node) {
			node = rb_entry(rb_node, struct backref_node,
					rb_node);
			BUG_ON(node->detached);
		}
	}

	if (!node)
		return 0;

	new_node = alloc_backref_node(cache);
	if (!new_node)
		return -ENOMEM;

	new_node->bytenr = dest->node->start;
	new_node->level = node->level;
	new_node->lowest = node->lowest;
	new_node->checked = 1;
	new_node->root = dest;

	if (!node->lowest) {
		list_for_each_entry(edge, &node->lower, list[UPPER]) {
			new_edge = alloc_backref_edge(cache);
			if (!new_edge)
				goto fail;

			new_edge->node[UPPER] = new_node;
			new_edge->node[LOWER] = edge->node[LOWER];
			list_add_tail(&new_edge->list[UPPER],
				      &new_node->lower);
		}
	} else {
		list_add_tail(&new_node->lower, &cache->leaves);
	}

	rb_node = tree_insert(&cache->rb_root, new_node->bytenr,
			      &new_node->rb_node);
	if (rb_node)
		backref_tree_panic(rb_node, -EEXIST, new_node->bytenr);

	if (!new_node->lowest) {
		list_for_each_entry(new_edge, &new_node->lower, list[UPPER]) {
			list_add_tail(&new_edge->list[LOWER],
				      &new_edge->node[LOWER]->upper);
		}
	}
	return 0;
fail:
	while (!list_empty(&new_node->lower)) {
		new_edge = list_entry(new_node->lower.next,
				      struct backref_edge, list[UPPER]);
		list_del(&new_edge->list[UPPER]);
		free_backref_edge(cache, new_edge);
	}
	free_backref_node(cache, new_node);
	return -ENOMEM;
}
/*
 * helper to add 'address of tree root -> reloc tree' mapping
 */
static int __must_check __add_reloc_root(struct btrfs_root *root)
{
	struct rb_node *rb_node;
	struct mapping_node *node;
	struct reloc_control *rc = root->fs_info->reloc_ctl;

	node = kmalloc(sizeof(*node), GFP_NOFS);
	if (!node)
		return -ENOMEM;

	node->bytenr = root->node->start;
	node->data = root;

	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
			      node->bytenr, &node->rb_node);
	spin_unlock(&rc->reloc_root_tree.lock);
	if (rb_node) {
		btrfs_panic(root->fs_info, -EEXIST, "Duplicate root found "
			    "for start=%llu while inserting into relocation "
			    "tree\n", node->bytenr);
		kfree(node);
		return -EEXIST;
	}

	list_add_tail(&root->root_list, &rc->reloc_roots);
	return 0;
}
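/*
 * The reloc_root_tree maps the bytenr of a reloc tree's root node to
 * the tree itself, so that backref lookups can resolve shared block
 * refs (see find_reloc_root()).  Because the root node moves when it
 * is COWed, the mapping has to be rekeyed; that is what
 * __update_reloc_root() below is for.
 */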
/*
 * helper to delete the 'address of tree root -> reloc tree'
 * mapping
 */
static void __del_reloc_root(struct btrfs_root *root)
{
	struct rb_node *rb_node;
	struct mapping_node *node = NULL;
	struct reloc_control *rc = root->fs_info->reloc_ctl;

	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = tree_search(&rc->reloc_root_tree.rb_root,
			      root->node->start);
	if (rb_node) {
		node = rb_entry(rb_node, struct mapping_node, rb_node);
		rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
	}
	spin_unlock(&rc->reloc_root_tree.lock);

	if (!node)
		return;
	BUG_ON((struct btrfs_root *)node->data != root);

	spin_lock(&root->fs_info->trans_lock);
	list_del_init(&root->root_list);
	spin_unlock(&root->fs_info->trans_lock);
	kfree(node);
}
/*
 * helper to update the 'address of tree root -> reloc tree'
 * mapping
 */
static int __update_reloc_root(struct btrfs_root *root, u64 new_bytenr)
{
	struct rb_node *rb_node;
	struct mapping_node *node = NULL;
	struct reloc_control *rc = root->fs_info->reloc_ctl;

	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = tree_search(&rc->reloc_root_tree.rb_root,
			      root->node->start);
	if (rb_node) {
		node = rb_entry(rb_node, struct mapping_node, rb_node);
		rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
	}
	spin_unlock(&rc->reloc_root_tree.lock);

	if (!node)
		return 0;
	BUG_ON((struct btrfs_root *)node->data != root);

	spin_lock(&rc->reloc_root_tree.lock);
	node->bytenr = new_bytenr;
	rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
			      node->bytenr, &node->rb_node);
	spin_unlock(&rc->reloc_root_tree.lock);
	if (rb_node)
		backref_tree_panic(rb_node, -EEXIST, node->bytenr);
	return 0;
}
static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    u64 objectid)
{
	struct btrfs_root *reloc_root;
	struct extent_buffer *eb;
	struct btrfs_root_item *root_item;
	struct btrfs_key root_key;
	u64 last_snap = 0;
	int ret;

	root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
	BUG_ON(!root_item);

	root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = objectid;

	if (root->root_key.objectid == objectid) {
		/* called by btrfs_init_reloc_root */
		ret = btrfs_copy_root(trans, root, root->commit_root, &eb,
				      BTRFS_TREE_RELOC_OBJECTID);
		BUG_ON(ret);

		last_snap = btrfs_root_last_snapshot(&root->root_item);
		btrfs_set_root_last_snapshot(&root->root_item,
					     trans->transid - 1);
	} else {
		/*
		 * called by btrfs_reloc_post_snapshot_hook.
		 * the source tree is a reloc tree, all tree blocks
		 * modified after it was created have RELOC flag
		 * set in their headers. so it's OK to not update
		 * the 'last_snapshot'.
		 */
		ret = btrfs_copy_root(trans, root, root->node, &eb,
				      BTRFS_TREE_RELOC_OBJECTID);
		BUG_ON(ret);
	}

	memcpy(root_item, &root->root_item, sizeof(*root_item));
	btrfs_set_root_bytenr(root_item, eb->start);
	btrfs_set_root_level(root_item, btrfs_header_level(eb));
	btrfs_set_root_generation(root_item, trans->transid);

	if (root->root_key.objectid == objectid) {
		btrfs_set_root_refs(root_item, 0);
		memset(&root_item->drop_progress, 0,
		       sizeof(struct btrfs_disk_key));
		root_item->drop_level = 0;
		/*
		 * abuse rtransid, it is safe because it is impossible to
		 * receive data into a relocation tree.
		 */
		btrfs_set_root_rtransid(root_item, last_snap);
		btrfs_set_root_otransid(root_item, trans->transid);
	}

	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	ret = btrfs_insert_root(trans, root->fs_info->tree_root,
				&root_key, root_item);
	BUG_ON(ret);
	kfree(root_item);

	reloc_root = btrfs_read_fs_root(root->fs_info->tree_root, &root_key);
	BUG_ON(IS_ERR(reloc_root));
	reloc_root->last_trans = trans->transid;
	return reloc_root;
}
/*
 * create reloc tree for a given fs tree. reloc tree is just a
 * snapshot of the fs tree with special root objectid.
 */
int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root)
{
	struct btrfs_root *reloc_root;
	struct reloc_control *rc = root->fs_info->reloc_ctl;
	struct btrfs_block_rsv *rsv;
	int clear_rsv = 0;
	int ret;

	if (root->reloc_root) {
		reloc_root = root->reloc_root;
		reloc_root->last_trans = trans->transid;
		return 0;
	}

	if (!rc || !rc->create_reloc_tree ||
	    root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		return 0;

	if (!trans->reloc_reserved) {
		rsv = trans->block_rsv;
		trans->block_rsv = rc->block_rsv;
		clear_rsv = 1;
	}
	reloc_root = create_reloc_root(trans, root, root->root_key.objectid);
	if (clear_rsv)
		trans->block_rsv = rsv;

	ret = __add_reloc_root(reloc_root);
	BUG_ON(ret < 0);
	root->reloc_root = reloc_root;
	return 0;
}
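/*
 * In other words, a reloc tree is created lazily: the first time a
 * reference counted root is modified inside a transaction while
 * rc->create_reloc_tree is set.  On later calls root->reloc_root is
 * reused and only its last_trans is refreshed.
 */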
/*
 * update root item of reloc tree
 */
int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	struct btrfs_root *reloc_root;
	struct btrfs_root_item *root_item;
	int ret;

	if (!root->reloc_root)
		goto out;

	reloc_root = root->reloc_root;
	root_item = &reloc_root->root_item;

	if (root->fs_info->reloc_ctl->merge_reloc_tree &&
	    btrfs_root_refs(root_item) == 0) {
		root->reloc_root = NULL;
		__del_reloc_root(reloc_root);
	}

	if (reloc_root->commit_root != reloc_root->node) {
		btrfs_set_root_node(root_item, reloc_root->node);
		free_extent_buffer(reloc_root->commit_root);
		reloc_root->commit_root = btrfs_root_node(reloc_root);
	}

	ret = btrfs_update_root(trans, root->fs_info->tree_root,
				&reloc_root->root_key, root_item);
	BUG_ON(ret);

out:
	return 0;
}
/*
 * helper to find first cached inode with inode number >= objectid
 * in a subvolume
 */
static struct inode *find_next_inode(struct btrfs_root *root, u64 objectid)
{
	struct rb_node *node;
	struct rb_node *prev;
	struct btrfs_inode *entry;
	struct inode *inode;

	spin_lock(&root->inode_lock);
again:
	node = root->inode_tree.rb_node;
	prev = NULL;
	while (node) {
		prev = node;
		entry = rb_entry(node, struct btrfs_inode, rb_node);

		if (objectid < btrfs_ino(&entry->vfs_inode))
			node = node->rb_left;
		else if (objectid > btrfs_ino(&entry->vfs_inode))
			node = node->rb_right;
		else
			break;
	}
	if (!node) {
		while (prev) {
			entry = rb_entry(prev, struct btrfs_inode, rb_node);
			if (objectid <= btrfs_ino(&entry->vfs_inode)) {
				node = prev;
				break;
			}
			prev = rb_next(prev);
		}
	}
	while (node) {
		entry = rb_entry(node, struct btrfs_inode, rb_node);
		inode = igrab(&entry->vfs_inode);
		if (inode) {
			spin_unlock(&root->inode_lock);
			return inode;
		}

		objectid = btrfs_ino(&entry->vfs_inode) + 1;
		if (cond_resched_lock(&root->inode_lock))
			goto again;

		node = rb_next(node);
	}
	spin_unlock(&root->inode_lock);
	return NULL;
}
static int in_block_group(u64 bytenr,
			  struct btrfs_block_group_cache *block_group)
{
	if (bytenr >= block_group->key.objectid &&
	    bytenr < block_group->key.objectid + block_group->key.offset)
		return 1;
	return 0;
}
/*
 * get new location of data
 */
static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr,
			    u64 bytenr, u64 num_bytes)
{
	struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	bytenr -= BTRFS_I(reloc_inode)->index_cnt;
	ret = btrfs_lookup_file_extent(NULL, root, path,
				       btrfs_ino(reloc_inode), bytenr, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);

	BUG_ON(btrfs_file_extent_offset(leaf, fi) ||
	       btrfs_file_extent_compression(leaf, fi) ||
	       btrfs_file_extent_encryption(leaf, fi) ||
	       btrfs_file_extent_other_encoding(leaf, fi));

	if (num_bytes != btrfs_file_extent_disk_num_bytes(leaf, fi)) {
		ret = -EINVAL;
		goto out;
	}

	*new_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
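/*
 * A note on the offset arithmetic above (an inference from how this
 * file uses index_cnt, not stated here explicitly): the data inode's
 * index_cnt is expected to hold the start of the block group being
 * relocated, so (bytenr - index_cnt) is the file offset at which the
 * replacement copy of the extent lives, and that file extent's
 * disk_bytenr is the data's new location.
 */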
/*
 * update file extent items in the tree leaf to point to
 * the new locations.
 */
static noinline_for_stack
int replace_file_extents(struct btrfs_trans_handle *trans,
			 struct reloc_control *rc,
			 struct btrfs_root *root,
			 struct extent_buffer *leaf)
{
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	struct inode *inode = NULL;
	u64 parent;
	u64 bytenr;
	u64 new_bytenr = 0;
	u64 num_bytes;
	u64 end;
	u32 nritems;
	u32 i;
	int ret = 0;
	int first = 1;
	int dirty = 0;

	if (rc->stage != UPDATE_DATA_PTRS)
		return 0;

	/* reloc trees always use full backref */
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		parent = leaf->start;
	else
		parent = 0;

	nritems = btrfs_header_nritems(leaf);
	for (i = 0; i < nritems; i++) {
		cond_resched();
		btrfs_item_key_to_cpu(leaf, &key, i);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
		if (btrfs_file_extent_type(leaf, fi) ==
		    BTRFS_FILE_EXTENT_INLINE)
			continue;
		bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
		num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
		if (bytenr == 0)
			continue;
		if (!in_block_group(bytenr, rc->block_group))
			continue;

		/*
		 * if we are modifying a block in the fs tree, wait for
		 * readpage to complete and drop the extent cache
		 */
		if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
			if (first) {
				inode = find_next_inode(root, key.objectid);
				first = 0;
			} else if (inode && btrfs_ino(inode) < key.objectid) {
				btrfs_add_delayed_iput(inode);
				inode = find_next_inode(root, key.objectid);
			}
			if (inode && btrfs_ino(inode) == key.objectid) {
				end = key.offset +
				      btrfs_file_extent_num_bytes(leaf, fi);
				WARN_ON(!IS_ALIGNED(key.offset,
						    root->sectorsize));
				WARN_ON(!IS_ALIGNED(end, root->sectorsize));
				end--;
				ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
						      key.offset, end);
				if (!ret)
					continue;

				btrfs_drop_extent_cache(inode, key.offset, end,
							1);
				unlock_extent(&BTRFS_I(inode)->io_tree,
					      key.offset, end);
			}
		}

		ret = get_new_location(rc->data_inode, &new_bytenr,
				       bytenr, num_bytes);
		if (ret) {
			/*
			 * Don't have to abort since we've not changed anything
			 * in the file extent yet.
			 */
			break;
		}

		btrfs_set_file_extent_disk_bytenr(leaf, fi, new_bytenr);
		dirty = 1;

		key.offset -= btrfs_file_extent_offset(leaf, fi);
		ret = btrfs_inc_extent_ref(trans, root, new_bytenr,
					   num_bytes, parent,
					   btrfs_header_owner(leaf),
					   key.objectid, key.offset, 1);
		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			break;
		}

		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
					parent, btrfs_header_owner(leaf),
					key.objectid, key.offset, 1);
		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			break;
		}
	}
	if (dirty)
		btrfs_mark_buffer_dirty(leaf);
	if (inode)
		btrfs_add_delayed_iput(inode);
	return ret;
}
static noinline_for_stack
int memcmp_node_keys(struct extent_buffer *eb, int slot,
		     struct btrfs_path *path, int level)
{
	struct btrfs_disk_key key1;
	struct btrfs_disk_key key2;
	btrfs_node_key(eb, &key1, slot);
	btrfs_node_key(path->nodes[level], &key2, path->slots[level]);
	return memcmp(&key1, &key2, sizeof(key1));
}
/*
 * try to replace tree blocks in fs tree with the new blocks
 * in reloc tree. tree blocks that haven't been modified since the
 * reloc tree was created can be replaced.
 *
 * if a block was replaced, level of the block + 1 is returned.
 * if no block got replaced, 0 is returned. if there are other
 * errors, a negative error number is returned.
 */
static noinline_for_stack
int replace_path(struct btrfs_trans_handle *trans,
		 struct btrfs_root *dest, struct btrfs_root *src,
		 struct btrfs_path *path, struct btrfs_key *next_key,
		 int lowest_level, int max_level)
{
	struct extent_buffer *eb;
	struct extent_buffer *parent;
	struct btrfs_key key;
	u64 old_bytenr;
	u64 new_bytenr;
	u64 old_ptr_gen;
	u64 new_ptr_gen;
	u64 last_snapshot;
	u32 blocksize;
	int cow = 0;
	int level;
	int ret;
	int slot;

	BUG_ON(src->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
	BUG_ON(dest->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID);

	last_snapshot = btrfs_root_last_snapshot(&src->root_item);
again:
	slot = path->slots[lowest_level];
	btrfs_node_key_to_cpu(path->nodes[lowest_level], &key, slot);

	eb = btrfs_lock_root_node(dest);
	btrfs_set_lock_blocking(eb);
	level = btrfs_header_level(eb);

	if (level < lowest_level) {
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
		return 0;
	}

	if (cow) {
		ret = btrfs_cow_block(trans, dest, eb, NULL, 0, &eb);
		BUG_ON(ret);
	}
	btrfs_set_lock_blocking(eb);

	if (next_key) {
		next_key->objectid = (u64)-1;
		next_key->type = (u8)-1;
		next_key->offset = (u64)-1;
	}

	parent = eb;
	while (1) {
		level = btrfs_header_level(parent);
		BUG_ON(level < lowest_level);

		ret = btrfs_bin_search(parent, &key, level, &slot);
		if (ret && slot > 0)
			slot--;

		if (next_key && slot + 1 < btrfs_header_nritems(parent))
			btrfs_node_key_to_cpu(parent, next_key, slot + 1);

		old_bytenr = btrfs_node_blockptr(parent, slot);
		blocksize = btrfs_level_size(dest, level - 1);
		old_ptr_gen = btrfs_node_ptr_generation(parent, slot);

		if (level <= max_level) {
			eb = path->nodes[level];
			new_bytenr = btrfs_node_blockptr(eb,
							 path->slots[level]);
			new_ptr_gen = btrfs_node_ptr_generation(eb,
							path->slots[level]);
		} else {
			new_bytenr = 0;
			new_ptr_gen = 0;
		}

		if (WARN_ON(new_bytenr > 0 && new_bytenr == old_bytenr)) {
			ret = level;
			break;
		}

		if (new_bytenr == 0 || old_ptr_gen > last_snapshot ||
		    memcmp_node_keys(parent, slot, path, level)) {
			if (level <= lowest_level) {
				ret = 0;
				break;
			}

			eb = read_tree_block(dest, old_bytenr, blocksize,
					     old_ptr_gen);
			if (!eb || !extent_buffer_uptodate(eb)) {
				ret = (!eb) ? -ENOMEM : -EIO;
				free_extent_buffer(eb);
				break;
			}
			btrfs_tree_lock(eb);
			if (cow) {
				ret = btrfs_cow_block(trans, dest, eb, parent,
						      slot, &eb);
				BUG_ON(ret);
			}
			btrfs_set_lock_blocking(eb);

			btrfs_tree_unlock(parent);
			free_extent_buffer(parent);

			parent = eb;
			continue;
		}

		if (!cow) {
			btrfs_tree_unlock(parent);
			free_extent_buffer(parent);
			cow = 1;
			goto again;
		}

		btrfs_node_key_to_cpu(path->nodes[level], &key,
				      path->slots[level]);
		btrfs_release_path(path);

		path->lowest_level = level;
		ret = btrfs_search_slot(trans, src, &key, path, 0, 1);
		path->lowest_level = 0;
		BUG_ON(ret);

		/*
		 * swap blocks in fs tree and reloc tree.
		 */
		btrfs_set_node_blockptr(parent, slot, new_bytenr);
		btrfs_set_node_ptr_generation(parent, slot, new_ptr_gen);
		btrfs_mark_buffer_dirty(parent);

		btrfs_set_node_blockptr(path->nodes[level],
					path->slots[level], old_bytenr);
		btrfs_set_node_ptr_generation(path->nodes[level],
					      path->slots[level], old_ptr_gen);
		btrfs_mark_buffer_dirty(path->nodes[level]);

		ret = btrfs_inc_extent_ref(trans, src, old_bytenr, blocksize,
					   path->nodes[level]->start,
					   src->root_key.objectid, level - 1, 0,
					   1);
		BUG_ON(ret);
		ret = btrfs_inc_extent_ref(trans, dest, new_bytenr, blocksize,
					   0, dest->root_key.objectid,
					   level - 1, 0, 1);
		BUG_ON(ret);

		ret = btrfs_free_extent(trans, src, new_bytenr, blocksize,
					path->nodes[level]->start,
					src->root_key.objectid, level - 1, 0,
					1);
		BUG_ON(ret);

		ret = btrfs_free_extent(trans, dest, old_bytenr, blocksize,
					0, dest->root_key.objectid, level - 1,
					0, 1);
		BUG_ON(ret);

		btrfs_unlock_up_safe(path, 0);

		ret = level;
		break;
	}
	btrfs_tree_unlock(parent);
	free_extent_buffer(parent);
	return ret;
}
/*
 * helper to find next relocated block in reloc tree
 */
static noinline_for_stack
int walk_up_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
		       int *level)
{
	struct extent_buffer *eb;
	int i;
	u64 last_snapshot;
	u32 nritems;

	last_snapshot = btrfs_root_last_snapshot(&root->root_item);

	for (i = 0; i < *level; i++) {
		free_extent_buffer(path->nodes[i]);
		path->nodes[i] = NULL;
	}

	for (i = *level; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
		eb = path->nodes[i];
		nritems = btrfs_header_nritems(eb);
		while (path->slots[i] + 1 < nritems) {
			path->slots[i]++;
			if (btrfs_node_ptr_generation(eb, path->slots[i]) <=
			    last_snapshot)
				continue;

			*level = i;
			return 0;
		}
		free_extent_buffer(path->nodes[i]);
		path->nodes[i] = NULL;
	}
	return 1;
}
/*
 * walk down reloc tree to find relocated block of lowest level
 */
static noinline_for_stack
int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
			 int *level)
{
	struct extent_buffer *eb = NULL;
	int i;
	u64 bytenr;
	u64 ptr_gen = 0;
	u64 last_snapshot;
	u32 blocksize;
	u32 nritems;

	last_snapshot = btrfs_root_last_snapshot(&root->root_item);

	for (i = *level; i > 0; i--) {
		eb = path->nodes[i];
		nritems = btrfs_header_nritems(eb);
		while (path->slots[i] < nritems) {
			ptr_gen = btrfs_node_ptr_generation(eb, path->slots[i]);
			if (ptr_gen > last_snapshot)
				break;
			path->slots[i]++;
		}
		if (path->slots[i] >= nritems) {
			if (i == *level)
				break;
			*level = i + 1;
			return 0;
		}
		if (i == 1) {
			*level = i;
			return 0;
		}

		bytenr = btrfs_node_blockptr(eb, path->slots[i]);
		blocksize = btrfs_level_size(root, i - 1);
		eb = read_tree_block(root, bytenr, blocksize, ptr_gen);
		if (!eb || !extent_buffer_uptodate(eb)) {
			free_extent_buffer(eb);
			return -EIO;
		}
		BUG_ON(btrfs_header_level(eb) != i - 1);
		path->nodes[i - 1] = eb;
		path->slots[i - 1] = 0;
	}
	return 1;
}
/*
 * invalidate extent cache for file extents whose key is in the range of
 * [min_key, max_key)
 */
static int invalidate_extent_cache(struct btrfs_root *root,
				   struct btrfs_key *min_key,
				   struct btrfs_key *max_key)
{
	struct inode *inode = NULL;
	u64 objectid;
	u64 start, end;
	u64 ino;

	objectid = min_key->objectid;
	while (1) {
		cond_resched();
		iput(inode);

		if (objectid > max_key->objectid)
			break;

		inode = find_next_inode(root, objectid);
		if (!inode)
			break;
		ino = btrfs_ino(inode);

		if (ino > max_key->objectid) {
			iput(inode);
			break;
		}

		objectid = ino + 1;
		if (!S_ISREG(inode->i_mode))
			continue;

		if (unlikely(min_key->objectid == ino)) {
			if (min_key->type > BTRFS_EXTENT_DATA_KEY)
				continue;
			if (min_key->type < BTRFS_EXTENT_DATA_KEY)
				start = 0;
			else {
				start = min_key->offset;
				WARN_ON(!IS_ALIGNED(start, root->sectorsize));
			}
		} else {
			start = 0;
		}

		if (unlikely(max_key->objectid == ino)) {
			if (max_key->type < BTRFS_EXTENT_DATA_KEY)
				continue;
			if (max_key->type > BTRFS_EXTENT_DATA_KEY) {
				end = (u64)-1;
			} else {
				if (max_key->offset == 0)
					continue;
				end = max_key->offset;
				WARN_ON(!IS_ALIGNED(end, root->sectorsize));
				end--;
			}
		} else {
			end = (u64)-1;
		}

		/* the lock_extent waits for readpage to complete */
		lock_extent(&BTRFS_I(inode)->io_tree, start, end);
		btrfs_drop_extent_cache(inode, start, end, 1);
		unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
	}
	return 0;
}
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)
{
	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level])
			break;
		if (path->slots[level] + 1 <
		    btrfs_header_nritems(path->nodes[level])) {
			btrfs_node_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
			return 0;
		}
		level++;
	}
	return 1;
}
/*
 * merge the relocated tree blocks in reloc tree with corresponding
 * fs tree.
 */
static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
					       struct btrfs_root *root)
{
	LIST_HEAD(inode_list);
	struct btrfs_key key;
	struct btrfs_key next_key;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_root *reloc_root;
	struct btrfs_root_item *root_item;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int level;
	int max_level;
	int replaced = 0;
	int ret;
	int err = 0;
	u32 min_reserved;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	reloc_root = root->reloc_root;
	root_item = &reloc_root->root_item;

	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
		level = btrfs_root_level(root_item);
		extent_buffer_get(reloc_root->node);
		path->nodes[level] = reloc_root->node;
		path->slots[level] = 0;
	} else {
		btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);

		level = root_item->drop_level;
		BUG_ON(level == 0);
		path->lowest_level = level;
		ret = btrfs_search_slot(NULL, reloc_root, &key, path, 0, 0);
		path->lowest_level = 0;
		if (ret < 0) {
			btrfs_free_path(path);
			return ret;
		}

		btrfs_node_key_to_cpu(path->nodes[level], &next_key,
				      path->slots[level]);
		WARN_ON(memcmp(&key, &next_key, sizeof(key)));

		btrfs_unlock_up_safe(path, 0);
	}

	min_reserved = root->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
	memset(&next_key, 0, sizeof(next_key));

	while (1) {
		ret = btrfs_block_rsv_refill(root, rc->block_rsv, min_reserved,
					     BTRFS_RESERVE_FLUSH_ALL);
		if (ret) {
			err = ret;
			goto out;
		}
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans)) {
			err = PTR_ERR(trans);
			trans = NULL;
			goto out;
		}
		trans->block_rsv = rc->block_rsv;

		replaced = 0;
		max_level = level;

		ret = walk_down_reloc_tree(reloc_root, path, &level);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		if (ret > 0)
			break;

		if (!find_next_key(path, level, &key) &&
		    btrfs_comp_cpu_keys(&next_key, &key) >= 0) {
			ret = 0;
		} else {
			ret = replace_path(trans, root, reloc_root, path,
					   &next_key, level, max_level);
		}
		if (ret < 0) {
			err = ret;
			goto out;
		}

		if (ret > 0) {
			level = ret;
			btrfs_node_key_to_cpu(path->nodes[level], &key,
					      path->slots[level]);
			replaced = 1;
		}

		ret = walk_up_reloc_tree(reloc_root, path, &level);
		if (ret > 0)
			break;

		BUG_ON(level == 0);
		/*
		 * save the merging progress in the drop_progress.
		 * this is OK since root refs == 1 in this case.
		 */
		btrfs_node_key(path->nodes[level], &root_item->drop_progress,
			       path->slots[level]);
		root_item->drop_level = level;

		btrfs_end_transaction_throttle(trans, root);
		trans = NULL;

		btrfs_btree_balance_dirty(root);

		if (replaced && rc->stage == UPDATE_DATA_PTRS)
			invalidate_extent_cache(root, &key, &next_key);
	}

	/*
	 * handle the case when only one block in the fs tree needs to be
	 * relocated and the block is the tree root.
	 */
	leaf = btrfs_lock_root_node(root);
	ret = btrfs_cow_block(trans, root, leaf, NULL, 0, &leaf);
	btrfs_tree_unlock(leaf);
	free_extent_buffer(leaf);
	if (ret < 0)
		err = ret;
out:
	btrfs_free_path(path);

	if (err == 0) {
		memset(&root_item->drop_progress, 0,
		       sizeof(root_item->drop_progress));
		root_item->drop_level = 0;
		btrfs_set_root_refs(root_item, 0);
		btrfs_update_reloc_root(trans, root);
	}

	if (trans)
		btrfs_end_transaction_throttle(trans, root);

	btrfs_btree_balance_dirty(root);

	if (replaced && rc->stage == UPDATE_DATA_PTRS)
		invalidate_extent_cache(root, &key, &next_key);

	return err;
}
static noinline_for_stack
int prepare_to_merge(struct reloc_control *rc, int err)
{
	struct btrfs_root *root = rc->extent_root;
	struct btrfs_root *reloc_root;
	struct btrfs_trans_handle *trans;
	LIST_HEAD(reloc_roots);
	u64 num_bytes = 0;
	int ret;

	mutex_lock(&root->fs_info->reloc_mutex);
	rc->merging_rsv_size += root->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
	rc->merging_rsv_size += rc->nodes_relocated * 2;
	mutex_unlock(&root->fs_info->reloc_mutex);

again:
	if (!err) {
		num_bytes = rc->merging_rsv_size;
		ret = btrfs_block_rsv_add(root, rc->block_rsv, num_bytes,
					  BTRFS_RESERVE_FLUSH_ALL);
		if (ret)
			err = ret;
	}

	trans = btrfs_join_transaction(rc->extent_root);
	if (IS_ERR(trans)) {
		if (!err)
			btrfs_block_rsv_release(rc->extent_root,
						rc->block_rsv, num_bytes);
		return PTR_ERR(trans);
	}

	if (!err) {
		if (num_bytes != rc->merging_rsv_size) {
			btrfs_end_transaction(trans, rc->extent_root);
			btrfs_block_rsv_release(rc->extent_root,
						rc->block_rsv, num_bytes);
			goto again;
		}
	}

	rc->merge_reloc_tree = 1;

	while (!list_empty(&rc->reloc_roots)) {
		reloc_root = list_entry(rc->reloc_roots.next,
					struct btrfs_root, root_list);
		list_del_init(&reloc_root->root_list);

		root = read_fs_root(reloc_root->fs_info,
				    reloc_root->root_key.offset);
		BUG_ON(IS_ERR(root));
		BUG_ON(root->reloc_root != reloc_root);

		/*
		 * set reference count to 1, so btrfs_recover_relocation
		 * knows it should resume merging
		 */
		if (!err)
			btrfs_set_root_refs(&reloc_root->root_item, 1);
		btrfs_update_reloc_root(trans, root);

		list_add(&reloc_root->root_list, &reloc_roots);
	}

	list_splice(&reloc_roots, &rc->reloc_roots);

	if (!err)
		btrfs_commit_transaction(trans, rc->extent_root);
	else
		btrfs_end_transaction(trans, rc->extent_root);
	return err;
}
static noinline_for_stack
void free_reloc_roots(struct list_head *list)
{
	struct btrfs_root *reloc_root;

	while (!list_empty(list)) {
		reloc_root = list_entry(list->next, struct btrfs_root,
					root_list);
		__del_reloc_root(reloc_root);
	}
}
static noinline_for_stack
int merge_reloc_roots(struct reloc_control *rc)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root;
	struct btrfs_root *reloc_root;
	u64 last_snap;
	u64 otransid;
	u64 objectid;
	LIST_HEAD(reloc_roots);
	int found = 0;
	int ret = 0;
again:
	root = rc->extent_root;

	/*
	 * this serializes us with btrfs_record_root_in_transaction,
	 * we have to make sure nobody is in the middle of
	 * adding their roots to the list while we are
	 * doing this splice
	 */
	mutex_lock(&root->fs_info->reloc_mutex);
	list_splice_init(&rc->reloc_roots, &reloc_roots);
	mutex_unlock(&root->fs_info->reloc_mutex);

	while (!list_empty(&reloc_roots)) {
		found = 1;
		reloc_root = list_entry(reloc_roots.next,
					struct btrfs_root, root_list);

		if (btrfs_root_refs(&reloc_root->root_item) > 0) {
			root = read_fs_root(reloc_root->fs_info,
					    reloc_root->root_key.offset);
			BUG_ON(IS_ERR(root));
			BUG_ON(root->reloc_root != reloc_root);

			ret = merge_reloc_root(rc, root);
			if (ret) {
				if (list_empty(&reloc_root->root_list))
					list_add_tail(&reloc_root->root_list,
						      &reloc_roots);
				goto out;
			}
		} else {
			list_del_init(&reloc_root->root_list);
		}

		/*
		 * we keep the old last snapshot transid in rtransid when we
		 * created the relocation tree.
		 */
		last_snap = btrfs_root_rtransid(&reloc_root->root_item);
		otransid = btrfs_root_otransid(&reloc_root->root_item);
		objectid = reloc_root->root_key.offset;

		ret = btrfs_drop_snapshot(reloc_root, rc->block_rsv, 0, 1);
		if (ret < 0) {
			if (list_empty(&reloc_root->root_list))
				list_add_tail(&reloc_root->root_list,
					      &reloc_roots);
			goto out;
		} else if (!ret) {
			/*
			 * recover the last snapshot transid to avoid
			 * the space balance breaking NOCOW.
			 */
			root = read_fs_root(rc->extent_root->fs_info,
					    objectid);
			if (IS_ERR(root))
				continue;

			trans = btrfs_join_transaction(root);
			BUG_ON(IS_ERR(trans));

			/* Check if the fs/file tree was snapshoted or not. */
			if (btrfs_root_last_snapshot(&root->root_item) ==
			    otransid - 1)
				btrfs_set_root_last_snapshot(&root->root_item,
							     last_snap);

			btrfs_end_transaction(trans, root);
		}
	}

	if (found) {
		found = 0;
		goto again;
	}
out:
	if (ret) {
		btrfs_std_error(root->fs_info, ret);
		if (!list_empty(&reloc_roots))
			free_reloc_roots(&reloc_roots);

		/* new reloc root may be added */
		mutex_lock(&root->fs_info->reloc_mutex);
		list_splice_init(&rc->reloc_roots, &reloc_roots);
		mutex_unlock(&root->fs_info->reloc_mutex);
		if (!list_empty(&reloc_roots))
			free_reloc_roots(&reloc_roots);
	}

	BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root));
	return ret;
}
static void free_block_list(struct rb_root *blocks)
{
	struct tree_block *block;
	struct rb_node *rb_node;
	while ((rb_node = rb_first(blocks))) {
		block = rb_entry(rb_node, struct tree_block, rb_node);
		rb_erase(rb_node, blocks);
		kfree(block);
	}
}
static int record_reloc_root_in_trans(struct btrfs_trans_handle *trans,
				      struct btrfs_root *reloc_root)
{
	struct btrfs_root *root;

	if (reloc_root->last_trans == trans->transid)
		return 0;

	root = read_fs_root(reloc_root->fs_info, reloc_root->root_key.offset);
	BUG_ON(IS_ERR(root));
	BUG_ON(root->reloc_root != reloc_root);

	return btrfs_record_root_in_trans(trans, root);
}
static noinline_for_stack
struct btrfs_root *select_reloc_root(struct btrfs_trans_handle *trans,
				     struct reloc_control *rc,
				     struct backref_node *node,
				     struct backref_edge *edges[])
{
	struct backref_node *next;
	struct btrfs_root *root;
	int index = 0;

	next = node;
	while (1) {
		cond_resched();
		next = walk_up_backref(next, edges, &index);
		root = next->root;
		BUG_ON(!root);
		BUG_ON(!root->ref_cows);

		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
			record_reloc_root_in_trans(trans, root);
			break;
		}

		btrfs_record_root_in_trans(trans, root);
		root = root->reloc_root;

		if (next->new_bytenr != root->node->start) {
			BUG_ON(next->new_bytenr);
			BUG_ON(!list_empty(&next->list));
			next->new_bytenr = root->node->start;
			next->root = root;
			list_add_tail(&next->list,
				      &rc->backref_cache.changed);
			__mark_block_processed(rc, next);
			break;
		}

		WARN_ON(1);
		root = NULL;
		next = walk_down_backref(edges, &index);
		if (!next || next->level <= node->level)
			break;
	}
	if (!root)
		return NULL;

	next = node;
	/* setup backref node path for btrfs_reloc_cow_block */
	while (1) {
		rc->backref_cache.path[next->level] = next;
		if (--index < 0)
			break;
		next = edges[index]->node[UPPER];
	}
	return root;
}
/*
 * select a tree root for relocation. return NULL if the block
 * is reference counted. we should use do_relocation() in this
 * case. return a tree root pointer if the block isn't reference
 * counted. return -ENOENT if the block is root of reloc tree.
 */
static noinline_for_stack
struct btrfs_root *select_one_root(struct btrfs_trans_handle *trans,
				   struct backref_node *node)
{
	struct backref_node *next;
	struct btrfs_root *root;
	struct btrfs_root *fs_root = NULL;
	struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
	int index = 0;

	next = node;
	while (1) {
		cond_resched();
		next = walk_up_backref(next, edges, &index);
		root = next->root;
		BUG_ON(!root);

		/* no other choice for a non-reference counted tree */
		if (!root->ref_cows)
			return root;

		if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID)
			fs_root = root;

		if (next != node)
			return NULL;

		next = walk_down_backref(edges, &index);
		if (!next || next->level <= node->level)
			break;
	}

	if (!fs_root)
		return ERR_PTR(-ENOENT);
	return fs_root;
}
static noinline_for_stack
u64 calcu_metadata_size(struct reloc_control *rc,
			struct backref_node *node, int reserve)
{
	struct backref_node *next = node;
	struct backref_edge *edge;
	struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
	u64 num_bytes = 0;
	int index = 0;

	BUG_ON(reserve && node->processed);

	while (next) {
		cond_resched();
		while (1) {
			if (next->processed && (reserve || next != node))
				break;

			num_bytes += btrfs_level_size(rc->extent_root,
						      next->level);

			if (list_empty(&next->upper))
				break;

			edge = list_entry(next->upper.next,
					  struct backref_edge, list[LOWER]);
			edges[index++] = edge;
			next = edge->node[UPPER];
		}
		next = walk_down_backref(edges, &index);
	}
	return num_bytes;
}
static int reserve_metadata_space(struct btrfs_trans_handle *trans,
				  struct reloc_control *rc,
				  struct backref_node *node)
{
	struct btrfs_root *root = rc->extent_root;
	u64 num_bytes;
	u64 tmp;
	int ret;

	num_bytes = calcu_metadata_size(rc, node, 1) * 2;

	trans->block_rsv = rc->block_rsv;
	rc->reserved_bytes += num_bytes;
	ret = btrfs_block_rsv_refill(root, rc->block_rsv, num_bytes,
				     BTRFS_RESERVE_FLUSH_ALL);
	if (ret) {
		if (ret == -EAGAIN) {
			tmp = rc->extent_root->nodesize *
				RELOCATION_RESERVED_NODES;
			while (tmp <= rc->reserved_bytes)
				tmp <<= 1;
			/*
			 * only one thread can access block_rsv at this point,
			 * so we don't need hold lock to protect block_rsv.
			 * we expand more reservation size here to allow enough
			 * space for relocation and we will return earlier in
			 * enospc case.
			 */
			rc->block_rsv->size = tmp + rc->extent_root->nodesize *
					      RELOCATION_RESERVED_NODES;
		}
		return ret;
	}

	return 0;
}
/*
 * relocate a block tree, and then update pointers in upper level
 * blocks that reference the block to point to the new location.
 *
 * if called by link_to_upper, the block has already been relocated.
 * in that case this function just updates pointers.
 */
static int do_relocation(struct btrfs_trans_handle *trans,
			 struct reloc_control *rc,
			 struct backref_node *node,
			 struct btrfs_key *key,
			 struct btrfs_path *path, int lowest)
{
	struct backref_node *upper;
	struct backref_edge *edge;
	struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
	struct btrfs_root *root;
	struct extent_buffer *eb;
	u32 blocksize;
	u64 bytenr;
	u64 generation;
	int slot;
	int ret;
	int err = 0;

	BUG_ON(lowest && node->eb);

	path->lowest_level = node->level + 1;
	rc->backref_cache.path[node->level] = node;
	list_for_each_entry(edge, &node->upper, list[LOWER]) {
		cond_resched();

		upper = edge->node[UPPER];
		root = select_reloc_root(trans, rc, upper, edges);
		BUG_ON(!root);

		if (upper->eb && !upper->locked) {
			if (!lowest) {
				ret = btrfs_bin_search(upper->eb, key,
						       upper->level, &slot);
				BUG_ON(ret);
				bytenr = btrfs_node_blockptr(upper->eb, slot);
				if (node->eb->start == bytenr)
					goto next;
			}
			drop_node_buffer(upper);
		}

		if (!upper->eb) {
			ret = btrfs_search_slot(trans, root, key, path, 0, 1);
			if (ret < 0) {
				err = ret;
				break;
			}
			BUG_ON(ret > 0);

			if (!upper->eb) {
				upper->eb = path->nodes[upper->level];
				path->nodes[upper->level] = NULL;
			} else {
				BUG_ON(upper->eb != path->nodes[upper->level]);
			}

			upper->locked = 1;
			path->locks[upper->level] = 0;

			slot = path->slots[upper->level];
			btrfs_release_path(path);
		} else {
			ret = btrfs_bin_search(upper->eb, key, upper->level,
					       &slot);
			BUG_ON(ret);
		}

		bytenr = btrfs_node_blockptr(upper->eb, slot);
		if (lowest) {
			BUG_ON(bytenr != node->bytenr);
		} else {
			if (node->eb->start == bytenr)
				goto next;
		}

		blocksize = btrfs_level_size(root, node->level);
		generation = btrfs_node_ptr_generation(upper->eb, slot);
		eb = read_tree_block(root, bytenr, blocksize, generation);
		if (!eb || !extent_buffer_uptodate(eb)) {
			free_extent_buffer(eb);
			err = -EIO;
			goto next;
		}
		btrfs_tree_lock(eb);
		btrfs_set_lock_blocking(eb);

		if (!node->eb) {
			ret = btrfs_cow_block(trans, root, eb, upper->eb,
					      slot, &eb);
			btrfs_tree_unlock(eb);
			free_extent_buffer(eb);
			if (ret < 0) {
				err = ret;
				goto next;
			}
			BUG_ON(node->eb != eb);
		} else {
			btrfs_set_node_blockptr(upper->eb, slot,
						node->eb->start);
			btrfs_set_node_ptr_generation(upper->eb, slot,
						      trans->transid);
			btrfs_mark_buffer_dirty(upper->eb);

			ret = btrfs_inc_extent_ref(trans, root,
						node->eb->start, blocksize,
						upper->eb->start,
						btrfs_header_owner(upper->eb),
						node->level, 0, 1);
			BUG_ON(ret);

			ret = btrfs_drop_subtree(trans, root, eb, upper->eb);
			BUG_ON(ret);
		}
next:
		if (!upper->pending)
			drop_node_buffer(upper);
		else
			unlock_node_buffer(upper);
		if (err)
			break;
	}

	if (!err && node->pending) {
		drop_node_buffer(node);
		list_move_tail(&node->list, &rc->backref_cache.changed);
		node->pending = 0;
	}

	path->lowest_level = 0;
	BUG_ON(err == -ENOSPC);
	return err;
}
static int link_to_upper(struct btrfs_trans_handle *trans,
			 struct reloc_control *rc,
			 struct backref_node *node,
			 struct btrfs_path *path)
{
	struct btrfs_key key;

	btrfs_node_key_to_cpu(node->eb, &key, 0);
	return do_relocation(trans, rc, node, &key, path, 0);
}
static int finish_pending_nodes(struct btrfs_trans_handle *trans,
				struct reloc_control *rc,
				struct btrfs_path *path, int err)
{
	LIST_HEAD(list);
	struct backref_cache *cache = &rc->backref_cache;
	struct backref_node *node;
	int level;
	int ret;

	for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
		while (!list_empty(&cache->pending[level])) {
			node = list_entry(cache->pending[level].next,
					  struct backref_node, list);
			list_move_tail(&node->list, &list);
			BUG_ON(!node->pending);

			if (!err) {
				ret = link_to_upper(trans, rc, node, path);
				if (ret < 0)
					err = ret;
			}
		}
		list_splice_init(&list, &cache->pending[level]);
	}
	return err;
}
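
/*
 * a block counts as processed once its byte range is marked
 * EXTENT_DIRTY in rc->processed_blocks.
 */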
static void mark_block_processed(struct reloc_control *rc,
				 u64 bytenr, u32 blocksize)
{
	set_extent_bits(&rc->processed_blocks, bytenr, bytenr + blocksize - 1,
			EXTENT_DIRTY, GFP_NOFS);
}
static void __mark_block_processed(struct reloc_control *rc,
				   struct backref_node *node)
{
	u32 blocksize;

	if (node->level == 0 ||
	    in_block_group(node->bytenr, rc->block_group)) {
		blocksize = btrfs_level_size(rc->extent_root, node->level);
		mark_block_processed(rc, node->bytenr, blocksize);
	}
	node->processed = 1;
}
/*
 * mark a block and all blocks directly/indirectly reference the block
 * as processed.
 */
static void update_processed_blocks(struct reloc_control *rc,
				    struct backref_node *node)
{
	struct backref_node *next = node;
	struct backref_edge *edge;
	struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
	int index = 0;

	while (next) {
		cond_resched();
		while (1) {
			if (next->processed)
				break;

			__mark_block_processed(rc, next);

			if (list_empty(&next->upper))
				break;

			edge = list_entry(next->upper.next,
					  struct backref_edge, list[LOWER]);
			edges[index++] = edge;
			next = edge->node[UPPER];
		}
		next = walk_down_backref(edges, &index);
	}
}
static int tree_block_processed(u64 bytenr, u32 blocksize,
				struct reloc_control *rc)
{
	if (test_range_bit(&rc->processed_blocks, bytenr,
			   bytenr + blocksize - 1, EXTENT_DIRTY, 1, NULL))
		return 1;
	return 0;
}
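
/*
 * read a tree block to get its real first key. until now block->key
 * holds (blocksize, generation) used as read parameters; afterwards it
 * holds the first key of the block and key_ready is set.
 */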
static int get_tree_block_key(struct reloc_control *rc,
			      struct tree_block *block)
{
	struct extent_buffer *eb;

	BUG_ON(block->key_ready);
	eb = read_tree_block(rc->extent_root, block->bytenr,
			     block->key.objectid, block->key.offset);
	if (!eb || !extent_buffer_uptodate(eb)) {
		free_extent_buffer(eb);
		return -EIO;
	}
	WARN_ON(btrfs_header_level(eb) != block->level);
	if (block->level == 0)
		btrfs_item_key_to_cpu(eb, &block->key, 0);
	else
		btrfs_node_key_to_cpu(eb, &block->key, 0);
	free_extent_buffer(eb);
	block->key_ready = 1;
	return 0;
}
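
/*
 * start readahead on a tree block so the subsequent read in
 * get_tree_block_key() is likely to hit the cache.
 */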
static int reada_tree_block(struct reloc_control *rc,
			    struct tree_block *block)
{
	BUG_ON(block->key_ready);
	if (block->key.type == BTRFS_METADATA_ITEM_KEY)
		readahead_tree_block(rc->extent_root, block->bytenr,
				     block->key.objectid,
				     rc->extent_root->leafsize);
	else
		readahead_tree_block(rc->extent_root, block->bytenr,
				     block->key.objectid, block->key.offset);
	return 0;
}
/*
 * helper function to relocate a tree block
 */
static int relocate_tree_block(struct btrfs_trans_handle *trans,
				struct reloc_control *rc,
				struct backref_node *node,
				struct btrfs_key *key,
				struct btrfs_path *path)
{
	struct btrfs_root *root;
	int ret = 0;

	if (!node)
		return 0;

	BUG_ON(node->processed);
	root = select_one_root(trans, node);
	if (root == ERR_PTR(-ENOENT)) {
		update_processed_blocks(rc, node);
		goto out;
	}

	if (!root || root->ref_cows) {
		ret = reserve_metadata_space(trans, rc, node);
		if (ret)
			goto out;
	}

	if (root) {
		if (root->ref_cows) {
			BUG_ON(node->new_bytenr);
			BUG_ON(!list_empty(&node->list));
			btrfs_record_root_in_trans(trans, root);
			root = root->reloc_root;
			node->new_bytenr = root->node->start;
			node->root = root;
			list_add_tail(&node->list, &rc->backref_cache.changed);
		} else {
			path->lowest_level = node->level;
			ret = btrfs_search_slot(trans, root, key, path, 0, 1);
			btrfs_release_path(path);
			if (ret > 0)
				ret = 0;
		}
		if (!ret)
			update_processed_blocks(rc, node);
	} else {
		ret = do_relocation(trans, rc, node, key, path, 1);
	}
out:
	if (ret || node->level == 0 || node->cowonly)
		remove_backref_node(&rc->backref_cache, node);
	return ret;
}
/*
 * relocate a list of blocks
 */
static noinline_for_stack
int relocate_tree_blocks(struct btrfs_trans_handle *trans,
			 struct reloc_control *rc, struct rb_root *blocks)
{
	struct backref_node *node;
	struct btrfs_path *path;
	struct tree_block *block;
	struct rb_node *rb_node;
	int ret;
	int err = 0;

	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out_free_blocks;
	}

	rb_node = rb_first(blocks);
	while (rb_node) {
		block = rb_entry(rb_node, struct tree_block, rb_node);
		if (!block->key_ready)
			reada_tree_block(rc, block);
		rb_node = rb_next(rb_node);
	}

	rb_node = rb_first(blocks);
	while (rb_node) {
		block = rb_entry(rb_node, struct tree_block, rb_node);
		if (!block->key_ready) {
			err = get_tree_block_key(rc, block);
			if (err)
				goto out_free_path;
		}
		rb_node = rb_next(rb_node);
	}

	rb_node = rb_first(blocks);
	while (rb_node) {
		block = rb_entry(rb_node, struct tree_block, rb_node);

		node = build_backref_tree(rc, &block->key,
					  block->level, block->bytenr);
		if (IS_ERR(node)) {
			err = PTR_ERR(node);
			goto out;
		}

		ret = relocate_tree_block(trans, rc, node, &block->key,
					  path);
		if (ret < 0) {
			if (ret != -EAGAIN || rb_node == rb_first(blocks))
				err = ret;
			goto out;
		}
		rb_node = rb_next(rb_node);
	}
out:
	err = finish_pending_nodes(trans, rc, path, err);

out_free_path:
	btrfs_free_path(path);
out_free_blocks:
	free_block_list(blocks);
	return err;
}
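
/*
 * preallocate file ranges in the data reloc inode covering the extent
 * cluster. each cluster boundary gets its own preallocated range so
 * the extents stay separate after relocation.
 */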
static noinline_for_stack
int prealloc_file_extent_cluster(struct inode *inode,
				 struct file_extent_cluster *cluster)
{
	u64 alloc_hint = 0;
	u64 start;
	u64 end;
	u64 offset = BTRFS_I(inode)->index_cnt;
	u64 num_bytes;
	int nr = 0;
	int ret = 0;

	BUG_ON(cluster->start != cluster->boundary[0]);
	mutex_lock(&inode->i_mutex);

	ret = btrfs_check_data_free_space(inode, cluster->end +
					  1 - cluster->start);
	if (ret)
		goto out;

	while (nr < cluster->nr) {
		start = cluster->boundary[nr] - offset;
		if (nr + 1 < cluster->nr)
			end = cluster->boundary[nr + 1] - 1 - offset;
		else
			end = cluster->end - offset;

		lock_extent(&BTRFS_I(inode)->io_tree, start, end);
		num_bytes = end + 1 - start;
		ret = btrfs_prealloc_file_range(inode, 0, start,
						num_bytes, num_bytes,
						end + 1, &alloc_hint);
		unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
		if (ret)
			break;
		nr++;
	}
	btrfs_free_reserved_data_space(inode, cluster->end +
				       1 - cluster->start);
out:
	mutex_unlock(&inode->i_mutex);
	return ret;
}
static noinline_for_stack
int setup_extent_mapping(struct inode *inode, u64 start, u64 end,
			 u64 block_start)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_map *em;
	int ret = 0;

	em = alloc_extent_map();
	if (!em)
		return -ENOMEM;

	em->start = start;
	em->len = end + 1 - start;
	em->block_len = em->len;
	em->block_start = block_start;
	em->bdev = root->fs_info->fs_devices->latest_bdev;
	set_bit(EXTENT_FLAG_PINNED, &em->flags);

	lock_extent(&BTRFS_I(inode)->io_tree, start, end);
	while (1) {
		write_lock(&em_tree->lock);
		ret = add_extent_mapping(em_tree, em, 0);
		write_unlock(&em_tree->lock);
		if (ret != -EEXIST) {
			free_extent_map(em);
			break;
		}
		btrfs_drop_extent_cache(inode, start, end, 0);
	}
	unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
	return ret;
}
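
/*
 * copy a cluster of data extents into the data reloc inode. pages are
 * read in (with readahead), marked delalloc and dirtied; the first
 * page of each extent is flagged EXTENT_BOUNDARY so writeback does not
 * merge neighbouring extents.
 */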
static int relocate_file_extent_cluster(struct inode *inode,
					struct file_extent_cluster *cluster)
{
	u64 page_start;
	u64 page_end;
	u64 offset = BTRFS_I(inode)->index_cnt;
	unsigned long index;
	unsigned long last_index;
	struct page *page;
	struct file_ra_state *ra;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
	int nr = 0;
	int ret = 0;

	if (!cluster->nr)
		return 0;

	ra = kzalloc(sizeof(*ra), GFP_NOFS);
	if (!ra)
		return -ENOMEM;

	ret = prealloc_file_extent_cluster(inode, cluster);
	if (ret)
		goto out;

	file_ra_state_init(ra, inode->i_mapping);

	ret = setup_extent_mapping(inode, cluster->start - offset,
				   cluster->end - offset, cluster->start);
	if (ret)
		goto out;

	index = (cluster->start - offset) >> PAGE_CACHE_SHIFT;
	last_index = (cluster->end - offset) >> PAGE_CACHE_SHIFT;
	while (index <= last_index) {
		ret = btrfs_delalloc_reserve_metadata(inode, PAGE_CACHE_SIZE);
		if (ret)
			goto out;

		page = find_lock_page(inode->i_mapping, index);
		if (!page) {
			page_cache_sync_readahead(inode->i_mapping,
						  ra, NULL, index,
						  last_index + 1 - index);
			page = find_or_create_page(inode->i_mapping, index,
						   mask);
			if (!page) {
				btrfs_delalloc_release_metadata(inode,
							PAGE_CACHE_SIZE);
				ret = -ENOMEM;
				goto out;
			}
		}

		if (PageReadahead(page)) {
			page_cache_async_readahead(inode->i_mapping,
						   ra, NULL, page, index,
						   last_index + 1 - index);
		}

		if (!PageUptodate(page)) {
			btrfs_readpage(NULL, page);
			lock_page(page);
			if (!PageUptodate(page)) {
				unlock_page(page);
				page_cache_release(page);
				btrfs_delalloc_release_metadata(inode,
							PAGE_CACHE_SIZE);
				ret = -EIO;
				goto out;
			}
		}

		page_start = page_offset(page);
		page_end = page_start + PAGE_CACHE_SIZE - 1;

		lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end);

		set_page_extent_mapped(page);

		if (nr < cluster->nr &&
		    page_start + offset == cluster->boundary[nr]) {
			set_extent_bits(&BTRFS_I(inode)->io_tree,
					page_start, page_end,
					EXTENT_BOUNDARY, GFP_NOFS);
			nr++;
		}

		btrfs_set_extent_delalloc(inode, page_start, page_end, NULL);
		set_page_dirty(page);

		unlock_extent(&BTRFS_I(inode)->io_tree,
			      page_start, page_end);
		unlock_page(page);
		page_cache_release(page);

		index++;
		balance_dirty_pages_ratelimited(inode->i_mapping);
		btrfs_throttle(BTRFS_I(inode)->root);
	}
	WARN_ON(nr != cluster->nr);
out:
	kfree(ra);
	return ret;
}
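
/*
 * accumulate data extents into the current cluster, flushing the
 * cluster through relocate_file_extent_cluster() whenever a
 * discontiguous extent arrives or MAX_EXTENTS is reached.
 */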
static noinline_for_stack
int relocate_data_extent(struct inode *inode, struct btrfs_key *extent_key,
			 struct file_extent_cluster *cluster)
{
	int ret;

	if (cluster->nr > 0 && extent_key->objectid != cluster->end + 1) {
		ret = relocate_file_extent_cluster(inode, cluster);
		if (ret)
			return ret;
		cluster->nr = 0;
	}

	if (!cluster->nr)
		cluster->start = extent_key->objectid;
	else
		BUG_ON(cluster->nr >= MAX_EXTENTS);
	cluster->end = extent_key->objectid + extent_key->offset - 1;
	cluster->boundary[cluster->nr] = extent_key->objectid;
	cluster->nr++;

	if (cluster->nr >= MAX_EXTENTS) {
		ret = relocate_file_extent_cluster(inode, cluster);
		if (ret)
			return ret;
		cluster->nr = 0;
	}
	return 0;
}
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int get_ref_objectid_v0(struct reloc_control *rc,
			       struct btrfs_path *path,
			       struct btrfs_key *extent_key,
			       u64 *ref_objectid, int *path_change)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_ref_v0 *ref0;
	int ret;
	int slot;

	leaf = path->nodes[0];
	slot = path->slots[0];
	while (1) {
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(rc->extent_root, path);
			if (ret < 0)
				return ret;
			BUG_ON(ret > 0);
			leaf = path->nodes[0];
			slot = path->slots[0];
			if (path_change)
				*path_change = 1;
		}
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid != extent_key->objectid)
			return -ENOENT;

		if (key.type != BTRFS_EXTENT_REF_V0_KEY) {
			slot++;
			continue;
		}
		ref0 = btrfs_item_ptr(leaf, slot,
				struct btrfs_extent_ref_v0);
		*ref_objectid = btrfs_ref_objectid_v0(leaf, ref0);
		break;
	}
	return 0;
}
#endif
/*
 * helper to add a tree block to the list.
 * the major work is getting the generation and level of the block
 */
static int add_tree_block(struct reloc_control *rc,
			  struct btrfs_key *extent_key,
			  struct btrfs_path *path,
			  struct rb_root *blocks)
{
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct btrfs_tree_block_info *bi;
	struct tree_block *block;
	struct rb_node *rb_node;
	u32 item_size;
	int level = -1;
	u64 generation;

	eb = path->nodes[0];
	item_size = btrfs_item_size_nr(eb, path->slots[0]);

	if (extent_key->type == BTRFS_METADATA_ITEM_KEY ||
	    item_size >= sizeof(*ei) + sizeof(*bi)) {
		ei = btrfs_item_ptr(eb, path->slots[0],
				struct btrfs_extent_item);
		if (extent_key->type == BTRFS_EXTENT_ITEM_KEY) {
			bi = (struct btrfs_tree_block_info *)(ei + 1);
			level = btrfs_tree_block_level(eb, bi);
		} else {
			level = (int)extent_key->offset;
		}
		generation = btrfs_extent_generation(eb, ei);
	} else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		u64 ref_owner;
		int ret;

		BUG_ON(item_size != sizeof(struct btrfs_extent_item_v0));
		ret = get_ref_objectid_v0(rc, path, extent_key,
					  &ref_owner, NULL);
		if (ret < 0)
			return ret;
		BUG_ON(ref_owner >= BTRFS_MAX_LEVEL);
		level = (int)ref_owner;
		/* FIXME: get real generation */
		generation = 0;
#else
		BUG();
#endif
	}

	btrfs_release_path(path);

	BUG_ON(level == -1);

	block = kmalloc(sizeof(*block), GFP_NOFS);
	if (!block)
		return -ENOMEM;

	block->bytenr = extent_key->objectid;
	block->key.objectid = rc->extent_root->leafsize;
	block->key.offset = generation;
	block->level = level;
	block->key_ready = 0;

	rb_node = tree_insert(blocks, block->bytenr, &block->rb_node);
	if (rb_node)
		backref_tree_panic(rb_node, -EEXIST, block->bytenr);

	return 0;
}
/*
 * helper to add tree blocks for backref of type BTRFS_SHARED_DATA_REF_KEY
 */
static int __add_tree_block(struct reloc_control *rc,
			    u64 bytenr, u32 blocksize,
			    struct rb_root *blocks)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;
	bool skinny = btrfs_fs_incompat(rc->extent_root->fs_info,
					SKINNY_METADATA);

	if (tree_block_processed(bytenr, blocksize, rc))
		return 0;

	if (tree_search(blocks, bytenr))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	key.objectid = bytenr;
	if (skinny) {
		key.type = BTRFS_METADATA_ITEM_KEY;
		key.offset = (u64)-1;
	} else {
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = blocksize;
	}

	path->search_commit_root = 1;
	path->skip_locking = 1;
	ret = btrfs_search_slot(NULL, rc->extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	if (ret > 0 && skinny) {
		if (path->slots[0]) {
			path->slots[0]--;
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
			if (key.objectid == bytenr &&
			    (key.type == BTRFS_METADATA_ITEM_KEY ||
			     (key.type == BTRFS_EXTENT_ITEM_KEY &&
			      key.offset == blocksize)))
				ret = 0;
		}

		if (ret) {
			skinny = false;
			btrfs_release_path(path);
			goto again;
		}
	}
	BUG_ON(ret);

	ret = add_tree_block(rc, &key, path, blocks);
out:
	btrfs_free_path(path);
	return ret;
}
/*
 * helper to check if the block uses full backrefs for pointers in it
 */
static int block_use_full_backref(struct reloc_control *rc,
				  struct extent_buffer *eb)
{
	u64 flags;
	int ret;

	if (btrfs_header_flag(eb, BTRFS_HEADER_FLAG_RELOC) ||
	    btrfs_header_backref_rev(eb) < BTRFS_MIXED_BACKREF_REV)
		return 1;

	ret = btrfs_lookup_extent_info(NULL, rc->extent_root,
				       eb->start, btrfs_header_level(eb), 1,
				       NULL, &flags);
	BUG_ON(ret);

	if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)
		ret = 1;
	else
		ret = 0;
	return ret;
}
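
/*
 * truncate the free space cache inode of a block group. @inode may be
 * passed in directly, otherwise it is looked up by @ino in the tree root.
 */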
static int delete_block_group_cache(struct btrfs_fs_info *fs_info,
				    struct inode *inode, u64 ino)
{
	struct btrfs_key key;
	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_trans_handle *trans;
	int ret = 0;

	if (inode)
		goto truncate;

	key.objectid = ino;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	inode = btrfs_iget(fs_info->sb, &key, root, NULL);
	if (IS_ERR(inode) || is_bad_inode(inode)) {
		if (!IS_ERR(inode))
			iput(inode);
		return -ENOENT;
	}

truncate:
	ret = btrfs_check_trunc_cache_free_space(root,
						 &fs_info->global_block_rsv);
	if (ret)
		goto out;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	ret = btrfs_truncate_free_space_cache(root, trans, inode);

	btrfs_end_transaction(trans, root);
	btrfs_btree_balance_dirty(root);
out:
	iput(inode);
	return ret;
}
/*
 * helper to add tree blocks for backref of type BTRFS_EXTENT_DATA_REF_KEY
 * this function scans fs tree to find blocks reference the data extent
 */
static int find_data_references(struct reloc_control *rc,
				struct btrfs_key *extent_key,
				struct extent_buffer *leaf,
				struct btrfs_extent_data_ref *ref,
				struct rb_root *blocks)
{
	struct btrfs_path *path;
	struct tree_block *block;
	struct btrfs_root *root;
	struct btrfs_file_extent_item *fi;
	struct rb_node *rb_node;
	struct btrfs_key key;
	u64 ref_root;
	u64 ref_objectid;
	u64 ref_offset;
	u32 ref_count;
	u32 nritems;
	int err = 0;
	int added = 0;
	int counted;
	int ret;

	ref_root = btrfs_extent_data_ref_root(leaf, ref);
	ref_objectid = btrfs_extent_data_ref_objectid(leaf, ref);
	ref_offset = btrfs_extent_data_ref_offset(leaf, ref);
	ref_count = btrfs_extent_data_ref_count(leaf, ref);

	/*
	 * This is an extent belonging to the free space cache, let's just
	 * delete it and redo the search.
	 */
	if (ref_root == BTRFS_ROOT_TREE_OBJECTID) {
		ret = delete_block_group_cache(rc->extent_root->fs_info,
					       NULL, ref_objectid);
		if (ret != -ENOENT)
			return ret;
		ret = 0;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = 1;

	root = read_fs_root(rc->extent_root->fs_info, ref_root);
	if (IS_ERR(root)) {
		err = PTR_ERR(root);
		goto out;
	}

	key.objectid = ref_objectid;
	key.type = BTRFS_EXTENT_DATA_KEY;
	if (ref_offset > ((u64)-1 << 32))
		key.offset = 0;
	else
		key.offset = ref_offset;

	path->search_commit_root = 1;
	path->skip_locking = 1;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0) {
		err = ret;
		goto out;
	}

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);
	/*
	 * the references in tree blocks that use full backrefs
	 * are not counted in
	 */
	if (block_use_full_backref(rc, leaf))
		counted = 0;
	else
		counted = 1;
	rb_node = tree_search(blocks, leaf->start);
	if (rb_node) {
		if (counted)
			added = 1;
		else
			path->slots[0] = nritems;
	}

	while (ref_count > 0) {
		while (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0) {
				err = ret;
				goto out;
			}
			if (WARN_ON(ret > 0))
				goto out;

			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			added = 0;

			if (block_use_full_backref(rc, leaf))
				counted = 0;
			else
				counted = 1;
			rb_node = tree_search(blocks, leaf->start);
			if (rb_node) {
				if (counted)
					added = 1;
				else
					path->slots[0] = nritems;
			}
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (WARN_ON(key.objectid != ref_objectid ||
			    key.type != BTRFS_EXTENT_DATA_KEY))
			break;

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);

		if (btrfs_file_extent_type(leaf, fi) ==
		    BTRFS_FILE_EXTENT_INLINE)
			goto next;

		if (btrfs_file_extent_disk_bytenr(leaf, fi) !=
		    extent_key->objectid)
			goto next;

		key.offset -= btrfs_file_extent_offset(leaf, fi);
		if (key.offset != ref_offset)
			goto next;

		if (counted)
			ref_count--;
		if (added)
			goto next;

		if (!tree_block_processed(leaf->start, leaf->len, rc)) {
			block = kmalloc(sizeof(*block), GFP_NOFS);
			if (!block) {
				err = -ENOMEM;
				break;
			}
			block->bytenr = leaf->start;
			btrfs_item_key_to_cpu(leaf, &block->key, 0);
			block->level = 0;
			block->key_ready = 1;
			rb_node = tree_insert(blocks, block->bytenr,
					      &block->rb_node);
			if (rb_node)
				backref_tree_panic(rb_node, -EEXIST,
						   block->bytenr);
		}
		if (counted)
			added = 1;
		else
			path->slots[0] = nritems;
next:
		path->slots[0]++;
	}
out:
	btrfs_free_path(path);
	return err;
}
/*
 * helper to find all tree blocks that reference a given data extent
 */
static noinline_for_stack
int add_data_references(struct reloc_control *rc,
			struct btrfs_key *extent_key,
			struct btrfs_path *path,
			struct rb_root *blocks)
{
	struct btrfs_key key;
	struct extent_buffer *eb;
	struct btrfs_extent_data_ref *dref;
	struct btrfs_extent_inline_ref *iref;
	unsigned long ptr;
	unsigned long end;
	u32 blocksize = btrfs_level_size(rc->extent_root, 0);
	int ret = 0;
	int err = 0;

	eb = path->nodes[0];
	ptr = btrfs_item_ptr_offset(eb, path->slots[0]);
	end = ptr + btrfs_item_size_nr(eb, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (ptr + sizeof(struct btrfs_extent_item_v0) == end)
		ptr = end;
	else
#endif
		ptr += sizeof(struct btrfs_extent_item);

	while (ptr < end) {
		iref = (struct btrfs_extent_inline_ref *)ptr;
		key.type = btrfs_extent_inline_ref_type(eb, iref);
		if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
			key.offset = btrfs_extent_inline_ref_offset(eb, iref);
			ret = __add_tree_block(rc, key.offset, blocksize,
					       blocks);
		} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			ret = find_data_references(rc, extent_key,
						   eb, dref, blocks);
		} else {
			BUG();
		}
		if (ret) {
			err = ret;
			goto out;
		}
		ptr += btrfs_extent_inline_ref_size(key.type);
	}
	WARN_ON(ptr > end);

	while (1) {
		cond_resched();
		eb = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(eb)) {
			ret = btrfs_next_leaf(rc->extent_root, path);
			if (ret < 0) {
				err = ret;
				break;
			}
			if (ret > 0)
				break;
			eb = path->nodes[0];
		}

		btrfs_item_key_to_cpu(eb, &key, path->slots[0]);
		if (key.objectid != extent_key->objectid)
			break;

#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		if (key.type == BTRFS_SHARED_DATA_REF_KEY ||
		    key.type == BTRFS_EXTENT_REF_V0_KEY) {
#else
		BUG_ON(key.type == BTRFS_EXTENT_REF_V0_KEY);
		if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
#endif
			ret = __add_tree_block(rc, key.offset, blocksize,
					       blocks);
		} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
			dref = btrfs_item_ptr(eb, path->slots[0],
					      struct btrfs_extent_data_ref);
			ret = find_data_references(rc, extent_key,
						   eb, dref, blocks);
		} else {
			ret = 0;
		}
		if (ret) {
			err = ret;
			break;
		}
		path->slots[0]++;
	}
out:
	btrfs_release_path(path);
	if (err)
		free_block_list(blocks);
	return err;
}
/*
 * helper to find next unprocessed extent
 */
static noinline_for_stack
int find_next_extent(struct btrfs_trans_handle *trans,
		     struct reloc_control *rc, struct btrfs_path *path,
		     struct btrfs_key *extent_key)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	u64 start, end, last;
	int ret;

	last = rc->block_group->key.objectid + rc->block_group->key.offset;
	while (1) {
		cond_resched();
		if (rc->search_start >= last) {
			ret = 1;
			break;
		}

		key.objectid = rc->search_start;
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = 0;

		path->search_commit_root = 1;
		path->skip_locking = 1;
		ret = btrfs_search_slot(NULL, rc->extent_root, &key, path,
					0, 0);
		if (ret < 0)
			break;
next:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(rc->extent_root, path);
			if (ret != 0)
				break;
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid >= last) {
			ret = 1;
			break;
		}

		if (key.type != BTRFS_EXTENT_ITEM_KEY &&
		    key.type != BTRFS_METADATA_ITEM_KEY) {
			path->slots[0]++;
			goto next;
		}

		if (key.type == BTRFS_EXTENT_ITEM_KEY &&
		    key.objectid + key.offset <= rc->search_start) {
			path->slots[0]++;
			goto next;
		}

		if (key.type == BTRFS_METADATA_ITEM_KEY &&
		    key.objectid + rc->extent_root->leafsize <=
		    rc->search_start) {
			path->slots[0]++;
			goto next;
		}

		ret = find_first_extent_bit(&rc->processed_blocks,
					    key.objectid, &start, &end,
					    EXTENT_DIRTY, NULL);

		if (ret == 0 && start <= key.objectid) {
			btrfs_release_path(path);
			rc->search_start = end + 1;
		} else {
			if (key.type == BTRFS_EXTENT_ITEM_KEY)
				rc->search_start = key.objectid + key.offset;
			else
				rc->search_start = key.objectid +
					rc->extent_root->leafsize;
			memcpy(extent_key, &key, sizeof(key));
			return 0;
		}
	}
	btrfs_release_path(path);
	return ret;
}
static void set_reloc_control(struct reloc_control *rc)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;

	mutex_lock(&fs_info->reloc_mutex);
	fs_info->reloc_ctl = rc;
	mutex_unlock(&fs_info->reloc_mutex);
}
static void unset_reloc_control(struct reloc_control *rc)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;

	mutex_lock(&fs_info->reloc_mutex);
	fs_info->reloc_ctl = NULL;
	mutex_unlock(&fs_info->reloc_mutex);
}
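
/*
 * sanity check the extent item flags: exactly one of DATA and
 * TREE_BLOCK must be set, and FULL_BACKREF is only valid for tree
 * blocks.
 */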
static int check_extent_flags(u64 flags)
{
	if ((flags & BTRFS_EXTENT_FLAG_DATA) &&
	    (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK))
		return 1;
	if (!(flags & BTRFS_EXTENT_FLAG_DATA) &&
	    !(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK))
		return 1;
	if ((flags & BTRFS_EXTENT_FLAG_DATA) &&
	    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
		return 1;
	return 0;
}
static noinline_for_stack
int prepare_to_relocate(struct reloc_control *rc)
{
	struct btrfs_trans_handle *trans;

	rc->block_rsv = btrfs_alloc_block_rsv(rc->extent_root,
					      BTRFS_BLOCK_RSV_TEMP);
	if (!rc->block_rsv)
		return -ENOMEM;

	memset(&rc->cluster, 0, sizeof(rc->cluster));
	rc->search_start = rc->block_group->key.objectid;
	rc->extents_found = 0;
	rc->nodes_relocated = 0;
	rc->merging_rsv_size = 0;
	rc->reserved_bytes = 0;
	rc->block_rsv->size = rc->extent_root->nodesize *
			      RELOCATION_RESERVED_NODES;

	rc->create_reloc_tree = 1;
	set_reloc_control(rc);

	trans = btrfs_join_transaction(rc->extent_root);
	if (IS_ERR(trans)) {
		unset_reloc_control(rc);
		/*
		 * extent tree is not a ref_cow tree and has no reloc_root to
		 * cleanup. And callers are responsible to free the above
		 * block rsv.
		 */
		return PTR_ERR(trans);
	}
	btrfs_commit_transaction(trans, rc->extent_root);
	return 0;
}
static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
{
	struct rb_root blocks = RB_ROOT;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	u64 flags;
	u32 item_size;
	int ret;
	int err = 0;
	int progress = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = 1;

	ret = prepare_to_relocate(rc);
	if (ret) {
		err = ret;
		goto out_free;
	}

	while (1) {
		rc->reserved_bytes = 0;
		ret = btrfs_block_rsv_refill(rc->extent_root,
					rc->block_rsv, rc->block_rsv->size,
					BTRFS_RESERVE_FLUSH_ALL);
		if (ret) {
			err = ret;
			break;
		}
		progress++;
		trans = btrfs_start_transaction(rc->extent_root, 0);
		if (IS_ERR(trans)) {
			err = PTR_ERR(trans);
			trans = NULL;
			break;
		}
restart:
		if (update_backref_cache(trans, &rc->backref_cache)) {
			btrfs_end_transaction(trans, rc->extent_root);
			continue;
		}

		ret = find_next_extent(trans, rc, path, &key);
		if (ret < 0)
			err = ret;
		if (ret != 0)
			break;

		rc->extents_found++;

		ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_extent_item);
		item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
		if (item_size >= sizeof(*ei)) {
			flags = btrfs_extent_flags(path->nodes[0], ei);
			ret = check_extent_flags(flags);
			BUG_ON(ret);
		} else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
			u64 ref_owner;
			int path_change = 0;

			BUG_ON(item_size !=
			       sizeof(struct btrfs_extent_item_v0));
			ret = get_ref_objectid_v0(rc, path, &key, &ref_owner,
						  &path_change);
			if (ref_owner < BTRFS_FIRST_FREE_OBJECTID)
				flags = BTRFS_EXTENT_FLAG_TREE_BLOCK;
			else
				flags = BTRFS_EXTENT_FLAG_DATA;

			if (path_change) {
				btrfs_release_path(path);

				path->search_commit_root = 1;
				path->skip_locking = 1;
				ret = btrfs_search_slot(NULL, rc->extent_root,
							&key, path, 0, 0);
				if (ret < 0) {
					err = ret;
					break;
				}
				BUG_ON(ret > 0);
			}
#else
			BUG();
#endif
		}

		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
			ret = add_tree_block(rc, &key, path, &blocks);
		} else if (rc->stage == UPDATE_DATA_PTRS &&
			   (flags & BTRFS_EXTENT_FLAG_DATA)) {
			ret = add_data_references(rc, &key, path, &blocks);
		} else {
			btrfs_release_path(path);
			ret = 0;
		}
		if (ret < 0) {
			err = ret;
			break;
		}

		if (!RB_EMPTY_ROOT(&blocks)) {
			ret = relocate_tree_blocks(trans, rc, &blocks);
			if (ret < 0) {
				/*
				 * if we fail to relocate tree blocks, force to update
				 * backref cache when committing transaction.
				 */
				rc->backref_cache.last_trans = trans->transid - 1;

				if (ret != -EAGAIN) {
					err = ret;
					break;
				}
				rc->extents_found--;
				rc->search_start = key.objectid;
			}
		}

		btrfs_end_transaction_throttle(trans, rc->extent_root);
		btrfs_btree_balance_dirty(rc->extent_root);
		trans = NULL;

		if (rc->stage == MOVE_DATA_EXTENTS &&
		    (flags & BTRFS_EXTENT_FLAG_DATA)) {
			rc->found_file_extent = 1;
			ret = relocate_data_extent(rc->data_inode,
						   &key, &rc->cluster);
			if (ret < 0) {
				err = ret;
				break;
			}
		}
	}
	if (trans && progress && err == -ENOSPC) {
		ret = btrfs_force_chunk_alloc(trans, rc->extent_root,
					      rc->block_group->flags);
		if (ret == 0) {
			err = 0;
			progress = 0;
			goto restart;
		}
	}

	btrfs_release_path(path);
	clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY,
			  GFP_NOFS);

	if (trans) {
		btrfs_end_transaction_throttle(trans, rc->extent_root);
		btrfs_btree_balance_dirty(rc->extent_root);
	}

	if (!err) {
		ret = relocate_file_extent_cluster(rc->data_inode,
						   &rc->cluster);
		if (ret < 0)
			err = ret;
	}

	rc->create_reloc_tree = 0;
	set_reloc_control(rc);

	backref_cache_cleanup(&rc->backref_cache);
	btrfs_block_rsv_release(rc->extent_root, rc->block_rsv, (u64)-1);

	err = prepare_to_merge(rc, err);

	merge_reloc_roots(rc);

	rc->merge_reloc_tree = 0;
	unset_reloc_control(rc);
	btrfs_block_rsv_release(rc->extent_root, rc->block_rsv, (u64)-1);

	/* get rid of pinned extents */
	trans = btrfs_join_transaction(rc->extent_root);
	if (IS_ERR(trans))
		err = PTR_ERR(trans);
	else
		btrfs_commit_transaction(trans, rc->extent_root);
out_free:
	btrfs_free_block_rsv(rc->extent_root, rc->block_rsv);
	btrfs_free_path(path);
	return err;
}
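
/*
 * insert a bare inode item with zero link count; used to build the
 * orphan inode in the data relocation tree.
 */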
static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root, u64 objectid)
{
	struct btrfs_path *path;
	struct btrfs_inode_item *item;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_insert_empty_inode(trans, root, path, objectid);
	if (ret)
		goto out;

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item);
	memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
	btrfs_set_inode_generation(leaf, item, 1);
	btrfs_set_inode_size(leaf, item, 0);
	btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
	btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS |
					  BTRFS_INODE_PREALLOC);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);
out:
	btrfs_free_path(path);
	return ret;
}
/*
 * helper to create inode for data relocation.
 * the inode is in data relocation tree and its link count is 0
 */
static noinline_for_stack
struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
				 struct btrfs_block_group_cache *group)
{
	struct inode *inode = NULL;
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root;
	struct btrfs_key key;
	u64 objectid = BTRFS_FIRST_FREE_OBJECTID;
	int err = 0;

	root = read_fs_root(fs_info, BTRFS_DATA_RELOC_TREE_OBJECTID);
	if (IS_ERR(root))
		return ERR_CAST(root);

	trans = btrfs_start_transaction(root, 6);
	if (IS_ERR(trans))
		return ERR_CAST(trans);

	err = btrfs_find_free_objectid(root, &objectid);
	if (err)
		goto out;

	err = __insert_orphan_inode(trans, root, objectid);
	BUG_ON(err);

	key.objectid = objectid;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;
	inode = btrfs_iget(root->fs_info->sb, &key, root, NULL);
	BUG_ON(IS_ERR(inode) || is_bad_inode(inode));
	BTRFS_I(inode)->index_cnt = group->key.objectid;

	err = btrfs_orphan_add(trans, inode);
out:
	btrfs_end_transaction(trans, root);
	btrfs_btree_balance_dirty(root);
	if (err) {
		if (inode)
			iput(inode);
		inode = ERR_PTR(err);
	}
	return inode;
}
static struct reloc_control *alloc_reloc_control(struct btrfs_fs_info *fs_info)
{
	struct reloc_control *rc;

	rc = kzalloc(sizeof(*rc), GFP_NOFS);
	if (!rc)
		return NULL;

	INIT_LIST_HEAD(&rc->reloc_roots);
	backref_cache_init(&rc->backref_cache);
	mapping_tree_init(&rc->reloc_root_tree);
	extent_io_tree_init(&rc->processed_blocks,
			    fs_info->btree_inode->i_mapping);
	return rc;
}
/*
 * function to relocate all extents in a block group.
 */
int btrfs_relocate_block_group(struct btrfs_root *extent_root, u64 group_start)
{
	struct btrfs_fs_info *fs_info = extent_root->fs_info;
	struct reloc_control *rc;
	struct inode *inode;
	struct btrfs_path *path;
	int ret;
	int rw = 0;
	int err = 0;

	rc = alloc_reloc_control(fs_info);
	if (!rc)
		return -ENOMEM;

	rc->extent_root = extent_root;

	rc->block_group = btrfs_lookup_block_group(fs_info, group_start);
	BUG_ON(!rc->block_group);

	if (!rc->block_group->ro) {
		ret = btrfs_set_block_group_ro(extent_root, rc->block_group);
		if (ret) {
			err = ret;
			goto out;
		}
		rw = 1;
	}

	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out;
	}

	inode = lookup_free_space_inode(fs_info->tree_root, rc->block_group,
					path);
	btrfs_free_path(path);

	if (!IS_ERR(inode))
		ret = delete_block_group_cache(fs_info, inode, 0);
	else
		ret = PTR_ERR(inode);

	if (ret && ret != -ENOENT) {
		err = ret;
		goto out;
	}

	rc->data_inode = create_reloc_inode(fs_info, rc->block_group);
	if (IS_ERR(rc->data_inode)) {
		err = PTR_ERR(rc->data_inode);
		rc->data_inode = NULL;
		goto out;
	}

	btrfs_info(extent_root->fs_info, "relocating block group %llu flags %llu",
	       rc->block_group->key.objectid, rc->block_group->flags);

	ret = btrfs_start_delalloc_roots(fs_info, 0);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	btrfs_wait_ordered_roots(fs_info, -1);

	while (1) {
		mutex_lock(&fs_info->cleaner_mutex);
		ret = relocate_block_group(rc);
		mutex_unlock(&fs_info->cleaner_mutex);
		if (ret < 0) {
			err = ret;
			goto out;
		}

		if (rc->extents_found == 0)
			break;

		btrfs_info(extent_root->fs_info, "found %llu extents",
			rc->extents_found);

		if (rc->stage == MOVE_DATA_EXTENTS && rc->found_file_extent) {
			ret = btrfs_wait_ordered_range(rc->data_inode, 0,
						       (u64)-1);
			if (ret) {
				err = ret;
				goto out;
			}
			invalidate_mapping_pages(rc->data_inode->i_mapping,
						 0, -1);
			rc->stage = UPDATE_DATA_PTRS;
		}
	}

	WARN_ON(rc->block_group->pinned > 0);
	WARN_ON(rc->block_group->reserved > 0);
	WARN_ON(btrfs_block_group_used(&rc->block_group->item) > 0);
out:
	if (err && rw)
		btrfs_set_block_group_rw(extent_root, rc->block_group);
	iput(rc->data_inode);
	btrfs_put_block_group(rc->block_group);
	kfree(rc);
	return err;
}
static noinline_for_stack int mark_garbage_root(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;
	int ret, err;

	trans = btrfs_start_transaction(root->fs_info->tree_root, 0);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	memset(&root->root_item.drop_progress, 0,
		sizeof(root->root_item.drop_progress));
	root->root_item.drop_level = 0;
	btrfs_set_root_refs(&root->root_item, 0);
	ret = btrfs_update_root(trans, root->fs_info->tree_root,
				&root->root_key, &root->root_item);

	err = btrfs_end_transaction(trans, root->fs_info->tree_root);
	if (err)
		return err;
	return ret;
}
/*
 * recover relocation interrupted by system crash.
 *
 * this function resumes merging reloc trees with corresponding fs trees.
 * this is important for keeping the sharing of tree blocks
 */
int btrfs_recover_relocation(struct btrfs_root *root)
{
	LIST_HEAD(reloc_roots);
	struct btrfs_key key;
	struct btrfs_root *fs_root;
	struct btrfs_root *reloc_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct reloc_control *rc = NULL;
	struct btrfs_trans_handle *trans;
	int ret;
	int err = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = -1;

	key.objectid = BTRFS_TREE_RELOC_OBJECTID;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(NULL, root->fs_info->tree_root, &key,
					path, 0, 0);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		btrfs_release_path(path);

		if (key.objectid != BTRFS_TREE_RELOC_OBJECTID ||
		    key.type != BTRFS_ROOT_ITEM_KEY)
			break;

		reloc_root = btrfs_read_fs_root(root, &key);
		if (IS_ERR(reloc_root)) {
			err = PTR_ERR(reloc_root);
			goto out;
		}

		list_add(&reloc_root->root_list, &reloc_roots);

		if (btrfs_root_refs(&reloc_root->root_item) > 0) {
			fs_root = read_fs_root(root->fs_info,
					       reloc_root->root_key.offset);
			if (IS_ERR(fs_root)) {
				ret = PTR_ERR(fs_root);
				if (ret != -ENOENT) {
					err = ret;
					goto out;
				}
				ret = mark_garbage_root(reloc_root);
				if (ret < 0) {
					err = ret;
					goto out;
				}
			}
		}

		if (key.offset == 0)
			break;

		key.offset--;
	}
	btrfs_release_path(path);

	if (list_empty(&reloc_roots))
		goto out;

	rc = alloc_reloc_control(root->fs_info);
	if (!rc) {
		err = -ENOMEM;
		goto out;
	}

	rc->extent_root = root->fs_info->extent_root;

	set_reloc_control(rc);

	trans = btrfs_join_transaction(rc->extent_root);
	if (IS_ERR(trans)) {
		unset_reloc_control(rc);
		err = PTR_ERR(trans);
		goto out_free;
	}

	rc->merge_reloc_tree = 1;

	while (!list_empty(&reloc_roots)) {
		reloc_root = list_entry(reloc_roots.next,
					struct btrfs_root, root_list);
		list_del(&reloc_root->root_list);

		if (btrfs_root_refs(&reloc_root->root_item) == 0) {
			list_add_tail(&reloc_root->root_list,
				      &rc->reloc_roots);
			continue;
		}

		fs_root = read_fs_root(root->fs_info,
				       reloc_root->root_key.offset);
		if (IS_ERR(fs_root)) {
			err = PTR_ERR(fs_root);
			goto out_free;
		}

		err = __add_reloc_root(reloc_root);
		BUG_ON(err < 0); /* -ENOMEM or logic error */
		fs_root->reloc_root = reloc_root;
	}

	err = btrfs_commit_transaction(trans, rc->extent_root);
	if (err)
		goto out_free;

	merge_reloc_roots(rc);

	unset_reloc_control(rc);

	trans = btrfs_join_transaction(rc->extent_root);
	if (IS_ERR(trans))
		err = PTR_ERR(trans);
	else
		err = btrfs_commit_transaction(trans, rc->extent_root);
out_free:
	kfree(rc);
out:
	if (!list_empty(&reloc_roots))
		free_reloc_roots(&reloc_roots);

	btrfs_free_path(path);

	if (err == 0) {
		/* cleanup orphan inode in data relocation tree */
		fs_root = read_fs_root(root->fs_info,
				       BTRFS_DATA_RELOC_TREE_OBJECTID);
		if (IS_ERR(fs_root))
			err = PTR_ERR(fs_root);
		else
			err = btrfs_orphan_cleanup(fs_root);
	}
	return err;
}
/*
 * helper to add ordered checksum for data relocation.
 *
 * cloning checksum properly handles the nodatasum extents.
 * it also saves CPU time to re-calculate the checksum.
 */
int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len)
{
	struct btrfs_ordered_sum *sums;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;
	u64 disk_bytenr;
	u64 new_bytenr;
	LIST_HEAD(list);

	ordered = btrfs_lookup_ordered_extent(inode, file_pos);
	BUG_ON(ordered->file_offset != file_pos || ordered->len != len);

	disk_bytenr = file_pos + BTRFS_I(inode)->index_cnt;
	ret = btrfs_lookup_csums_range(root->fs_info->csum_root, disk_bytenr,
				       disk_bytenr + len - 1, &list, 0);
	if (ret)
		goto out;

	while (!list_empty(&list)) {
		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
		list_del_init(&sums->list);

		/*
		 * We need to offset the new_bytenr based on where the csum is.
		 * We need to do this because we will read in entire prealloc
		 * extents but we may have written to say the middle of the
		 * prealloc extent, so we need to make sure the csum goes with
		 * the right disk offset.
		 *
		 * We can do this because the data reloc inode refers strictly
		 * to the on disk bytes, so we don't have to worry about
		 * disk_len vs real len like with real inodes since it's all
		 * disk length.
		 */
		new_bytenr = ordered->start + (sums->bytenr - disk_bytenr);
		sums->bytenr = new_bytenr;

		btrfs_add_ordered_sum(inode, ordered, sums);
	}
out:
	btrfs_put_ordered_extent(ordered);
	return ret;
}
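
/*
 * called from btrfs_cow_block(). while a reloc tree is being built,
 * hand the newly cowed buffer over to the matching backref node and
 * queue the node on the pending list until its upper level pointers
 * are updated.
 */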
int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *buf,
			  struct extent_buffer *cow)
{
	struct reloc_control *rc;
	struct backref_node *node;
	int first_cow = 0;
	int level;
	int ret = 0;

	rc = root->fs_info->reloc_ctl;
	if (!rc)
		return 0;

	BUG_ON(rc->stage == UPDATE_DATA_PTRS &&
	       root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID);

	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
		if (buf == root->node)
			__update_reloc_root(root, cow->start);
	}

	level = btrfs_header_level(buf);
	if (btrfs_header_generation(buf) <=
	    btrfs_root_last_snapshot(&root->root_item))
		first_cow = 1;

	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID &&
	    rc->create_reloc_tree) {
		WARN_ON(!first_cow && level == 0);

		node = rc->backref_cache.path[level];
		BUG_ON(node->bytenr != buf->start &&
		       node->new_bytenr != buf->start);

		drop_node_buffer(node);
		extent_buffer_get(cow);
		node->eb = cow;
		node->new_bytenr = cow->start;

		if (!node->pending) {
			list_move_tail(&node->list,
				       &rc->backref_cache.pending[level]);
			node->pending = 1;
		}

		if (first_cow)
			__mark_block_processed(rc, node);

		if (first_cow && level > 0)
			rc->nodes_relocated += buf->len;
	}

	if (level == 0 && first_cow && rc->stage == UPDATE_DATA_PTRS)
		ret = replace_file_extents(trans, rc, root, cow);
	return ret;
}
/*
 * called before creating snapshot. it calculates metadata reservation
 * required for relocating tree blocks in the snapshot
 */
void btrfs_reloc_pre_snapshot(struct btrfs_trans_handle *trans,
			      struct btrfs_pending_snapshot *pending,
			      u64 *bytes_to_reserve)
{
	struct btrfs_root *root;
	struct reloc_control *rc;

	root = pending->root;
	if (!root->reloc_root)
		return;

	rc = root->fs_info->reloc_ctl;
	if (!rc->merge_reloc_tree)
		return;

	root = root->reloc_root;
	BUG_ON(btrfs_root_refs(&root->root_item) == 0);
	/*
	 * relocation is in the stage of merging trees. the space
	 * used by merging a reloc tree is twice the size of
	 * relocated tree nodes in the worst case. half for cowing
	 * the reloc tree, half for cowing the fs tree. the space
	 * used by cowing the reloc tree will be freed after the
	 * tree is dropped. if we create snapshot, cowing the fs
	 * tree may use more space than it frees. so we need
	 * reserve extra space.
	 */
	*bytes_to_reserve += rc->nodes_relocated;
}
/*
 * called after snapshot is created. migrate block reservation
 * and create reloc root for the newly created snapshot
 */
int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
			       struct btrfs_pending_snapshot *pending)
{
	struct btrfs_root *root = pending->root;
	struct btrfs_root *reloc_root;
	struct btrfs_root *new_root;
	struct reloc_control *rc;
	int ret;

	if (!root->reloc_root)
		return 0;

	rc = root->fs_info->reloc_ctl;
	rc->merging_rsv_size += rc->nodes_relocated;

	if (rc->merge_reloc_tree) {
		ret = btrfs_block_rsv_migrate(&pending->block_rsv,
					      rc->block_rsv,
					      rc->nodes_relocated);
		if (ret)
			return ret;
	}

	new_root = pending->snap;
	reloc_root = create_reloc_root(trans, root->reloc_root,
				       new_root->root_key.objectid);
	if (IS_ERR(reloc_root))
		return PTR_ERR(reloc_root);

	ret = __add_reloc_root(reloc_root);
	BUG_ON(ret < 0);
	new_root->reloc_root = reloc_root;

	if (rc->create_reloc_tree)
		ret = clone_backref_node(trans, rc, root, reloc_root);