/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include "print-tree.h"
#include "transaction.h"
#include "free-space-cache.h"
static int update_reserved_extents(struct btrfs_root *root,
				   u64 bytenr, u64 num, int reserve);
static int update_block_group(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      u64 bytenr, u64 num_bytes, int alloc,
			      int mark_free);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 root_objectid, u64 owner_objectid,
			       u64 owner_offset, int refs_to_drop,
			       struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      u64 parent, u64 root_objectid,
				      u64 flags, u64 owner, u64 offset,
				      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     u64 parent, u64 root_objectid,
				     u64 flags, struct btrfs_disk_key *key,
				     int level, struct btrfs_key *ins);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
			  struct btrfs_root *extent_root, u64 alloc_bytes,
			  u64 flags, int force);
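/*
 * return non-zero once the caching kthread has finished filling in the
 * free space cache for this block group
 */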
static int block_group_cache_done(struct btrfs_block_group_cache *cache)
{
	return cache->cached == BTRFS_CACHE_FINISHED;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
	return (cache->flags & bits) == bits;
}
/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
				struct btrfs_block_group_cache *block_group)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct btrfs_block_group_cache *cache;

	spin_lock(&info->block_group_cache_lock);
	p = &info->block_group_cache_tree.rb_node;

	while (*p) {
		parent = *p;
		cache = rb_entry(parent, struct btrfs_block_group_cache,
				 cache_node);
		if (block_group->key.objectid < cache->key.objectid) {
			p = &(*p)->rb_left;
		} else if (block_group->key.objectid > cache->key.objectid) {
			p = &(*p)->rb_right;
		} else {
			spin_unlock(&info->block_group_cache_lock);
			return -EEXIST;
		}
	}

	rb_link_node(&block_group->cache_node, parent, p);
	rb_insert_color(&block_group->cache_node,
			&info->block_group_cache_tree);
	spin_unlock(&info->block_group_cache_lock);

	return 0;
}
/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
			      int contains)
{
	struct btrfs_block_group_cache *cache, *ret = NULL;
	struct rb_node *n;
	u64 end, start;

	spin_lock(&info->block_group_cache_lock);
	n = info->block_group_cache_tree.rb_node;

	while (n) {
		cache = rb_entry(n, struct btrfs_block_group_cache,
				 cache_node);
		end = cache->key.objectid + cache->key.offset - 1;
		start = cache->key.objectid;

		if (bytenr < start) {
			if (!contains && (!ret || start < ret->key.objectid))
				ret = cache;
			n = n->rb_left;
		} else if (bytenr > start) {
			if (contains && bytenr <= end) {
				ret = cache;
				break;
			}
			n = n->rb_right;
		} else {
			ret = cache;
			break;
		}
	}
	if (ret)
		atomic_inc(&ret->count);
	spin_unlock(&info->block_group_cache_lock);

	return ret;
}
/*
 * We always set EXTENT_LOCKED for the super mirror extents so we don't
 * overwrite them, so those bits need to be unset.  Also, if we are unmounting
 * with pinned extents still sitting there because we had a block group
 * caching, we need to clear those now, since we are done.
 */
void btrfs_free_pinned_extents(struct btrfs_fs_info *info)
{
	u64 start, end, last = 0;
	int ret;

	while (1) {
		ret = find_first_extent_bit(&info->pinned_extents, last,
					    &start, &end,
					    EXTENT_LOCKED|EXTENT_DIRTY);
		if (ret)
			break;

		clear_extent_bits(&info->pinned_extents, start, end,
				  EXTENT_LOCKED|EXTENT_DIRTY, GFP_NOFS);
		last = end + 1;
	}
}
static int remove_sb_from_cache(struct btrfs_root *root,
				struct btrfs_block_group_cache *cache)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 bytenr;
	u64 *logical;
	int stripe_len;
	int i, nr, ret;

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
				       cache->key.objectid, bytenr,
				       0, &logical, &nr, &stripe_len);
		BUG_ON(ret);
		while (nr--) {
			try_lock_extent(&fs_info->pinned_extents,
					logical[nr],
					logical[nr] + stripe_len - 1,
					GFP_NOFS);
		}
		kfree(logical);
	}

	return 0;
}
/*
 * this is only called by cache_block_group, since we could have freed extents
 * we need to check the pinned_extents for any extents that can't be used yet
 * since their free space will be released as soon as the transaction commits.
 */
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
			      struct btrfs_fs_info *info, u64 start, u64 end)
{
	u64 extent_start, extent_end, size, total_added = 0;
	int ret;

	while (start < end) {
		ret = find_first_extent_bit(&info->pinned_extents, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY|EXTENT_LOCKED);
		if (ret)
			break;

		if (extent_start == start) {
			start = extent_end + 1;
		} else if (extent_start > start && extent_start < end) {
			size = extent_start - start;
			total_added += size;
			ret = btrfs_add_free_space(block_group, start,
						   size);
			BUG_ON(ret);
			start = extent_end + 1;
		} else {
			break;
		}
	}

	if (start < end) {
		size = end - start;
		total_added += size;
		ret = btrfs_add_free_space(block_group, start, size);
		BUG_ON(ret);
	}

	return total_added;
}
static int caching_kthread(void *data)
{
	struct btrfs_block_group_cache *block_group = data;
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	u64 last = 0;
	struct btrfs_path *path;
	int ret = 0;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	int slot;
	u64 total_found = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	atomic_inc(&block_group->space_info->caching_threads);
	last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
	/*
	 * We don't want to deadlock with somebody trying to allocate a new
	 * extent for the extent root while also trying to search the extent
	 * root to add free space.  So we skip locking and search the commit
	 * root, since its read-only
	 */
	path->skip_locking = 1;
	path->search_commit_root = 1;

	key.objectid = last;
	key.offset = 0;
	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
again:
	/* need to make sure the commit_root doesn't disappear */
	down_read(&fs_info->extent_commit_sem);

	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto err;

	while (1) {
		if (block_group->fs_info->closing > 1) {
			last = (u64)-1;
			break;
		}

		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(fs_info->extent_root, path);
			if (ret < 0)
				goto err;
			else if (ret)
				break;

			if (need_resched() ||
			    btrfs_transaction_in_commit(fs_info)) {
				leaf = path->nodes[0];

				/* this shouldn't happen, but if the
				 * leaf is empty just move on.
				 */
				if (btrfs_header_nritems(leaf) == 0)
					break;
				/*
				 * we need to copy the key out so that
				 * we are sure the next search advances
				 * us forward in the btree.
				 */
				btrfs_item_key_to_cpu(leaf, &key, 0);
				btrfs_release_path(fs_info->extent_root, path);
				up_read(&fs_info->extent_commit_sem);
				schedule_timeout(1);
				goto again;
			}

			continue;
		}
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid < block_group->key.objectid)
			goto next;

		if (key.objectid >= block_group->key.objectid +
		    block_group->key.offset)
			break;

		if (btrfs_key_type(&key) == BTRFS_EXTENT_ITEM_KEY) {
			total_found += add_new_free_space(block_group,
							  fs_info, last,
							  key.objectid);
			last = key.objectid + key.offset;
		}

		if (total_found > (1024 * 1024 * 2)) {
			total_found = 0;
			wake_up(&block_group->caching_q);
		}
next:
		path->slots[0]++;
	}
	ret = 0;

	total_found += add_new_free_space(block_group, fs_info, last,
					  block_group->key.objectid +
					  block_group->key.offset);

	spin_lock(&block_group->lock);
	block_group->cached = BTRFS_CACHE_FINISHED;
	spin_unlock(&block_group->lock);

err:
	btrfs_free_path(path);
	up_read(&fs_info->extent_commit_sem);
	atomic_dec(&block_group->space_info->caching_threads);
	wake_up(&block_group->caching_q);

	return 0;
}
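/*
 * start a background kthread that populates this block group's free space
 * cache; returns immediately once the thread is running
 */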
static int cache_block_group(struct btrfs_block_group_cache *cache)
{
	struct task_struct *tsk;
	int ret = 0;

	spin_lock(&cache->lock);
	if (cache->cached != BTRFS_CACHE_NO) {
		spin_unlock(&cache->lock);
		return ret;
	}
	cache->cached = BTRFS_CACHE_STARTED;
	spin_unlock(&cache->lock);

	tsk = kthread_run(caching_kthread, cache, "btrfs-cache-%llu\n",
			  cache->key.objectid);
	if (IS_ERR(tsk)) {
		ret = PTR_ERR(tsk);
		printk(KERN_ERR "error running thread %d\n", ret);
		BUG();
	}

	return ret;
}
/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 0);

	return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
						 struct btrfs_fs_info *info,
						 u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 1);

	return cache;
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
	if (atomic_dec_and_test(&cache->count))
		kfree(cache);
}
static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
						  u64 flags)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags == flags) {
			rcu_read_unlock();
			return found;
		}
	}
	rcu_read_unlock();
	return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list)
		found->full = 0;
	rcu_read_unlock();
}
static u64 div_factor(u64 num, int factor)
{
	if (factor == 10)
		return num;
	num *= factor;
	do_div(num, 10);
	return num;
}

u64 btrfs_find_block_group(struct btrfs_root *root,
			   u64 search_start, u64 search_hint, int owner)
{
	struct btrfs_block_group_cache *cache;
	u64 used;
	u64 last = max(search_hint, search_start);
	u64 group_start = 0;
	int full_search = 0;
	int factor = 9;
	int wrapped = 0;
again:
	while (1) {
		cache = btrfs_lookup_first_block_group(root->fs_info, last);
		if (!cache)
			break;

		spin_lock(&cache->lock);
		last = cache->key.objectid + cache->key.offset;
		used = btrfs_block_group_used(&cache->item);

		if ((full_search || !cache->ro) &&
		    block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
			if (used + cache->pinned + cache->reserved <
			    div_factor(cache->key.offset, factor)) {
				group_start = cache->key.objectid;
				spin_unlock(&cache->lock);
				btrfs_put_block_group(cache);
				goto found;
			}
		}
		spin_unlock(&cache->lock);
		btrfs_put_block_group(cache);
		cond_resched();
	}
	if (!wrapped) {
		last = search_start;
		wrapped = 1;
		goto again;
	}
	if (!full_search && factor < 10) {
		last = search_start;
		full_search = 1;
		factor = 10;
		goto again;
	}
found:
	return group_start;
}
/* simple helper to search for an existing extent at a given offset */
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	key.objectid = start;
	key.offset = len;
	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
	ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
				0, 0);
	btrfs_free_path(path);
	return ret;
}
/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs.  The implicit back refs are optimized
 * for pointers in non-shared tree blocks.  For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key.  This information allows us to find the block by
 * b-tree searching.  The full back refs are for pointers in tree blocks not
 * referenced by their owner trees.  The location of the tree block is
 * recorded in the back refs.  Actually the full back refs are generic, and
 * can be used in all cases the implicit back refs are used.  The major
 * shortcoming of the full back refs is their overhead.  Every time a tree
 * block gets COWed, we have to update the back refs entry for all pointers
 * in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it.  This means most tree related operations only involve
 * implicit back refs.  For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it.  So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COW'd through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree.  Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree.  In this case, full back refs are used for pointers
 * in the block.  Remove these full back refs, add implicit back refs for
 * every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree.  In this case, implicit back refs are used for
 * pointers in the block.  Add full back refs for every pointer in the
 * block, increase lower level extents' reference counts.  The original
 * implicit back refs are entailed to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree.  Add implicit back refs for every pointer in
 * the new block, increase lower level extents' reference count.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent,
 * The key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - Objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is a hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, the implicit back refs are used.
 * The fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed during file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of a key.  The key offset for the implicit back refs is the
 * objectid of the block's owner tree.  The key offset for the full back refs
 * is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * level of the tree block are required.  This information is stored in
 * the tree block info structure.
 */
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  u64 owner, u32 extra_size)
{
	struct btrfs_extent_item *item;
	struct btrfs_extent_item_v0 *ei0;
	struct btrfs_extent_ref_v0 *ref0;
	struct btrfs_tree_block_info *bi;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u32 new_size = sizeof(*item);
	u64 refs;
	int ret;

	leaf = path->nodes[0];
	BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	ei0 = btrfs_item_ptr(leaf, path->slots[0],
			     struct btrfs_extent_item_v0);
	refs = btrfs_extent_refs_v0(leaf, ei0);

	if (owner == (u64)-1) {
		while (1) {
			if (path->slots[0] >= btrfs_header_nritems(leaf)) {
				ret = btrfs_next_leaf(root, path);
				if (ret < 0)
					return ret;
				BUG_ON(ret > 0);
				leaf = path->nodes[0];
			}
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0]);
			BUG_ON(key.objectid != found_key.objectid);
			if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
				path->slots[0]++;
				continue;
			}
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			owner = btrfs_ref_objectid_v0(leaf, ref0);
			break;
		}
	}
	btrfs_release_path(root, path);

	if (owner < BTRFS_FIRST_FREE_OBJECTID)
		new_size += sizeof(*bi);

	new_size -= sizeof(*ei0);
	ret = btrfs_search_slot(trans, root, &key, path,
				new_size + extra_size, 1);
	if (ret < 0)
		return ret;
	BUG_ON(ret);

	ret = btrfs_extend_item(trans, root, path, new_size);
	BUG_ON(ret);

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, item, refs);
	/* FIXME: get real generation */
	btrfs_set_extent_generation(leaf, item, 0);
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		btrfs_set_extent_flags(leaf, item,
				       BTRFS_EXTENT_FLAG_TREE_BLOCK |
				       BTRFS_BLOCK_FLAG_FULL_BACKREF);
		bi = (struct btrfs_tree_block_info *)(item + 1);
		/* FIXME: get first key of the block */
		memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
		btrfs_set_tree_block_level(leaf, bi, (int)owner);
	} else {
		btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}
#endif
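/*
 * hash the (root objectid, inode objectid, file offset) triple; the result
 * is used as the key offset for implicit data back ref items
 */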
static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
	u32 high_crc = ~(u32)0;
	u32 low_crc = ~(u32)0;
	__le64 lenum;

	lenum = cpu_to_le64(root_objectid);
	high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(owner);
	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(offset);
	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));

	return ((u64)high_crc << 31) ^ (u64)low_crc;
}

static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
				     struct btrfs_extent_data_ref *ref)
{
	return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
				    btrfs_extent_data_ref_objectid(leaf, ref),
				    btrfs_extent_data_ref_offset(leaf, ref));
}
static int match_extent_data_ref(struct extent_buffer *leaf,
				 struct btrfs_extent_data_ref *ref,
				 u64 root_objectid, u64 owner, u64 offset)
{
	if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		return 0;
	return 1;
}
static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid,
					   u64 owner, u64 offset)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;
	int recow;
	int err = -ENOENT;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
	}
again:
	recow = 0;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0) {
		err = ret;
		goto fail;
	}

	if (parent) {
		if (!ret)
			return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		btrfs_release_path(root, path);
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0) {
			err = ret;
			goto fail;
		}
		if (!ret)
			return 0;
#endif
		goto fail;
	}

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);
	while (1) {
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				err = ret;
			if (ret)
				goto fail;

			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != bytenr ||
		    key.type != BTRFS_EXTENT_DATA_REF_KEY)
			goto fail;

		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);

		if (match_extent_data_ref(leaf, ref, root_objectid,
					  owner, offset)) {
			if (recow) {
				btrfs_release_path(root, path);
				goto again;
			}
			err = 0;
			break;
		}
		path->slots[0]++;
	}
fail:
	return err;
}
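/*
 * insert a new data back ref item (shared or keyed) for an extent, or bump
 * the count on an existing one
 */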
static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid, u64 owner,
					   u64 offset, int refs_to_add)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	u32 size;
	u32 num_refs;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
		size = sizeof(struct btrfs_shared_data_ref);
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
		size = sizeof(struct btrfs_extent_data_ref);
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, size);
	if (ret && ret != -EEXIST)
		goto fail;

	leaf = path->nodes[0];
	if (parent) {
		struct btrfs_shared_data_ref *ref;
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_shared_data_ref);
		if (ret == 0) {
			btrfs_set_shared_data_ref_count(leaf, ref,
							refs_to_add);
		} else {
			num_refs = btrfs_shared_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
		}
	} else {
		struct btrfs_extent_data_ref *ref;
		while (ret == -EEXIST) {
			ref = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_data_ref);
			if (match_extent_data_ref(leaf, ref, root_objectid,
						  owner, offset))
				break;
			btrfs_release_path(root, path);
			key.offset++;
			ret = btrfs_insert_empty_item(trans, root, path, &key,
						      size);
			if (ret && ret != -EEXIST)
				goto fail;

			leaf = path->nodes[0];
		}
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);
		if (ret == 0) {
			btrfs_set_extent_data_ref_root(leaf, ref,
						       root_objectid);
			btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
			btrfs_set_extent_data_ref_offset(leaf, ref, offset);
			btrfs_set_extent_data_ref_count(leaf, ref,
							refs_to_add);
		} else {
			num_refs = btrfs_extent_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
		}
	}
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;
fail:
	btrfs_release_path(root, path);
	return ret;
}
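/*
 * drop refs_to_drop references from the data back ref item at path,
 * deleting the item once its count reaches zero
 */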
static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   int refs_to_drop)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref1 = NULL;
	struct btrfs_shared_data_ref *ref2 = NULL;
	struct extent_buffer *leaf;
	u32 num_refs = 0;
	int ret = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		BUG();
	}

	BUG_ON(num_refs < refs_to_drop);
	num_refs -= refs_to_drop;

	if (num_refs == 0) {
		ret = btrfs_del_item(trans, root, path);
	} else {
		if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
		else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
			btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		else {
			struct btrfs_extent_ref_v0 *ref0;
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			btrfs_set_ref_count_v0(leaf, ref0, num_refs);
		}
#endif
		btrfs_mark_buffer_dirty(leaf);
	}
	return ret;
}
static noinline u32 extent_data_ref_count(struct btrfs_root *root,
					  struct btrfs_path *path,
					  struct btrfs_extent_inline_ref *iref)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref1;
	struct btrfs_shared_data_ref *ref2;
	u32 num_refs = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (iref) {
		if (btrfs_extent_inline_ref_type(leaf, iref) ==
		    BTRFS_EXTENT_DATA_REF_KEY) {
			ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
			num_refs = btrfs_extent_data_ref_count(leaf, ref1);
		} else {
			ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
			num_refs = btrfs_shared_data_ref_count(leaf, ref2);
		}
	} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		WARN_ON(1);
	}
	return num_refs;
}
static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (ret == -ENOENT && parent) {
		btrfs_release_path(root, path);
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret > 0)
			ret = -ENOENT;
	}
#endif
	return ret;
}
static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
	btrfs_release_path(root, path);
	return ret;
}
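/*
 * pick the back ref key type for an extent: shared vs. keyed, tree block
 * vs. data, based on whether a parent block is given and whether the owner
 * is a tree root or a file
 */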
static inline int extent_ref_type(u64 parent, u64 owner)
{
	int type;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		if (parent > 0)
			type = BTRFS_SHARED_BLOCK_REF_KEY;
		else
			type = BTRFS_TREE_BLOCK_REF_KEY;
	} else {
		if (parent > 0)
			type = BTRFS_SHARED_DATA_REF_KEY;
		else
			type = BTRFS_EXTENT_DATA_REF_KEY;
	}
	return type;
}
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)
{
	for (; level < BTRFS_MAX_LEVEL; level++) {
		if (!path->nodes[level])
			break;
		if (path->slots[level] + 1 >=
		    btrfs_header_nritems(path->nodes[level]))
			continue;
		if (level == 0)
			btrfs_item_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		else
			btrfs_node_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		return 0;
	}
	return 1;
}
/*
 * look for inline back ref. if back ref is found, *ref_ret is set
 * to the address of inline back ref, and 0 is returned.
 *
 * if back ref isn't found, *ref_ret is set to the address where it
 * should be inserted, and -ENOENT is returned.
 *
 * if insert is true and there are too many inline back refs, the path
 * points to the extent item, and -EAGAIN is returned.
 *
 * NOTE: inline back refs are ordered in the same way that back ref
 *	 items in the tree are ordered.
 */
static noinline_for_stack
int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int insert)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	u64 flags;
	u64 ref_offset;
	u32 item_size;
	unsigned long ptr;
	unsigned long end;
	int extra_size;
	int type;
	int want;
	int ret;
	int err = 0;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;

	want = extent_ref_type(parent, owner);
	if (insert) {
		extra_size = btrfs_extent_inline_ref_size(want);
		path->keep_locks = 1;
	} else {
		extra_size = -1;
	}
	ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	BUG_ON(ret);

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		if (!insert) {
			err = -ENOENT;
			goto out;
		}
		ret = convert_extent_item_v0(trans, root, path, owner,
					     extra_size);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	} else {
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
	}

	err = -ENOENT;
	while (1) {
		if (ptr >= end) {
			WARN_ON(ptr > end);
			break;
		}
		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_extent_inline_ref_type(leaf, iref);
		if (want < type)
			break;
		if (want > type) {
			ptr += btrfs_extent_inline_ref_size(type);
			continue;
		}

		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			struct btrfs_extent_data_ref *dref;
			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			if (match_extent_data_ref(leaf, dref, root_objectid,
						  owner, offset)) {
				err = 0;
				break;
			}
			if (hash_extent_data_ref_item(leaf, dref) <
			    hash_extent_data_ref(root_objectid, owner, offset))
				break;
		} else {
			ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
			if (parent > 0) {
				if (parent == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < parent)
					break;
			} else {
				if (root_objectid == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < root_objectid)
					break;
			}
		}
		ptr += btrfs_extent_inline_ref_size(type);
	}
	if (err == -ENOENT && insert) {
		if (item_size + extra_size >=
		    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
			err = -EAGAIN;
			goto out;
		}
		/*
		 * To add new inline back ref, we have to make sure
		 * there is no corresponding back ref item.
		 * For simplicity, we just do not add new inline back
		 * ref if there is any kind of item for this block
		 */
		if (find_next_key(path, 0, &key) == 0 &&
		    key.objectid == bytenr &&
		    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
			err = -EAGAIN;
			goto out;
		}
	}
	*ref_ret = (struct btrfs_extent_inline_ref *)ptr;
out:
	if (insert) {
		path->keep_locks = 0;
		btrfs_unlock_up_safe(path, 1);
	}
	return err;
}
/*
 * helper to add new inline back ref
 */
static noinline_for_stack
int setup_inline_extent_backref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_path *path,
				struct btrfs_extent_inline_ref *iref,
				u64 parent, u64 root_objectid,
				u64 owner, u64 offset, int refs_to_add,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	unsigned long ptr;
	unsigned long end;
	unsigned long item_offset;
	u64 refs;
	int size;
	int type;
	int ret;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	item_offset = (unsigned long)iref - (unsigned long)ei;

	type = extent_ref_type(parent, owner);
	size = btrfs_extent_inline_ref_size(type);

	ret = btrfs_extend_item(trans, root, path, size);
	BUG_ON(ret);

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	refs += refs_to_add;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	ptr = (unsigned long)ei + item_offset;
	end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
	if (ptr < end - size)
		memmove_extent_buffer(leaf, ptr + size, ptr,
				      end - size - ptr);

	iref = (struct btrfs_extent_inline_ref *)ptr;
	btrfs_set_extent_inline_ref_type(leaf, iref, type);
	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		struct btrfs_extent_data_ref *dref;
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
		btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
		btrfs_set_extent_data_ref_offset(leaf, dref, offset);
		btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		struct btrfs_shared_data_ref *sref;
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else {
		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}
static int lookup_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner, u64 offset)
{
	int ret;

	ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 0);
	if (ret != -ENOENT)
		return ret;

	btrfs_release_path(root, path);
	*ref_ret = NULL;

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
					    root_objectid);
	} else {
		ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
					     root_objectid, owner, offset);
	}
	return ret;
}
/*
 * helper to update/remove inline back ref
 */
static noinline_for_stack
int update_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 int refs_to_mod,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_data_ref *dref = NULL;
	struct btrfs_shared_data_ref *sref = NULL;
	unsigned long ptr;
	unsigned long end;
	u32 item_size;
	int size;
	int type;
	int ret;
	u64 refs;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
	refs += refs_to_mod;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	type = btrfs_extent_inline_ref_type(leaf, iref);

	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		refs = btrfs_extent_data_ref_count(leaf, dref);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		refs = btrfs_shared_data_ref_count(leaf, sref);
	} else {
		refs = 1;
		BUG_ON(refs_to_mod != -1);
	}

	BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
	refs += refs_to_mod;

	if (refs > 0) {
		if (type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, dref, refs);
		else
			btrfs_set_shared_data_ref_count(leaf, sref, refs);
	} else {
		size = btrfs_extent_inline_ref_size(type);
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		ptr = (unsigned long)iref;
		end = (unsigned long)ei + item_size;
		if (ptr + size < end)
			memmove_extent_buffer(leaf, ptr, ptr + size,
					      end - ptr - size);
		item_size -= size;
		ret = btrfs_truncate_item(trans, root, path, item_size, 1);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}
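/*
 * try to add an inline back ref for an extent: update it if one already
 * exists, otherwise insert a new inline ref at the position returned by
 * lookup_inline_extent_backref
 */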
static noinline_for_stack
int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner,
				 u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_extent_inline_ref *iref;
	int ret;

	ret = lookup_inline_extent_backref(trans, root, path, &iref,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 1);
	if (ret == 0) {
		BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
		ret = update_inline_extent_backref(trans, root, path, iref,
						   refs_to_add, extent_op);
	} else if (ret == -ENOENT) {
		ret = setup_inline_extent_backref(trans, root, path, iref,
						  parent, root_objectid,
						  owner, offset, refs_to_add,
						  extent_op);
	}
	return ret;
}
static int insert_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 bytenr, u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int refs_to_add)
{
	int ret;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		BUG_ON(refs_to_add != 1);
		ret = insert_tree_block_ref(trans, root, path, bytenr,
					    parent, root_objectid);
	} else {
		ret = insert_extent_data_ref(trans, root, path, bytenr,
					     parent, root_objectid,
					     owner, offset, refs_to_add);
	}
	return ret;
}
static int remove_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 int refs_to_drop, int is_data)
{
	int ret;

	BUG_ON(!is_data && refs_to_drop != 1);
	if (iref) {
		ret = update_inline_extent_backref(trans, root, path, iref,
						   -refs_to_drop, NULL);
	} else if (is_data) {
		ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
	} else {
		ret = btrfs_del_item(trans, root, path);
	}
	return ret;
}
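/*
 * discard support: map the extent to its physical stripes and tell each
 * underlying block device that the sectors are no longer in use
 */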
#ifdef BIO_RW_DISCARD
static void btrfs_issue_discard(struct block_device *bdev,
				u64 start, u64 len)
{
	blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL);
}
#endif

static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
				u64 num_bytes)
{
#ifdef BIO_RW_DISCARD
	int ret;
	u64 map_length = num_bytes;
	struct btrfs_multi_bio *multi = NULL;

	/* Tell the block device(s) that the sectors can be discarded */
	ret = btrfs_map_block(&root->fs_info->mapping_tree, READ,
			      bytenr, &map_length, &multi, 0);
	if (!ret) {
		struct btrfs_bio_stripe *stripe = multi->stripes;
		int i;

		if (map_length > num_bytes)
			map_length = num_bytes;

		for (i = 0; i < multi->num_stripes; i++, stripe++) {
			btrfs_issue_discard(stripe->dev->bdev,
					    stripe->physical,
					    map_length);
		}
		kfree(multi);
	}

	return ret;
#else
	return 0;
#endif
}
int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 u64 bytenr, u64 num_bytes, u64 parent,
			 u64 root_objectid, u64 owner, u64 offset)
{
	int ret;
	BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
	       root_objectid == BTRFS_TREE_LOG_OBJECTID);

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
					parent, root_objectid, (int)owner,
					BTRFS_ADD_DELAYED_REF, NULL);
	} else {
		ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
					parent, root_objectid, owner, offset,
					BTRFS_ADD_DELAYED_REF, NULL);
	}
	return ret;
}
static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  u64 bytenr, u64 num_bytes,
				  u64 parent, u64 root_objectid,
				  u64 owner, u64 offset, int refs_to_add,
				  struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *item;
	u64 refs;
	int ret;
	int err = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;
	/* this will setup the path even if it fails to insert the back ref */
	ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
					   path, bytenr, num_bytes, parent,
					   root_objectid, owner, offset,
					   refs_to_add, extent_op);
	if (ret == 0)
		goto out;

	if (ret != -EAGAIN) {
		err = ret;
		goto out;
	}

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, item);
	btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, item);

	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(root->fs_info->extent_root, path);

	path->leave_spinning = 1;

	/* now insert the actual backref */
	ret = insert_extent_backref(trans, root->fs_info->extent_root,
				    path, bytenr, parent, root_objectid,
				    owner, offset, refs_to_add);
	BUG_ON(ret);
out:
	btrfs_free_path(path);
	return err;
}
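/*
 * process one delayed ref for a data extent: insert the reserved extent,
 * add a back ref, or drop the extent depending on the queued action
 */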
static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				int insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_key ins;
	u64 parent = 0;
	u64 ref_root = 0;
	u64 flags = 0;

	ins.objectid = node->bytenr;
	ins.offset = node->num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	ref = btrfs_delayed_node_to_data_ref(node);
	if (node->type == BTRFS_SHARED_DATA_REF_KEY)
		parent = ref->parent;
	else
		ref_root = ref->root;

	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		if (extent_op) {
			BUG_ON(extent_op->update_key);
			flags |= extent_op->flags_to_set;
		}
		ret = alloc_reserved_file_extent(trans, root,
						 parent, ref_root, flags,
						 ref->objectid, ref->offset,
						 &ins, node->ref_mod);
		update_reserved_extents(root, ins.objectid, ins.offset, 0);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
					     node->num_bytes, parent,
					     ref_root, ref->objectid,
					     ref->offset, node->ref_mod,
					     extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, root, node->bytenr,
					  node->num_bytes, parent,
					  ref_root, ref->objectid,
					  ref->offset, node->ref_mod,
					  extent_op);
	} else {
		BUG();
	}
	return ret;
}
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei)
{
	u64 flags = btrfs_extent_flags(leaf, ei);
	if (extent_op->update_flags) {
		flags |= extent_op->flags_to_set;
		btrfs_set_extent_flags(leaf, ei, flags);
	}

	if (extent_op->update_key) {
		struct btrfs_tree_block_info *bi;
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
	}
}
static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_delayed_ref_node *node,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	u32 item_size;
	int ret;
	int err = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = node->bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = node->num_bytes;

	path->leave_spinning = 1;
	ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
				path, 0, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	if (ret > 0) {
		err = -EIO;
		goto out;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
					     path, (u64)-1, 0);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	__run_delayed_extent_op(extent_op, leaf, ei);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return err;
}
static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				int insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_key ins;
	u64 parent = 0;
	u64 ref_root = 0;

	ins.objectid = node->bytenr;
	ins.offset = node->num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	ref = btrfs_delayed_node_to_tree_ref(node);
	if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
		parent = ref->parent;
	else
		ref_root = ref->root;

	BUG_ON(node->ref_mod != 1);
	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		BUG_ON(!extent_op || !extent_op->update_flags ||
		       !extent_op->update_key);
		ret = alloc_reserved_tree_block(trans, root,
						parent, ref_root,
						extent_op->flags_to_set,
						&extent_op->key,
						ref->level, &ins);
		update_reserved_extents(root, ins.objectid, ins.offset, 0);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
					     node->num_bytes, parent, ref_root,
					     ref->level, 0, 1, extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, root, node->bytenr,
					  node->num_bytes, parent, ref_root,
					  ref->level, 0, 1, extent_op);
	} else {
		BUG();
	}
	return ret;
}
/* helper function to actually process a single delayed ref entry */
static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_delayed_ref_node *node,
			       struct btrfs_delayed_extent_op *extent_op,
			       int insert_reserved)
{
	int ret;
	if (btrfs_delayed_ref_is_head(node)) {
		struct btrfs_delayed_ref_head *head;
		/*
		 * we've hit the end of the chain and we were supposed
		 * to insert this extent into the tree.  But, it got
		 * deleted before we ever needed to insert it, so all
		 * we have to do is clean up the accounting
		 */
		head = btrfs_delayed_node_to_head(node);
		if (insert_reserved) {
			if (head->is_data) {
				ret = btrfs_del_csums(trans, root,
						      node->bytenr,
						      node->num_bytes);
				BUG_ON(ret);
			}
			btrfs_update_pinned_extents(root, node->bytenr,
						    node->num_bytes, 1);
			update_reserved_extents(root, node->bytenr,
						node->num_bytes, 0);
		}
		mutex_unlock(&head->mutex);
		return 0;
	}

	if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    node->type == BTRFS_SHARED_BLOCK_REF_KEY)
		ret = run_delayed_tree_ref(trans, root, node, extent_op,
					   insert_reserved);
	else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
		 node->type == BTRFS_SHARED_DATA_REF_KEY)
		ret = run_delayed_data_ref(trans, root, node, extent_op,
					   insert_reserved);
	else
		BUG();
	return ret;
}
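/*
 * pick the next delayed ref under a head to run, preferring ADD refs over
 * DROP refs so the count never transiently hits zero
 */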
static noinline struct btrfs_delayed_ref_node *
select_delayed_ref(struct btrfs_delayed_ref_head *head)
{
	struct rb_node *node;
	struct btrfs_delayed_ref_node *ref;
	int action = BTRFS_ADD_DELAYED_REF;
again:
	/*
	 * select delayed ref of type BTRFS_ADD_DELAYED_REF first.
	 * this prevents ref count from going down to zero when
	 * there still are pending delayed ref.
	 */
	node = rb_prev(&head->node.rb_node);
	while (1) {
		if (!node)
			break;
		ref = rb_entry(node, struct btrfs_delayed_ref_node,
			       rb_node);
		if (ref->bytenr != head->node.bytenr)
			break;
		if (ref->action == action)
			return ref;
		node = rb_prev(node);
	}
	if (action == BTRFS_ADD_DELAYED_REF) {
		action = BTRFS_DROP_DELAYED_REF;
		goto again;
	}
	return NULL;
}
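/*
 * run the delayed refs for the heads collected in a cluster, returning the
 * number of refs that were processed
 */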
static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct list_head *cluster)
{
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_head *locked_ref = NULL;
	struct btrfs_delayed_extent_op *extent_op;
	int ret;
	int count = 0;
	int must_insert_reserved = 0;

	delayed_refs = &trans->transaction->delayed_refs;
	while (1) {
		if (!locked_ref) {
			/* pick a new head ref from the cluster list */
			if (list_empty(cluster))
				break;

			locked_ref = list_entry(cluster->next,
				     struct btrfs_delayed_ref_head, cluster);

			/* grab the lock that says we are going to process
			 * all the refs for this head */
			ret = btrfs_delayed_ref_lock(trans, locked_ref);

			/*
			 * we may have dropped the spin lock to get the head
			 * mutex lock, and that might have given someone else
			 * time to free the head.  If that's true, it has been
			 * removed from our list and we can move on.
			 */
			if (ret == -EAGAIN) {
				locked_ref = NULL;
				count++;
				continue;
			}
		}

		/*
		 * record the must insert reserved flag before we
		 * drop the spin lock.
		 */
		must_insert_reserved = locked_ref->must_insert_reserved;
		locked_ref->must_insert_reserved = 0;

		extent_op = locked_ref->extent_op;
		locked_ref->extent_op = NULL;

		/*
		 * locked_ref is the head node, so we have to go one
		 * node back for any delayed ref updates
		 */
		ref = select_delayed_ref(locked_ref);
		if (!ref) {
			/* All delayed refs have been processed, Go ahead
			 * and send the head node to run_one_delayed_ref,
			 * so that any accounting fixes can happen
			 */
			ref = &locked_ref->node;

			if (extent_op && must_insert_reserved) {
				kfree(extent_op);
				extent_op = NULL;
			}

			if (extent_op) {
				spin_unlock(&delayed_refs->lock);

				ret = run_delayed_extent_op(trans, root,
							    ref, extent_op);
				BUG_ON(ret);
				kfree(extent_op);

				cond_resched();
				spin_lock(&delayed_refs->lock);
				continue;
			}

			list_del_init(&locked_ref->cluster);
			locked_ref = NULL;
		}

		ref->in_tree = 0;
		rb_erase(&ref->rb_node, &delayed_refs->root);
		delayed_refs->num_entries--;

		spin_unlock(&delayed_refs->lock);

		ret = run_one_delayed_ref(trans, root, ref, extent_op,
					  must_insert_reserved);
		BUG_ON(ret);

		btrfs_put_delayed_ref(ref);
		kfree(extent_op);
		count++;

		cond_resched();
		spin_lock(&delayed_refs->lock);
	}
	return count;
}
/*
 * this starts processing the delayed reference count updates and
 * extent insertions we have queued up so far.  count can be
 * 0, which means to process everything in the tree at the start
 * of the run (but not newly added entries), or it can be some target
 * number you'd like to process.
 */
int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, unsigned long count)
{
	struct rb_node *node;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	struct list_head cluster;
	int ret;
	int run_all = count == (unsigned long)-1;
	int run_most = 0;

	if (root == root->fs_info->extent_root)
		root = root->fs_info->tree_root;

	delayed_refs = &trans->transaction->delayed_refs;
	INIT_LIST_HEAD(&cluster);
again:
	spin_lock(&delayed_refs->lock);
	if (count == 0) {
		count = delayed_refs->num_entries * 2;
		run_most = 1;
	}
	while (1) {
		if (!(run_all || run_most) &&
		    delayed_refs->num_heads_ready < 64)
			break;

		/*
		 * go find something we can process in the rbtree.  We start at
		 * the beginning of the tree, and then build a cluster
		 * of refs to process starting at the first one we are able to
		 * lock
		 */
		ret = btrfs_find_ref_cluster(trans, &cluster,
					     delayed_refs->run_delayed_start);
		if (ret)
			break;

		ret = run_clustered_refs(trans, root, &cluster);
		BUG_ON(ret < 0);

		count -= min_t(unsigned long, ret, count);

		if (count == 0)
			break;
	}

	if (run_all) {
		node = rb_first(&delayed_refs->root);
		if (!node)
			goto out;
		count = (unsigned long)-1;

		while (node) {
			ref = rb_entry(node, struct btrfs_delayed_ref_node,
				       rb_node);
			if (btrfs_delayed_ref_is_head(ref)) {
				struct btrfs_delayed_ref_head *head;

				head = btrfs_delayed_node_to_head(ref);
				atomic_inc(&ref->refs);

				spin_unlock(&delayed_refs->lock);
				mutex_lock(&head->mutex);
				mutex_unlock(&head->mutex);

				btrfs_put_delayed_ref(ref);
				cond_resched();
				goto again;
			}
			node = rb_next(node);
		}
		spin_unlock(&delayed_refs->lock);
		schedule_timeout(1);
		goto again;
	}
out:
	spin_unlock(&delayed_refs->lock);
	return 0;
}
int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				u64 bytenr, u64 num_bytes, u64 flags,
				int is_data)
{
	struct btrfs_delayed_extent_op *extent_op;
	int ret;

	extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
	if (!extent_op)
		return -ENOMEM;

	extent_op->flags_to_set = flags;
	extent_op->update_flags = 1;
	extent_op->update_key = 0;
	extent_op->is_data = is_data ? 1 : 0;

	ret = btrfs_add_delayed_extent_op(trans, bytenr, num_bytes, extent_op);
	if (ret)
		kfree(extent_op);
	return ret;
}
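/*
 * check the still-pending delayed refs for bytenr to see whether anything
 * other than (root, objectid, offset) holds a data reference on it
 */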
static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      u64 objectid, u64 offset, u64 bytenr)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_data_ref *data_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct rb_node *node;
	int ret = 0;

	ret = -ENOENT;
	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (!head)
		goto out;

	if (!mutex_trylock(&head->mutex)) {
		atomic_inc(&head->node.refs);
		spin_unlock(&delayed_refs->lock);

		btrfs_release_path(root->fs_info->extent_root, path);

		mutex_lock(&head->mutex);
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref(&head->node);
		return -EAGAIN;
	}

	node = rb_prev(&head->node.rb_node);
	if (!node)
		goto out_unlock;

	ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);

	if (ref->bytenr != bytenr)
		goto out_unlock;

	ret = 1;
	if (ref->type != BTRFS_EXTENT_DATA_REF_KEY)
		goto out_unlock;

	data_ref = btrfs_delayed_node_to_data_ref(ref);

	node = rb_prev(node);
	if (node) {
		ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
		if (ref->bytenr == bytenr)
			goto out_unlock;
	}

	if (data_ref->root != root->root_key.objectid ||
	    data_ref->objectid != objectid || data_ref->offset != offset)
		goto out_unlock;

	ret = 0;
out_unlock:
	mutex_unlock(&head->mutex);
out:
	spin_unlock(&delayed_refs->lock);
	return ret;
}
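/*
 * same check against the committed extent tree: the extent must carry a
 * single inline data back ref owned by this root, inode and offset
 */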
static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					u64 objectid, u64 offset, u64 bytenr)
{
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref;
	struct btrfs_extent_inline_ref *iref;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;
	u32 item_size;
	int ret;

	key.objectid = bytenr;
	key.offset = (u64)-1;
	key.type = BTRFS_EXTENT_ITEM_KEY;

	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0);

	ret = -ENOENT;
	if (path->slots[0] == 0)
		goto out;

	path->slots[0]--;
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
		goto out;

	ret = 1;
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
		goto out;
	}
#endif
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);

	if (item_size != sizeof(*ei) +
	    btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
		goto out;

	if (btrfs_extent_generation(leaf, ei) <=
	    btrfs_root_last_snapshot(&root->root_item))
		goto out;

	iref = (struct btrfs_extent_inline_ref *)(ei + 1);
	if (btrfs_extent_inline_ref_type(leaf, iref) !=
	    BTRFS_EXTENT_DATA_REF_KEY)
		goto out;

	ref = (struct btrfs_extent_data_ref *)(&iref->offset);
	if (btrfs_extent_refs(leaf, ei) !=
	    btrfs_extent_data_ref_count(leaf, ref) ||
	    btrfs_extent_data_ref_root(leaf, ref) !=
	    root->root_key.objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		goto out;

	ret = 0;
out:
	return ret;
}
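/*
 * return 0 only when the extent at bytenr is referenced by nothing except
 * (root, objectid, offset), checking both the committed extent tree and the
 * pending delayed refs
 */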
int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root,
			  u64 objectid, u64 offset, u64 bytenr)
{
	struct btrfs_path *path;
	int ret;
	int ret2;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOENT;

	do {
		ret = check_committed_ref(trans, root, path, objectid,
					  offset, bytenr);
		if (ret && ret != -ENOENT)
			goto out;

		ret2 = check_delayed_ref(trans, root, path, objectid,
					 offset, bytenr);
	} while (ret2 == -EAGAIN);

	if (ret2 && ret2 != -ENOENT) {
		ret = ret2;
		goto out;
	}

	if (ret != -ENOENT || ret2 != -ENOENT)
		ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
int btrfs_cache_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct extent_buffer *buf, u32 nr_extents)
{
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	u64 root_gen;
	u64 disk_bytenr;
	u32 nritems;
	int i;
	int level;
	int ret = 0;
	int shared = 0;

	if (!root->ref_cows)
		return 0;

	if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
		shared = 0;
		root_gen = root->root_key.offset;
	} else {
		shared = 1;
		root_gen = trans->transid - 1;
	}

	level = btrfs_header_level(buf);
	nritems = btrfs_header_nritems(buf);

	if (level == 0) {
		struct btrfs_leaf_ref *ref;
		struct btrfs_extent_info *info;

		ref = btrfs_alloc_leaf_ref(root, nr_extents);
		if (!ref) {
			ret = -ENOMEM;
			goto out;
		}

		ref->root_gen = root_gen;
		ref->bytenr = buf->start;
		ref->owner = btrfs_header_owner(buf);
		ref->generation = btrfs_header_generation(buf);
		ref->nritems = nr_extents;
		info = ref->extents;

		for (i = 0; nr_extents > 0 && i < nritems; i++) {
			btrfs_item_key_to_cpu(buf, &key, i);
			if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
				continue;
			fi = btrfs_item_ptr(buf, i,
					    struct btrfs_file_extent_item);
			if (btrfs_file_extent_type(buf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE)
				continue;
			disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
			if (disk_bytenr == 0)
				continue;

			info->bytenr = disk_bytenr;
			info->num_bytes =
				btrfs_file_extent_disk_num_bytes(buf, fi);
			info->objectid = key.objectid;
			info->offset = key.offset;
			info++;
		}

		ret = btrfs_add_leaf_ref(root, ref, shared);
		if (ret == -EEXIST && shared) {
			struct btrfs_leaf_ref *old;
			old = btrfs_lookup_leaf_ref(root, ref->bytenr);
			BUG_ON(!old);
			btrfs_remove_leaf_ref(root, old);
			btrfs_free_leaf_ref(root, old);
			ret = btrfs_add_leaf_ref(root, ref, shared);
		}
		WARN_ON(ret);
		btrfs_free_leaf_ref(root, ref);
	}
out:
	return ret;
}
/* when a block goes through cow, we update the reference counts of
 * everything that block points to.  The internal pointers of the block
 * can be in just about any order, and it is likely to have clusters of
 * things that are close together and clusters of things that are not.
 *
 * To help reduce the seeks that come with updating all of these reference
 * counts, sort them by byte number before actual updates are done.
 *
 * struct refsort is used to match byte number to slot in the btree block.
 * we sort based on the byte number and then use the slot to actually
 * find the item.
 *
 * struct refsort is smaller than struct btrfs_item and smaller than
 * struct btrfs_key_ptr.  Since we're currently limited to the page size
 * for a btree block, there's no way for a kmalloc of refsorts for a
 * single node to be bigger than a page.
 */
struct refsort {
	u64 bytenr;
	u32 slot;
};

/*
 * for passing into sort()
 */
static int refsort_cmp(const void *a_void, const void *b_void)
{
	const struct refsort *a = a_void;
	const struct refsort *b = b_void;

	if (a->bytenr < b->bytenr)
		return -1;
	if (a->bytenr > b->bytenr)
		return 1;
	return 0;
}
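/*
 * refsort_cmp() orders struct refsort entries by ascending bytenr so that
 * the reference updates touch the extent allocation tree in disk order.
 * It is meant for the kernel's sort() from linux/sort.h, as the leaf-ref
 * drop paths later in this file do:
 *
 *	sort(sorted, nr, sizeof(struct refsort), refsort_cmp, NULL);
 */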
static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct extent_buffer *buf,
			   int full_backref, int inc)
{
	u64 bytenr;
	u64 num_bytes;
	u64 parent;
	u64 ref_root;
	u32 nritems;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int i;
	int level;
	int ret = 0;
	int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
			    u64, u64, u64, u64, u64, u64);

	ref_root = btrfs_header_owner(buf);
	nritems = btrfs_header_nritems(buf);
	level = btrfs_header_level(buf);

	if (!root->ref_cows && level == 0)
		return 0;

	if (inc)
		process_func = btrfs_inc_extent_ref;
	else
		process_func = btrfs_free_extent;

	if (full_backref)
		parent = buf->start;
	else
		parent = 0;

	for (i = 0; i < nritems; i++) {
		if (level == 0) {
			btrfs_item_key_to_cpu(buf, &key, i);
			if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
				continue;
			fi = btrfs_item_ptr(buf, i,
					    struct btrfs_file_extent_item);
			if (btrfs_file_extent_type(buf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE)
				continue;
			bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
			if (bytenr == 0)
				continue;

			num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
			key.offset -= btrfs_file_extent_offset(buf, fi);
			ret = process_func(trans, root, bytenr, num_bytes,
					   parent, ref_root, key.objectid,
					   key.offset);
			if (ret)
				goto fail;
		} else {
			bytenr = btrfs_node_blockptr(buf, i);
			num_bytes = btrfs_level_size(root, level - 1);
			ret = process_func(trans, root, bytenr, num_bytes,
					   parent, ref_root, level - 1, 0);
			if (ret)
				goto fail;
		}
	}
	return 0;
fail:
	BUG();
	return ret;
}

int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		  struct extent_buffer *buf, int full_backref)
{
	return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
}

int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		  struct extent_buffer *buf, int full_backref)
{
	return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
}
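/*
 * btrfs_inc_ref() and btrfs_dec_ref() are thin wrappers around
 * __btrfs_mod_ref(): inc == 1 walks @buf and calls btrfs_inc_extent_ref()
 * on everything it points to, while inc == 0 calls btrfs_free_extent()
 * instead.  full_backref selects whether the refs are recorded against the
 * block itself (parent == buf->start) rather than against the owning root.
 */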
static int write_one_cache_group(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_block_group_cache *cache)
{
	int ret;
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	unsigned long bi;
	struct extent_buffer *leaf;

	ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
	if (ret < 0)
		goto fail;
	BUG_ON(ret);

	leaf = path->nodes[0];
	bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
	write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(extent_root, path);
fail:
	if (ret)
		return ret;
	return 0;
}
2493 static struct btrfs_block_group_cache
*
2494 next_block_group(struct btrfs_root
*root
,
2495 struct btrfs_block_group_cache
*cache
)
2497 struct rb_node
*node
;
2498 spin_lock(&root
->fs_info
->block_group_cache_lock
);
2499 node
= rb_next(&cache
->cache_node
);
2500 btrfs_put_block_group(cache
);
2502 cache
= rb_entry(node
, struct btrfs_block_group_cache
,
2504 atomic_inc(&cache
->count
);
2507 spin_unlock(&root
->fs_info
->block_group_cache_lock
);
2511 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle
*trans
,
2512 struct btrfs_root
*root
)
2514 struct btrfs_block_group_cache
*cache
;
2516 struct btrfs_path
*path
;
2519 path
= btrfs_alloc_path();
2525 err
= btrfs_run_delayed_refs(trans
, root
,
2530 cache
= btrfs_lookup_first_block_group(root
->fs_info
, last
);
2534 cache
= next_block_group(root
, cache
);
2544 last
= cache
->key
.objectid
+ cache
->key
.offset
;
2546 err
= write_one_cache_group(trans
, root
, path
, cache
);
2548 btrfs_put_block_group(cache
);
2551 btrfs_free_path(path
);
int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
{
	struct btrfs_block_group_cache *block_group;
	int readonly = 0;

	block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
	if (!block_group || block_group->ro)
		readonly = 1;
	if (block_group)
		btrfs_put_block_group(block_group);
	return readonly;
}
2568 static int update_space_info(struct btrfs_fs_info
*info
, u64 flags
,
2569 u64 total_bytes
, u64 bytes_used
,
2570 struct btrfs_space_info
**space_info
)
2572 struct btrfs_space_info
*found
;
2574 found
= __find_space_info(info
, flags
);
2576 spin_lock(&found
->lock
);
2577 found
->total_bytes
+= total_bytes
;
2578 found
->bytes_used
+= bytes_used
;
2580 spin_unlock(&found
->lock
);
2581 *space_info
= found
;
2584 found
= kzalloc(sizeof(*found
), GFP_NOFS
);
2588 INIT_LIST_HEAD(&found
->block_groups
);
2589 init_rwsem(&found
->groups_sem
);
2590 spin_lock_init(&found
->lock
);
2591 found
->flags
= flags
;
2592 found
->total_bytes
= total_bytes
;
2593 found
->bytes_used
= bytes_used
;
2594 found
->bytes_pinned
= 0;
2595 found
->bytes_reserved
= 0;
2596 found
->bytes_readonly
= 0;
2597 found
->bytes_delalloc
= 0;
2599 found
->force_alloc
= 0;
2600 *space_info
= found
;
2601 list_add_rcu(&found
->list
, &info
->space_info
);
2602 atomic_set(&found
->caching_threads
, 0);
static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
				   BTRFS_BLOCK_GROUP_RAID1 |
				   BTRFS_BLOCK_GROUP_RAID10 |
				   BTRFS_BLOCK_GROUP_DUP);
	if (extra_flags) {
		if (flags & BTRFS_BLOCK_GROUP_DATA)
			fs_info->avail_data_alloc_bits |= extra_flags;
		if (flags & BTRFS_BLOCK_GROUP_METADATA)
			fs_info->avail_metadata_alloc_bits |= extra_flags;
		if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
			fs_info->avail_system_alloc_bits |= extra_flags;
	}
}
2622 static void set_block_group_readonly(struct btrfs_block_group_cache
*cache
)
2624 spin_lock(&cache
->space_info
->lock
);
2625 spin_lock(&cache
->lock
);
2627 cache
->space_info
->bytes_readonly
+= cache
->key
.offset
-
2628 btrfs_block_group_used(&cache
->item
);
2631 spin_unlock(&cache
->lock
);
2632 spin_unlock(&cache
->space_info
->lock
);
u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
{
	u64 num_devices = root->fs_info->fs_devices->rw_devices;

	if (num_devices == 1)
		flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
	if (num_devices < 4)
		flags &= ~BTRFS_BLOCK_GROUP_RAID10;

	if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
	    (flags & (BTRFS_BLOCK_GROUP_RAID1 |
		      BTRFS_BLOCK_GROUP_RAID10))) {
		flags &= ~BTRFS_BLOCK_GROUP_DUP;
	}

	if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
	    (flags & BTRFS_BLOCK_GROUP_RAID10)) {
		flags &= ~BTRFS_BLOCK_GROUP_RAID1;
	}

	if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
	    ((flags & BTRFS_BLOCK_GROUP_RAID1) |
	     (flags & BTRFS_BLOCK_GROUP_RAID10) |
	     (flags & BTRFS_BLOCK_GROUP_DUP)))
		flags &= ~BTRFS_BLOCK_GROUP_RAID0;
	return flags;
}
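/*
 * Example of the reduction above: on a filesystem with a single rw device,
 * a requested profile of (RAID1 | RAID0 | DUP) first loses the RAID1 and
 * RAID0 bits (num_devices == 1), and with fewer than four devices RAID10
 * is dropped as well, so the allocator is left asking for DUP, a profile
 * the device count can actually satisfy.
 */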
static u64 btrfs_get_alloc_profile(struct btrfs_root *root, u64 data)
{
	struct btrfs_fs_info *info = root->fs_info;
	u64 alloc_profile;

	if (data) {
		alloc_profile = info->avail_data_alloc_bits &
				info->data_alloc_profile;
		data = BTRFS_BLOCK_GROUP_DATA | alloc_profile;
	} else if (root == root->fs_info->chunk_root) {
		alloc_profile = info->avail_system_alloc_bits &
				info->system_alloc_profile;
		data = BTRFS_BLOCK_GROUP_SYSTEM | alloc_profile;
	} else {
		alloc_profile = info->avail_metadata_alloc_bits &
				info->metadata_alloc_profile;
		data = BTRFS_BLOCK_GROUP_METADATA | alloc_profile;
	}

	return btrfs_reduce_alloc_profile(root, data);
}
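/*
 * btrfs_get_alloc_profile() picks the allocation profile for one of the
 * three chunk classes: data != 0 selects the DATA profile, the chunk root
 * selects SYSTEM, and everything else gets METADATA; the result is then
 * filtered through btrfs_reduce_alloc_profile() so it never asks for a
 * RAID level the current device count cannot satisfy.
 */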
2685 void btrfs_set_inode_space_info(struct btrfs_root
*root
, struct inode
*inode
)
2689 alloc_target
= btrfs_get_alloc_profile(root
, 1);
2690 BTRFS_I(inode
)->space_info
= __find_space_info(root
->fs_info
,
2695 * for now this just makes sure we have at least 5% of our metadata space free
2698 int btrfs_check_metadata_free_space(struct btrfs_root
*root
)
2700 struct btrfs_fs_info
*info
= root
->fs_info
;
2701 struct btrfs_space_info
*meta_sinfo
;
2702 u64 alloc_target
, thresh
;
2703 int committed
= 0, ret
;
2705 /* get the space info for where the metadata will live */
2706 alloc_target
= btrfs_get_alloc_profile(root
, 0);
2707 meta_sinfo
= __find_space_info(info
, alloc_target
);
2710 spin_lock(&meta_sinfo
->lock
);
2711 if (!meta_sinfo
->full
)
2712 thresh
= meta_sinfo
->total_bytes
* 80;
2714 thresh
= meta_sinfo
->total_bytes
* 95;
2716 do_div(thresh
, 100);
2718 if (meta_sinfo
->bytes_used
+ meta_sinfo
->bytes_reserved
+
2719 meta_sinfo
->bytes_pinned
+ meta_sinfo
->bytes_readonly
> thresh
) {
2720 struct btrfs_trans_handle
*trans
;
2721 if (!meta_sinfo
->full
) {
2722 meta_sinfo
->force_alloc
= 1;
2723 spin_unlock(&meta_sinfo
->lock
);
2725 trans
= btrfs_start_transaction(root
, 1);
2729 ret
= do_chunk_alloc(trans
, root
->fs_info
->extent_root
,
2730 2 * 1024 * 1024, alloc_target
, 0);
2731 btrfs_end_transaction(trans
, root
);
2734 spin_unlock(&meta_sinfo
->lock
);
2738 trans
= btrfs_join_transaction(root
, 1);
2741 ret
= btrfs_commit_transaction(trans
, root
);
2748 spin_unlock(&meta_sinfo
->lock
);
2754 * This will check the space that the inode allocates from to make sure we have
2755 * enough space for bytes.
2757 int btrfs_check_data_free_space(struct btrfs_root
*root
, struct inode
*inode
,
2760 struct btrfs_space_info
*data_sinfo
;
2761 int ret
= 0, committed
= 0;
2763 /* make sure bytes are sectorsize aligned */
2764 bytes
= (bytes
+ root
->sectorsize
- 1) & ~((u64
)root
->sectorsize
- 1);
2766 data_sinfo
= BTRFS_I(inode
)->space_info
;
2768 /* make sure we have enough space to handle the data first */
2769 spin_lock(&data_sinfo
->lock
);
2770 if (data_sinfo
->total_bytes
- data_sinfo
->bytes_used
-
2771 data_sinfo
->bytes_delalloc
- data_sinfo
->bytes_reserved
-
2772 data_sinfo
->bytes_pinned
- data_sinfo
->bytes_readonly
-
2773 data_sinfo
->bytes_may_use
< bytes
) {
2774 struct btrfs_trans_handle
*trans
;
2777 * if we don't have enough free bytes in this space then we need
2778 * to alloc a new chunk.
2780 if (!data_sinfo
->full
) {
2783 data_sinfo
->force_alloc
= 1;
2784 spin_unlock(&data_sinfo
->lock
);
2786 alloc_target
= btrfs_get_alloc_profile(root
, 1);
2787 trans
= btrfs_start_transaction(root
, 1);
2791 ret
= do_chunk_alloc(trans
, root
->fs_info
->extent_root
,
2792 bytes
+ 2 * 1024 * 1024,
2794 btrfs_end_transaction(trans
, root
);
2799 spin_unlock(&data_sinfo
->lock
);
2801 /* commit the current transaction and try again */
2804 trans
= btrfs_join_transaction(root
, 1);
2807 ret
= btrfs_commit_transaction(trans
, root
);
2813 printk(KERN_ERR
"no space left, need %llu, %llu delalloc bytes"
2814 ", %llu bytes_used, %llu bytes_reserved, "
2815 "%llu bytes_pinned, %llu bytes_readonly, %llu may use "
2816 "%llu total\n", (unsigned long long)bytes
,
2817 (unsigned long long)data_sinfo
->bytes_delalloc
,
2818 (unsigned long long)data_sinfo
->bytes_used
,
2819 (unsigned long long)data_sinfo
->bytes_reserved
,
2820 (unsigned long long)data_sinfo
->bytes_pinned
,
2821 (unsigned long long)data_sinfo
->bytes_readonly
,
2822 (unsigned long long)data_sinfo
->bytes_may_use
,
2823 (unsigned long long)data_sinfo
->total_bytes
);
2826 data_sinfo
->bytes_may_use
+= bytes
;
2827 BTRFS_I(inode
)->reserved_bytes
+= bytes
;
2828 spin_unlock(&data_sinfo
->lock
);
2830 return btrfs_check_metadata_free_space(root
);
/*
 * if there was an error for whatever reason after calling
 * btrfs_check_data_free_space, call this so we can cleanup the counters.
 */
void btrfs_free_reserved_data_space(struct btrfs_root *root,
				    struct inode *inode, u64 bytes)
{
	struct btrfs_space_info *data_sinfo;

	/* make sure bytes are sectorsize aligned */
	bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

	data_sinfo = BTRFS_I(inode)->space_info;
	spin_lock(&data_sinfo->lock);
	data_sinfo->bytes_may_use -= bytes;
	BTRFS_I(inode)->reserved_bytes -= bytes;
	spin_unlock(&data_sinfo->lock);
}
/* called when we are adding a delalloc extent to the inode's io_tree */
void btrfs_delalloc_reserve_space(struct btrfs_root *root, struct inode *inode,
				  u64 bytes)
{
	struct btrfs_space_info *data_sinfo;

	/* get the space info for where this inode will be storing its data */
	data_sinfo = BTRFS_I(inode)->space_info;

	/* make sure we have enough space to handle the data first */
	spin_lock(&data_sinfo->lock);
	data_sinfo->bytes_delalloc += bytes;

	/*
	 * we are adding a delalloc extent without calling
	 * btrfs_check_data_free_space first.  This happens on a weird
	 * writepage condition, but shouldn't hurt our accounting
	 */
	if (unlikely(bytes > BTRFS_I(inode)->reserved_bytes)) {
		data_sinfo->bytes_may_use -= BTRFS_I(inode)->reserved_bytes;
		BTRFS_I(inode)->reserved_bytes = 0;
	} else {
		data_sinfo->bytes_may_use -= bytes;
		BTRFS_I(inode)->reserved_bytes -= bytes;
	}

	spin_unlock(&data_sinfo->lock);
}

/* called when we are clearing a delalloc extent from the inode's io_tree */
void btrfs_delalloc_free_space(struct btrfs_root *root, struct inode *inode,
			       u64 bytes)
{
	struct btrfs_space_info *info;

	info = BTRFS_I(inode)->space_info;

	spin_lock(&info->lock);
	info->bytes_delalloc -= bytes;
	spin_unlock(&info->lock);
}
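/*
 * Accounting flow for buffered writes, as implemented above:
 * btrfs_check_data_free_space() charges the bytes to bytes_may_use and to
 * the inode's reserved_bytes, btrfs_delalloc_reserve_space() moves that
 * reservation into bytes_delalloc once the delalloc extent is recorded,
 * and btrfs_delalloc_free_space() drops bytes_delalloc again when the
 * extent is cleared from the inode's io_tree.
 */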
static void force_metadata_allocation(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
			found->force_alloc = 1;
	}
	rcu_read_unlock();
}
2907 static int do_chunk_alloc(struct btrfs_trans_handle
*trans
,
2908 struct btrfs_root
*extent_root
, u64 alloc_bytes
,
2909 u64 flags
, int force
)
2911 struct btrfs_space_info
*space_info
;
2912 struct btrfs_fs_info
*fs_info
= extent_root
->fs_info
;
2916 mutex_lock(&fs_info
->chunk_mutex
);
2918 flags
= btrfs_reduce_alloc_profile(extent_root
, flags
);
2920 space_info
= __find_space_info(extent_root
->fs_info
, flags
);
2922 ret
= update_space_info(extent_root
->fs_info
, flags
,
2926 BUG_ON(!space_info
);
2928 spin_lock(&space_info
->lock
);
2929 if (space_info
->force_alloc
) {
2931 space_info
->force_alloc
= 0;
2933 if (space_info
->full
) {
2934 spin_unlock(&space_info
->lock
);
2938 thresh
= space_info
->total_bytes
- space_info
->bytes_readonly
;
2939 thresh
= div_factor(thresh
, 6);
2941 (space_info
->bytes_used
+ space_info
->bytes_pinned
+
2942 space_info
->bytes_reserved
+ alloc_bytes
) < thresh
) {
2943 spin_unlock(&space_info
->lock
);
2946 spin_unlock(&space_info
->lock
);
	/*
	 * if we're doing a data chunk, go ahead and make sure that
	 * we keep a reasonable number of metadata chunks allocated in the
	 * FS as well.
	 */
2953 if (flags
& BTRFS_BLOCK_GROUP_DATA
) {
2954 fs_info
->data_chunk_allocations
++;
2955 if (!(fs_info
->data_chunk_allocations
%
2956 fs_info
->metadata_ratio
))
2957 force_metadata_allocation(fs_info
);
2960 ret
= btrfs_alloc_chunk(trans
, extent_root
, flags
);
2962 space_info
->full
= 1;
2964 mutex_unlock(&extent_root
->fs_info
->chunk_mutex
);
2968 static int update_block_group(struct btrfs_trans_handle
*trans
,
2969 struct btrfs_root
*root
,
2970 u64 bytenr
, u64 num_bytes
, int alloc
,
2973 struct btrfs_block_group_cache
*cache
;
2974 struct btrfs_fs_info
*info
= root
->fs_info
;
2975 u64 total
= num_bytes
;
2979 /* block accounting for super block */
2980 spin_lock(&info
->delalloc_lock
);
2981 old_val
= btrfs_super_bytes_used(&info
->super_copy
);
2983 old_val
+= num_bytes
;
2985 old_val
-= num_bytes
;
2986 btrfs_set_super_bytes_used(&info
->super_copy
, old_val
);
2988 /* block accounting for root item */
2989 old_val
= btrfs_root_used(&root
->root_item
);
2991 old_val
+= num_bytes
;
2993 old_val
-= num_bytes
;
2994 btrfs_set_root_used(&root
->root_item
, old_val
);
2995 spin_unlock(&info
->delalloc_lock
);
2998 cache
= btrfs_lookup_block_group(info
, bytenr
);
3001 byte_in_group
= bytenr
- cache
->key
.objectid
;
3002 WARN_ON(byte_in_group
> cache
->key
.offset
);
3004 spin_lock(&cache
->space_info
->lock
);
3005 spin_lock(&cache
->lock
);
3007 old_val
= btrfs_block_group_used(&cache
->item
);
3008 num_bytes
= min(total
, cache
->key
.offset
- byte_in_group
);
3010 old_val
+= num_bytes
;
3011 cache
->space_info
->bytes_used
+= num_bytes
;
3013 cache
->space_info
->bytes_readonly
-= num_bytes
;
3014 btrfs_set_block_group_used(&cache
->item
, old_val
);
3015 spin_unlock(&cache
->lock
);
3016 spin_unlock(&cache
->space_info
->lock
);
3018 old_val
-= num_bytes
;
3019 cache
->space_info
->bytes_used
-= num_bytes
;
3021 cache
->space_info
->bytes_readonly
+= num_bytes
;
3022 btrfs_set_block_group_used(&cache
->item
, old_val
);
3023 spin_unlock(&cache
->lock
);
3024 spin_unlock(&cache
->space_info
->lock
);
3028 ret
= btrfs_discard_extent(root
, bytenr
,
3032 ret
= btrfs_add_free_space(cache
, bytenr
,
3037 btrfs_put_block_group(cache
);
3039 bytenr
+= num_bytes
;
3044 static u64
first_logical_byte(struct btrfs_root
*root
, u64 search_start
)
3046 struct btrfs_block_group_cache
*cache
;
3049 cache
= btrfs_lookup_first_block_group(root
->fs_info
, search_start
);
3053 bytenr
= cache
->key
.objectid
;
3054 btrfs_put_block_group(cache
);
3059 int btrfs_update_pinned_extents(struct btrfs_root
*root
,
3060 u64 bytenr
, u64 num
, int pin
)
3063 struct btrfs_block_group_cache
*cache
;
3064 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
3067 set_extent_dirty(&fs_info
->pinned_extents
,
3068 bytenr
, bytenr
+ num
- 1, GFP_NOFS
);
3071 cache
= btrfs_lookup_block_group(fs_info
, bytenr
);
3073 len
= min(num
, cache
->key
.offset
-
3074 (bytenr
- cache
->key
.objectid
));
3076 spin_lock(&cache
->space_info
->lock
);
3077 spin_lock(&cache
->lock
);
3078 cache
->pinned
+= len
;
3079 cache
->space_info
->bytes_pinned
+= len
;
3080 spin_unlock(&cache
->lock
);
3081 spin_unlock(&cache
->space_info
->lock
);
3082 fs_info
->total_pinned
+= len
;
3087 * in order to not race with the block group caching, we
3088 * only want to unpin the extent if we are cached. If
3089 * we aren't cached, we want to start async caching this
3090 * block group so we can free the extent the next time
3093 spin_lock(&cache
->space_info
->lock
);
3094 spin_lock(&cache
->lock
);
3095 unpin
= (cache
->cached
== BTRFS_CACHE_FINISHED
);
3096 if (likely(unpin
)) {
3097 cache
->pinned
-= len
;
3098 cache
->space_info
->bytes_pinned
-= len
;
3099 fs_info
->total_pinned
-= len
;
3101 spin_unlock(&cache
->lock
);
3102 spin_unlock(&cache
->space_info
->lock
);
3105 clear_extent_dirty(&fs_info
->pinned_extents
,
3106 bytenr
, bytenr
+ len
-1,
3109 cache_block_group(cache
);
3112 btrfs_add_free_space(cache
, bytenr
, len
);
3114 btrfs_put_block_group(cache
);
static int update_reserved_extents(struct btrfs_root *root,
				   u64 bytenr, u64 num, int reserve)
{
	u64 len;
	struct btrfs_block_group_cache *cache;
	struct btrfs_fs_info *fs_info = root->fs_info;

	while (num > 0) {
		cache = btrfs_lookup_block_group(fs_info, bytenr);
		BUG_ON(!cache);
		len = min(num, cache->key.offset -
			  (bytenr - cache->key.objectid));

		spin_lock(&cache->space_info->lock);
		spin_lock(&cache->lock);
		if (reserve) {
			cache->reserved += len;
			cache->space_info->bytes_reserved += len;
		} else {
			cache->reserved -= len;
			cache->space_info->bytes_reserved -= len;
		}
		spin_unlock(&cache->lock);
		spin_unlock(&cache->space_info->lock);
		btrfs_put_block_group(cache);
		bytenr += len;
		num -= len;
	}
	return 0;
}
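/*
 * update_reserved_extents() walks the block groups covering
 * [bytenr, bytenr + num) and, under the space_info and block group locks,
 * either adds the overlapping length to cache->reserved and
 * space_info->bytes_reserved (reserve == 1) or releases it (reserve == 0).
 * btrfs_reserve_extent() takes the reservation and
 * btrfs_free_reserved_extent() gives it back.
 */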
3152 int btrfs_copy_pinned(struct btrfs_root
*root
, struct extent_io_tree
*copy
)
3157 struct extent_io_tree
*pinned_extents
= &root
->fs_info
->pinned_extents
;
3161 ret
= find_first_extent_bit(pinned_extents
, last
,
3162 &start
, &end
, EXTENT_DIRTY
);
3166 set_extent_dirty(copy
, start
, end
, GFP_NOFS
);
3172 int btrfs_finish_extent_commit(struct btrfs_trans_handle
*trans
,
3173 struct btrfs_root
*root
,
3174 struct extent_io_tree
*unpin
)
3181 ret
= find_first_extent_bit(unpin
, 0, &start
, &end
,
3186 ret
= btrfs_discard_extent(root
, start
, end
+ 1 - start
);
3188 /* unlocks the pinned mutex */
3189 btrfs_update_pinned_extents(root
, start
, end
+ 1 - start
, 0);
3190 clear_extent_dirty(unpin
, start
, end
, GFP_NOFS
);
3198 static int pin_down_bytes(struct btrfs_trans_handle
*trans
,
3199 struct btrfs_root
*root
,
3200 struct btrfs_path
*path
,
3201 u64 bytenr
, u64 num_bytes
, int is_data
,
3202 struct extent_buffer
**must_clean
)
3205 struct extent_buffer
*buf
;
3210 buf
= btrfs_find_tree_block(root
, bytenr
, num_bytes
);
3214 /* we can reuse a block if it hasn't been written
3215 * and it is from this transaction. We can't
3216 * reuse anything from the tree log root because
3217 * it has tiny sub-transactions.
3219 if (btrfs_buffer_uptodate(buf
, 0) &&
3220 btrfs_try_tree_lock(buf
)) {
3221 u64 header_owner
= btrfs_header_owner(buf
);
3222 u64 header_transid
= btrfs_header_generation(buf
);
3223 if (header_owner
!= BTRFS_TREE_LOG_OBJECTID
&&
3224 header_transid
== trans
->transid
&&
3225 !btrfs_header_flag(buf
, BTRFS_HEADER_FLAG_WRITTEN
)) {
3229 btrfs_tree_unlock(buf
);
3231 free_extent_buffer(buf
);
3233 btrfs_set_path_blocking(path
);
3234 /* unlocks the pinned mutex */
3235 btrfs_update_pinned_extents(root
, bytenr
, num_bytes
, 1);
3242 static int __btrfs_free_extent(struct btrfs_trans_handle
*trans
,
3243 struct btrfs_root
*root
,
3244 u64 bytenr
, u64 num_bytes
, u64 parent
,
3245 u64 root_objectid
, u64 owner_objectid
,
3246 u64 owner_offset
, int refs_to_drop
,
3247 struct btrfs_delayed_extent_op
*extent_op
)
3249 struct btrfs_key key
;
3250 struct btrfs_path
*path
;
3251 struct btrfs_fs_info
*info
= root
->fs_info
;
3252 struct btrfs_root
*extent_root
= info
->extent_root
;
3253 struct extent_buffer
*leaf
;
3254 struct btrfs_extent_item
*ei
;
3255 struct btrfs_extent_inline_ref
*iref
;
3258 int extent_slot
= 0;
3259 int found_extent
= 0;
3264 path
= btrfs_alloc_path();
3269 path
->leave_spinning
= 1;
3271 is_data
= owner_objectid
>= BTRFS_FIRST_FREE_OBJECTID
;
3272 BUG_ON(!is_data
&& refs_to_drop
!= 1);
3274 ret
= lookup_extent_backref(trans
, extent_root
, path
, &iref
,
3275 bytenr
, num_bytes
, parent
,
3276 root_objectid
, owner_objectid
,
3279 extent_slot
= path
->slots
[0];
3280 while (extent_slot
>= 0) {
3281 btrfs_item_key_to_cpu(path
->nodes
[0], &key
,
3283 if (key
.objectid
!= bytenr
)
3285 if (key
.type
== BTRFS_EXTENT_ITEM_KEY
&&
3286 key
.offset
== num_bytes
) {
3290 if (path
->slots
[0] - extent_slot
> 5)
3294 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
3295 item_size
= btrfs_item_size_nr(path
->nodes
[0], extent_slot
);
3296 if (found_extent
&& item_size
< sizeof(*ei
))
3299 if (!found_extent
) {
3301 ret
= remove_extent_backref(trans
, extent_root
, path
,
3305 btrfs_release_path(extent_root
, path
);
3306 path
->leave_spinning
= 1;
3308 key
.objectid
= bytenr
;
3309 key
.type
= BTRFS_EXTENT_ITEM_KEY
;
3310 key
.offset
= num_bytes
;
3312 ret
= btrfs_search_slot(trans
, extent_root
,
3315 printk(KERN_ERR
"umm, got %d back from search"
3316 ", was looking for %llu\n", ret
,
3317 (unsigned long long)bytenr
);
3318 btrfs_print_leaf(extent_root
, path
->nodes
[0]);
3321 extent_slot
= path
->slots
[0];
3324 btrfs_print_leaf(extent_root
, path
->nodes
[0]);
3326 printk(KERN_ERR
"btrfs unable to find ref byte nr %llu "
3327 "parent %llu root %llu owner %llu offset %llu\n",
3328 (unsigned long long)bytenr
,
3329 (unsigned long long)parent
,
3330 (unsigned long long)root_objectid
,
3331 (unsigned long long)owner_objectid
,
3332 (unsigned long long)owner_offset
);
3335 leaf
= path
->nodes
[0];
3336 item_size
= btrfs_item_size_nr(leaf
, extent_slot
);
3337 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
3338 if (item_size
< sizeof(*ei
)) {
3339 BUG_ON(found_extent
|| extent_slot
!= path
->slots
[0]);
3340 ret
= convert_extent_item_v0(trans
, extent_root
, path
,
3344 btrfs_release_path(extent_root
, path
);
3345 path
->leave_spinning
= 1;
3347 key
.objectid
= bytenr
;
3348 key
.type
= BTRFS_EXTENT_ITEM_KEY
;
3349 key
.offset
= num_bytes
;
3351 ret
= btrfs_search_slot(trans
, extent_root
, &key
, path
,
3354 printk(KERN_ERR
"umm, got %d back from search"
3355 ", was looking for %llu\n", ret
,
3356 (unsigned long long)bytenr
);
3357 btrfs_print_leaf(extent_root
, path
->nodes
[0]);
3360 extent_slot
= path
->slots
[0];
3361 leaf
= path
->nodes
[0];
3362 item_size
= btrfs_item_size_nr(leaf
, extent_slot
);
3365 BUG_ON(item_size
< sizeof(*ei
));
3366 ei
= btrfs_item_ptr(leaf
, extent_slot
,
3367 struct btrfs_extent_item
);
3368 if (owner_objectid
< BTRFS_FIRST_FREE_OBJECTID
) {
3369 struct btrfs_tree_block_info
*bi
;
3370 BUG_ON(item_size
< sizeof(*ei
) + sizeof(*bi
));
3371 bi
= (struct btrfs_tree_block_info
*)(ei
+ 1);
3372 WARN_ON(owner_objectid
!= btrfs_tree_block_level(leaf
, bi
));
3375 refs
= btrfs_extent_refs(leaf
, ei
);
3376 BUG_ON(refs
< refs_to_drop
);
3377 refs
-= refs_to_drop
;
3381 __run_delayed_extent_op(extent_op
, leaf
, ei
);
3383 * In the case of inline back ref, reference count will
3384 * be updated by remove_extent_backref
3387 BUG_ON(!found_extent
);
3389 btrfs_set_extent_refs(leaf
, ei
, refs
);
3390 btrfs_mark_buffer_dirty(leaf
);
3393 ret
= remove_extent_backref(trans
, extent_root
, path
,
3400 struct extent_buffer
*must_clean
= NULL
;
3403 BUG_ON(is_data
&& refs_to_drop
!=
3404 extent_data_ref_count(root
, path
, iref
));
3406 BUG_ON(path
->slots
[0] != extent_slot
);
3408 BUG_ON(path
->slots
[0] != extent_slot
+ 1);
3409 path
->slots
[0] = extent_slot
;
3414 ret
= pin_down_bytes(trans
, root
, path
, bytenr
,
3415 num_bytes
, is_data
, &must_clean
);
3420 * it is going to be very rare for someone to be waiting
3421 * on the block we're freeing. del_items might need to
3422 * schedule, so rather than get fancy, just force it
3426 btrfs_set_lock_blocking(must_clean
);
3428 ret
= btrfs_del_items(trans
, extent_root
, path
, path
->slots
[0],
3431 btrfs_release_path(extent_root
, path
);
3434 clean_tree_block(NULL
, root
, must_clean
);
3435 btrfs_tree_unlock(must_clean
);
3436 free_extent_buffer(must_clean
);
3440 ret
= btrfs_del_csums(trans
, root
, bytenr
, num_bytes
);
3443 invalidate_mapping_pages(info
->btree_inode
->i_mapping
,
3444 bytenr
>> PAGE_CACHE_SHIFT
,
3445 (bytenr
+ num_bytes
- 1) >> PAGE_CACHE_SHIFT
);
3448 ret
= update_block_group(trans
, root
, bytenr
, num_bytes
, 0,
3452 btrfs_free_path(path
);
3457 * when we free an extent, it is possible (and likely) that we free the last
3458 * delayed ref for that extent as well. This searches the delayed ref tree for
3459 * a given extent, and if there are no other delayed refs to be processed, it
3460 * removes it from the tree.
3462 static noinline
int check_ref_cleanup(struct btrfs_trans_handle
*trans
,
3463 struct btrfs_root
*root
, u64 bytenr
)
3465 struct btrfs_delayed_ref_head
*head
;
3466 struct btrfs_delayed_ref_root
*delayed_refs
;
3467 struct btrfs_delayed_ref_node
*ref
;
3468 struct rb_node
*node
;
3471 delayed_refs
= &trans
->transaction
->delayed_refs
;
3472 spin_lock(&delayed_refs
->lock
);
3473 head
= btrfs_find_delayed_ref_head(trans
, bytenr
);
3477 node
= rb_prev(&head
->node
.rb_node
);
3481 ref
= rb_entry(node
, struct btrfs_delayed_ref_node
, rb_node
);
3483 /* there are still entries for this ref, we can't drop it */
3484 if (ref
->bytenr
== bytenr
)
3487 if (head
->extent_op
) {
3488 if (!head
->must_insert_reserved
)
3490 kfree(head
->extent_op
);
3491 head
->extent_op
= NULL
;
3495 * waiting for the lock here would deadlock. If someone else has it
3496 * locked they are already in the process of dropping it anyway
3498 if (!mutex_trylock(&head
->mutex
))
3502 * at this point we have a head with no other entries. Go
3503 * ahead and process it.
3505 head
->node
.in_tree
= 0;
3506 rb_erase(&head
->node
.rb_node
, &delayed_refs
->root
);
3508 delayed_refs
->num_entries
--;
3511 * we don't take a ref on the node because we're removing it from the
3512 * tree, so we just steal the ref the tree was holding.
3514 delayed_refs
->num_heads
--;
3515 if (list_empty(&head
->cluster
))
3516 delayed_refs
->num_heads_ready
--;
3518 list_del_init(&head
->cluster
);
3519 spin_unlock(&delayed_refs
->lock
);
3521 ret
= run_one_delayed_ref(trans
, root
->fs_info
->tree_root
,
3522 &head
->node
, head
->extent_op
,
3523 head
->must_insert_reserved
);
3525 btrfs_put_delayed_ref(&head
->node
);
3528 spin_unlock(&delayed_refs
->lock
);
3532 int btrfs_free_extent(struct btrfs_trans_handle
*trans
,
3533 struct btrfs_root
*root
,
3534 u64 bytenr
, u64 num_bytes
, u64 parent
,
3535 u64 root_objectid
, u64 owner
, u64 offset
)
3540 * tree log blocks never actually go into the extent allocation
3541 * tree, just update pinning info and exit early.
3543 if (root_objectid
== BTRFS_TREE_LOG_OBJECTID
) {
3544 WARN_ON(owner
>= BTRFS_FIRST_FREE_OBJECTID
);
3545 /* unlocks the pinned mutex */
3546 btrfs_update_pinned_extents(root
, bytenr
, num_bytes
, 1);
3547 update_reserved_extents(root
, bytenr
, num_bytes
, 0);
3549 } else if (owner
< BTRFS_FIRST_FREE_OBJECTID
) {
3550 ret
= btrfs_add_delayed_tree_ref(trans
, bytenr
, num_bytes
,
3551 parent
, root_objectid
, (int)owner
,
3552 BTRFS_DROP_DELAYED_REF
, NULL
);
3554 ret
= check_ref_cleanup(trans
, root
, bytenr
);
3557 ret
= btrfs_add_delayed_data_ref(trans
, bytenr
, num_bytes
,
3558 parent
, root_objectid
, owner
,
3559 offset
, BTRFS_DROP_DELAYED_REF
, NULL
);
static u64 stripe_align(struct btrfs_root *root, u64 val)
{
	u64 mask = ((u64)root->stripesize - 1);
	u64 ret = (val + mask) & ~mask;
	return ret;
}
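/*
 * stripe_align() rounds @val up to the next multiple of root->stripesize.
 * For example, with a 64K stripe (stripesize == 65536, mask == 0xffff) a
 * value of 0x12345 becomes (0x12345 + 0xffff) & ~0xffff == 0x20000, while
 * an already aligned value is returned unchanged.
 */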
3573 * when we wait for progress in the block group caching, its because
3574 * our allocation attempt failed at least once. So, we must sleep
3575 * and let some progress happen before we try again.
3577 * This function will sleep at least once waiting for new free space to
3578 * show up, and then it will check the block group free space numbers
3579 * for our min num_bytes. Another option is to have it go ahead
3580 * and look in the rbtree for a free extent of a given size, but this
3584 wait_block_group_cache_progress(struct btrfs_block_group_cache
*cache
,
3589 prepare_to_wait(&cache
->caching_q
, &wait
, TASK_UNINTERRUPTIBLE
);
3591 if (block_group_cache_done(cache
)) {
3592 finish_wait(&cache
->caching_q
, &wait
);
3596 finish_wait(&cache
->caching_q
, &wait
);
3598 wait_event(cache
->caching_q
, block_group_cache_done(cache
) ||
3599 (cache
->free_space
>= num_bytes
));
enum btrfs_loop_type {
	LOOP_CACHED_ONLY = 0,
	LOOP_CACHING_NOWAIT = 1,
	LOOP_CACHING_WAIT = 2,
	LOOP_ALLOC_CHUNK = 3,
	LOOP_NO_EMPTY_SIZE = 4,
};
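/*
 * find_free_extent() escalates through these states each time it fails to
 * place an allocation: first only fully cached block groups are tried,
 * then partially cached ones without waiting, then it waits on the caching
 * threads, then it forces a chunk allocation, and as a last resort it
 * retries with empty_size and empty_cluster forced to 0.
 */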
3612 * walks the btree of allocated extents and find a hole of a given size.
3613 * The key ins is changed to record the hole:
3614 * ins->objectid == block start
3615 * ins->flags = BTRFS_EXTENT_ITEM_KEY
3616 * ins->offset == number of blocks
3617 * Any available blocks before search_start are skipped.
3619 static noinline
int find_free_extent(struct btrfs_trans_handle
*trans
,
3620 struct btrfs_root
*orig_root
,
3621 u64 num_bytes
, u64 empty_size
,
3622 u64 search_start
, u64 search_end
,
3623 u64 hint_byte
, struct btrfs_key
*ins
,
3624 u64 exclude_start
, u64 exclude_nr
,
3628 struct btrfs_root
*root
= orig_root
->fs_info
->extent_root
;
3629 struct btrfs_free_cluster
*last_ptr
= NULL
;
3630 struct btrfs_block_group_cache
*block_group
= NULL
;
3631 int empty_cluster
= 2 * 1024 * 1024;
3632 int allowed_chunk_alloc
= 0;
3633 struct btrfs_space_info
*space_info
;
3634 int last_ptr_loop
= 0;
3636 bool found_uncached_bg
= false;
3638 WARN_ON(num_bytes
< root
->sectorsize
);
3639 btrfs_set_key_type(ins
, BTRFS_EXTENT_ITEM_KEY
);
3643 space_info
= __find_space_info(root
->fs_info
, data
);
3645 if (orig_root
->ref_cows
|| empty_size
)
3646 allowed_chunk_alloc
= 1;
3648 if (data
& BTRFS_BLOCK_GROUP_METADATA
) {
3649 last_ptr
= &root
->fs_info
->meta_alloc_cluster
;
3650 if (!btrfs_test_opt(root
, SSD
))
3651 empty_cluster
= 64 * 1024;
3654 if ((data
& BTRFS_BLOCK_GROUP_DATA
) && btrfs_test_opt(root
, SSD
)) {
3655 last_ptr
= &root
->fs_info
->data_alloc_cluster
;
3659 spin_lock(&last_ptr
->lock
);
3660 if (last_ptr
->block_group
)
3661 hint_byte
= last_ptr
->window_start
;
3662 spin_unlock(&last_ptr
->lock
);
3665 search_start
= max(search_start
, first_logical_byte(root
, 0));
3666 search_start
= max(search_start
, hint_byte
);
3671 if (search_start
== hint_byte
) {
3672 block_group
= btrfs_lookup_block_group(root
->fs_info
,
3675 * we don't want to use the block group if it doesn't match our
3676 * allocation bits, or if its not cached.
3678 if (block_group
&& block_group_bits(block_group
, data
) &&
3679 block_group_cache_done(block_group
)) {
3680 down_read(&space_info
->groups_sem
);
3681 if (list_empty(&block_group
->list
) ||
3684 * someone is removing this block group,
3685 * we can't jump into the have_block_group
3686 * target because our list pointers are not
3689 btrfs_put_block_group(block_group
);
3690 up_read(&space_info
->groups_sem
);
3692 goto have_block_group
;
3693 } else if (block_group
) {
3694 btrfs_put_block_group(block_group
);
3699 down_read(&space_info
->groups_sem
);
3700 list_for_each_entry(block_group
, &space_info
->block_groups
, list
) {
3704 atomic_inc(&block_group
->count
);
3705 search_start
= block_group
->key
.objectid
;
3708 if (unlikely(block_group
->cached
== BTRFS_CACHE_NO
)) {
3710 * we want to start caching kthreads, but not too many
3711 * right off the bat so we don't overwhelm the system,
3712 * so only start them if there are less than 2 and we're
3713 * in the initial allocation phase.
3715 if (loop
> LOOP_CACHING_NOWAIT
||
3716 atomic_read(&space_info
->caching_threads
) < 2) {
3717 ret
= cache_block_group(block_group
);
3722 cached
= block_group_cache_done(block_group
);
3723 if (unlikely(!cached
)) {
3724 found_uncached_bg
= true;
3726 /* if we only want cached bgs, loop */
3727 if (loop
== LOOP_CACHED_ONLY
)
3731 if (unlikely(block_group
->ro
))
3736 * the refill lock keeps out other
3737 * people trying to start a new cluster
3739 spin_lock(&last_ptr
->refill_lock
);
3740 if (last_ptr
->block_group
&&
3741 (last_ptr
->block_group
->ro
||
3742 !block_group_bits(last_ptr
->block_group
, data
))) {
3744 goto refill_cluster
;
3747 offset
= btrfs_alloc_from_cluster(block_group
, last_ptr
,
3748 num_bytes
, search_start
);
3750 /* we have a block, we're done */
3751 spin_unlock(&last_ptr
->refill_lock
);
3755 spin_lock(&last_ptr
->lock
);
			/*
			 * whoops, this cluster doesn't actually point to
			 * this block group.  Get a ref on the block
			 * group it does point to and try again
			 */
3761 if (!last_ptr_loop
&& last_ptr
->block_group
&&
3762 last_ptr
->block_group
!= block_group
) {
3764 btrfs_put_block_group(block_group
);
3765 block_group
= last_ptr
->block_group
;
3766 atomic_inc(&block_group
->count
);
3767 spin_unlock(&last_ptr
->lock
);
3768 spin_unlock(&last_ptr
->refill_lock
);
3771 search_start
= block_group
->key
.objectid
;
3773 * we know this block group is properly
3774 * in the list because
3775 * btrfs_remove_block_group, drops the
3776 * cluster before it removes the block
3777 * group from the list
3779 goto have_block_group
;
3781 spin_unlock(&last_ptr
->lock
);
3784 * this cluster didn't work out, free it and
3787 btrfs_return_cluster_to_free_space(NULL
, last_ptr
);
3791 /* allocate a cluster in this block group */
3792 ret
= btrfs_find_space_cluster(trans
, root
,
3793 block_group
, last_ptr
,
3795 empty_cluster
+ empty_size
);
3798 * now pull our allocation out of this
3801 offset
= btrfs_alloc_from_cluster(block_group
,
3802 last_ptr
, num_bytes
,
3805 /* we found one, proceed */
3806 spin_unlock(&last_ptr
->refill_lock
);
3809 } else if (!cached
&& loop
> LOOP_CACHING_NOWAIT
) {
3810 spin_unlock(&last_ptr
->refill_lock
);
3812 wait_block_group_cache_progress(block_group
,
3813 num_bytes
+ empty_cluster
+ empty_size
);
3814 goto have_block_group
;
3818 * at this point we either didn't find a cluster
3819 * or we weren't able to allocate a block from our
3820 * cluster. Free the cluster we've been trying
3821 * to use, and go to the next block group
3823 if (loop
< LOOP_NO_EMPTY_SIZE
) {
3824 btrfs_return_cluster_to_free_space(NULL
,
3826 spin_unlock(&last_ptr
->refill_lock
);
3829 spin_unlock(&last_ptr
->refill_lock
);
3832 offset
= btrfs_find_space_for_alloc(block_group
, search_start
,
3833 num_bytes
, empty_size
);
3834 if (!offset
&& (cached
|| (!cached
&&
3835 loop
== LOOP_CACHING_NOWAIT
))) {
3837 } else if (!offset
&& (!cached
&&
3838 loop
> LOOP_CACHING_NOWAIT
)) {
3839 wait_block_group_cache_progress(block_group
,
3840 num_bytes
+ empty_size
);
3841 goto have_block_group
;
3844 search_start
= stripe_align(root
, offset
);
3845 /* move on to the next group */
3846 if (search_start
+ num_bytes
>= search_end
) {
3847 btrfs_add_free_space(block_group
, offset
, num_bytes
);
3851 /* move on to the next group */
3852 if (search_start
+ num_bytes
>
3853 block_group
->key
.objectid
+ block_group
->key
.offset
) {
3854 btrfs_add_free_space(block_group
, offset
, num_bytes
);
3858 if (exclude_nr
> 0 &&
3859 (search_start
+ num_bytes
> exclude_start
&&
3860 search_start
< exclude_start
+ exclude_nr
)) {
3861 search_start
= exclude_start
+ exclude_nr
;
3863 btrfs_add_free_space(block_group
, offset
, num_bytes
);
3865 * if search_start is still in this block group
3866 * then we just re-search this block group
3868 if (search_start
>= block_group
->key
.objectid
&&
3869 search_start
< (block_group
->key
.objectid
+
3870 block_group
->key
.offset
))
3871 goto have_block_group
;
3875 ins
->objectid
= search_start
;
3876 ins
->offset
= num_bytes
;
3878 if (offset
< search_start
)
3879 btrfs_add_free_space(block_group
, offset
,
3880 search_start
- offset
);
3881 BUG_ON(offset
> search_start
);
3883 /* we are all good, lets return */
3886 btrfs_put_block_group(block_group
);
3888 up_read(&space_info
->groups_sem
);
	/*
	 * LOOP_CACHED_ONLY, only search fully cached block groups
	 * LOOP_CACHING_NOWAIT, search partially cached block groups, but
	 *	don't wait for them to finish caching
	 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
	 * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
	 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
	 *	again
	 */
3898 if (!ins
->objectid
&& loop
< LOOP_NO_EMPTY_SIZE
&&
3899 (found_uncached_bg
|| empty_size
|| empty_cluster
||
3900 allowed_chunk_alloc
)) {
3901 if (found_uncached_bg
) {
3902 found_uncached_bg
= false;
3903 if (loop
< LOOP_CACHING_WAIT
) {
3909 if (loop
== LOOP_ALLOC_CHUNK
) {
3914 if (allowed_chunk_alloc
) {
3915 ret
= do_chunk_alloc(trans
, root
, num_bytes
+
3916 2 * 1024 * 1024, data
, 1);
3917 allowed_chunk_alloc
= 0;
3919 space_info
->force_alloc
= 1;
3922 if (loop
< LOOP_NO_EMPTY_SIZE
) {
3927 } else if (!ins
->objectid
) {
3931 /* we found what we needed */
3932 if (ins
->objectid
) {
3933 if (!(data
& BTRFS_BLOCK_GROUP_DATA
))
3934 trans
->block_group
= block_group
->key
.objectid
;
3936 btrfs_put_block_group(block_group
);
3943 static void dump_space_info(struct btrfs_space_info
*info
, u64 bytes
)
3945 struct btrfs_block_group_cache
*cache
;
3947 printk(KERN_INFO
"space_info has %llu free, is %sfull\n",
3948 (unsigned long long)(info
->total_bytes
- info
->bytes_used
-
3949 info
->bytes_pinned
- info
->bytes_reserved
),
3950 (info
->full
) ? "" : "not ");
3951 printk(KERN_INFO
"space_info total=%llu, pinned=%llu, delalloc=%llu,"
3952 " may_use=%llu, used=%llu\n",
3953 (unsigned long long)info
->total_bytes
,
3954 (unsigned long long)info
->bytes_pinned
,
3955 (unsigned long long)info
->bytes_delalloc
,
3956 (unsigned long long)info
->bytes_may_use
,
3957 (unsigned long long)info
->bytes_used
);
3959 down_read(&info
->groups_sem
);
3960 list_for_each_entry(cache
, &info
->block_groups
, list
) {
3961 spin_lock(&cache
->lock
);
3962 printk(KERN_INFO
"block group %llu has %llu bytes, %llu used "
3963 "%llu pinned %llu reserved\n",
3964 (unsigned long long)cache
->key
.objectid
,
3965 (unsigned long long)cache
->key
.offset
,
3966 (unsigned long long)btrfs_block_group_used(&cache
->item
),
3967 (unsigned long long)cache
->pinned
,
3968 (unsigned long long)cache
->reserved
);
3969 btrfs_dump_free_space(cache
, bytes
);
3970 spin_unlock(&cache
->lock
);
3972 up_read(&info
->groups_sem
);
3975 static int __btrfs_reserve_extent(struct btrfs_trans_handle
*trans
,
3976 struct btrfs_root
*root
,
3977 u64 num_bytes
, u64 min_alloc_size
,
3978 u64 empty_size
, u64 hint_byte
,
3979 u64 search_end
, struct btrfs_key
*ins
,
3983 u64 search_start
= 0;
3984 struct btrfs_fs_info
*info
= root
->fs_info
;
3986 data
= btrfs_get_alloc_profile(root
, data
);
3989 * the only place that sets empty_size is btrfs_realloc_node, which
3990 * is not called recursively on allocations
3992 if (empty_size
|| root
->ref_cows
) {
3993 if (!(data
& BTRFS_BLOCK_GROUP_METADATA
)) {
3994 ret
= do_chunk_alloc(trans
, root
->fs_info
->extent_root
,
3996 BTRFS_BLOCK_GROUP_METADATA
|
3997 (info
->metadata_alloc_profile
&
3998 info
->avail_metadata_alloc_bits
), 0);
4000 ret
= do_chunk_alloc(trans
, root
->fs_info
->extent_root
,
4001 num_bytes
+ 2 * 1024 * 1024, data
, 0);
4004 WARN_ON(num_bytes
< root
->sectorsize
);
4005 ret
= find_free_extent(trans
, root
, num_bytes
, empty_size
,
4006 search_start
, search_end
, hint_byte
, ins
,
4007 trans
->alloc_exclude_start
,
4008 trans
->alloc_exclude_nr
, data
);
4010 if (ret
== -ENOSPC
&& num_bytes
> min_alloc_size
) {
4011 num_bytes
= num_bytes
>> 1;
4012 num_bytes
= num_bytes
& ~(root
->sectorsize
- 1);
4013 num_bytes
= max(num_bytes
, min_alloc_size
);
4014 do_chunk_alloc(trans
, root
->fs_info
->extent_root
,
4015 num_bytes
, data
, 1);
4018 if (ret
== -ENOSPC
) {
4019 struct btrfs_space_info
*sinfo
;
4021 sinfo
= __find_space_info(root
->fs_info
, data
);
4022 printk(KERN_ERR
"btrfs allocation failed flags %llu, "
4023 "wanted %llu\n", (unsigned long long)data
,
4024 (unsigned long long)num_bytes
);
4025 dump_space_info(sinfo
, num_bytes
);
int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len)
{
	int ret;
	struct btrfs_block_group_cache *cache;

	cache = btrfs_lookup_block_group(root->fs_info, start);
	if (!cache) {
		printk(KERN_ERR "Unable to find block group for %llu\n",
		       (unsigned long long)start);
		return -ENOSPC;
	}

	ret = btrfs_discard_extent(root, start, len);

	btrfs_add_free_space(cache, start, len);
	btrfs_put_block_group(cache);
	update_reserved_extents(root, start, len, 0);

	return ret;
}
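/*
 * btrfs_free_reserved_extent() undoes a reservation that never became a
 * real extent item: the range is handed to the discard machinery, put back
 * into the block group's free space cache, and the bytes_reserved
 * accounting taken in btrfs_reserve_extent() is dropped again via
 * update_reserved_extents(root, start, len, 0).
 */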
4052 int btrfs_reserve_extent(struct btrfs_trans_handle
*trans
,
4053 struct btrfs_root
*root
,
4054 u64 num_bytes
, u64 min_alloc_size
,
4055 u64 empty_size
, u64 hint_byte
,
4056 u64 search_end
, struct btrfs_key
*ins
,
4060 ret
= __btrfs_reserve_extent(trans
, root
, num_bytes
, min_alloc_size
,
4061 empty_size
, hint_byte
, search_end
, ins
,
4064 update_reserved_extents(root
, ins
->objectid
, ins
->offset
, 1);
4069 static int alloc_reserved_file_extent(struct btrfs_trans_handle
*trans
,
4070 struct btrfs_root
*root
,
4071 u64 parent
, u64 root_objectid
,
4072 u64 flags
, u64 owner
, u64 offset
,
4073 struct btrfs_key
*ins
, int ref_mod
)
4076 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
4077 struct btrfs_extent_item
*extent_item
;
4078 struct btrfs_extent_inline_ref
*iref
;
4079 struct btrfs_path
*path
;
4080 struct extent_buffer
*leaf
;
4085 type
= BTRFS_SHARED_DATA_REF_KEY
;
4087 type
= BTRFS_EXTENT_DATA_REF_KEY
;
4089 size
= sizeof(*extent_item
) + btrfs_extent_inline_ref_size(type
);
4091 path
= btrfs_alloc_path();
4094 path
->leave_spinning
= 1;
4095 ret
= btrfs_insert_empty_item(trans
, fs_info
->extent_root
, path
,
4099 leaf
= path
->nodes
[0];
4100 extent_item
= btrfs_item_ptr(leaf
, path
->slots
[0],
4101 struct btrfs_extent_item
);
4102 btrfs_set_extent_refs(leaf
, extent_item
, ref_mod
);
4103 btrfs_set_extent_generation(leaf
, extent_item
, trans
->transid
);
4104 btrfs_set_extent_flags(leaf
, extent_item
,
4105 flags
| BTRFS_EXTENT_FLAG_DATA
);
4107 iref
= (struct btrfs_extent_inline_ref
*)(extent_item
+ 1);
4108 btrfs_set_extent_inline_ref_type(leaf
, iref
, type
);
4110 struct btrfs_shared_data_ref
*ref
;
4111 ref
= (struct btrfs_shared_data_ref
*)(iref
+ 1);
4112 btrfs_set_extent_inline_ref_offset(leaf
, iref
, parent
);
4113 btrfs_set_shared_data_ref_count(leaf
, ref
, ref_mod
);
4115 struct btrfs_extent_data_ref
*ref
;
4116 ref
= (struct btrfs_extent_data_ref
*)(&iref
->offset
);
4117 btrfs_set_extent_data_ref_root(leaf
, ref
, root_objectid
);
4118 btrfs_set_extent_data_ref_objectid(leaf
, ref
, owner
);
4119 btrfs_set_extent_data_ref_offset(leaf
, ref
, offset
);
4120 btrfs_set_extent_data_ref_count(leaf
, ref
, ref_mod
);
4123 btrfs_mark_buffer_dirty(path
->nodes
[0]);
4124 btrfs_free_path(path
);
4126 ret
= update_block_group(trans
, root
, ins
->objectid
, ins
->offset
,
4129 printk(KERN_ERR
"btrfs update block group failed for %llu "
4130 "%llu\n", (unsigned long long)ins
->objectid
,
4131 (unsigned long long)ins
->offset
);
4137 static int alloc_reserved_tree_block(struct btrfs_trans_handle
*trans
,
4138 struct btrfs_root
*root
,
4139 u64 parent
, u64 root_objectid
,
4140 u64 flags
, struct btrfs_disk_key
*key
,
4141 int level
, struct btrfs_key
*ins
)
4144 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
4145 struct btrfs_extent_item
*extent_item
;
4146 struct btrfs_tree_block_info
*block_info
;
4147 struct btrfs_extent_inline_ref
*iref
;
4148 struct btrfs_path
*path
;
4149 struct extent_buffer
*leaf
;
4150 u32 size
= sizeof(*extent_item
) + sizeof(*block_info
) + sizeof(*iref
);
4152 path
= btrfs_alloc_path();
4155 path
->leave_spinning
= 1;
4156 ret
= btrfs_insert_empty_item(trans
, fs_info
->extent_root
, path
,
4160 leaf
= path
->nodes
[0];
4161 extent_item
= btrfs_item_ptr(leaf
, path
->slots
[0],
4162 struct btrfs_extent_item
);
4163 btrfs_set_extent_refs(leaf
, extent_item
, 1);
4164 btrfs_set_extent_generation(leaf
, extent_item
, trans
->transid
);
4165 btrfs_set_extent_flags(leaf
, extent_item
,
4166 flags
| BTRFS_EXTENT_FLAG_TREE_BLOCK
);
4167 block_info
= (struct btrfs_tree_block_info
*)(extent_item
+ 1);
4169 btrfs_set_tree_block_key(leaf
, block_info
, key
);
4170 btrfs_set_tree_block_level(leaf
, block_info
, level
);
4172 iref
= (struct btrfs_extent_inline_ref
*)(block_info
+ 1);
4174 BUG_ON(!(flags
& BTRFS_BLOCK_FLAG_FULL_BACKREF
));
4175 btrfs_set_extent_inline_ref_type(leaf
, iref
,
4176 BTRFS_SHARED_BLOCK_REF_KEY
);
4177 btrfs_set_extent_inline_ref_offset(leaf
, iref
, parent
);
4179 btrfs_set_extent_inline_ref_type(leaf
, iref
,
4180 BTRFS_TREE_BLOCK_REF_KEY
);
4181 btrfs_set_extent_inline_ref_offset(leaf
, iref
, root_objectid
);
4184 btrfs_mark_buffer_dirty(leaf
);
4185 btrfs_free_path(path
);
4187 ret
= update_block_group(trans
, root
, ins
->objectid
, ins
->offset
,
4190 printk(KERN_ERR
"btrfs update block group failed for %llu "
4191 "%llu\n", (unsigned long long)ins
->objectid
,
4192 (unsigned long long)ins
->offset
);
int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     u64 root_objectid, u64 owner,
				     u64 offset, struct btrfs_key *ins)
{
	int ret;

	BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);

	ret = btrfs_add_delayed_data_ref(trans, ins->objectid, ins->offset,
					 0, root_objectid, owner, offset,
					 BTRFS_ADD_DELAYED_EXTENT, NULL);
	return ret;
}
4214 * this is used by the tree logging recovery code. It records that
4215 * an extent has been allocated and makes sure to clear the free
4216 * space cache bits as well
4218 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle
*trans
,
4219 struct btrfs_root
*root
,
4220 u64 root_objectid
, u64 owner
, u64 offset
,
4221 struct btrfs_key
*ins
)
4224 struct btrfs_block_group_cache
*block_group
;
4226 block_group
= btrfs_lookup_block_group(root
->fs_info
, ins
->objectid
);
4227 cache_block_group(block_group
);
4228 wait_event(block_group
->caching_q
,
4229 block_group_cache_done(block_group
));
4231 ret
= btrfs_remove_free_space(block_group
, ins
->objectid
,
4234 btrfs_put_block_group(block_group
);
4235 ret
= alloc_reserved_file_extent(trans
, root
, 0, root_objectid
,
4236 0, owner
, offset
, ins
, 1);
4241 * finds a free extent and does all the dirty work required for allocation
4242 * returns the key for the extent through ins, and a tree buffer for
4243 * the first block of the extent through buf.
4245 * returns 0 if everything worked, non-zero otherwise.
4247 static int alloc_tree_block(struct btrfs_trans_handle
*trans
,
4248 struct btrfs_root
*root
,
4249 u64 num_bytes
, u64 parent
, u64 root_objectid
,
4250 struct btrfs_disk_key
*key
, int level
,
4251 u64 empty_size
, u64 hint_byte
, u64 search_end
,
4252 struct btrfs_key
*ins
)
4257 ret
= __btrfs_reserve_extent(trans
, root
, num_bytes
, num_bytes
,
4258 empty_size
, hint_byte
, search_end
,
4263 if (root_objectid
== BTRFS_TREE_RELOC_OBJECTID
) {
4265 parent
= ins
->objectid
;
4266 flags
|= BTRFS_BLOCK_FLAG_FULL_BACKREF
;
4270 update_reserved_extents(root
, ins
->objectid
, ins
->offset
, 1);
4271 if (root_objectid
!= BTRFS_TREE_LOG_OBJECTID
) {
4272 struct btrfs_delayed_extent_op
*extent_op
;
4273 extent_op
= kmalloc(sizeof(*extent_op
), GFP_NOFS
);
4276 memcpy(&extent_op
->key
, key
, sizeof(extent_op
->key
));
4278 memset(&extent_op
->key
, 0, sizeof(extent_op
->key
));
4279 extent_op
->flags_to_set
= flags
;
4280 extent_op
->update_key
= 1;
4281 extent_op
->update_flags
= 1;
4282 extent_op
->is_data
= 0;
4284 ret
= btrfs_add_delayed_tree_ref(trans
, ins
->objectid
,
4285 ins
->offset
, parent
, root_objectid
,
4286 level
, BTRFS_ADD_DELAYED_EXTENT
,
4293 struct extent_buffer
*btrfs_init_new_buffer(struct btrfs_trans_handle
*trans
,
4294 struct btrfs_root
*root
,
4295 u64 bytenr
, u32 blocksize
,
4298 struct extent_buffer
*buf
;
4300 buf
= btrfs_find_create_tree_block(root
, bytenr
, blocksize
);
4302 return ERR_PTR(-ENOMEM
);
4303 btrfs_set_header_generation(buf
, trans
->transid
);
4304 btrfs_set_buffer_lockdep_class(buf
, level
);
4305 btrfs_tree_lock(buf
);
4306 clean_tree_block(trans
, root
, buf
);
4308 btrfs_set_lock_blocking(buf
);
4309 btrfs_set_buffer_uptodate(buf
);
4311 if (root
->root_key
.objectid
== BTRFS_TREE_LOG_OBJECTID
) {
4312 set_extent_dirty(&root
->dirty_log_pages
, buf
->start
,
4313 buf
->start
+ buf
->len
- 1, GFP_NOFS
);
4315 set_extent_dirty(&trans
->transaction
->dirty_pages
, buf
->start
,
4316 buf
->start
+ buf
->len
- 1, GFP_NOFS
);
4318 trans
->blocks_used
++;
4319 /* this returns a buffer locked for blocking */
4324 * helper function to allocate a block for a given tree
4325 * returns the tree buffer or NULL.
4327 struct extent_buffer
*btrfs_alloc_free_block(struct btrfs_trans_handle
*trans
,
4328 struct btrfs_root
*root
, u32 blocksize
,
4329 u64 parent
, u64 root_objectid
,
4330 struct btrfs_disk_key
*key
, int level
,
4331 u64 hint
, u64 empty_size
)
4333 struct btrfs_key ins
;
4335 struct extent_buffer
*buf
;
4337 ret
= alloc_tree_block(trans
, root
, blocksize
, parent
, root_objectid
,
4338 key
, level
, empty_size
, hint
, (u64
)-1, &ins
);
4341 return ERR_PTR(ret
);
4344 buf
= btrfs_init_new_buffer(trans
, root
, ins
.objectid
,
4350 int btrfs_drop_leaf_ref(struct btrfs_trans_handle
*trans
,
4351 struct btrfs_root
*root
, struct extent_buffer
*leaf
)
4355 struct btrfs_key key
;
4356 struct btrfs_file_extent_item
*fi
;
4361 BUG_ON(!btrfs_is_leaf(leaf
));
4362 nritems
= btrfs_header_nritems(leaf
);
4364 for (i
= 0; i
< nritems
; i
++) {
4366 btrfs_item_key_to_cpu(leaf
, &key
, i
);
4368 /* only extents have references, skip everything else */
4369 if (btrfs_key_type(&key
) != BTRFS_EXTENT_DATA_KEY
)
4372 fi
= btrfs_item_ptr(leaf
, i
, struct btrfs_file_extent_item
);
4374 /* inline extents live in the btree, they don't have refs */
4375 if (btrfs_file_extent_type(leaf
, fi
) ==
4376 BTRFS_FILE_EXTENT_INLINE
)
4379 disk_bytenr
= btrfs_file_extent_disk_bytenr(leaf
, fi
);
4381 /* holes don't have refs */
4382 if (disk_bytenr
== 0)
4385 num_bytes
= btrfs_file_extent_disk_num_bytes(leaf
, fi
);
4386 ret
= btrfs_free_extent(trans
, root
, disk_bytenr
, num_bytes
,
4387 leaf
->start
, 0, key
.objectid
, 0);
4393 static noinline
int cache_drop_leaf_ref(struct btrfs_trans_handle
*trans
,
4394 struct btrfs_root
*root
,
4395 struct btrfs_leaf_ref
*ref
)
4399 struct btrfs_extent_info
*info
;
4400 struct refsort
*sorted
;
4402 if (ref
->nritems
== 0)
4405 sorted
= kmalloc(sizeof(*sorted
) * ref
->nritems
, GFP_NOFS
);
4406 for (i
= 0; i
< ref
->nritems
; i
++) {
4407 sorted
[i
].bytenr
= ref
->extents
[i
].bytenr
;
4410 sort(sorted
, ref
->nritems
, sizeof(struct refsort
), refsort_cmp
, NULL
);
4413 * the items in the ref were sorted when the ref was inserted
4414 * into the ref cache, so this is already in order
4416 for (i
= 0; i
< ref
->nritems
; i
++) {
4417 info
= ref
->extents
+ sorted
[i
].slot
;
4418 ret
= btrfs_free_extent(trans
, root
, info
->bytenr
,
4419 info
->num_bytes
, ref
->bytenr
,
4420 ref
->owner
, ref
->generation
,
4423 atomic_inc(&root
->fs_info
->throttle_gen
);
4424 wake_up(&root
->fs_info
->transaction_throttle
);
4436 static int drop_snap_lookup_refcount(struct btrfs_trans_handle
*trans
,
4437 struct btrfs_root
*root
, u64 start
,
4442 ret
= btrfs_lookup_extent_refs(trans
, root
, start
, len
, refs
);
4445 #if 0 /* some debugging code in case we see problems here */
4446 /* if the refs count is one, it won't get increased again. But
4447 * if the ref count is > 1, someone may be decreasing it at
4448 * the same time we are.
4451 struct extent_buffer
*eb
= NULL
;
4452 eb
= btrfs_find_create_tree_block(root
, start
, len
);
4454 btrfs_tree_lock(eb
);
4456 mutex_lock(&root
->fs_info
->alloc_mutex
);
4457 ret
= lookup_extent_ref(NULL
, root
, start
, len
, refs
);
4459 mutex_unlock(&root
->fs_info
->alloc_mutex
);
4462 btrfs_tree_unlock(eb
);
4463 free_extent_buffer(eb
);
4466 printk(KERN_ERR
"btrfs block %llu went down to one "
4467 "during drop_snap\n", (unsigned long long)start
);
/*
 * this is used while deleting old snapshots, and it drops the refs
 * on a whole subtree starting from a level 1 node.
 *
 * The idea is to sort all the leaf pointers, and then drop the
 * ref on all the leaves in order.  Most of the time the leaves
 * will have ref cache entries, so no leaf IOs will be required to
 * find the extents they have references on.
 *
 * For each leaf, any references it has are also dropped in order
 *
 * This ends up dropping the references in something close to optimal
 * order for reading and modifying the extent allocation tree.
 */
static noinline int drop_level_one_refs(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path)
{
	struct extent_buffer *eb = path->nodes[1];
	struct extent_buffer *leaf;
	struct btrfs_leaf_ref *ref;
	struct refsort *sorted = NULL;
	int nritems = btrfs_header_nritems(eb);
	int slot = path->slots[1];
	u32 blocksize = btrfs_level_size(root, 0);
	int refi = 0;
	int i;
	int ret;
	u32 refs;
	u64 bytenr;
	u64 ptr_gen;
	u64 root_owner;
	u64 root_gen;

	root_owner = btrfs_header_owner(eb);
	root_gen = btrfs_header_generation(eb);
	sorted = kmalloc(sizeof(*sorted) * nritems, GFP_NOFS);

	/*
	 * step one, sort all the leaf pointers so we don't scribble
	 * randomly into the extent allocation tree
	 */
	for (i = slot; i < nritems; i++) {
		sorted[refi].bytenr = btrfs_node_blockptr(eb, i);
		sorted[refi].slot = i;
		refi++;
	}

	/*
	 * nritems won't be zero, but if we're picking up drop_snapshot
	 * after a crash, slot might be > 0, so double check things
	 */
	if (refi == 0)
		goto out;

	sort(sorted, refi, sizeof(struct refsort), refsort_cmp, NULL);

	/*
	 * the first loop frees everything the leaves point to
	 */
	for (i = 0; i < refi; i++) {
		bytenr = sorted[i].bytenr;

		/*
		 * check the reference count on this leaf.  If it is > 1
		 * we just decrement it below and don't update any
		 * of the refs the leaf points to.
		 */
		ret = drop_snap_lookup_refcount(trans, root, bytenr,
						blocksize, &refs);
		if (refs != 1)
			continue;

		ptr_gen = btrfs_node_ptr_generation(eb, sorted[i].slot);

		/*
		 * the leaf only had one reference, which means the
		 * only thing pointing to this leaf is the snapshot
		 * we're deleting.  It isn't possible for the reference
		 * count to increase again later
		 *
		 * The reference cache is checked for the leaf,
		 * and if found we'll be able to drop any refs held by
		 * the leaf without needing to read it in.
		 */
		ref = btrfs_lookup_leaf_ref(root, bytenr);
		if (ref && ref->generation != ptr_gen) {
			btrfs_free_leaf_ref(root, ref);
			ref = NULL;
		}
		if (ref) {
			ret = cache_drop_leaf_ref(trans, root, ref);
			btrfs_remove_leaf_ref(root, ref);
			btrfs_free_leaf_ref(root, ref);
		} else {
			/*
			 * the leaf wasn't in the reference cache, so
			 * we have to read it.
			 */
			leaf = read_tree_block(root, bytenr, blocksize,
					       ptr_gen);
			ret = btrfs_drop_leaf_ref(trans, root, leaf);
			free_extent_buffer(leaf);
		}
		atomic_inc(&root->fs_info->throttle_gen);
		wake_up(&root->fs_info->transaction_throttle);
	}

	/*
	 * run through the loop again to free the refs on the leaves.
	 * This is faster than doing it in the loop above because
	 * the leaves are likely to be clustered together.  We end up
	 * working in nice chunks on the extent allocation tree.
	 */
	for (i = 0; i < refi; i++) {
		bytenr = sorted[i].bytenr;
		ret = btrfs_free_extent(trans, root, bytenr,
					blocksize, eb->start,
					root_owner, root_gen, 0, 1);

		atomic_inc(&root->fs_info->throttle_gen);
		wake_up(&root->fs_info->transaction_throttle);
	}
out:
	/*
	 * update the path to show we've processed the entire level 1
	 * node.  This will get saved into the root's drop_snapshot_progress
	 * field so these drops are not repeated again if this transaction
	 * commits.
	 */
	path->slots[1] = nritems;
/*
 * helper function for drop_snapshot, this walks down the tree dropping ref
 * counts as it goes.
 */
static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path, int *level)
{
	u64 root_owner;
	u64 root_gen;
	u64 bytenr;
	u64 ptr_gen;
	struct extent_buffer *next;
	struct extent_buffer *cur;
	struct extent_buffer *parent;
	u32 blocksize;
	int ret;
	u32 refs;

	WARN_ON(*level < 0);
	WARN_ON(*level >= BTRFS_MAX_LEVEL);
	ret = drop_snap_lookup_refcount(trans, root, path->nodes[*level]->start,
					path->nodes[*level]->len, &refs);
	if (refs > 1)
		goto out;

	/*
	 * walk down to the last node level and free all the leaves
	 */
	while (*level >= 0) {
		WARN_ON(*level < 0);
		WARN_ON(*level >= BTRFS_MAX_LEVEL);
		cur = path->nodes[*level];

		if (btrfs_header_level(cur) != *level)
			WARN_ON(1);

		if (path->slots[*level] >=
		    btrfs_header_nritems(cur))
			break;

		/* the new code goes down to level 1 and does all the
		 * leaves pointed to that node in bulk.  So, this check
		 * for level 0 will always be false.
		 *
		 * But, the disk format allows the drop_snapshot_progress
		 * field in the root to leave things in a state where
		 * a leaf will need cleaning up here.  If someone crashes
		 * with the old code and then boots with the new code,
		 * we might find a leaf here.
		 */
		if (*level == 0) {
			ret = btrfs_drop_leaf_ref(trans, root, cur);
			break;
		}

		/*
		 * once we get to level one, process the whole node
		 * at once, including everything below it.
		 */
		if (*level == 1) {
			ret = drop_level_one_refs(trans, root, path);
			break;
		}

		bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
		ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
		blocksize = btrfs_level_size(root, *level - 1);

		ret = drop_snap_lookup_refcount(trans, root, bytenr,
						blocksize, &refs);

		/*
		 * if there is more than one reference, we don't need
		 * to read that node to drop any references it has.  We
		 * just drop the ref we hold on that node and move on to the
		 * next slot in this level.
		 */
		if (refs != 1) {
			parent = path->nodes[*level];
			root_owner = btrfs_header_owner(parent);
			root_gen = btrfs_header_generation(parent);
			path->slots[*level]++;

			ret = btrfs_free_extent(trans, root, bytenr,
						blocksize, parent->start,
						root_owner, root_gen,
						*level - 1, 1);

			atomic_inc(&root->fs_info->throttle_gen);
			wake_up(&root->fs_info->transaction_throttle);
			continue;
		}

		/*
		 * we need to keep freeing things in the next level down.
		 * read the block and loop around to process it
		 */
		next = read_tree_block(root, bytenr, blocksize, ptr_gen);
		WARN_ON(*level <= 0);
		if (path->nodes[*level-1])
			free_extent_buffer(path->nodes[*level-1]);
		path->nodes[*level-1] = next;
		*level = btrfs_header_level(next);
		path->slots[*level] = 0;
	}
out:
	WARN_ON(*level < 0);
	WARN_ON(*level >= BTRFS_MAX_LEVEL);

	if (path->nodes[*level] == root->node) {
		parent = path->nodes[*level];
		bytenr = path->nodes[*level]->start;
	} else {
		parent = path->nodes[*level + 1];
		bytenr = btrfs_node_blockptr(parent, path->slots[*level + 1]);
	}

	blocksize = btrfs_level_size(root, *level);
	root_owner = btrfs_header_owner(parent);
	root_gen = btrfs_header_generation(parent);

	/*
	 * cleanup and free the reference on the last node
	 * we processed
	 */
	ret = btrfs_free_extent(trans, root, bytenr, blocksize,
				parent->start, root_owner, root_gen,
				*level, 1);
	free_extent_buffer(path->nodes[*level]);
	path->nodes[*level] = NULL;
struct walk_control {
	u64 refs[BTRFS_MAX_LEVEL];
	u64 flags[BTRFS_MAX_LEVEL];
	struct btrfs_key update_progress;
	int stage;
	int level;
	int shared_level;
	int update_ref;
	int keep_locks;
};

#define DROP_REFERENCE	1
#define UPDATE_BACKREF	2
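/*
 * A snapshot-drop walk runs in one of two stages: DROP_REFERENCE drops
 * the references held by the tree being deleted, while UPDATE_BACKREF
 * rewrites back references for a shared subtree before the walk goes
 * back to dropping references.  wc->update_progress records where the
 * UPDATE_BACKREF pass started so DROP_REFERENCE can resume from there.
 */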
/*
 * helper to process tree block while walking down the tree.
 *
 * when wc->stage == DROP_REFERENCE, this function checks
 * reference count of the block. if the block is shared and
 * we need update back refs for the subtree rooted at the
 * block, this function changes wc->stage to UPDATE_BACKREF
 *
 * when wc->stage == UPDATE_BACKREF, this function updates
 * back refs for pointers in the block.
 *
 * NOTE: return value 1 means we should stop walking down.
 */
static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct walk_control *wc)
{
	int level = wc->level;
	struct extent_buffer *eb = path->nodes[level];
	struct btrfs_key key;
	u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
	int ret;

	if (wc->stage == UPDATE_BACKREF &&
	    btrfs_header_owner(eb) != root->root_key.objectid)
		return 1;

	/*
	 * when reference count of tree block is 1, it won't increase
	 * again. once full backref flag is set, we never clear it.
	 */
	if ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
	    (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag))) {
		BUG_ON(!path->locks[level]);
		ret = btrfs_lookup_extent_info(trans, root,
					       eb->start, eb->len,
					       &wc->refs[level],
					       &wc->flags[level]);
		BUG_ON(wc->refs[level] == 0);
	}

	if (wc->stage == DROP_REFERENCE &&
	    wc->update_ref && wc->refs[level] > 1) {
		BUG_ON(eb == root->node);
		BUG_ON(path->slots[level] > 0);
		if (level == 0)
			btrfs_item_key_to_cpu(eb, &key, path->slots[level]);
		else
			btrfs_node_key_to_cpu(eb, &key, path->slots[level]);
		if (btrfs_header_owner(eb) == root->root_key.objectid &&
		    btrfs_comp_cpu_keys(&key, &wc->update_progress) >= 0) {
			wc->stage = UPDATE_BACKREF;
			wc->shared_level = level;
		}
	}

	if (wc->stage == DROP_REFERENCE) {
		if (wc->refs[level] > 1)
			return 1;

		if (path->locks[level] && !wc->keep_locks) {
			btrfs_tree_unlock(eb);
			path->locks[level] = 0;
		}
		return 0;
	}

	/* wc->stage == UPDATE_BACKREF */
	if (!(wc->flags[level] & flag)) {
		BUG_ON(!path->locks[level]);
		ret = btrfs_inc_ref(trans, root, eb, 1);
		ret = btrfs_dec_ref(trans, root, eb, 0);
		ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
						  eb->len, flag, 0);
		wc->flags[level] |= flag;
	}

	/*
	 * the block is shared by multiple trees, so it's not good to
	 * keep the tree lock
	 */
	if (path->locks[level] && level > 0) {
		btrfs_tree_unlock(eb);
		path->locks[level] = 0;
	}
	return 0;
}
/*
 * helper to process tree block while walking up the tree.
 *
 * when wc->stage == DROP_REFERENCE, this function drops
 * reference count on the block.
 *
 * when wc->stage == UPDATE_BACKREF, this function changes
 * wc->stage back to DROP_REFERENCE if we changed wc->stage
 * to UPDATE_BACKREF previously while processing the block.
 *
 * NOTE: return value 1 means we should stop walking up.
 */
static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct walk_control *wc)
{
	int ret = 0;
	int level = wc->level;
	struct extent_buffer *eb = path->nodes[level];
	u64 parent = 0;

	if (wc->stage == UPDATE_BACKREF) {
		BUG_ON(wc->shared_level < level);
		if (level < wc->shared_level)
			goto out;

		BUG_ON(wc->refs[level] <= 1);
		ret = find_next_key(path, level + 1, &wc->update_progress);
		if (ret > 0)
			wc->update_ref = 0;

		wc->stage = DROP_REFERENCE;
		wc->shared_level = -1;
		path->slots[level] = 0;

		/*
		 * check reference count again if the block isn't locked.
		 * we should start walking down the tree again if reference
		 * count is one.
		 */
		if (!path->locks[level]) {
			btrfs_tree_lock(eb);
			btrfs_set_lock_blocking(eb);
			path->locks[level] = 1;

			ret = btrfs_lookup_extent_info(trans, root,
						       eb->start, eb->len,
						       &wc->refs[level],
						       &wc->flags[level]);
			BUG_ON(wc->refs[level] == 0);
			if (wc->refs[level] == 1) {
				btrfs_tree_unlock(eb);
				path->locks[level] = 0;
				return 1;
			}
		}
	}

	/* wc->stage == DROP_REFERENCE */
	BUG_ON(wc->refs[level] > 1 && !path->locks[level]);

	if (wc->refs[level] == 1) {
		if (level == 0) {
			if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
				ret = btrfs_dec_ref(trans, root, eb, 1);
			else
				ret = btrfs_dec_ref(trans, root, eb, 0);
		}
		/* make block locked assertion in clean_tree_block happy */
		if (!path->locks[level] &&
		    btrfs_header_generation(eb) == trans->transid) {
			btrfs_tree_lock(eb);
			btrfs_set_lock_blocking(eb);
			path->locks[level] = 1;
		}
		clean_tree_block(trans, root, eb);
	}

	if (eb == root->node) {
		if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
			parent = eb->start;
		else
			BUG_ON(root->root_key.objectid !=
			       btrfs_header_owner(eb));
	} else {
		if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
			parent = path->nodes[level + 1]->start;
		else
			BUG_ON(root->root_key.objectid !=
			       btrfs_header_owner(path->nodes[level + 1]));
	}

	ret = btrfs_free_extent(trans, root, eb->start, eb->len, parent,
				root->root_key.objectid, level, 0);
out:
	wc->refs[level] = 0;
	wc->flags[level] = 0;
	return ret;
}
static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct walk_control *wc)
{
	struct extent_buffer *next;
	struct extent_buffer *cur;
	u64 bytenr;
	u64 ptr_gen;
	u32 blocksize;
	int level = wc->level;
	int ret;

	while (level >= 0) {
		cur = path->nodes[level];
		BUG_ON(path->slots[level] >= btrfs_header_nritems(cur));

		ret = walk_down_proc(trans, root, path, wc);
		if (ret > 0)
			break;

		if (level == 0)
			break;

		bytenr = btrfs_node_blockptr(cur, path->slots[level]);
		blocksize = btrfs_level_size(root, level - 1);
		ptr_gen = btrfs_node_ptr_generation(cur, path->slots[level]);

		next = read_tree_block(root, bytenr, blocksize, ptr_gen);
		btrfs_tree_lock(next);
		btrfs_set_lock_blocking(next);

		level--;
		BUG_ON(level != btrfs_header_level(next));
		path->nodes[level] = next;
		path->slots[level] = 0;
		path->locks[level] = 1;
		wc->level = level;
	}
	return 0;
}
static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct walk_control *wc, int max_level)
{
	int level = wc->level;
	int ret;

	path->slots[level] = btrfs_header_nritems(path->nodes[level]);
	while (level < max_level && path->nodes[level]) {
		wc->level = level;
		if (path->slots[level] + 1 <
		    btrfs_header_nritems(path->nodes[level])) {
			path->slots[level]++;
			return 0;
		} else {
			ret = walk_up_proc(trans, root, path, wc);
			if (ret > 0)
				return 0;

			if (path->locks[level]) {
				btrfs_tree_unlock(path->nodes[level]);
				path->locks[level] = 0;
			}
			free_extent_buffer(path->nodes[level]);
			path->nodes[level] = NULL;
			level++;
		}
	}
	return 1;
}
/*
 * drop a subvolume tree.
 *
 * this function traverses the tree freeing any blocks that are only
 * referenced by the tree.
 *
 * when a shared tree block is found, this function decreases its
 * reference count by one. if update_ref is true, this function
 * also makes sure backrefs for the shared block and all lower level
 * blocks are properly updated.
 */
int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref)
{
	struct btrfs_path *path;
	struct btrfs_trans_handle *trans;
	struct btrfs_root *tree_root = root->fs_info->tree_root;
	struct btrfs_root_item *root_item = &root->root_item;
	struct walk_control *wc;
	struct btrfs_key key;
	int ret;
	int level;

	path = btrfs_alloc_path();

	wc = kzalloc(sizeof(*wc), GFP_NOFS);

	trans = btrfs_start_transaction(tree_root, 1);

	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
		level = btrfs_header_level(root->node);
		path->nodes[level] = btrfs_lock_root_node(root);
		btrfs_set_lock_blocking(path->nodes[level]);
		path->slots[level] = 0;
		path->locks[level] = 1;
		memset(&wc->update_progress, 0,
		       sizeof(wc->update_progress));
	} else {
		btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
		memcpy(&wc->update_progress, &key,
		       sizeof(wc->update_progress));

		level = root_item->drop_level;
		path->lowest_level = level;
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		path->lowest_level = 0;

		btrfs_node_key_to_cpu(path->nodes[level], &key,
				      path->slots[level]);
		WARN_ON(memcmp(&key, &wc->update_progress, sizeof(key)));

		/*
		 * unlock our path, this is safe because only this
		 * function is allowed to delete this snapshot
		 */
		btrfs_unlock_up_safe(path, 0);

		level = btrfs_header_level(root->node);
		while (1) {
			btrfs_tree_lock(path->nodes[level]);
			btrfs_set_lock_blocking(path->nodes[level]);

			ret = btrfs_lookup_extent_info(trans, root,
						path->nodes[level]->start,
						path->nodes[level]->len,
						&wc->refs[level],
						&wc->flags[level]);
			BUG_ON(wc->refs[level] == 0);

			if (level == root_item->drop_level)
				break;

			btrfs_tree_unlock(path->nodes[level]);
			WARN_ON(wc->refs[level] != 1);
			level--;
		}
	}

	wc->level = level;
	wc->shared_level = -1;
	wc->stage = DROP_REFERENCE;
	wc->update_ref = update_ref;
	wc->keep_locks = 0;

	while (1) {
		ret = walk_down_tree(trans, root, path, wc);
		if (ret < 0)
			break;

		ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
		if (ret < 0)
			break;

		if (ret > 0) {
			BUG_ON(wc->stage != DROP_REFERENCE);
			break;
		}

		if (wc->stage == DROP_REFERENCE) {
			level = wc->level;
			btrfs_node_key(path->nodes[level],
				       &root_item->drop_progress,
				       path->slots[level]);
			root_item->drop_level = level;
		}

		BUG_ON(wc->level == 0);
		if (trans->transaction->in_commit ||
		    trans->transaction->delayed_refs.flushing) {
			ret = btrfs_update_root(trans, tree_root,
						&root->root_key,
						root_item);

			btrfs_end_transaction(trans, tree_root);
			trans = btrfs_start_transaction(tree_root, 1);
		} else {
			unsigned long update;
			update = trans->delayed_ref_updates;
			trans->delayed_ref_updates = 0;
			if (update)
				btrfs_run_delayed_refs(trans, tree_root,
						       update);
		}
	}
	btrfs_release_path(root, path);

	ret = btrfs_del_root(trans, tree_root, &root->root_key);

	free_extent_buffer(root->node);
	free_extent_buffer(root->commit_root);

	btrfs_end_transaction(trans, tree_root);
	kfree(wc);
	btrfs_free_path(path);
	return ret;
}
/*
 * drop subtree rooted at tree block 'node'.
 *
 * NOTE: this function will unlock and release tree block 'node'
 */
int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root,
		       struct extent_buffer *node,
		       struct extent_buffer *parent)
{
	struct btrfs_path *path;
	struct walk_control *wc;
	int level;
	int parent_level;
	int ret = 0;
	int wret;

	BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);

	path = btrfs_alloc_path();
	BUG_ON(!path);

	wc = kzalloc(sizeof(*wc), GFP_NOFS);
	BUG_ON(!wc);

	btrfs_assert_tree_locked(parent);
	parent_level = btrfs_header_level(parent);
	extent_buffer_get(parent);
	path->nodes[parent_level] = parent;
	path->slots[parent_level] = btrfs_header_nritems(parent);

	btrfs_assert_tree_locked(node);
	level = btrfs_header_level(node);
	path->nodes[level] = node;
	path->slots[level] = 0;
	path->locks[level] = 1;

	wc->refs[parent_level] = 1;
	wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
	wc->level = level;
	wc->shared_level = -1;
	wc->stage = DROP_REFERENCE;
	wc->update_ref = 0;
	wc->keep_locks = 1;

	while (1) {
		wret = walk_down_tree(trans, root, path, wc);
		if (wret < 0) {
			ret = wret;
			break;
		}

		wret = walk_up_tree(trans, root, path, wc, parent_level);
		if (wret < 0)
			ret = wret;
		if (wret != 0)
			break;
	}

	kfree(wc);
	btrfs_free_path(path);
	return ret;
}
static unsigned long calc_ra(unsigned long start, unsigned long last,
			     unsigned long nr)
{
	return min(last, start + nr - 1);
}
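/*
 * relocate_inode_pages below pulls an extent's data through the page
 * cache: each page is read in (with readahead clamped by calc_ra),
 * marked delalloc and dirty, and ordinary writeback then copies the
 * data to its new location.
 */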
static noinline int relocate_inode_pages(struct inode *inode, u64 start,
					 u64 len)
{
	u64 page_start;
	u64 page_end;
	unsigned long first_index;
	unsigned long last_index;
	unsigned long i;
	struct page *page;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct file_ra_state *ra;
	struct btrfs_ordered_extent *ordered;
	unsigned int total_read = 0;
	unsigned int total_dirty = 0;
	int ret = 0;

	ra = kzalloc(sizeof(*ra), GFP_NOFS);

	mutex_lock(&inode->i_mutex);
	first_index = start >> PAGE_CACHE_SHIFT;
	last_index = (start + len - 1) >> PAGE_CACHE_SHIFT;

	/* make sure the dirty trick played by the caller works */
	ret = invalidate_inode_pages2_range(inode->i_mapping,
					    first_index, last_index);

	file_ra_state_init(ra, inode->i_mapping);

	for (i = first_index ; i <= last_index; i++) {
		if (total_read % ra->ra_pages == 0) {
			btrfs_force_ra(inode->i_mapping, ra, NULL, i,
				       calc_ra(i, last_index, ra->ra_pages));
		}
		total_read++;
again:
		if (((u64)i << PAGE_CACHE_SHIFT) > i_size_read(inode))
			BUG_ON(1);
		page = grab_cache_page(inode->i_mapping, i);
		if (!PageUptodate(page)) {
			btrfs_readpage(NULL, page);
			lock_page(page);
			if (!PageUptodate(page)) {
				unlock_page(page);
				page_cache_release(page);
				ret = -EIO;
				goto out_unlock;
			}
		}
		wait_on_page_writeback(page);

		page_start = (u64)page->index << PAGE_CACHE_SHIFT;
		page_end = page_start + PAGE_CACHE_SIZE - 1;
		lock_extent(io_tree, page_start, page_end, GFP_NOFS);

		ordered = btrfs_lookup_ordered_extent(inode, page_start);
		if (ordered) {
			unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
			unlock_page(page);
			page_cache_release(page);
			btrfs_start_ordered_extent(inode, ordered, 1);
			btrfs_put_ordered_extent(ordered);
			goto again;
		}
		set_page_extent_mapped(page);

		if (i == first_index)
			set_extent_bits(io_tree, page_start, page_end,
					EXTENT_BOUNDARY, GFP_NOFS);
		btrfs_set_extent_delalloc(inode, page_start, page_end);

		set_page_dirty(page);
		total_dirty++;

		unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
		unlock_page(page);
		page_cache_release(page);
	}
out_unlock:
	kfree(ra);
	mutex_unlock(&inode->i_mutex);
	balance_dirty_pages_ratelimited_nr(inode->i_mapping, total_dirty);
	return ret;
}
static noinline int relocate_data_extent(struct inode *reloc_inode,
					 struct btrfs_key *extent_key,
					 u64 offset)
{
	struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
	struct extent_map_tree *em_tree = &BTRFS_I(reloc_inode)->extent_tree;
	struct extent_map *em;
	u64 start = extent_key->objectid - offset;
	u64 end = start + extent_key->offset - 1;

	em = alloc_extent_map(GFP_NOFS);
	BUG_ON(!em || IS_ERR(em));

	em->start = start;
	em->len = extent_key->offset;
	em->block_len = extent_key->offset;
	em->block_start = extent_key->objectid;
	em->bdev = root->fs_info->fs_devices->latest_bdev;
	set_bit(EXTENT_FLAG_PINNED, &em->flags);

	/* setup extent map to cheat btrfs_readpage */
	lock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
	while (1) {
		int ret;
		spin_lock(&em_tree->lock);
		ret = add_extent_mapping(em_tree, em);
		spin_unlock(&em_tree->lock);
		if (ret != -EEXIST) {
			free_extent_map(em);
			break;
		}
		btrfs_drop_extent_cache(reloc_inode, start, end, 0);
	}
	unlock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);

	return relocate_inode_pages(reloc_inode, start, extent_key->offset);
}
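/*
 * a btrfs_ref_path describes one chain of back references from an
 * extent up to the tree root that owns it.  __next_ref_path() below
 * walks these chains one at a time so relocation can visit every root
 * that references the extent being moved.
 */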
struct btrfs_ref_path {
	u64 extent_start;
	u64 nodes[BTRFS_MAX_LEVEL];
	u64 root_objectid;
	u64 root_generation;
	u64 owner_objectid;
	u32 num_refs;
	int lowest_level;
	int current_level;
	int shared_level;

	struct btrfs_key node_keys[BTRFS_MAX_LEVEL];
	u64 new_nodes[BTRFS_MAX_LEVEL];
};

struct disk_extent {
	u64 ram_bytes;
	u64 disk_bytenr;
	u64 disk_num_bytes;
	u64 offset;
	u64 num_bytes;
	u8 compression;
	u8 encryption;
	u16 other_encoding;
};

static int is_cowonly_root(u64 root_objectid)
{
	if (root_objectid == BTRFS_ROOT_TREE_OBJECTID ||
	    root_objectid == BTRFS_EXTENT_TREE_OBJECTID ||
	    root_objectid == BTRFS_CHUNK_TREE_OBJECTID ||
	    root_objectid == BTRFS_DEV_TREE_OBJECTID ||
	    root_objectid == BTRFS_TREE_LOG_OBJECTID ||
	    root_objectid == BTRFS_CSUM_TREE_OBJECTID)
		return 1;
	return 0;
}
static noinline int __next_ref_path(struct btrfs_trans_handle *trans,
				    struct btrfs_root *extent_root,
				    struct btrfs_ref_path *ref_path,
				    int first_time)
{
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_extent_ref *ref;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u64 bytenr;
	u32 nritems;
	int level;
	int ret = 1;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (first_time) {
		ref_path->lowest_level = -1;
		ref_path->current_level = -1;
		ref_path->shared_level = -1;
		goto walk_up;
	}
walk_down:
	level = ref_path->current_level - 1;
	while (level >= -1) {
		u64 parent;
		if (level < ref_path->lowest_level)
			break;

		if (level >= 0)
			bytenr = ref_path->nodes[level];
		else
			bytenr = ref_path->extent_start;
		BUG_ON(bytenr == 0);

		parent = ref_path->nodes[level + 1];
		ref_path->nodes[level + 1] = 0;
		ref_path->current_level = level;
		BUG_ON(parent == 0);

		key.objectid = bytenr;
		key.offset = parent + 1;
		key.type = BTRFS_EXTENT_REF_KEY;

		ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
		if (ret < 0)
			goto out;

		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(extent_root, path);
			if (ret < 0)
				goto out;
			if (ret > 0)
				goto next;
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid == bytenr &&
		    found_key.type == BTRFS_EXTENT_REF_KEY) {
			if (level < ref_path->shared_level)
				ref_path->shared_level = level;
			goto found;
		}
next:
		level--;
		btrfs_release_path(extent_root, path);
		cond_resched();
	}
	/* reached lowest level */
	ret = 1;
	goto out;
walk_up:
	level = ref_path->current_level;
	while (level < BTRFS_MAX_LEVEL - 1) {
		u64 ref_objectid;

		if (level >= 0)
			bytenr = ref_path->nodes[level];
		else
			bytenr = ref_path->extent_start;

		BUG_ON(bytenr == 0);

		key.objectid = bytenr;
		key.offset = 0;
		key.type = BTRFS_EXTENT_REF_KEY;

		ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
		if (ret < 0)
			goto out;

		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(extent_root, path);
			if (ret < 0)
				goto out;
			if (ret > 0) {
				/* the extent was freed by someone */
				if (ref_path->lowest_level == level)
					goto out;
				btrfs_release_path(extent_root, path);
				goto walk_down;
			}
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != bytenr ||
		    found_key.type != BTRFS_EXTENT_REF_KEY) {
			/* the extent was freed by someone */
			if (ref_path->lowest_level == level) {
				ret = 1;
				goto out;
			}
			btrfs_release_path(extent_root, path);
			goto walk_down;
		}
found:
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_ref);
		ref_objectid = btrfs_ref_objectid(leaf, ref);
		if (ref_objectid < BTRFS_FIRST_FREE_OBJECTID) {
			if (first_time) {
				level = (int)ref_objectid;
				BUG_ON(level >= BTRFS_MAX_LEVEL);
				ref_path->lowest_level = level;
				ref_path->current_level = level;
				ref_path->nodes[level] = bytenr;
			} else {
				WARN_ON(ref_objectid != level);
			}
		} else {
			WARN_ON(level != -1);
		}
		first_time = 0;

		if (ref_path->lowest_level == level) {
			ref_path->owner_objectid = ref_objectid;
			ref_path->num_refs = btrfs_ref_num_refs(leaf, ref);
		}

		/*
		 * the block is tree root or the block isn't in reference
		 * counted tree.
		 */
		if (found_key.objectid == found_key.offset ||
		    is_cowonly_root(btrfs_ref_root(leaf, ref))) {
			ref_path->root_objectid = btrfs_ref_root(leaf, ref);
			ref_path->root_generation =
				btrfs_ref_generation(leaf, ref);
			if (level < 0) {
				/* special reference from the tree log */
				ref_path->nodes[0] = found_key.offset;
				ref_path->current_level = 0;
			}
			ret = 0;
			goto out;
		}

		level++;
		BUG_ON(ref_path->nodes[level] != 0);
		ref_path->nodes[level] = found_key.offset;
		ref_path->current_level = level;

		/*
		 * the reference was created in the running transaction,
		 * no need to continue walking up.
		 */
		if (btrfs_ref_generation(leaf, ref) == trans->transid) {
			ref_path->root_objectid = btrfs_ref_root(leaf, ref);
			ref_path->root_generation =
				btrfs_ref_generation(leaf, ref);
			ret = 0;
			goto out;
		}

		btrfs_release_path(extent_root, path);
		cond_resched();
	}
	/* reached max tree level, but no tree root found. */
	BUG();
out:
	btrfs_free_path(path);
	return ret;
}
static int btrfs_first_ref_path(struct btrfs_trans_handle *trans,
				struct btrfs_root *extent_root,
				struct btrfs_ref_path *ref_path,
				u64 extent_start)
{
	memset(ref_path, 0, sizeof(*ref_path));
	ref_path->extent_start = extent_start;

	return __next_ref_path(trans, extent_root, ref_path, 1);
}

static int btrfs_next_ref_path(struct btrfs_trans_handle *trans,
			       struct btrfs_root *extent_root,
			       struct btrfs_ref_path *ref_path)
{
	return __next_ref_path(trans, extent_root, ref_path, 0);
}
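/*
 * callers iterate the reference paths of an extent by calling
 * btrfs_first_ref_path() once and then btrfs_next_ref_path() until a
 * non-zero return, as relocate_one_extent() does below.
 */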
static noinline int get_new_locations(struct inode *reloc_inode,
				      struct btrfs_key *extent_key,
				      u64 offset, int no_fragment,
				      struct disk_extent **extents,
				      int *nr_extents)
{
	struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct extent_buffer *leaf;
	struct disk_extent *exts = *extents;
	struct btrfs_key found_key;
	u64 cur_pos;
	u64 last_byte;
	u32 nritems;
	int nr = 0;
	int max = *nr_extents;
	int ret;

	WARN_ON(!no_fragment && *extents);
	if (!exts) {
		max = 1;
		exts = kmalloc(sizeof(*exts) * max, GFP_NOFS);
		if (!exts)
			return -ENOMEM;
	}

	path = btrfs_alloc_path();
	BUG_ON(!path);

	cur_pos = extent_key->objectid - offset;
	last_byte = extent_key->objectid + extent_key->offset;
	ret = btrfs_lookup_file_extent(NULL, root, path, reloc_inode->i_ino,
				       cur_pos, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	while (1) {
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.offset != cur_pos ||
		    found_key.type != BTRFS_EXTENT_DATA_KEY ||
		    found_key.objectid != reloc_inode->i_ino)
			break;

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		if (btrfs_file_extent_type(leaf, fi) !=
		    BTRFS_FILE_EXTENT_REG ||
		    btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
			break;

		if (nr == max) {
			struct disk_extent *old = exts;
			max *= 2;
			exts = kzalloc(sizeof(*exts) * max, GFP_NOFS);
			memcpy(exts, old, sizeof(*exts) * nr);
			if (old != *extents)
				kfree(old);
		}

		exts[nr].disk_bytenr =
			btrfs_file_extent_disk_bytenr(leaf, fi);
		exts[nr].disk_num_bytes =
			btrfs_file_extent_disk_num_bytes(leaf, fi);
		exts[nr].offset = btrfs_file_extent_offset(leaf, fi);
		exts[nr].num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
		exts[nr].ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
		exts[nr].compression = btrfs_file_extent_compression(leaf, fi);
		exts[nr].encryption = btrfs_file_extent_encryption(leaf, fi);
		exts[nr].other_encoding = btrfs_file_extent_other_encoding(leaf,
									   fi);
		BUG_ON(exts[nr].offset > 0);
		BUG_ON(exts[nr].compression || exts[nr].encryption);
		BUG_ON(exts[nr].num_bytes != exts[nr].disk_num_bytes);

		cur_pos += exts[nr].num_bytes;
		nr++;

		if (cur_pos + offset >= last_byte)
			break;

		if (no_fragment) {
			ret = 1;
			goto out;
		}
		path->slots[0]++;
	}

	BUG_ON(cur_pos + offset > last_byte);
	if (cur_pos + offset < last_byte) {
		ret = -ENOENT;
		goto out;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	if (ret) {
		if (exts != *extents)
			kfree(exts);
	} else {
		*extents = exts;
		*nr_extents = nr;
	}
	return ret;
}
static noinline int replace_one_extent(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_path *path,
				       struct btrfs_key *extent_key,
				       struct btrfs_key *leaf_key,
				       struct btrfs_ref_path *ref_path,
				       struct disk_extent *new_extents,
				       int nr_extents)
{
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct inode *inode = NULL;
	struct btrfs_key key;
	u64 lock_start = 0;
	u64 lock_end = 0;
	u64 num_bytes;
	u64 ext_offset;
	u64 search_end = (u64)-1;
	u32 nritems;
	int nr_scaned = 0;
	int extent_locked = 0;
	int extent_type;
	int ret;

	memcpy(&key, leaf_key, sizeof(key));
	if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
		if (key.objectid < ref_path->owner_objectid ||
		    (key.objectid == ref_path->owner_objectid &&
		     key.type < BTRFS_EXTENT_DATA_KEY)) {
			key.objectid = ref_path->owner_objectid;
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = 0;
		}
	}

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
		if (ret < 0)
			goto out;

		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
next:
		if (extent_locked && ret > 0) {
			/*
			 * the file extent item was modified by someone
			 * before the extent got locked.
			 */
			unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
				      lock_end, GFP_NOFS);
			extent_locked = 0;
		}

		if (path->slots[0] >= nritems) {
			if (++nr_scaned > 2)
				break;

			BUG_ON(extent_locked);
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			if (ret > 0)
				break;
			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

		if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
			if ((key.objectid > ref_path->owner_objectid) ||
			    (key.objectid == ref_path->owner_objectid &&
			     key.type > BTRFS_EXTENT_DATA_KEY) ||
			    key.offset >= search_end)
				break;
		}

		if (inode && key.objectid != inode->i_ino) {
			BUG_ON(extent_locked);
			btrfs_release_path(root, path);
			mutex_unlock(&inode->i_mutex);
			iput(inode);
			inode = NULL;
			continue;
		}

		if (key.type != BTRFS_EXTENT_DATA_KEY) {
			path->slots[0]++;
			ret = 1;
			goto next;
		}
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);
		if ((extent_type != BTRFS_FILE_EXTENT_REG &&
		     extent_type != BTRFS_FILE_EXTENT_PREALLOC) ||
		    (btrfs_file_extent_disk_bytenr(leaf, fi) !=
		     extent_key->objectid)) {
			path->slots[0]++;
			ret = 1;
			goto next;
		}

		num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
		ext_offset = btrfs_file_extent_offset(leaf, fi);

		if (search_end == (u64)-1) {
			search_end = key.offset - ext_offset +
				btrfs_file_extent_ram_bytes(leaf, fi);
		}

		if (!extent_locked) {
			lock_start = key.offset;
			lock_end = lock_start + num_bytes - 1;
		} else {
			if (lock_start > key.offset ||
			    lock_end + 1 < key.offset + num_bytes) {
				unlock_extent(&BTRFS_I(inode)->io_tree,
					      lock_start, lock_end, GFP_NOFS);
				extent_locked = 0;
			}
		}

		if (!inode) {
			btrfs_release_path(root, path);

			inode = btrfs_iget_locked(root->fs_info->sb,
						  key.objectid, root);
			if (inode->i_state & I_NEW) {
				BTRFS_I(inode)->root = root;
				BTRFS_I(inode)->location.objectid =
					key.objectid;
				BTRFS_I(inode)->location.type =
					BTRFS_INODE_ITEM_KEY;
				BTRFS_I(inode)->location.offset = 0;
				btrfs_read_locked_inode(inode);
				unlock_new_inode(inode);
			}
			/*
			 * some code calls btrfs_commit_transaction while
			 * holding the i_mutex, so we can't use mutex_lock
			 * here.
			 */
			if (is_bad_inode(inode) ||
			    !mutex_trylock(&inode->i_mutex)) {
				iput(inode);
				inode = NULL;
				key.offset = (u64)-1;
				goto skip;
			}
		}

		if (!extent_locked) {
			struct btrfs_ordered_extent *ordered;

			btrfs_release_path(root, path);

			lock_extent(&BTRFS_I(inode)->io_tree, lock_start,
				    lock_end, GFP_NOFS);
			ordered = btrfs_lookup_first_ordered_extent(inode,
								    lock_end);
			if (ordered &&
			    ordered->file_offset <= lock_end &&
			    ordered->file_offset + ordered->len > lock_start) {
				unlock_extent(&BTRFS_I(inode)->io_tree,
					      lock_start, lock_end, GFP_NOFS);
				btrfs_start_ordered_extent(inode, ordered, 1);
				btrfs_put_ordered_extent(ordered);
				key.offset += num_bytes;
				goto skip;
			}
			if (ordered)
				btrfs_put_ordered_extent(ordered);

			extent_locked = 1;
			continue;
		}

		if (nr_extents == 1) {
			/* update extent pointer in place */
			btrfs_set_file_extent_disk_bytenr(leaf, fi,
						new_extents[0].disk_bytenr);
			btrfs_set_file_extent_disk_num_bytes(leaf, fi,
						new_extents[0].disk_num_bytes);
			btrfs_mark_buffer_dirty(leaf);

			btrfs_drop_extent_cache(inode, key.offset,
						key.offset + num_bytes - 1, 0);

			ret = btrfs_inc_extent_ref(trans, root,
						   new_extents[0].disk_bytenr,
						   new_extents[0].disk_num_bytes,
						   leaf->start,
						   root->root_key.objectid,
						   trans->transid,
						   key.objectid);

			ret = btrfs_free_extent(trans, root,
						extent_key->objectid,
						extent_key->offset,
						leaf->start,
						btrfs_header_owner(leaf),
						btrfs_header_generation(leaf),
						key.objectid, 0);

			btrfs_release_path(root, path);
			key.offset += num_bytes;
		} else {
			u64 alloc_hint;
			u64 extent_len;
			int i;

			/*
			 * drop old extent pointer at first, then insert the
			 * new pointers one by one
			 */
			btrfs_release_path(root, path);
			ret = btrfs_drop_extents(trans, root, inode, key.offset,
						 key.offset + num_bytes,
						 key.offset, &alloc_hint);

			for (i = 0; i < nr_extents; i++) {
				if (ext_offset >= new_extents[i].num_bytes) {
					ext_offset -= new_extents[i].num_bytes;
					continue;
				}
				extent_len = min(new_extents[i].num_bytes -
						 ext_offset, num_bytes);

				ret = btrfs_insert_empty_item(trans, root,
							      path, &key,
							      sizeof(*fi));

				leaf = path->nodes[0];
				fi = btrfs_item_ptr(leaf, path->slots[0],
						struct btrfs_file_extent_item);
				btrfs_set_file_extent_generation(leaf, fi,
							trans->transid);
				btrfs_set_file_extent_type(leaf, fi,
							BTRFS_FILE_EXTENT_REG);
				btrfs_set_file_extent_disk_bytenr(leaf, fi,
						new_extents[i].disk_bytenr);
				btrfs_set_file_extent_disk_num_bytes(leaf, fi,
						new_extents[i].disk_num_bytes);
				btrfs_set_file_extent_ram_bytes(leaf, fi,
						new_extents[i].ram_bytes);

				btrfs_set_file_extent_compression(leaf, fi,
						new_extents[i].compression);
				btrfs_set_file_extent_encryption(leaf, fi,
						new_extents[i].encryption);
				btrfs_set_file_extent_other_encoding(leaf, fi,
						new_extents[i].other_encoding);

				btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_len);
				ext_offset += new_extents[i].offset;
				btrfs_set_file_extent_offset(leaf, fi,
							ext_offset);
				btrfs_mark_buffer_dirty(leaf);

				btrfs_drop_extent_cache(inode, key.offset,
						key.offset + extent_len - 1, 0);

				ret = btrfs_inc_extent_ref(trans, root,
						new_extents[i].disk_bytenr,
						new_extents[i].disk_num_bytes,
						leaf->start,
						root->root_key.objectid,
						trans->transid, key.objectid);
				btrfs_release_path(root, path);

				inode_add_bytes(inode, extent_len);

				ext_offset = 0;
				num_bytes -= extent_len;
				key.offset += extent_len;

				if (num_bytes == 0)
					break;
			}
			BUG_ON(i >= nr_extents);
		}

		if (extent_locked) {
			unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
				      lock_end, GFP_NOFS);
			extent_locked = 0;
		}
skip:
		if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS &&
		    key.offset >= search_end)
			break;

		cond_resched();
	}
	ret = 0;
out:
	btrfs_release_path(root, path);
	if (inode) {
		mutex_unlock(&inode->i_mutex);
		if (extent_locked) {
			unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
				      lock_end, GFP_NOFS);
		}
		iput(inode);
	}
	return ret;
}
int btrfs_reloc_tree_cache_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct extent_buffer *buf, u64 orig_start)
{
	int level;
	int ret;

	BUG_ON(btrfs_header_generation(buf) != trans->transid);
	BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);

	level = btrfs_header_level(buf);
	if (level == 0) {
		struct btrfs_leaf_ref *ref;
		struct btrfs_leaf_ref *orig_ref;

		orig_ref = btrfs_lookup_leaf_ref(root, orig_start);
		if (!orig_ref)
			return -ENOENT;

		ref = btrfs_alloc_leaf_ref(root, orig_ref->nritems);
		if (!ref) {
			btrfs_free_leaf_ref(root, orig_ref);
			return -ENOMEM;
		}

		ref->nritems = orig_ref->nritems;
		memcpy(ref->extents, orig_ref->extents,
		       sizeof(ref->extents[0]) * ref->nritems);

		btrfs_free_leaf_ref(root, orig_ref);

		ref->root_gen = trans->transid;
		ref->bytenr = buf->start;
		ref->owner = btrfs_header_owner(buf);
		ref->generation = btrfs_header_generation(buf);

		ret = btrfs_add_leaf_ref(root, ref, 0);
		WARN_ON(ret);
		btrfs_free_leaf_ref(root, ref);
	}
	return 0;
}
static noinline int invalidate_extent_cache(struct btrfs_root *root,
					struct extent_buffer *leaf,
					struct btrfs_block_group_cache *group,
					struct btrfs_root *target_root)
{
	struct btrfs_key key;
	struct inode *inode = NULL;
	struct btrfs_file_extent_item *fi;
	u64 num_bytes;
	u64 skip_objectid = 0;
	u32 nritems;
	u32 i;

	nritems = btrfs_header_nritems(leaf);
	for (i = 0; i < nritems; i++) {
		btrfs_item_key_to_cpu(leaf, &key, i);
		if (key.objectid == skip_objectid ||
		    key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
		if (btrfs_file_extent_type(leaf, fi) ==
		    BTRFS_FILE_EXTENT_INLINE)
			continue;
		if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
			continue;
		if (!inode || inode->i_ino != key.objectid) {
			iput(inode);
			inode = btrfs_ilookup(target_root->fs_info->sb,
					      key.objectid, target_root, 1);
		}
		if (!inode) {
			skip_objectid = key.objectid;
			continue;
		}
		num_bytes = btrfs_file_extent_num_bytes(leaf, fi);

		lock_extent(&BTRFS_I(inode)->io_tree, key.offset,
			    key.offset + num_bytes - 1, GFP_NOFS);
		btrfs_drop_extent_cache(inode, key.offset,
					key.offset + num_bytes - 1, 1);
		unlock_extent(&BTRFS_I(inode)->io_tree, key.offset,
			      key.offset + num_bytes - 1, GFP_NOFS);
		cond_resched();
	}
	iput(inode);
	return 0;
}
static noinline int replace_extents_in_leaf(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct extent_buffer *leaf,
					struct btrfs_block_group_cache *group,
					struct inode *reloc_inode)
{
	struct btrfs_key key;
	struct btrfs_key extent_key;
	struct btrfs_file_extent_item *fi;
	struct btrfs_leaf_ref *ref;
	struct disk_extent *new_extent;
	u64 bytenr;
	u64 num_bytes;
	u32 nritems;
	u32 i;
	int ext_index;
	int nr_extent;
	int ret;

	new_extent = kmalloc(sizeof(*new_extent), GFP_NOFS);
	BUG_ON(!new_extent);

	ref = btrfs_lookup_leaf_ref(root, leaf->start);
	BUG_ON(!ref);

	ext_index = -1;
	nritems = btrfs_header_nritems(leaf);
	for (i = 0; i < nritems; i++) {
		btrfs_item_key_to_cpu(leaf, &key, i);
		if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
		if (btrfs_file_extent_type(leaf, fi) ==
		    BTRFS_FILE_EXTENT_INLINE)
			continue;
		bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
		num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
		if (bytenr == 0)
			continue;

		ext_index++;
		if (bytenr >= group->key.objectid + group->key.offset ||
		    bytenr + num_bytes <= group->key.objectid)
			continue;

		extent_key.objectid = bytenr;
		extent_key.offset = num_bytes;
		extent_key.type = BTRFS_EXTENT_ITEM_KEY;
		nr_extent = 1;
		ret = get_new_locations(reloc_inode, &extent_key,
					group->key.objectid, 1,
					&new_extent, &nr_extent);
		if (ret)
			continue;

		BUG_ON(ref->extents[ext_index].bytenr != bytenr);
		BUG_ON(ref->extents[ext_index].num_bytes != num_bytes);
		ref->extents[ext_index].bytenr = new_extent->disk_bytenr;
		ref->extents[ext_index].num_bytes = new_extent->disk_num_bytes;

		btrfs_set_file_extent_disk_bytenr(leaf, fi,
						  new_extent->disk_bytenr);
		btrfs_set_file_extent_disk_num_bytes(leaf, fi,
						     new_extent->disk_num_bytes);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_inc_extent_ref(trans, root,
					   new_extent->disk_bytenr,
					   new_extent->disk_num_bytes,
					   leaf->start,
					   root->root_key.objectid,
					   trans->transid, key.objectid);
		ret = btrfs_free_extent(trans, root,
					bytenr, num_bytes, leaf->start,
					btrfs_header_owner(leaf),
					btrfs_header_generation(leaf),
					key.objectid, 0);
		cond_resched();
	}
	kfree(new_extent);
	BUG_ON(ext_index + 1 != ref->nritems);
	btrfs_free_leaf_ref(root, ref);
	return 0;
}
int btrfs_free_reloc_root(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root)
{
	struct btrfs_root *reloc_root;
	int ret;

	if (root->reloc_root) {
		reloc_root = root->reloc_root;
		root->reloc_root = NULL;
		list_add(&reloc_root->dead_list,
			 &root->fs_info->dead_reloc_roots);

		btrfs_set_root_bytenr(&reloc_root->root_item,
				      reloc_root->node->start);
		btrfs_set_root_level(&root->root_item,
				     btrfs_header_level(reloc_root->node));
		memset(&reloc_root->root_item.drop_progress, 0,
		       sizeof(struct btrfs_disk_key));
		reloc_root->root_item.drop_level = 0;

		ret = btrfs_update_root(trans, root->fs_info->tree_root,
					&reloc_root->root_key,
					&reloc_root->root_item);
	}
	return 0;
}
int btrfs_drop_dead_reloc_roots(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *reloc_root;
	struct btrfs_root *prev_root = NULL;
	struct list_head dead_roots;
	int ret;
	unsigned long nr;

	INIT_LIST_HEAD(&dead_roots);
	list_splice_init(&root->fs_info->dead_reloc_roots, &dead_roots);

	while (!list_empty(&dead_roots)) {
		reloc_root = list_entry(dead_roots.prev,
					struct btrfs_root, dead_list);
		list_del_init(&reloc_root->dead_list);

		BUG_ON(reloc_root->commit_root != NULL);
		while (1) {
			trans = btrfs_join_transaction(root, 1);

			mutex_lock(&root->fs_info->drop_mutex);
			ret = btrfs_drop_snapshot(trans, reloc_root);
			if (ret != -EAGAIN)
				break;
			mutex_unlock(&root->fs_info->drop_mutex);

			nr = trans->blocks_used;
			ret = btrfs_end_transaction(trans, root);
			btrfs_btree_balance_dirty(root, nr);
		}

		free_extent_buffer(reloc_root->node);

		ret = btrfs_del_root(trans, root->fs_info->tree_root,
				     &reloc_root->root_key);
		mutex_unlock(&root->fs_info->drop_mutex);

		nr = trans->blocks_used;
		ret = btrfs_end_transaction(trans, root);
		btrfs_btree_balance_dirty(root, nr);

		kfree(prev_root);
		prev_root = reloc_root;
	}
	if (prev_root) {
		btrfs_remove_leaf_refs(prev_root, (u64)-1, 0);
		kfree(prev_root);
	}
	return 0;
}
int btrfs_add_dead_reloc_root(struct btrfs_root *root)
{
	list_add(&root->dead_list, &root->fs_info->dead_reloc_roots);
	return 0;
}
int btrfs_cleanup_reloc_trees(struct btrfs_root *root)
{
	struct btrfs_root *reloc_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_key location;
	int found;
	int ret;

	mutex_lock(&root->fs_info->tree_reloc_mutex);
	ret = btrfs_find_dead_roots(root, BTRFS_TREE_RELOC_OBJECTID, NULL);
	found = !list_empty(&root->fs_info->dead_reloc_roots);
	mutex_unlock(&root->fs_info->tree_reloc_mutex);

	if (found) {
		trans = btrfs_start_transaction(root, 1);
		ret = btrfs_commit_transaction(trans, root);
	}

	location.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
	location.offset = (u64)-1;
	location.type = BTRFS_ROOT_ITEM_KEY;

	reloc_root = btrfs_read_fs_root_no_name(root->fs_info, &location);
	BUG_ON(!reloc_root);
	btrfs_orphan_cleanup(reloc_root);
	return 0;
}
static noinline int init_reloc_tree(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root)
{
	struct btrfs_root *reloc_root;
	struct extent_buffer *eb;
	struct btrfs_root_item *root_item;
	struct btrfs_key root_key;
	int ret;

	BUG_ON(!root->ref_cows);
	if (root->reloc_root)
		return 0;

	root_item = kmalloc(sizeof(*root_item), GFP_NOFS);

	ret = btrfs_copy_root(trans, root, root->commit_root,
			      &eb, BTRFS_TREE_RELOC_OBJECTID);

	root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
	root_key.offset = root->root_key.objectid;
	root_key.type = BTRFS_ROOT_ITEM_KEY;

	memcpy(root_item, &root->root_item, sizeof(root_item));
	btrfs_set_root_refs(root_item, 0);
	btrfs_set_root_bytenr(root_item, eb->start);
	btrfs_set_root_level(root_item, btrfs_header_level(eb));
	btrfs_set_root_generation(root_item, trans->transid);

	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	ret = btrfs_insert_root(trans, root->fs_info->tree_root,
				&root_key, root_item);
	kfree(root_item);

	reloc_root = btrfs_read_fs_root_no_radix(root->fs_info->tree_root,
						 &root_key);
	BUG_ON(!reloc_root);
	reloc_root->last_trans = trans->transid;
	reloc_root->commit_root = NULL;
	reloc_root->ref_tree = &root->fs_info->reloc_ref_tree;

	root->reloc_root = reloc_root;
	return 0;
}
/*
 * Core function of space balance.
 *
 * The idea is using reloc trees to relocate tree blocks in reference
 * counted roots. There is one reloc tree for each subvol, and all
 * reloc trees share same root key objectid. Reloc trees are snapshots
 * of the latest committed roots of subvols (root->commit_root).
 *
 * To relocate a tree block referenced by a subvol, there are two steps.
 * COW the block through subvol's reloc tree, then update block pointer
 * in the subvol to point to the new block. Since all reloc trees share
 * same root key objectid, doing special handling for tree blocks owned
 * by them is easy. Once a tree block has been COWed in one reloc tree,
 * we can use the resulting new block directly when the same block is
 * required to COW again through other reloc trees. In this way, relocated
 * tree blocks are shared between reloc trees, so they are also shared
 * between subvols.
 */
static noinline int relocate_one_path(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct btrfs_key *first_key,
				      struct btrfs_ref_path *ref_path,
				      struct btrfs_block_group_cache *group,
				      struct inode *reloc_inode)
{
	struct btrfs_root *reloc_root;
	struct extent_buffer *eb = NULL;
	struct btrfs_key *keys;
	u64 *nodes;
	int level;
	int shared_level;
	int lowest_level = 0;
	int ret;

	if (ref_path->owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
		lowest_level = ref_path->owner_objectid;

	if (!root->ref_cows) {
		path->lowest_level = lowest_level;
		ret = btrfs_search_slot(trans, root, first_key, path, 0, 1);
		BUG_ON(ret < 0);
		path->lowest_level = 0;
		btrfs_release_path(root, path);
		return 0;
	}

	mutex_lock(&root->fs_info->tree_reloc_mutex);
	ret = init_reloc_tree(trans, root);
	BUG_ON(ret);
	reloc_root = root->reloc_root;

	shared_level = ref_path->shared_level;
	ref_path->shared_level = BTRFS_MAX_LEVEL - 1;

	keys = ref_path->node_keys;
	nodes = ref_path->new_nodes;
	memset(&keys[shared_level + 1], 0,
	       sizeof(*keys) * (BTRFS_MAX_LEVEL - shared_level - 1));
	memset(&nodes[shared_level + 1], 0,
	       sizeof(*nodes) * (BTRFS_MAX_LEVEL - shared_level - 1));

	if (nodes[lowest_level] == 0) {
		path->lowest_level = lowest_level;
		ret = btrfs_search_slot(trans, reloc_root, first_key, path,
					0, 1);
		BUG_ON(ret);
		for (level = lowest_level; level < BTRFS_MAX_LEVEL; level++) {
			eb = path->nodes[level];
			if (!eb || eb == reloc_root->node)
				break;
			nodes[level] = eb->start;
			if (level == 0)
				btrfs_item_key_to_cpu(eb, &keys[level], 0);
			else
				btrfs_node_key_to_cpu(eb, &keys[level], 0);
		}
		if (nodes[0] &&
		    ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
			eb = path->nodes[0];
			ret = replace_extents_in_leaf(trans, reloc_root, eb,
						      group, reloc_inode);
			BUG_ON(ret);
		}
		btrfs_release_path(reloc_root, path);
	} else {
		ret = btrfs_merge_path(trans, reloc_root, keys, nodes,
				       lowest_level);
		BUG_ON(ret);
	}

	/*
	 * replace tree blocks in the fs tree with tree blocks in
	 * the reloc tree.
	 */
	ret = btrfs_merge_path(trans, root, keys, nodes, lowest_level);
	BUG_ON(ret < 0);

	if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_search_slot(trans, reloc_root, first_key, path,
					0, 1);
		BUG_ON(ret);
		extent_buffer_get(path->nodes[0]);
		eb = path->nodes[0];
		btrfs_release_path(reloc_root, path);
		ret = invalidate_extent_cache(reloc_root, eb, group, root);
		BUG_ON(ret);
		free_extent_buffer(eb);
	}

	mutex_unlock(&root->fs_info->tree_reloc_mutex);
	path->lowest_level = 0;
static noinline int relocate_tree_block(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct btrfs_key *first_key,
					struct btrfs_ref_path *ref_path)
{
	int ret;

	ret = relocate_one_path(trans, root, path, first_key,
				ref_path, NULL, NULL);
	BUG_ON(ret);

	return 0;
}
static noinline int del_extent_zero(struct btrfs_trans_handle *trans,
				    struct btrfs_root *extent_root,
				    struct btrfs_path *path,
				    struct btrfs_key *extent_key)
{
	int ret;

	ret = btrfs_search_slot(trans, extent_root, extent_key, path, -1, 1);
	if (ret)
		goto out;
	ret = btrfs_del_item(trans, extent_root, path);
out:
	btrfs_release_path(extent_root, path);
	return ret;
}
static noinline struct btrfs_root *read_ref_root(struct btrfs_fs_info *fs_info,
						 struct btrfs_ref_path *ref_path)
{
	struct btrfs_key root_key;

	root_key.objectid = ref_path->root_objectid;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	if (is_cowonly_root(ref_path->root_objectid))
		root_key.offset = 0;
	else
		root_key.offset = (u64)-1;

	return btrfs_read_fs_root_no_name(fs_info, &root_key);
}
static noinline int relocate_one_extent(struct btrfs_root *extent_root,
					struct btrfs_path *path,
					struct btrfs_key *extent_key,
					struct btrfs_block_group_cache *group,
					struct inode *reloc_inode, int pass)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *found_root;
	struct btrfs_ref_path *ref_path = NULL;
	struct disk_extent *new_extents = NULL;
	int nr_extents = 0;
	int loops;
	int ret;
	int level;
	struct btrfs_key first_key;
	u64 prev_block = 0;

	trans = btrfs_start_transaction(extent_root, 1);

	if (extent_key->objectid == 0) {
		ret = del_extent_zero(trans, extent_root, path, extent_key);
		goto out;
	}

	ref_path = kmalloc(sizeof(*ref_path), GFP_NOFS);
	if (!ref_path) {
		ret = -ENOMEM;
		goto out;
	}

	for (loops = 0; ; loops++) {
		if (loops == 0) {
			ret = btrfs_first_ref_path(trans, extent_root, ref_path,
						   extent_key->objectid);
		} else {
			ret = btrfs_next_ref_path(trans, extent_root, ref_path);
		}
		if (ret < 0)
			goto out;
		if (ret > 0)
			break;

		if (ref_path->root_objectid == BTRFS_TREE_LOG_OBJECTID ||
		    ref_path->root_objectid == BTRFS_TREE_RELOC_OBJECTID)
			continue;

		found_root = read_ref_root(extent_root->fs_info, ref_path);
		BUG_ON(!found_root);
		/*
		 * for reference counted tree, only process reference paths
		 * rooted at the latest committed root.
		 */
		if (found_root->ref_cows &&
		    ref_path->root_generation != found_root->root_key.offset)
			continue;

		if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
			if (pass == 0) {
				/*
				 * copy data extents to new locations
				 */
				u64 group_start = group->key.objectid;
				ret = relocate_data_extent(reloc_inode,
							   extent_key,
							   group_start);
				if (ret < 0)
					goto out;
				break;
			}
			level = 0;
		} else {
			level = ref_path->owner_objectid;
		}

		if (prev_block != ref_path->nodes[level]) {
			struct extent_buffer *eb;
			u64 block_start = ref_path->nodes[level];
			u64 block_size = btrfs_level_size(found_root, level);

			eb = read_tree_block(found_root, block_start,
					     block_size, 0);
			btrfs_tree_lock(eb);
			BUG_ON(level != btrfs_header_level(eb));

			if (level == 0)
				btrfs_item_key_to_cpu(eb, &first_key, 0);
			else
				btrfs_node_key_to_cpu(eb, &first_key, 0);

			btrfs_tree_unlock(eb);
			free_extent_buffer(eb);
			prev_block = block_start;
		}

		mutex_lock(&extent_root->fs_info->trans_mutex);
		btrfs_record_root_in_trans(found_root);
		mutex_unlock(&extent_root->fs_info->trans_mutex);
		if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
			/*
			 * try to update data extent references while
			 * keeping metadata shared between snapshots.
			 */
			if (pass == 1) {
				ret = relocate_one_path(trans, found_root,
						path, &first_key, ref_path,
						group, reloc_inode);
				if (ret < 0)
					goto out;
				continue;
			}
			/*
			 * use fallback method to process the remaining
			 * references.
			 */
			if (!new_extents) {
				u64 group_start = group->key.objectid;
				new_extents = kmalloc(sizeof(*new_extents),
						      GFP_NOFS);
				nr_extents = 1;
				ret = get_new_locations(reloc_inode,
							extent_key,
							group_start, 1,
							&new_extents,
							&nr_extents);
				if (ret)
					goto out;
			}
			ret = replace_one_extent(trans, found_root,
						 path, extent_key,
						 &first_key, ref_path,
						 new_extents, nr_extents);
		} else {
			ret = relocate_tree_block(trans, found_root, path,
						  &first_key, ref_path);
		}
		if (ret < 0)
			goto out;
	}
	ret = 0;
out:
	btrfs_end_transaction(trans, extent_root);
	kfree(new_extents);
	kfree(ref_path);
	return ret;
}
static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
{
	u64 num_devices;
	u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
		BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;

	num_devices = root->fs_info->fs_devices->rw_devices;
	if (num_devices == 1) {
		stripped |= BTRFS_BLOCK_GROUP_DUP;
		stripped = flags & ~stripped;

		/* turn raid0 into single device chunks */
		if (flags & BTRFS_BLOCK_GROUP_RAID0)
			return stripped;

		/* turn mirroring into duplication */
		if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
			     BTRFS_BLOCK_GROUP_RAID10))
			return stripped | BTRFS_BLOCK_GROUP_DUP;
		return flags;
	} else {
		/* they already had raid on here, just return */
		if (flags & stripped)
			return flags;

		stripped |= BTRFS_BLOCK_GROUP_DUP;
		stripped = flags & ~stripped;

		/* switch duplicated blocks with raid1 */
		if (flags & BTRFS_BLOCK_GROUP_DUP)
			return stripped | BTRFS_BLOCK_GROUP_RAID1;

		/* turn single device chunks into raid0 */
		return stripped | BTRFS_BLOCK_GROUP_RAID0;
	}
	return flags;
}
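/*
 * In short: with a single writable device the raid flags are stripped
 * down (raid0 becomes single, raid1/raid10 become dup); with multiple
 * devices the mapping is reversed (dup becomes raid1, single becomes
 * raid0) unless the flags already carry a raid profile.
 */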
static int __alloc_chunk_for_shrink(struct btrfs_root *root,
			struct btrfs_block_group_cache *shrink_block_group,
			int force)
{
	struct btrfs_trans_handle *trans;
	u64 new_alloc_flags;
	u64 calc;

	spin_lock(&shrink_block_group->lock);
	if (btrfs_block_group_used(&shrink_block_group->item) +
	    shrink_block_group->reserved > 0) {
		spin_unlock(&shrink_block_group->lock);

		trans = btrfs_start_transaction(root, 1);
		spin_lock(&shrink_block_group->lock);

		new_alloc_flags = update_block_group_flags(root,
						   shrink_block_group->flags);
		if (new_alloc_flags != shrink_block_group->flags) {
			calc =
			     btrfs_block_group_used(&shrink_block_group->item);
		} else {
			calc = shrink_block_group->key.offset;
		}
		spin_unlock(&shrink_block_group->lock);

		do_chunk_alloc(trans, root->fs_info->extent_root,
			       calc + 2 * 1024 * 1024, new_alloc_flags, force);

		btrfs_end_transaction(trans, root);
	} else
		spin_unlock(&shrink_block_group->lock);
	return 0;
}

int btrfs_prepare_block_group_relocation(struct btrfs_root *root,
					 struct btrfs_block_group_cache *group)
{
	__alloc_chunk_for_shrink(root, group, 1);
	set_block_group_readonly(group);
	return 0;
}
static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 u64 objectid, u64 size)
{
	struct btrfs_path *path;
	struct btrfs_inode_item *item;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;
	ret = btrfs_insert_empty_inode(trans, root, path, objectid);
	if (ret)
		goto out;

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item);
	memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
	btrfs_set_inode_generation(leaf, item, 1);
	btrfs_set_inode_size(leaf, item, size);
	btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
	btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(root, path);
out:
	btrfs_free_path(path);
	return ret;
}
static noinline struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
					struct btrfs_block_group_cache *group)
{
	struct inode *inode = NULL;
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root;
	struct btrfs_key root_key;
	u64 objectid = BTRFS_FIRST_FREE_OBJECTID;
	int err = 0;

	root_key.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;
	root = btrfs_read_fs_root_no_name(fs_info, &root_key);
	if (IS_ERR(root))
		return ERR_CAST(root);

	trans = btrfs_start_transaction(root, 1);

	err = btrfs_find_free_objectid(trans, root, objectid, &objectid);
	if (err)
		goto out;

	err = __insert_orphan_inode(trans, root, objectid, group->key.offset);

	err = btrfs_insert_file_extent(trans, root, objectid, 0, 0, 0,
				       group->key.offset, 0, group->key.offset,
				       0, 0, 0);

	inode = btrfs_iget_locked(root->fs_info->sb, objectid, root);
	if (inode->i_state & I_NEW) {
		BTRFS_I(inode)->root = root;
		BTRFS_I(inode)->location.objectid = objectid;
		BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
		BTRFS_I(inode)->location.offset = 0;
		btrfs_read_locked_inode(inode);
		unlock_new_inode(inode);
		BUG_ON(is_bad_inode(inode));
	}
	BTRFS_I(inode)->index_cnt = group->key.objectid;

	err = btrfs_orphan_add(trans, inode);
out:
	btrfs_end_transaction(trans, root);
	if (err) {
		if (inode)
			iput(inode);
		inode = ERR_PTR(err);
	}
	return inode;
}
int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len)
{
	struct btrfs_ordered_sum *sums;
	struct btrfs_sector_sum *sector_sum;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct list_head list;
	size_t offset;
	int ret;
	u64 disk_bytenr;

	INIT_LIST_HEAD(&list);

	ordered = btrfs_lookup_ordered_extent(inode, file_pos);
	BUG_ON(ordered->file_offset != file_pos || ordered->len != len);

	disk_bytenr = file_pos + BTRFS_I(inode)->index_cnt;
	ret = btrfs_lookup_csums_range(root->fs_info->csum_root, disk_bytenr,
				       disk_bytenr + len - 1, &list);

	while (!list_empty(&list)) {
		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
		list_del_init(&sums->list);

		sector_sum = sums->sums;
		sums->bytenr = ordered->start;

		offset = 0;
		while (offset < sums->len) {
			sector_sum->bytenr += ordered->start - disk_bytenr;
			sector_sum++;
			offset += root->sectorsize;
		}

		btrfs_add_ordered_sum(inode, ordered, sums);
	}
	btrfs_put_ordered_extent(ordered);
	return 0;
}
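/*
 * the checksums looked up above still refer to the extent's old disk
 * bytenr; rebasing each sector_sum by (ordered->start - disk_bytenr)
 * lets the new ordered extent reuse them instead of recomputing
 * checksums for the relocated data.
 */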
int btrfs_relocate_block_group(struct btrfs_root *root, u64 group_start)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_fs_info *info = root->fs_info;
	struct extent_buffer *leaf;
	struct inode *reloc_inode;
	struct btrfs_block_group_cache *block_group;
	struct btrfs_key key;
	u64 skipped;
	u64 cur_byte;
	u64 total_found;
	u32 nritems;
	int ret;
	int progress;
	int pass = 0;

	root = root->fs_info->extent_root;

	block_group = btrfs_lookup_block_group(info, group_start);
	BUG_ON(!block_group);

	printk(KERN_INFO "btrfs relocating block group %llu flags %llu\n",
	       (unsigned long long)block_group->key.objectid,
	       (unsigned long long)block_group->flags);

	path = btrfs_alloc_path();
	BUG_ON(!path);

	reloc_inode = create_reloc_inode(info, block_group);
	BUG_ON(IS_ERR(reloc_inode));

	__alloc_chunk_for_shrink(root, block_group, 1);
	set_block_group_readonly(block_group);

	btrfs_start_delalloc_inodes(info->tree_root);
	btrfs_wait_ordered_extents(info->tree_root, 0);
again:
	skipped = 0;
	total_found = 0;
	progress = 0;
	key.objectid = block_group->key.objectid;
	key.offset = 0;
	key.type = 0;
	cur_byte = key.objectid;

	trans = btrfs_start_transaction(info->tree_root, 1);
	btrfs_commit_transaction(trans, info->tree_root);

	mutex_lock(&root->fs_info->cleaner_mutex);
	btrfs_clean_old_snapshots(info->tree_root);
	btrfs_remove_leaf_refs(info->tree_root, (u64)-1, 1);
	mutex_unlock(&root->fs_info->cleaner_mutex);

	trans = btrfs_start_transaction(info->tree_root, 1);
	btrfs_commit_transaction(trans, info->tree_root);

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;
next:
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			if (ret == 1) {
				ret = 0;
				break;
			}
			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

		if (key.objectid >= block_group->key.objectid +
		    block_group->key.offset)
			break;

		if (progress && need_resched()) {
			btrfs_release_path(root, path);
			cond_resched();
			progress = 0;
			continue;
		}
		progress = 1;

		if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY ||
		    key.objectid + key.offset <= cur_byte) {
			path->slots[0]++;
			goto next;
		}

		total_found++;
		cur_byte = key.objectid + key.offset;
		btrfs_release_path(root, path);

		__alloc_chunk_for_shrink(root, block_group, 0);
		ret = relocate_one_extent(root, path, &key, block_group,
					  reloc_inode, pass);
		BUG_ON(ret < 0);
		if (ret > 0)
			skipped++;

		key.objectid = cur_byte;
		key.type = 0;
		key.offset = 0;
	}

	btrfs_release_path(root, path);

	if (pass == 0) {
		btrfs_wait_ordered_range(reloc_inode, 0, (u64)-1);
		invalidate_mapping_pages(reloc_inode->i_mapping, 0, -1);
	}

	if (total_found > 0) {
		printk(KERN_INFO "btrfs found %llu extents in pass %d\n",
		       (unsigned long long)total_found, pass);
		pass++;
		if (total_found == skipped && pass > 2) {
			iput(reloc_inode);
			reloc_inode = create_reloc_inode(info, block_group);
			BUG_ON(IS_ERR(reloc_inode));
			pass = 0;
		}
		goto again;
	}

	/* delete reloc_inode */
	iput(reloc_inode);

	/* unpin extents in this range */
	trans = btrfs_start_transaction(info->tree_root, 1);
	btrfs_commit_transaction(trans, info->tree_root);

	spin_lock(&block_group->lock);
	WARN_ON(block_group->pinned > 0);
	WARN_ON(block_group->reserved > 0);
	WARN_ON(btrfs_block_group_used(&block_group->item) > 0);
	spin_unlock(&block_group->lock);
	btrfs_put_block_group(block_group);
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
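
/*
 * Position @path at the first BLOCK_GROUP_ITEM whose objectid is >=
 * key->objectid.  Returns 0 if one was found, a positive value when the
 * tree has been exhausted, or a negative errno on search failure.
 */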
static int find_first_block_group(struct btrfs_root *root,
				  struct btrfs_path *path, struct btrfs_key *key)
{
	int ret = 0;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	int slot;

	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		slot = path->slots[0];
		leaf = path->nodes[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.objectid >= key->objectid &&
		    found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
			ret = 0;
			goto out;
		}
		path->slots[0]++;
	}
out:
	return ret;
}
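
/*
 * Tear down all in-memory block group caches and space_info structures.
 * Only called in the final stages of unmount, after waiting for any
 * caching threads that are still running.
 */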
int btrfs_free_block_groups(struct btrfs_fs_info *info)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_space_info *space_info;
	struct rb_node *n;

	spin_lock(&info->block_group_cache_lock);
	while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
		block_group = rb_entry(n, struct btrfs_block_group_cache,
				       cache_node);
		rb_erase(&block_group->cache_node,
			 &info->block_group_cache_tree);
		spin_unlock(&info->block_group_cache_lock);

		down_write(&block_group->space_info->groups_sem);
		list_del(&block_group->list);
		up_write(&block_group->space_info->groups_sem);

		if (block_group->cached == BTRFS_CACHE_STARTED)
			wait_event(block_group->caching_q,
				   block_group_cache_done(block_group));

		btrfs_remove_free_space_cache(block_group);

		WARN_ON(atomic_read(&block_group->count) != 1);
		kfree(block_group);

		spin_lock(&info->block_group_cache_lock);
	}
	spin_unlock(&info->block_group_cache_lock);

	/*
	 * now that all the block groups are freed, go through and free all
	 * the space_info structs.  This is only called during the final
	 * stages of unmount, and so we know nobody is using them.  We call
	 * synchronize_rcu() once before we start, just to be on the safe
	 * side.
	 */
	synchronize_rcu();

	while (!list_empty(&info->space_info)) {
		space_info = list_entry(info->space_info.next,
					struct btrfs_space_info,
					list);

		list_del(&space_info->list);
		kfree(space_info);
	}
	return 0;
}
int btrfs_read_block_groups(struct btrfs_root *root)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_block_group_cache *cache;
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_space_info *space_info;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;

	root = info->extent_root;
	key.objectid = 0;
	key.offset = 0;
	btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	while (1) {
		ret = find_first_block_group(root, path, &key);
		if (ret > 0)
			break;
		if (ret != 0)
			goto error;

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		cache = kzalloc(sizeof(*cache), GFP_NOFS);
		if (!cache) {
			ret = -ENOMEM;
			goto error;
		}

		atomic_set(&cache->count, 1);
		spin_lock_init(&cache->lock);
		spin_lock_init(&cache->tree_lock);
		cache->fs_info = info;
		init_waitqueue_head(&cache->caching_q);
		INIT_LIST_HEAD(&cache->list);
		INIT_LIST_HEAD(&cache->cluster_list);

		/*
		 * we only want to have 32k of ram per block group for keeping
		 * track of free space, and if we pass 1/2 of that we want to
		 * start converting things over to using bitmaps
		 */
		cache->extents_thresh = ((1024 * 32) / 2) /
			sizeof(struct btrfs_free_space);
		read_extent_buffer(leaf, &cache->item,
				   btrfs_item_ptr_offset(leaf, path->slots[0]),
				   sizeof(cache->item));
		memcpy(&cache->key, &found_key, sizeof(found_key));

		key.objectid = found_key.objectid + found_key.offset;
		btrfs_release_path(root, path);
		cache->flags = btrfs_block_group_flags(&cache->item);
		cache->sectorsize = root->sectorsize;

		remove_sb_from_cache(root, cache);

		/*
		 * check for two cases, either we are full, and therefore
		 * don't need to bother with the caching work since we won't
		 * find any space, or we are empty, and we can just add all
		 * the space in and be done with it.  This saves us a lot of
		 * time, particularly in the full case.
		 */
		if (found_key.offset == btrfs_block_group_used(&cache->item)) {
			cache->cached = BTRFS_CACHE_FINISHED;
		} else if (btrfs_block_group_used(&cache->item) == 0) {
			cache->cached = BTRFS_CACHE_FINISHED;
			add_new_free_space(cache, root->fs_info,
					   found_key.objectid,
					   found_key.objectid +
					   found_key.offset);
		}

		ret = update_space_info(info, cache->flags, found_key.offset,
					btrfs_block_group_used(&cache->item),
					&space_info);
		BUG_ON(ret);
		cache->space_info = space_info;
		down_write(&space_info->groups_sem);
		list_add_tail(&cache->list, &space_info->block_groups);
		up_write(&space_info->groups_sem);

		ret = btrfs_add_block_group_cache(root->fs_info, cache);
		BUG_ON(ret);

		set_avail_alloc_bits(root->fs_info, cache->flags);
		if (btrfs_chunk_readonly(root, cache->key.objectid))
			set_block_group_readonly(cache);
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
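
/*
 * Create the in-memory cache entry and the on-disk BLOCK_GROUP_ITEM for a
 * freshly allocated chunk.  The group is brand new, so it starts out fully
 * cached and all of its space (minus the superblock mirrors stripped by
 * remove_sb_from_cache()) is added to the free space cache up front.
 */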
int btrfs_make_block_group(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, u64 bytes_used,
			   u64 type, u64 chunk_objectid, u64 chunk_offset,
			   u64 size)
{
	int ret;
	struct btrfs_root *extent_root;
	struct btrfs_block_group_cache *cache;

	extent_root = root->fs_info->extent_root;

	root->fs_info->last_trans_log_full_commit = trans->transid;

	cache = kzalloc(sizeof(*cache), GFP_NOFS);
	if (!cache)
		return -ENOMEM;

	cache->key.objectid = chunk_offset;
	cache->key.offset = size;
	cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	cache->sectorsize = root->sectorsize;

	/*
	 * we only want to have 32k of ram per block group for keeping track
	 * of free space, and if we pass 1/2 of that we want to start
	 * converting things over to using bitmaps
	 */
	cache->extents_thresh = ((1024 * 32) / 2) /
		sizeof(struct btrfs_free_space);
	atomic_set(&cache->count, 1);
	spin_lock_init(&cache->lock);
	spin_lock_init(&cache->tree_lock);
	init_waitqueue_head(&cache->caching_q);
	INIT_LIST_HEAD(&cache->list);
	INIT_LIST_HEAD(&cache->cluster_list);

	btrfs_set_block_group_used(&cache->item, bytes_used);
	btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
	cache->flags = type;
	btrfs_set_block_group_flags(&cache->item, type);

	cache->cached = BTRFS_CACHE_FINISHED;
	remove_sb_from_cache(root, cache);

	add_new_free_space(cache, root->fs_info, chunk_offset,
			   chunk_offset + size);

	ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
				&cache->space_info);
	BUG_ON(ret);
	down_write(&cache->space_info->groups_sem);
	list_add_tail(&cache->list, &cache->space_info->block_groups);
	up_write(&cache->space_info->groups_sem);

	ret = btrfs_add_block_group_cache(root->fs_info, cache);
	BUG_ON(ret);

	ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
				sizeof(cache->item));
	BUG_ON(ret);

	set_avail_alloc_bits(extent_root->fs_info, type);

	return 0;
}
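
/*
 * Final step of removing a chunk: the block group must already be
 * read-only and empty.  Pull it out of the allocation clusters, the block
 * group rb tree and its space_info, drop the in-memory references and
 * delete the BLOCK_GROUP_ITEM from the extent tree.
 */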
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 group_start)
{
	struct btrfs_path *path;
	struct btrfs_block_group_cache *block_group;
	struct btrfs_free_cluster *cluster;
	struct btrfs_key key;
	int ret;

	root = root->fs_info->extent_root;

	block_group = btrfs_lookup_block_group(root->fs_info, group_start);
	BUG_ON(!block_group);
	BUG_ON(!block_group->ro);

	memcpy(&key, &block_group->key, sizeof(key));

	/* make sure this block group isn't part of an allocation cluster */
	cluster = &root->fs_info->data_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	/*
	 * make sure this block group isn't part of a metadata
	 * allocation cluster
	 */
	cluster = &root->fs_info->meta_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	path = btrfs_alloc_path();
	BUG_ON(!path);

	spin_lock(&root->fs_info->block_group_cache_lock);
	rb_erase(&block_group->cache_node,
		 &root->fs_info->block_group_cache_tree);
	spin_unlock(&root->fs_info->block_group_cache_lock);

	down_write(&block_group->space_info->groups_sem);
	/*
	 * we must use list_del_init so people can check to see if they
	 * are still on the list after taking the semaphore
	 */
	list_del_init(&block_group->list);
	up_write(&block_group->space_info->groups_sem);

	if (block_group->cached == BTRFS_CACHE_STARTED)
		wait_event(block_group->caching_q,
			   block_group_cache_done(block_group));

	btrfs_remove_free_space_cache(block_group);

	spin_lock(&block_group->space_info->lock);
	block_group->space_info->total_bytes -= block_group->key.offset;
	block_group->space_info->bytes_readonly -= block_group->key.offset;
	spin_unlock(&block_group->space_info->lock);

	btrfs_clear_space_info_full(root->fs_info);

	btrfs_put_block_group(block_group);
	btrfs_put_block_group(block_group);

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -EIO;
	if (ret < 0)
		goto out;

	ret = btrfs_del_item(trans, root, path);
out:
	btrfs_free_path(path);
	return ret;
}
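
/*
 * The two exported helpers above are driven from the chunk removal path in
 * volumes.c (btrfs_relocate_chunk() / btrfs_remove_chunk() in this era).
 * A rough, simplified sketch of that sequence -- not the actual volumes.c
 * code, with error handling and the chunk/dev item removal elided -- is:
 *
 *	ret = btrfs_relocate_block_group(extent_root, chunk_offset);
 *	BUG_ON(ret);
 *
 *	trans = btrfs_start_transaction(root, 1);
 *	... remove the device extents and the chunk/dev items ...
 *	ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
 *	BUG_ON(ret);
 *	btrfs_end_transaction(trans, root);
 */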