/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include "print-tree.h"
#include "transaction.h"
#include "ref-cache.h"
#include "free-space-cache.h"

#define PENDING_EXTENT_INSERT 0
#define PENDING_EXTENT_DELETE 1
#define PENDING_BACKREF_UPDATE 2
struct pending_extent_op {
	struct list_head list;
};
static int __btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root, u64 parent,
					 u64 root_objectid, u64 ref_generation,
					 u64 owner, struct btrfs_key *ins,
					 int ref_mod);
static int update_reserved_extents(struct btrfs_root *root,
				   u64 bytenr, u64 num, int reserve);
static int update_block_group(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      u64 bytenr, u64 num_bytes, int alloc,
			      int mark_free);
static noinline int __btrfs_free_extent(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					u64 bytenr, u64 num_bytes, u64 parent,
					u64 root_objectid, u64 ref_generation,
					u64 owner_objectid, int pin,
					int refs_to_drop);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
			  struct btrfs_root *extent_root, u64 alloc_bytes,
			  u64 flags, int force);

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
	return (cache->flags & bits) == bits;
}
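/*
 * Usage sketch (illustration only, mirroring btrfs_find_block_group further
 * down): checking whether a block group can hold metadata looks like
 *
 *	if (block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA))
 *		... consider this group ...
 */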
/*
 * this adds the block group to the fs_info rb tree for the block group cache
 */
84 static int btrfs_add_block_group_cache(struct btrfs_fs_info
*info
,
85 struct btrfs_block_group_cache
*block_group
)
88 struct rb_node
*parent
= NULL
;
89 struct btrfs_block_group_cache
*cache
;
91 spin_lock(&info
->block_group_cache_lock
);
92 p
= &info
->block_group_cache_tree
.rb_node
;
96 cache
= rb_entry(parent
, struct btrfs_block_group_cache
,
98 if (block_group
->key
.objectid
< cache
->key
.objectid
) {
100 } else if (block_group
->key
.objectid
> cache
->key
.objectid
) {
103 spin_unlock(&info
->block_group_cache_lock
);
108 rb_link_node(&block_group
->cache_node
, parent
, p
);
109 rb_insert_color(&block_group
->cache_node
,
110 &info
->block_group_cache_tree
);
111 spin_unlock(&info
->block_group_cache_lock
);
/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains bytenr.
 */
120 static struct btrfs_block_group_cache
*
121 block_group_cache_tree_search(struct btrfs_fs_info
*info
, u64 bytenr
,
124 struct btrfs_block_group_cache
*cache
, *ret
= NULL
;
128 spin_lock(&info
->block_group_cache_lock
);
129 n
= info
->block_group_cache_tree
.rb_node
;
132 cache
= rb_entry(n
, struct btrfs_block_group_cache
,
134 end
= cache
->key
.objectid
+ cache
->key
.offset
- 1;
135 start
= cache
->key
.objectid
;
137 if (bytenr
< start
) {
138 if (!contains
&& (!ret
|| start
< ret
->key
.objectid
))
141 } else if (bytenr
> start
) {
142 if (contains
&& bytenr
<= end
) {
153 atomic_inc(&ret
->count
);
154 spin_unlock(&info
->block_group_cache_lock
);
/*
 * this is only called by cache_block_group.  Since we could have freed
 * extents, we need to check the pinned_extents tree for any extents that
 * can't be used yet, because their free space will be released as soon as
 * the transaction commits.
 */
164 static int add_new_free_space(struct btrfs_block_group_cache
*block_group
,
165 struct btrfs_fs_info
*info
, u64 start
, u64 end
)
167 u64 extent_start
, extent_end
, size
;
170 while (start
< end
) {
171 ret
= find_first_extent_bit(&info
->pinned_extents
, start
,
172 &extent_start
, &extent_end
,
177 if (extent_start
== start
) {
178 start
= extent_end
+ 1;
179 } else if (extent_start
> start
&& extent_start
< end
) {
180 size
= extent_start
- start
;
181 ret
= btrfs_add_free_space(block_group
, start
,
184 start
= extent_end
+ 1;
192 ret
= btrfs_add_free_space(block_group
, start
, size
);
199 static int remove_sb_from_cache(struct btrfs_root
*root
,
200 struct btrfs_block_group_cache
*cache
)
207 for (i
= 0; i
< BTRFS_SUPER_MIRROR_MAX
; i
++) {
208 bytenr
= btrfs_sb_offset(i
);
209 ret
= btrfs_rmap_block(&root
->fs_info
->mapping_tree
,
210 cache
->key
.objectid
, bytenr
, 0,
211 &logical
, &nr
, &stripe_len
);
214 btrfs_remove_free_space(cache
, logical
[nr
],
222 static int cache_block_group(struct btrfs_root
*root
,
223 struct btrfs_block_group_cache
*block_group
)
225 struct btrfs_path
*path
;
227 struct btrfs_key key
;
228 struct extent_buffer
*leaf
;
235 root
= root
->fs_info
->extent_root
;
237 if (block_group
->cached
)
240 path
= btrfs_alloc_path();
	/*
	 * we get into deadlocks with paths held by callers of this function.
	 * since the alloc_mutex is protecting things right now, just
	 * skip the locking here.
	 */
250 path
->skip_locking
= 1;
251 last
= max_t(u64
, block_group
->key
.objectid
, BTRFS_SUPER_INFO_OFFSET
);
254 btrfs_set_key_type(&key
, BTRFS_EXTENT_ITEM_KEY
);
255 ret
= btrfs_search_slot(NULL
, root
, &key
, path
, 0, 0);
260 leaf
= path
->nodes
[0];
261 slot
= path
->slots
[0];
262 if (slot
>= btrfs_header_nritems(leaf
)) {
263 ret
= btrfs_next_leaf(root
, path
);
271 btrfs_item_key_to_cpu(leaf
, &key
, slot
);
272 if (key
.objectid
< block_group
->key
.objectid
)
275 if (key
.objectid
>= block_group
->key
.objectid
+
276 block_group
->key
.offset
)
279 if (btrfs_key_type(&key
) == BTRFS_EXTENT_ITEM_KEY
) {
280 add_new_free_space(block_group
, root
->fs_info
, last
,
283 last
= key
.objectid
+ key
.offset
;
289 add_new_free_space(block_group
, root
->fs_info
, last
,
290 block_group
->key
.objectid
+
291 block_group
->key
.offset
);
293 block_group
->cached
= 1;
294 remove_sb_from_cache(root
, block_group
);
297 btrfs_free_path(path
);
/*
 * return the block group that starts at or after bytenr
 */
304 static struct btrfs_block_group_cache
*
305 btrfs_lookup_first_block_group(struct btrfs_fs_info
*info
, u64 bytenr
)
307 struct btrfs_block_group_cache
*cache
;
309 cache
= block_group_cache_tree_search(info
, bytenr
, 0);
/*
 * return the block group that contains the given bytenr
 */
317 struct btrfs_block_group_cache
*btrfs_lookup_block_group(
318 struct btrfs_fs_info
*info
,
321 struct btrfs_block_group_cache
*cache
;
323 cache
= block_group_cache_tree_search(info
, bytenr
, 1);
328 void btrfs_put_block_group(struct btrfs_block_group_cache
*cache
)
330 if (atomic_dec_and_test(&cache
->count
))
334 static struct btrfs_space_info
*__find_space_info(struct btrfs_fs_info
*info
,
337 struct list_head
*head
= &info
->space_info
;
338 struct btrfs_space_info
*found
;
341 list_for_each_entry_rcu(found
, head
, list
) {
342 if (found
->flags
== flags
) {
/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
355 void btrfs_clear_space_info_full(struct btrfs_fs_info
*info
)
357 struct list_head
*head
= &info
->space_info
;
358 struct btrfs_space_info
*found
;
361 list_for_each_entry_rcu(found
, head
, list
)
366 static u64
div_factor(u64 num
, int factor
)
375 u64
btrfs_find_block_group(struct btrfs_root
*root
,
376 u64 search_start
, u64 search_hint
, int owner
)
378 struct btrfs_block_group_cache
*cache
;
380 u64 last
= max(search_hint
, search_start
);
387 cache
= btrfs_lookup_first_block_group(root
->fs_info
, last
);
391 spin_lock(&cache
->lock
);
392 last
= cache
->key
.objectid
+ cache
->key
.offset
;
393 used
= btrfs_block_group_used(&cache
->item
);
395 if ((full_search
|| !cache
->ro
) &&
396 block_group_bits(cache
, BTRFS_BLOCK_GROUP_METADATA
)) {
397 if (used
+ cache
->pinned
+ cache
->reserved
<
398 div_factor(cache
->key
.offset
, factor
)) {
399 group_start
= cache
->key
.objectid
;
400 spin_unlock(&cache
->lock
);
401 btrfs_put_block_group(cache
);
405 spin_unlock(&cache
->lock
);
406 btrfs_put_block_group(cache
);
414 if (!full_search
&& factor
< 10) {
/* simple helper to search for an existing extent at a given offset */
425 int btrfs_lookup_extent(struct btrfs_root
*root
, u64 start
, u64 len
)
428 struct btrfs_key key
;
429 struct btrfs_path
*path
;
431 path
= btrfs_alloc_path();
433 key
.objectid
= start
;
435 btrfs_set_key_type(&key
, BTRFS_EXTENT_ITEM_KEY
);
436 ret
= btrfs_search_slot(NULL
, root
->fs_info
->extent_root
, &key
, path
,
438 btrfs_free_path(path
);
/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure has fields for:
 *
 * - Objectid of the subvolume root
 * - Generation number of the tree holding the reference
 * - objectid of the file holding the reference
 * - number of references held by the parent node (always 1 for tree blocks)
 *
 * A btree leaf may hold multiple references to a file extent.  In most cases,
 * these references are from the same file and the corresponding offsets inside
 * the file are close together.
 *
 * When a file extent is allocated the fields are filled in:
 *     (root_key.objectid, trans->transid, inode objectid, 1)
 *
 * When a leaf is cow'd new references are added for every file extent found
 * in the leaf.  It looks similar to the create case, but trans->transid will
 * be different when the block is cow'd.
 *
 *     (root_key.objectid, trans->transid, inode objectid,
 *      number of references in the leaf)
 *
 * When a file extent is removed either during snapshot deletion or
 * file truncation, we find the corresponding back reference and check
 * the following fields:
 *
 *     (btrfs_header_owner(leaf), btrfs_header_generation(leaf))
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 * - Different generations of the same subvolume
 *
 * When a tree block is created, back references are inserted:
 *
 *     (root->root_key.objectid, trans->transid, level, 1)
 *
 * When a tree block is cow'd, new back references are added for all the
 * blocks it points to.  If the tree block isn't in a reference counted root,
 * the old back references are removed.  These new back references are of
 * the form (trans->transid will have increased since creation):
 *
 *     (root->root_key.objectid, trans->transid, level, 1)
 *
 * When a backref is being deleted, the following fields are checked:
 *
 * if backref was for a tree root:
 *     (btrfs_header_owner(itself), btrfs_header_generation(itself), level)
 * else
 *     (btrfs_header_owner(parent), btrfs_header_generation(parent), level)
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent, the key
 * type is set to BTRFS_EXTENT_REF_KEY, and the key offset is the first
 * byte of the parent extent.  If an extent is a tree root, the key offset
 * is set to the key objectid.
 */
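/*
 * Illustrative sketch of the key layout described above (not part of the
 * original flow): a back reference key for an extent starting at 'bytenr'
 * and pointed to by a parent block starting at 'parent' is composed as
 *
 *	struct btrfs_key key;
 *
 *	key.objectid = bytenr;			(first byte of the extent)
 *	key.type = BTRFS_EXTENT_REF_KEY;
 *	key.offset = parent;			(first byte of the parent)
 *
 * with key.offset set to key.objectid instead when the extent is a tree root.
 */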
521 static noinline
int lookup_extent_backref(struct btrfs_trans_handle
*trans
,
522 struct btrfs_root
*root
,
523 struct btrfs_path
*path
,
524 u64 bytenr
, u64 parent
,
525 u64 ref_root
, u64 ref_generation
,
526 u64 owner_objectid
, int del
)
528 struct btrfs_key key
;
529 struct btrfs_extent_ref
*ref
;
530 struct extent_buffer
*leaf
;
534 key
.objectid
= bytenr
;
535 key
.type
= BTRFS_EXTENT_REF_KEY
;
538 ret
= btrfs_search_slot(trans
, root
, &key
, path
, del
? -1 : 0, 1);
546 leaf
= path
->nodes
[0];
547 ref
= btrfs_item_ptr(leaf
, path
->slots
[0], struct btrfs_extent_ref
);
548 ref_objectid
= btrfs_ref_objectid(leaf
, ref
);
549 if (btrfs_ref_root(leaf
, ref
) != ref_root
||
550 btrfs_ref_generation(leaf
, ref
) != ref_generation
||
551 (ref_objectid
!= owner_objectid
&&
552 ref_objectid
!= BTRFS_MULTIPLE_OBJECTIDS
)) {
562 static noinline
int insert_extent_backref(struct btrfs_trans_handle
*trans
,
563 struct btrfs_root
*root
,
564 struct btrfs_path
*path
,
565 u64 bytenr
, u64 parent
,
566 u64 ref_root
, u64 ref_generation
,
570 struct btrfs_key key
;
571 struct extent_buffer
*leaf
;
572 struct btrfs_extent_ref
*ref
;
576 key
.objectid
= bytenr
;
577 key
.type
= BTRFS_EXTENT_REF_KEY
;
580 ret
= btrfs_insert_empty_item(trans
, root
, path
, &key
, sizeof(*ref
));
582 leaf
= path
->nodes
[0];
583 ref
= btrfs_item_ptr(leaf
, path
->slots
[0],
584 struct btrfs_extent_ref
);
585 btrfs_set_ref_root(leaf
, ref
, ref_root
);
586 btrfs_set_ref_generation(leaf
, ref
, ref_generation
);
587 btrfs_set_ref_objectid(leaf
, ref
, owner_objectid
);
588 btrfs_set_ref_num_refs(leaf
, ref
, refs_to_add
);
589 } else if (ret
== -EEXIST
) {
592 BUG_ON(owner_objectid
< BTRFS_FIRST_FREE_OBJECTID
);
593 leaf
= path
->nodes
[0];
594 ref
= btrfs_item_ptr(leaf
, path
->slots
[0],
595 struct btrfs_extent_ref
);
596 if (btrfs_ref_root(leaf
, ref
) != ref_root
||
597 btrfs_ref_generation(leaf
, ref
) != ref_generation
) {
603 num_refs
= btrfs_ref_num_refs(leaf
, ref
);
604 BUG_ON(num_refs
== 0);
605 btrfs_set_ref_num_refs(leaf
, ref
, num_refs
+ refs_to_add
);
607 existing_owner
= btrfs_ref_objectid(leaf
, ref
);
608 if (existing_owner
!= owner_objectid
&&
609 existing_owner
!= BTRFS_MULTIPLE_OBJECTIDS
) {
610 btrfs_set_ref_objectid(leaf
, ref
,
611 BTRFS_MULTIPLE_OBJECTIDS
);
617 btrfs_unlock_up_safe(path
, 1);
618 btrfs_mark_buffer_dirty(path
->nodes
[0]);
620 btrfs_release_path(root
, path
);
624 static noinline
int remove_extent_backref(struct btrfs_trans_handle
*trans
,
625 struct btrfs_root
*root
,
626 struct btrfs_path
*path
,
629 struct extent_buffer
*leaf
;
630 struct btrfs_extent_ref
*ref
;
634 leaf
= path
->nodes
[0];
635 ref
= btrfs_item_ptr(leaf
, path
->slots
[0], struct btrfs_extent_ref
);
636 num_refs
= btrfs_ref_num_refs(leaf
, ref
);
637 BUG_ON(num_refs
< refs_to_drop
);
638 num_refs
-= refs_to_drop
;
640 ret
= btrfs_del_item(trans
, root
, path
);
642 btrfs_set_ref_num_refs(leaf
, ref
, num_refs
);
643 btrfs_mark_buffer_dirty(leaf
);
645 btrfs_release_path(root
, path
);
649 #ifdef BIO_RW_DISCARD
650 static void btrfs_issue_discard(struct block_device
*bdev
,
653 blkdev_issue_discard(bdev
, start
>> 9, len
>> 9, GFP_KERNEL
);
657 static int btrfs_discard_extent(struct btrfs_root
*root
, u64 bytenr
,
660 #ifdef BIO_RW_DISCARD
662 u64 map_length
= num_bytes
;
663 struct btrfs_multi_bio
*multi
= NULL
;
665 /* Tell the block device(s) that the sectors can be discarded */
666 ret
= btrfs_map_block(&root
->fs_info
->mapping_tree
, READ
,
667 bytenr
, &map_length
, &multi
, 0);
669 struct btrfs_bio_stripe
*stripe
= multi
->stripes
;
672 if (map_length
> num_bytes
)
673 map_length
= num_bytes
;
675 for (i
= 0; i
< multi
->num_stripes
; i
++, stripe
++) {
676 btrfs_issue_discard(stripe
->dev
->bdev
,
689 static int __btrfs_update_extent_ref(struct btrfs_trans_handle
*trans
,
690 struct btrfs_root
*root
, u64 bytenr
,
692 u64 orig_parent
, u64 parent
,
693 u64 orig_root
, u64 ref_root
,
694 u64 orig_generation
, u64 ref_generation
,
698 int pin
= owner_objectid
< BTRFS_FIRST_FREE_OBJECTID
;
700 ret
= btrfs_update_delayed_ref(trans
, bytenr
, num_bytes
,
701 orig_parent
, parent
, orig_root
,
702 ref_root
, orig_generation
,
703 ref_generation
, owner_objectid
, pin
);
708 int btrfs_update_extent_ref(struct btrfs_trans_handle
*trans
,
709 struct btrfs_root
*root
, u64 bytenr
,
710 u64 num_bytes
, u64 orig_parent
, u64 parent
,
711 u64 ref_root
, u64 ref_generation
,
715 if (ref_root
== BTRFS_TREE_LOG_OBJECTID
&&
716 owner_objectid
< BTRFS_FIRST_FREE_OBJECTID
)
719 ret
= __btrfs_update_extent_ref(trans
, root
, bytenr
, num_bytes
,
720 orig_parent
, parent
, ref_root
,
721 ref_root
, ref_generation
,
722 ref_generation
, owner_objectid
);
725 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle
*trans
,
726 struct btrfs_root
*root
, u64 bytenr
,
728 u64 orig_parent
, u64 parent
,
729 u64 orig_root
, u64 ref_root
,
730 u64 orig_generation
, u64 ref_generation
,
735 ret
= btrfs_add_delayed_ref(trans
, bytenr
, num_bytes
, parent
, ref_root
,
736 ref_generation
, owner_objectid
,
737 BTRFS_ADD_DELAYED_REF
, 0);
742 static noinline_for_stack
int add_extent_ref(struct btrfs_trans_handle
*trans
,
743 struct btrfs_root
*root
, u64 bytenr
,
744 u64 num_bytes
, u64 parent
, u64 ref_root
,
745 u64 ref_generation
, u64 owner_objectid
,
748 struct btrfs_path
*path
;
750 struct btrfs_key key
;
751 struct extent_buffer
*l
;
752 struct btrfs_extent_item
*item
;
755 path
= btrfs_alloc_path();
760 path
->leave_spinning
= 1;
761 key
.objectid
= bytenr
;
762 key
.type
= BTRFS_EXTENT_ITEM_KEY
;
763 key
.offset
= num_bytes
;
765 /* first find the extent item and update its reference count */
766 ret
= btrfs_search_slot(trans
, root
->fs_info
->extent_root
, &key
,
769 btrfs_set_path_blocking(path
);
775 btrfs_free_path(path
);
780 btrfs_item_key_to_cpu(l
, &key
, path
->slots
[0]);
781 if (key
.objectid
!= bytenr
) {
782 btrfs_print_leaf(root
->fs_info
->extent_root
, path
->nodes
[0]);
783 printk(KERN_ERR
"btrfs wanted %llu found %llu\n",
784 (unsigned long long)bytenr
,
785 (unsigned long long)key
.objectid
);
788 BUG_ON(key
.type
!= BTRFS_EXTENT_ITEM_KEY
);
790 item
= btrfs_item_ptr(l
, path
->slots
[0], struct btrfs_extent_item
);
792 refs
= btrfs_extent_refs(l
, item
);
793 btrfs_set_extent_refs(l
, item
, refs
+ refs_to_add
);
794 btrfs_unlock_up_safe(path
, 1);
796 btrfs_mark_buffer_dirty(path
->nodes
[0]);
798 btrfs_release_path(root
->fs_info
->extent_root
, path
);
801 path
->leave_spinning
= 1;
803 /* now insert the actual backref */
804 ret
= insert_extent_backref(trans
, root
->fs_info
->extent_root
,
805 path
, bytenr
, parent
,
806 ref_root
, ref_generation
,
807 owner_objectid
, refs_to_add
);
809 btrfs_free_path(path
);
813 int btrfs_inc_extent_ref(struct btrfs_trans_handle
*trans
,
814 struct btrfs_root
*root
,
815 u64 bytenr
, u64 num_bytes
, u64 parent
,
816 u64 ref_root
, u64 ref_generation
,
820 if (ref_root
== BTRFS_TREE_LOG_OBJECTID
&&
821 owner_objectid
< BTRFS_FIRST_FREE_OBJECTID
)
824 ret
= __btrfs_inc_extent_ref(trans
, root
, bytenr
, num_bytes
, 0, parent
,
825 0, ref_root
, 0, ref_generation
,
830 static int drop_delayed_ref(struct btrfs_trans_handle
*trans
,
831 struct btrfs_root
*root
,
832 struct btrfs_delayed_ref_node
*node
)
835 struct btrfs_delayed_ref
*ref
= btrfs_delayed_node_to_ref(node
);
837 BUG_ON(node
->ref_mod
== 0);
838 ret
= __btrfs_free_extent(trans
, root
, node
->bytenr
, node
->num_bytes
,
839 node
->parent
, ref
->root
, ref
->generation
,
840 ref
->owner_objectid
, ref
->pin
, node
->ref_mod
);
/* helper function to actually process a single delayed ref entry */
846 static noinline
int run_one_delayed_ref(struct btrfs_trans_handle
*trans
,
847 struct btrfs_root
*root
,
848 struct btrfs_delayed_ref_node
*node
,
852 struct btrfs_delayed_ref
*ref
;
854 if (node
->parent
== (u64
)-1) {
855 struct btrfs_delayed_ref_head
*head
;
		/*
		 * we've hit the end of the chain and we were supposed
		 * to insert this extent into the tree.  But, it got
		 * deleted before we ever needed to insert it, so all
		 * we have to do is clean up the accounting
		 */
862 if (insert_reserved
) {
863 update_reserved_extents(root
, node
->bytenr
,
866 head
= btrfs_delayed_node_to_head(node
);
867 mutex_unlock(&head
->mutex
);
871 ref
= btrfs_delayed_node_to_ref(node
);
872 if (ref
->action
== BTRFS_ADD_DELAYED_REF
) {
873 if (insert_reserved
) {
874 struct btrfs_key ins
;
876 ins
.objectid
= node
->bytenr
;
877 ins
.offset
= node
->num_bytes
;
878 ins
.type
= BTRFS_EXTENT_ITEM_KEY
;
880 /* record the full extent allocation */
881 ret
= __btrfs_alloc_reserved_extent(trans
, root
,
882 node
->parent
, ref
->root
,
883 ref
->generation
, ref
->owner_objectid
,
884 &ins
, node
->ref_mod
);
885 update_reserved_extents(root
, node
->bytenr
,
888 /* just add one backref */
889 ret
= add_extent_ref(trans
, root
, node
->bytenr
,
891 node
->parent
, ref
->root
, ref
->generation
,
892 ref
->owner_objectid
, node
->ref_mod
);
895 } else if (ref
->action
== BTRFS_DROP_DELAYED_REF
) {
896 WARN_ON(insert_reserved
);
897 ret
= drop_delayed_ref(trans
, root
, node
);
902 static noinline
struct btrfs_delayed_ref_node
*
903 select_delayed_ref(struct btrfs_delayed_ref_head
*head
)
905 struct rb_node
*node
;
906 struct btrfs_delayed_ref_node
*ref
;
907 int action
= BTRFS_ADD_DELAYED_REF
;
	/*
	 * select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
	 * this prevents the ref count from going down to zero while
	 * there are still pending delayed refs.
	 */
914 node
= rb_prev(&head
->node
.rb_node
);
918 ref
= rb_entry(node
, struct btrfs_delayed_ref_node
,
920 if (ref
->bytenr
!= head
->node
.bytenr
)
922 if (btrfs_delayed_node_to_ref(ref
)->action
== action
)
924 node
= rb_prev(node
);
926 if (action
== BTRFS_ADD_DELAYED_REF
) {
927 action
= BTRFS_DROP_DELAYED_REF
;
933 static noinline
int run_clustered_refs(struct btrfs_trans_handle
*trans
,
934 struct btrfs_root
*root
,
935 struct list_head
*cluster
)
937 struct btrfs_delayed_ref_root
*delayed_refs
;
938 struct btrfs_delayed_ref_node
*ref
;
939 struct btrfs_delayed_ref_head
*locked_ref
= NULL
;
942 int must_insert_reserved
= 0;
944 delayed_refs
= &trans
->transaction
->delayed_refs
;
947 /* pick a new head ref from the cluster list */
948 if (list_empty(cluster
))
951 locked_ref
= list_entry(cluster
->next
,
952 struct btrfs_delayed_ref_head
, cluster
);
		/* grab the lock that says we are going to process
		 * all the refs for this head */
956 ret
= btrfs_delayed_ref_lock(trans
, locked_ref
);
		/*
		 * we may have dropped the spin lock to get the head
		 * mutex lock, and that might have given someone else
		 * time to free the head.  If that's true, it has been
		 * removed from our list and we can move on.
		 */
964 if (ret
== -EAGAIN
) {
		/*
		 * record the must_insert_reserved flag before we
		 * drop the spin lock.
		 */
975 must_insert_reserved
= locked_ref
->must_insert_reserved
;
976 locked_ref
->must_insert_reserved
= 0;
		/*
		 * locked_ref is the head node, so we have to go one
		 * node back for any delayed ref updates
		 */
982 ref
= select_delayed_ref(locked_ref
);
			/* All delayed refs have been processed, go ahead
			 * and send the head node to run_one_delayed_ref,
			 * so that any accounting fixes can happen
			 */
988 ref
= &locked_ref
->node
;
989 list_del_init(&locked_ref
->cluster
);
994 rb_erase(&ref
->rb_node
, &delayed_refs
->root
);
995 delayed_refs
->num_entries
--;
996 spin_unlock(&delayed_refs
->lock
);
998 ret
= run_one_delayed_ref(trans
, root
, ref
,
999 must_insert_reserved
);
1001 btrfs_put_delayed_ref(ref
);
1005 spin_lock(&delayed_refs
->lock
);
/*
 * this starts processing the delayed reference count updates and
 * extent insertions we have queued up so far.  count can be
 * 0, which means to process everything in the tree at the start
 * of the run (but not newly added entries), or it can be some target
 * number you'd like to process.
 */
1017 int btrfs_run_delayed_refs(struct btrfs_trans_handle
*trans
,
1018 struct btrfs_root
*root
, unsigned long count
)
1020 struct rb_node
*node
;
1021 struct btrfs_delayed_ref_root
*delayed_refs
;
1022 struct btrfs_delayed_ref_node
*ref
;
1023 struct list_head cluster
;
1025 int run_all
= count
== (unsigned long)-1;
1028 if (root
== root
->fs_info
->extent_root
)
1029 root
= root
->fs_info
->tree_root
;
1031 delayed_refs
= &trans
->transaction
->delayed_refs
;
1032 INIT_LIST_HEAD(&cluster
);
1034 spin_lock(&delayed_refs
->lock
);
1036 count
= delayed_refs
->num_entries
* 2;
1040 if (!(run_all
|| run_most
) &&
1041 delayed_refs
->num_heads_ready
< 64)
		/*
		 * go find something we can process in the rbtree.  We start at
		 * the beginning of the tree, and then build a cluster
		 * of refs to process starting at the first one we are able to
		 * lock
		 */
1050 ret
= btrfs_find_ref_cluster(trans
, &cluster
,
1051 delayed_refs
->run_delayed_start
);
1055 ret
= run_clustered_refs(trans
, root
, &cluster
);
1058 count
-= min_t(unsigned long, ret
, count
);
1065 node
= rb_first(&delayed_refs
->root
);
1068 count
= (unsigned long)-1;
1071 ref
= rb_entry(node
, struct btrfs_delayed_ref_node
,
1073 if (btrfs_delayed_ref_is_head(ref
)) {
1074 struct btrfs_delayed_ref_head
*head
;
1076 head
= btrfs_delayed_node_to_head(ref
);
1077 atomic_inc(&ref
->refs
);
1079 spin_unlock(&delayed_refs
->lock
);
1080 mutex_lock(&head
->mutex
);
1081 mutex_unlock(&head
->mutex
);
1083 btrfs_put_delayed_ref(ref
);
1087 node
= rb_next(node
);
1089 spin_unlock(&delayed_refs
->lock
);
1090 schedule_timeout(1);
1094 spin_unlock(&delayed_refs
->lock
);
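/*
 * Usage sketch (illustration only, based on the description above): a caller
 * that only wants to push the backlog queued so far passes count == 0, while
 * the commit path passes (unsigned long)-1 to drain everything, e.g.
 *
 *	btrfs_run_delayed_refs(trans, root, 0);
 *	btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
 */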
1098 int btrfs_cross_ref_exist(struct btrfs_trans_handle
*trans
,
1099 struct btrfs_root
*root
, u64 objectid
, u64 bytenr
)
1101 struct btrfs_root
*extent_root
= root
->fs_info
->extent_root
;
1102 struct btrfs_path
*path
;
1103 struct extent_buffer
*leaf
;
1104 struct btrfs_extent_ref
*ref_item
;
1105 struct btrfs_key key
;
1106 struct btrfs_key found_key
;
1112 key
.objectid
= bytenr
;
1113 key
.offset
= (u64
)-1;
1114 key
.type
= BTRFS_EXTENT_ITEM_KEY
;
1116 path
= btrfs_alloc_path();
1117 ret
= btrfs_search_slot(NULL
, extent_root
, &key
, path
, 0, 0);
1123 if (path
->slots
[0] == 0)
1127 leaf
= path
->nodes
[0];
1128 btrfs_item_key_to_cpu(leaf
, &found_key
, path
->slots
[0]);
1130 if (found_key
.objectid
!= bytenr
||
1131 found_key
.type
!= BTRFS_EXTENT_ITEM_KEY
)
1134 last_snapshot
= btrfs_root_last_snapshot(&root
->root_item
);
1136 leaf
= path
->nodes
[0];
1137 nritems
= btrfs_header_nritems(leaf
);
1138 if (path
->slots
[0] >= nritems
) {
1139 ret
= btrfs_next_leaf(extent_root
, path
);
1146 btrfs_item_key_to_cpu(leaf
, &found_key
, path
->slots
[0]);
1147 if (found_key
.objectid
!= bytenr
)
1150 if (found_key
.type
!= BTRFS_EXTENT_REF_KEY
) {
1155 ref_item
= btrfs_item_ptr(leaf
, path
->slots
[0],
1156 struct btrfs_extent_ref
);
1157 ref_root
= btrfs_ref_root(leaf
, ref_item
);
1158 if ((ref_root
!= root
->root_key
.objectid
&&
1159 ref_root
!= BTRFS_TREE_LOG_OBJECTID
) ||
1160 objectid
!= btrfs_ref_objectid(leaf
, ref_item
)) {
1164 if (btrfs_ref_generation(leaf
, ref_item
) <= last_snapshot
) {
1173 btrfs_free_path(path
);
1177 int btrfs_cache_ref(struct btrfs_trans_handle
*trans
, struct btrfs_root
*root
,
1178 struct extent_buffer
*buf
, u32 nr_extents
)
1180 struct btrfs_key key
;
1181 struct btrfs_file_extent_item
*fi
;
1189 if (!root
->ref_cows
)
1192 if (root
->root_key
.objectid
!= BTRFS_TREE_RELOC_OBJECTID
) {
1194 root_gen
= root
->root_key
.offset
;
1197 root_gen
= trans
->transid
- 1;
1200 level
= btrfs_header_level(buf
);
1201 nritems
= btrfs_header_nritems(buf
);
1204 struct btrfs_leaf_ref
*ref
;
1205 struct btrfs_extent_info
*info
;
1207 ref
= btrfs_alloc_leaf_ref(root
, nr_extents
);
1213 ref
->root_gen
= root_gen
;
1214 ref
->bytenr
= buf
->start
;
1215 ref
->owner
= btrfs_header_owner(buf
);
1216 ref
->generation
= btrfs_header_generation(buf
);
1217 ref
->nritems
= nr_extents
;
1218 info
= ref
->extents
;
1220 for (i
= 0; nr_extents
> 0 && i
< nritems
; i
++) {
1222 btrfs_item_key_to_cpu(buf
, &key
, i
);
1223 if (btrfs_key_type(&key
) != BTRFS_EXTENT_DATA_KEY
)
1225 fi
= btrfs_item_ptr(buf
, i
,
1226 struct btrfs_file_extent_item
);
1227 if (btrfs_file_extent_type(buf
, fi
) ==
1228 BTRFS_FILE_EXTENT_INLINE
)
1230 disk_bytenr
= btrfs_file_extent_disk_bytenr(buf
, fi
);
1231 if (disk_bytenr
== 0)
1234 info
->bytenr
= disk_bytenr
;
1236 btrfs_file_extent_disk_num_bytes(buf
, fi
);
1237 info
->objectid
= key
.objectid
;
1238 info
->offset
= key
.offset
;
1242 ret
= btrfs_add_leaf_ref(root
, ref
, shared
);
1243 if (ret
== -EEXIST
&& shared
) {
1244 struct btrfs_leaf_ref
*old
;
1245 old
= btrfs_lookup_leaf_ref(root
, ref
->bytenr
);
1247 btrfs_remove_leaf_ref(root
, old
);
1248 btrfs_free_leaf_ref(root
, old
);
1249 ret
= btrfs_add_leaf_ref(root
, ref
, shared
);
1252 btrfs_free_leaf_ref(root
, ref
);
/*
 * when a block goes through cow, we update the reference counts of
 * everything that block points to.  The internal pointers of the block
 * can be in just about any order, and it is likely to have clusters of
 * things that are close together and clusters of things that are not.
 *
 * To help reduce the seeks that come with updating all of these reference
 * counts, sort them by byte number before actual updates are done.
 *
 * struct refsort is used to match byte number to slot in the btree block.
 * we sort based on the byte number and then use the slot to actually
 * find the item.
 *
 * struct refsort is smaller than struct btrfs_item and smaller than
 * struct btrfs_key_ptr.  Since we're currently limited to the page size
 * for a btree block, there's no way for a kmalloc of refsorts for a
 * single node to be bigger than a page.
 */
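/*
 * Sketch of the resulting pattern (illustration only, mirroring btrfs_inc_ref
 * below):
 *
 *	sorted = kmalloc(sizeof(struct refsort) * nritems, GFP_NOFS);
 *	... record sorted[refi].bytenr and sorted[refi].slot for each slot ...
 *	sort(sorted, refi, sizeof(struct refsort), refsort_cmp, NULL);
 *
 * after which the reference updates are issued in ascending bytenr order.
 */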
/*
 * for passing into sort()
 */
static int refsort_cmp(const void *a_void, const void *b_void)
{
	const struct refsort *a = a_void;
	const struct refsort *b = b_void;

	if (a->bytenr < b->bytenr)
		return -1;
	if (a->bytenr > b->bytenr)
		return 1;
	return 0;
}
1296 noinline
int btrfs_inc_ref(struct btrfs_trans_handle
*trans
,
1297 struct btrfs_root
*root
,
1298 struct extent_buffer
*orig_buf
,
1299 struct extent_buffer
*buf
, u32
*nr_extents
)
1305 u64 orig_generation
;
1306 struct refsort
*sorted
;
1308 u32 nr_file_extents
= 0;
1309 struct btrfs_key key
;
1310 struct btrfs_file_extent_item
*fi
;
1317 int (*process_func
)(struct btrfs_trans_handle
*, struct btrfs_root
*,
1318 u64
, u64
, u64
, u64
, u64
, u64
, u64
, u64
, u64
);
1320 ref_root
= btrfs_header_owner(buf
);
1321 ref_generation
= btrfs_header_generation(buf
);
1322 orig_root
= btrfs_header_owner(orig_buf
);
1323 orig_generation
= btrfs_header_generation(orig_buf
);
1325 nritems
= btrfs_header_nritems(buf
);
1326 level
= btrfs_header_level(buf
);
1328 sorted
= kmalloc(sizeof(struct refsort
) * nritems
, GFP_NOFS
);
1331 if (root
->ref_cows
) {
1332 process_func
= __btrfs_inc_extent_ref
;
1335 root
->root_key
.objectid
!= BTRFS_TREE_LOG_OBJECTID
)
1338 root
->root_key
.objectid
== BTRFS_TREE_LOG_OBJECTID
)
1340 process_func
= __btrfs_update_extent_ref
;
	/*
	 * we make two passes through the items.  In the first pass we
	 * only record the byte number and slot.  Then we sort based on
	 * byte number and do the actual work based on the sorted results
	 */
1348 for (i
= 0; i
< nritems
; i
++) {
1351 btrfs_item_key_to_cpu(buf
, &key
, i
);
1352 if (btrfs_key_type(&key
) != BTRFS_EXTENT_DATA_KEY
)
1354 fi
= btrfs_item_ptr(buf
, i
,
1355 struct btrfs_file_extent_item
);
1356 if (btrfs_file_extent_type(buf
, fi
) ==
1357 BTRFS_FILE_EXTENT_INLINE
)
1359 bytenr
= btrfs_file_extent_disk_bytenr(buf
, fi
);
1364 sorted
[refi
].bytenr
= bytenr
;
1365 sorted
[refi
].slot
= i
;
1368 bytenr
= btrfs_node_blockptr(buf
, i
);
1369 sorted
[refi
].bytenr
= bytenr
;
1370 sorted
[refi
].slot
= i
;
	/*
	 * if refi == 0, we didn't actually put anything into the sorted
	 * array and we're done
	 */
1381 sort(sorted
, refi
, sizeof(struct refsort
), refsort_cmp
, NULL
);
1383 for (i
= 0; i
< refi
; i
++) {
1385 slot
= sorted
[i
].slot
;
1386 bytenr
= sorted
[i
].bytenr
;
1389 btrfs_item_key_to_cpu(buf
, &key
, slot
);
1390 fi
= btrfs_item_ptr(buf
, slot
,
1391 struct btrfs_file_extent_item
);
1393 bytenr
= btrfs_file_extent_disk_bytenr(buf
, fi
);
1397 ret
= process_func(trans
, root
, bytenr
,
1398 btrfs_file_extent_disk_num_bytes(buf
, fi
),
1399 orig_buf
->start
, buf
->start
,
1400 orig_root
, ref_root
,
1401 orig_generation
, ref_generation
,
1410 ret
= process_func(trans
, root
, bytenr
, buf
->len
,
1411 orig_buf
->start
, buf
->start
,
1412 orig_root
, ref_root
,
1413 orig_generation
, ref_generation
,
1426 *nr_extents
= nr_file_extents
;
1428 *nr_extents
= nritems
;
1437 int btrfs_update_ref(struct btrfs_trans_handle
*trans
,
1438 struct btrfs_root
*root
, struct extent_buffer
*orig_buf
,
1439 struct extent_buffer
*buf
, int start_slot
, int nr
)
1446 u64 orig_generation
;
1447 struct btrfs_key key
;
1448 struct btrfs_file_extent_item
*fi
;
1454 BUG_ON(start_slot
< 0);
1455 BUG_ON(start_slot
+ nr
> btrfs_header_nritems(buf
));
1457 ref_root
= btrfs_header_owner(buf
);
1458 ref_generation
= btrfs_header_generation(buf
);
1459 orig_root
= btrfs_header_owner(orig_buf
);
1460 orig_generation
= btrfs_header_generation(orig_buf
);
1461 level
= btrfs_header_level(buf
);
1463 if (!root
->ref_cows
) {
1465 root
->root_key
.objectid
!= BTRFS_TREE_LOG_OBJECTID
)
1468 root
->root_key
.objectid
== BTRFS_TREE_LOG_OBJECTID
)
1472 for (i
= 0, slot
= start_slot
; i
< nr
; i
++, slot
++) {
1475 btrfs_item_key_to_cpu(buf
, &key
, slot
);
1476 if (btrfs_key_type(&key
) != BTRFS_EXTENT_DATA_KEY
)
1478 fi
= btrfs_item_ptr(buf
, slot
,
1479 struct btrfs_file_extent_item
);
1480 if (btrfs_file_extent_type(buf
, fi
) ==
1481 BTRFS_FILE_EXTENT_INLINE
)
1483 bytenr
= btrfs_file_extent_disk_bytenr(buf
, fi
);
1486 ret
= __btrfs_update_extent_ref(trans
, root
, bytenr
,
1487 btrfs_file_extent_disk_num_bytes(buf
, fi
),
1488 orig_buf
->start
, buf
->start
,
1489 orig_root
, ref_root
, orig_generation
,
1490 ref_generation
, key
.objectid
);
1494 bytenr
= btrfs_node_blockptr(buf
, slot
);
1495 ret
= __btrfs_update_extent_ref(trans
, root
, bytenr
,
1496 buf
->len
, orig_buf
->start
,
1497 buf
->start
, orig_root
, ref_root
,
1498 orig_generation
, ref_generation
,
1510 static int write_one_cache_group(struct btrfs_trans_handle
*trans
,
1511 struct btrfs_root
*root
,
1512 struct btrfs_path
*path
,
1513 struct btrfs_block_group_cache
*cache
)
1516 struct btrfs_root
*extent_root
= root
->fs_info
->extent_root
;
1518 struct extent_buffer
*leaf
;
1520 ret
= btrfs_search_slot(trans
, extent_root
, &cache
->key
, path
, 0, 1);
1525 leaf
= path
->nodes
[0];
1526 bi
= btrfs_item_ptr_offset(leaf
, path
->slots
[0]);
1527 write_extent_buffer(leaf
, &cache
->item
, bi
, sizeof(cache
->item
));
1528 btrfs_mark_buffer_dirty(leaf
);
1529 btrfs_release_path(extent_root
, path
);
1537 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle
*trans
,
1538 struct btrfs_root
*root
)
1540 struct btrfs_block_group_cache
*cache
, *entry
;
1544 struct btrfs_path
*path
;
1547 path
= btrfs_alloc_path();
1553 spin_lock(&root
->fs_info
->block_group_cache_lock
);
1554 for (n
= rb_first(&root
->fs_info
->block_group_cache_tree
);
1555 n
; n
= rb_next(n
)) {
1556 entry
= rb_entry(n
, struct btrfs_block_group_cache
,
1563 spin_unlock(&root
->fs_info
->block_group_cache_lock
);
1569 last
+= cache
->key
.offset
;
1571 err
= write_one_cache_group(trans
, root
,
		/*
		 * if we fail to write the cache group, we want
		 * to keep it marked dirty in hopes that a later
		 * write will work
		 */
1583 btrfs_free_path(path
);
1587 int btrfs_extent_readonly(struct btrfs_root
*root
, u64 bytenr
)
1589 struct btrfs_block_group_cache
*block_group
;
1592 block_group
= btrfs_lookup_block_group(root
->fs_info
, bytenr
);
1593 if (!block_group
|| block_group
->ro
)
1596 btrfs_put_block_group(block_group
);
1600 static int update_space_info(struct btrfs_fs_info
*info
, u64 flags
,
1601 u64 total_bytes
, u64 bytes_used
,
1602 struct btrfs_space_info
**space_info
)
1604 struct btrfs_space_info
*found
;
1606 found
= __find_space_info(info
, flags
);
1608 spin_lock(&found
->lock
);
1609 found
->total_bytes
+= total_bytes
;
1610 found
->bytes_used
+= bytes_used
;
1612 spin_unlock(&found
->lock
);
1613 *space_info
= found
;
1616 found
= kzalloc(sizeof(*found
), GFP_NOFS
);
1620 INIT_LIST_HEAD(&found
->block_groups
);
1621 init_rwsem(&found
->groups_sem
);
1622 spin_lock_init(&found
->lock
);
1623 found
->flags
= flags
;
1624 found
->total_bytes
= total_bytes
;
1625 found
->bytes_used
= bytes_used
;
1626 found
->bytes_pinned
= 0;
1627 found
->bytes_reserved
= 0;
1628 found
->bytes_readonly
= 0;
1629 found
->bytes_delalloc
= 0;
1631 found
->force_alloc
= 0;
1632 *space_info
= found
;
1633 list_add_rcu(&found
->list
, &info
->space_info
);
1637 static void set_avail_alloc_bits(struct btrfs_fs_info
*fs_info
, u64 flags
)
1639 u64 extra_flags
= flags
& (BTRFS_BLOCK_GROUP_RAID0
|
1640 BTRFS_BLOCK_GROUP_RAID1
|
1641 BTRFS_BLOCK_GROUP_RAID10
|
1642 BTRFS_BLOCK_GROUP_DUP
);
1644 if (flags
& BTRFS_BLOCK_GROUP_DATA
)
1645 fs_info
->avail_data_alloc_bits
|= extra_flags
;
1646 if (flags
& BTRFS_BLOCK_GROUP_METADATA
)
1647 fs_info
->avail_metadata_alloc_bits
|= extra_flags
;
1648 if (flags
& BTRFS_BLOCK_GROUP_SYSTEM
)
1649 fs_info
->avail_system_alloc_bits
|= extra_flags
;
1653 static void set_block_group_readonly(struct btrfs_block_group_cache
*cache
)
1655 spin_lock(&cache
->space_info
->lock
);
1656 spin_lock(&cache
->lock
);
1658 cache
->space_info
->bytes_readonly
+= cache
->key
.offset
-
1659 btrfs_block_group_used(&cache
->item
);
1662 spin_unlock(&cache
->lock
);
1663 spin_unlock(&cache
->space_info
->lock
);
1666 u64
btrfs_reduce_alloc_profile(struct btrfs_root
*root
, u64 flags
)
1668 u64 num_devices
= root
->fs_info
->fs_devices
->rw_devices
;
1670 if (num_devices
== 1)
1671 flags
&= ~(BTRFS_BLOCK_GROUP_RAID1
| BTRFS_BLOCK_GROUP_RAID0
);
1672 if (num_devices
< 4)
1673 flags
&= ~BTRFS_BLOCK_GROUP_RAID10
;
1675 if ((flags
& BTRFS_BLOCK_GROUP_DUP
) &&
1676 (flags
& (BTRFS_BLOCK_GROUP_RAID1
|
1677 BTRFS_BLOCK_GROUP_RAID10
))) {
1678 flags
&= ~BTRFS_BLOCK_GROUP_DUP
;
1681 if ((flags
& BTRFS_BLOCK_GROUP_RAID1
) &&
1682 (flags
& BTRFS_BLOCK_GROUP_RAID10
)) {
1683 flags
&= ~BTRFS_BLOCK_GROUP_RAID1
;
1686 if ((flags
& BTRFS_BLOCK_GROUP_RAID0
) &&
1687 ((flags
& BTRFS_BLOCK_GROUP_RAID1
) |
1688 (flags
& BTRFS_BLOCK_GROUP_RAID10
) |
1689 (flags
& BTRFS_BLOCK_GROUP_DUP
)))
1690 flags
&= ~BTRFS_BLOCK_GROUP_RAID0
;
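/*
 * Worked example (illustration only): with a single rw device, a request for
 * BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_RAID1 is reduced to plain
 * BTRFS_BLOCK_GROUP_METADATA because the RAID1 bit is masked off above; with
 * fewer than four devices the RAID10 bit is dropped the same way.
 */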
1694 static u64
btrfs_get_alloc_profile(struct btrfs_root
*root
, u64 data
)
1696 struct btrfs_fs_info
*info
= root
->fs_info
;
1700 alloc_profile
= info
->avail_data_alloc_bits
&
1701 info
->data_alloc_profile
;
1702 data
= BTRFS_BLOCK_GROUP_DATA
| alloc_profile
;
1703 } else if (root
== root
->fs_info
->chunk_root
) {
1704 alloc_profile
= info
->avail_system_alloc_bits
&
1705 info
->system_alloc_profile
;
1706 data
= BTRFS_BLOCK_GROUP_SYSTEM
| alloc_profile
;
1708 alloc_profile
= info
->avail_metadata_alloc_bits
&
1709 info
->metadata_alloc_profile
;
1710 data
= BTRFS_BLOCK_GROUP_METADATA
| alloc_profile
;
1713 return btrfs_reduce_alloc_profile(root
, data
);
1716 void btrfs_set_inode_space_info(struct btrfs_root
*root
, struct inode
*inode
)
1720 alloc_target
= btrfs_get_alloc_profile(root
, 1);
1721 BTRFS_I(inode
)->space_info
= __find_space_info(root
->fs_info
,
/*
 * for now this just makes sure we have at least 5% of our metadata space free
 * for use.
 */
1729 int btrfs_check_metadata_free_space(struct btrfs_root
*root
)
1731 struct btrfs_fs_info
*info
= root
->fs_info
;
1732 struct btrfs_space_info
*meta_sinfo
;
1733 u64 alloc_target
, thresh
;
1734 int committed
= 0, ret
;
1736 /* get the space info for where the metadata will live */
1737 alloc_target
= btrfs_get_alloc_profile(root
, 0);
1738 meta_sinfo
= __find_space_info(info
, alloc_target
);
1741 spin_lock(&meta_sinfo
->lock
);
1742 if (!meta_sinfo
->full
)
1743 thresh
= meta_sinfo
->total_bytes
* 80;
1745 thresh
= meta_sinfo
->total_bytes
* 95;
1747 do_div(thresh
, 100);
1749 if (meta_sinfo
->bytes_used
+ meta_sinfo
->bytes_reserved
+
1750 meta_sinfo
->bytes_pinned
+ meta_sinfo
->bytes_readonly
> thresh
) {
1751 struct btrfs_trans_handle
*trans
;
1752 if (!meta_sinfo
->full
) {
1753 meta_sinfo
->force_alloc
= 1;
1754 spin_unlock(&meta_sinfo
->lock
);
1756 trans
= btrfs_start_transaction(root
, 1);
1760 ret
= do_chunk_alloc(trans
, root
->fs_info
->extent_root
,
1761 2 * 1024 * 1024, alloc_target
, 0);
1762 btrfs_end_transaction(trans
, root
);
1765 spin_unlock(&meta_sinfo
->lock
);
1769 trans
= btrfs_join_transaction(root
, 1);
1772 ret
= btrfs_commit_transaction(trans
, root
);
1779 spin_unlock(&meta_sinfo
->lock
);
/*
 * This will check the space that the inode allocates from to make sure we have
 * enough space for bytes.
 */
1788 int btrfs_check_data_free_space(struct btrfs_root
*root
, struct inode
*inode
,
1791 struct btrfs_space_info
*data_sinfo
;
1792 int ret
= 0, committed
= 0;
	/* make sure bytes are sectorsize aligned */
	bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
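	/*
	 * Worked example of the round-up above (illustration only): with a
	 * 4096 byte sectorsize, a request for 5000 bytes becomes
	 * (5000 + 4095) & ~4095 = 8192 bytes.
	 */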
1797 data_sinfo
= BTRFS_I(inode
)->space_info
;
1799 /* make sure we have enough space to handle the data first */
1800 spin_lock(&data_sinfo
->lock
);
1801 if (data_sinfo
->total_bytes
- data_sinfo
->bytes_used
-
1802 data_sinfo
->bytes_delalloc
- data_sinfo
->bytes_reserved
-
1803 data_sinfo
->bytes_pinned
- data_sinfo
->bytes_readonly
-
1804 data_sinfo
->bytes_may_use
< bytes
) {
1805 struct btrfs_trans_handle
*trans
;
		/*
		 * if we don't have enough free bytes in this space then we need
		 * to alloc a new chunk.
		 */
1811 if (!data_sinfo
->full
) {
1814 data_sinfo
->force_alloc
= 1;
1815 spin_unlock(&data_sinfo
->lock
);
1817 alloc_target
= btrfs_get_alloc_profile(root
, 1);
1818 trans
= btrfs_start_transaction(root
, 1);
1822 ret
= do_chunk_alloc(trans
, root
->fs_info
->extent_root
,
1823 bytes
+ 2 * 1024 * 1024,
1825 btrfs_end_transaction(trans
, root
);
1830 spin_unlock(&data_sinfo
->lock
);
1832 /* commit the current transaction and try again */
1835 trans
= btrfs_join_transaction(root
, 1);
1838 ret
= btrfs_commit_transaction(trans
, root
);
1844 printk(KERN_ERR
"no space left, need %llu, %llu delalloc bytes"
1845 ", %llu bytes_used, %llu bytes_reserved, "
1846 "%llu bytes_pinned, %llu bytes_readonly, %llu may use"
1847 "%llu total\n", (unsigned long long)bytes
,
1848 (unsigned long long)data_sinfo
->bytes_delalloc
,
1849 (unsigned long long)data_sinfo
->bytes_used
,
1850 (unsigned long long)data_sinfo
->bytes_reserved
,
1851 (unsigned long long)data_sinfo
->bytes_pinned
,
1852 (unsigned long long)data_sinfo
->bytes_readonly
,
1853 (unsigned long long)data_sinfo
->bytes_may_use
,
1854 (unsigned long long)data_sinfo
->total_bytes
);
1857 data_sinfo
->bytes_may_use
+= bytes
;
1858 BTRFS_I(inode
)->reserved_bytes
+= bytes
;
1859 spin_unlock(&data_sinfo
->lock
);
1861 return btrfs_check_metadata_free_space(root
);
/*
 * if there was an error for whatever reason after calling
 * btrfs_check_data_free_space, call this so we can clean up the counters.
 */
1868 void btrfs_free_reserved_data_space(struct btrfs_root
*root
,
1869 struct inode
*inode
, u64 bytes
)
1871 struct btrfs_space_info
*data_sinfo
;
	/* make sure bytes are sectorsize aligned */
	bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
1876 data_sinfo
= BTRFS_I(inode
)->space_info
;
1877 spin_lock(&data_sinfo
->lock
);
1878 data_sinfo
->bytes_may_use
-= bytes
;
1879 BTRFS_I(inode
)->reserved_bytes
-= bytes
;
1880 spin_unlock(&data_sinfo
->lock
);
1883 /* called when we are adding a delalloc extent to the inode's io_tree */
1884 void btrfs_delalloc_reserve_space(struct btrfs_root
*root
, struct inode
*inode
,
1887 struct btrfs_space_info
*data_sinfo
;
1889 /* get the space info for where this inode will be storing its data */
1890 data_sinfo
= BTRFS_I(inode
)->space_info
;
1892 /* make sure we have enough space to handle the data first */
1893 spin_lock(&data_sinfo
->lock
);
1894 data_sinfo
->bytes_delalloc
+= bytes
;
	/*
	 * we are adding a delalloc extent without calling
	 * btrfs_check_data_free_space first.  This happens on a weird
	 * writepage condition, but shouldn't hurt our accounting
	 */
1901 if (unlikely(bytes
> BTRFS_I(inode
)->reserved_bytes
)) {
1902 data_sinfo
->bytes_may_use
-= BTRFS_I(inode
)->reserved_bytes
;
1903 BTRFS_I(inode
)->reserved_bytes
= 0;
1905 data_sinfo
->bytes_may_use
-= bytes
;
1906 BTRFS_I(inode
)->reserved_bytes
-= bytes
;
1909 spin_unlock(&data_sinfo
->lock
);
1912 /* called when we are clearing an delalloc extent from the inode's io_tree */
1913 void btrfs_delalloc_free_space(struct btrfs_root
*root
, struct inode
*inode
,
1916 struct btrfs_space_info
*info
;
1918 info
= BTRFS_I(inode
)->space_info
;
1920 spin_lock(&info
->lock
);
1921 info
->bytes_delalloc
-= bytes
;
1922 spin_unlock(&info
->lock
);
1925 static void force_metadata_allocation(struct btrfs_fs_info
*info
)
1927 struct list_head
*head
= &info
->space_info
;
1928 struct btrfs_space_info
*found
;
1931 list_for_each_entry_rcu(found
, head
, list
) {
1932 if (found
->flags
& BTRFS_BLOCK_GROUP_METADATA
)
1933 found
->force_alloc
= 1;
1938 static int do_chunk_alloc(struct btrfs_trans_handle
*trans
,
1939 struct btrfs_root
*extent_root
, u64 alloc_bytes
,
1940 u64 flags
, int force
)
1942 struct btrfs_space_info
*space_info
;
1943 struct btrfs_fs_info
*fs_info
= extent_root
->fs_info
;
1947 mutex_lock(&fs_info
->chunk_mutex
);
1949 flags
= btrfs_reduce_alloc_profile(extent_root
, flags
);
1951 space_info
= __find_space_info(extent_root
->fs_info
, flags
);
1953 ret
= update_space_info(extent_root
->fs_info
, flags
,
1957 BUG_ON(!space_info
);
1959 spin_lock(&space_info
->lock
);
1960 if (space_info
->force_alloc
) {
1962 space_info
->force_alloc
= 0;
1964 if (space_info
->full
) {
1965 spin_unlock(&space_info
->lock
);
1969 thresh
= space_info
->total_bytes
- space_info
->bytes_readonly
;
1970 thresh
= div_factor(thresh
, 6);
1972 (space_info
->bytes_used
+ space_info
->bytes_pinned
+
1973 space_info
->bytes_reserved
+ alloc_bytes
) < thresh
) {
1974 spin_unlock(&space_info
->lock
);
1977 spin_unlock(&space_info
->lock
);
	/*
	 * if we're doing a data chunk, go ahead and make sure that
	 * we keep a reasonable number of metadata chunks allocated in the
	 * FS as well
	 */
1984 if (flags
& BTRFS_BLOCK_GROUP_DATA
) {
1985 fs_info
->data_chunk_allocations
++;
1986 if (!(fs_info
->data_chunk_allocations
%
1987 fs_info
->metadata_ratio
))
1988 force_metadata_allocation(fs_info
);
1991 ret
= btrfs_alloc_chunk(trans
, extent_root
, flags
);
1993 space_info
->full
= 1;
1995 mutex_unlock(&extent_root
->fs_info
->chunk_mutex
);
1999 static int update_block_group(struct btrfs_trans_handle
*trans
,
2000 struct btrfs_root
*root
,
2001 u64 bytenr
, u64 num_bytes
, int alloc
,
2004 struct btrfs_block_group_cache
*cache
;
2005 struct btrfs_fs_info
*info
= root
->fs_info
;
2006 u64 total
= num_bytes
;
2011 cache
= btrfs_lookup_block_group(info
, bytenr
);
2014 byte_in_group
= bytenr
- cache
->key
.objectid
;
2015 WARN_ON(byte_in_group
> cache
->key
.offset
);
2017 spin_lock(&cache
->space_info
->lock
);
2018 spin_lock(&cache
->lock
);
2020 old_val
= btrfs_block_group_used(&cache
->item
);
2021 num_bytes
= min(total
, cache
->key
.offset
- byte_in_group
);
2023 old_val
+= num_bytes
;
2024 cache
->space_info
->bytes_used
+= num_bytes
;
2026 cache
->space_info
->bytes_readonly
-= num_bytes
;
2027 btrfs_set_block_group_used(&cache
->item
, old_val
);
2028 spin_unlock(&cache
->lock
);
2029 spin_unlock(&cache
->space_info
->lock
);
2031 old_val
-= num_bytes
;
2032 cache
->space_info
->bytes_used
-= num_bytes
;
2034 cache
->space_info
->bytes_readonly
+= num_bytes
;
2035 btrfs_set_block_group_used(&cache
->item
, old_val
);
2036 spin_unlock(&cache
->lock
);
2037 spin_unlock(&cache
->space_info
->lock
);
2041 ret
= btrfs_discard_extent(root
, bytenr
,
2045 ret
= btrfs_add_free_space(cache
, bytenr
,
2050 btrfs_put_block_group(cache
);
2052 bytenr
+= num_bytes
;
2057 static u64
first_logical_byte(struct btrfs_root
*root
, u64 search_start
)
2059 struct btrfs_block_group_cache
*cache
;
2062 cache
= btrfs_lookup_first_block_group(root
->fs_info
, search_start
);
2066 bytenr
= cache
->key
.objectid
;
2067 btrfs_put_block_group(cache
);
2072 int btrfs_update_pinned_extents(struct btrfs_root
*root
,
2073 u64 bytenr
, u64 num
, int pin
)
2076 struct btrfs_block_group_cache
*cache
;
2077 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
2080 set_extent_dirty(&fs_info
->pinned_extents
,
2081 bytenr
, bytenr
+ num
- 1, GFP_NOFS
);
2083 clear_extent_dirty(&fs_info
->pinned_extents
,
2084 bytenr
, bytenr
+ num
- 1, GFP_NOFS
);
2088 cache
= btrfs_lookup_block_group(fs_info
, bytenr
);
2090 len
= min(num
, cache
->key
.offset
-
2091 (bytenr
- cache
->key
.objectid
));
2093 spin_lock(&cache
->space_info
->lock
);
2094 spin_lock(&cache
->lock
);
2095 cache
->pinned
+= len
;
2096 cache
->space_info
->bytes_pinned
+= len
;
2097 spin_unlock(&cache
->lock
);
2098 spin_unlock(&cache
->space_info
->lock
);
2099 fs_info
->total_pinned
+= len
;
2101 spin_lock(&cache
->space_info
->lock
);
2102 spin_lock(&cache
->lock
);
2103 cache
->pinned
-= len
;
2104 cache
->space_info
->bytes_pinned
-= len
;
2105 spin_unlock(&cache
->lock
);
2106 spin_unlock(&cache
->space_info
->lock
);
2107 fs_info
->total_pinned
-= len
;
2109 btrfs_add_free_space(cache
, bytenr
, len
);
2111 btrfs_put_block_group(cache
);
2118 static int update_reserved_extents(struct btrfs_root
*root
,
2119 u64 bytenr
, u64 num
, int reserve
)
2122 struct btrfs_block_group_cache
*cache
;
2123 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
2126 cache
= btrfs_lookup_block_group(fs_info
, bytenr
);
2128 len
= min(num
, cache
->key
.offset
-
2129 (bytenr
- cache
->key
.objectid
));
2131 spin_lock(&cache
->space_info
->lock
);
2132 spin_lock(&cache
->lock
);
2134 cache
->reserved
+= len
;
2135 cache
->space_info
->bytes_reserved
+= len
;
2137 cache
->reserved
-= len
;
2138 cache
->space_info
->bytes_reserved
-= len
;
2140 spin_unlock(&cache
->lock
);
2141 spin_unlock(&cache
->space_info
->lock
);
2142 btrfs_put_block_group(cache
);
2149 int btrfs_copy_pinned(struct btrfs_root
*root
, struct extent_io_tree
*copy
)
2154 struct extent_io_tree
*pinned_extents
= &root
->fs_info
->pinned_extents
;
2158 ret
= find_first_extent_bit(pinned_extents
, last
,
2159 &start
, &end
, EXTENT_DIRTY
);
2162 set_extent_dirty(copy
, start
, end
, GFP_NOFS
);
2168 int btrfs_finish_extent_commit(struct btrfs_trans_handle
*trans
,
2169 struct btrfs_root
*root
,
2170 struct extent_io_tree
*unpin
)
2177 ret
= find_first_extent_bit(unpin
, 0, &start
, &end
,
2182 ret
= btrfs_discard_extent(root
, start
, end
+ 1 - start
);
2184 /* unlocks the pinned mutex */
2185 btrfs_update_pinned_extents(root
, start
, end
+ 1 - start
, 0);
2186 clear_extent_dirty(unpin
, start
, end
, GFP_NOFS
);
2193 static int pin_down_bytes(struct btrfs_trans_handle
*trans
,
2194 struct btrfs_root
*root
,
2195 struct btrfs_path
*path
,
2196 u64 bytenr
, u64 num_bytes
, int is_data
,
2197 struct extent_buffer
**must_clean
)
2200 struct extent_buffer
*buf
;
2205 buf
= btrfs_find_tree_block(root
, bytenr
, num_bytes
);
		/* we can reuse a block if it hasn't been written
		 * and it is from this transaction.  We can't
		 * reuse anything from the tree log root because
		 * it has tiny sub-transactions.
		 */
2214 if (btrfs_buffer_uptodate(buf
, 0) &&
2215 btrfs_try_tree_lock(buf
)) {
2216 u64 header_owner
= btrfs_header_owner(buf
);
2217 u64 header_transid
= btrfs_header_generation(buf
);
2218 if (header_owner
!= BTRFS_TREE_LOG_OBJECTID
&&
2219 header_owner
!= BTRFS_TREE_RELOC_OBJECTID
&&
2220 header_owner
!= BTRFS_DATA_RELOC_TREE_OBJECTID
&&
2221 header_transid
== trans
->transid
&&
2222 !btrfs_header_flag(buf
, BTRFS_HEADER_FLAG_WRITTEN
)) {
2226 btrfs_tree_unlock(buf
);
2228 free_extent_buffer(buf
);
2230 btrfs_set_path_blocking(path
);
2231 /* unlocks the pinned mutex */
2232 btrfs_update_pinned_extents(root
, bytenr
, num_bytes
, 1);
/*
 * remove an extent from the root, returns 0 on success
 */
2241 static int __free_extent(struct btrfs_trans_handle
*trans
,
2242 struct btrfs_root
*root
,
2243 u64 bytenr
, u64 num_bytes
, u64 parent
,
2244 u64 root_objectid
, u64 ref_generation
,
2245 u64 owner_objectid
, int pin
, int mark_free
,
2248 struct btrfs_path
*path
;
2249 struct btrfs_key key
;
2250 struct btrfs_fs_info
*info
= root
->fs_info
;
2251 struct btrfs_root
*extent_root
= info
->extent_root
;
2252 struct extent_buffer
*leaf
;
2254 int extent_slot
= 0;
2255 int found_extent
= 0;
2257 struct btrfs_extent_item
*ei
;
2260 key
.objectid
= bytenr
;
2261 btrfs_set_key_type(&key
, BTRFS_EXTENT_ITEM_KEY
);
2262 key
.offset
= num_bytes
;
2263 path
= btrfs_alloc_path();
2268 path
->leave_spinning
= 1;
2269 ret
= lookup_extent_backref(trans
, extent_root
, path
,
2270 bytenr
, parent
, root_objectid
,
2271 ref_generation
, owner_objectid
, 1);
2273 struct btrfs_key found_key
;
2274 extent_slot
= path
->slots
[0];
2275 while (extent_slot
> 0) {
2277 btrfs_item_key_to_cpu(path
->nodes
[0], &found_key
,
2279 if (found_key
.objectid
!= bytenr
)
2281 if (found_key
.type
== BTRFS_EXTENT_ITEM_KEY
&&
2282 found_key
.offset
== num_bytes
) {
2286 if (path
->slots
[0] - extent_slot
> 5)
2289 if (!found_extent
) {
2290 ret
= remove_extent_backref(trans
, extent_root
, path
,
2293 btrfs_release_path(extent_root
, path
);
2294 path
->leave_spinning
= 1;
2295 ret
= btrfs_search_slot(trans
, extent_root
,
2298 printk(KERN_ERR
"umm, got %d back from search"
2299 ", was looking for %llu\n", ret
,
2300 (unsigned long long)bytenr
);
2301 btrfs_print_leaf(extent_root
, path
->nodes
[0]);
2304 extent_slot
= path
->slots
[0];
2307 btrfs_print_leaf(extent_root
, path
->nodes
[0]);
2309 printk(KERN_ERR
"btrfs unable to find ref byte nr %llu "
2310 "parent %llu root %llu gen %llu owner %llu\n",
2311 (unsigned long long)bytenr
,
2312 (unsigned long long)parent
,
2313 (unsigned long long)root_objectid
,
2314 (unsigned long long)ref_generation
,
2315 (unsigned long long)owner_objectid
);
2318 leaf
= path
->nodes
[0];
2319 ei
= btrfs_item_ptr(leaf
, extent_slot
,
2320 struct btrfs_extent_item
);
2321 refs
= btrfs_extent_refs(leaf
, ei
);
	/*
	 * we're not allowed to delete the extent item if there
	 * are other delayed ref updates pending
	 */
2328 BUG_ON(refs
< refs_to_drop
);
2329 refs
-= refs_to_drop
;
2330 btrfs_set_extent_refs(leaf
, ei
, refs
);
2331 btrfs_mark_buffer_dirty(leaf
);
2333 if (refs
== 0 && found_extent
&&
2334 path
->slots
[0] == extent_slot
+ 1) {
2335 struct btrfs_extent_ref
*ref
;
2336 ref
= btrfs_item_ptr(leaf
, path
->slots
[0],
2337 struct btrfs_extent_ref
);
2338 BUG_ON(btrfs_ref_num_refs(leaf
, ref
) != refs_to_drop
);
		/* if the back ref and the extent are next to each other
		 * they get deleted below in one shot
		 */
2342 path
->slots
[0] = extent_slot
;
2344 } else if (found_extent
) {
2345 /* otherwise delete the extent back ref */
2346 ret
= remove_extent_backref(trans
, extent_root
, path
,
		/* if refs are 0, we need to set up the path for deletion */
2351 btrfs_release_path(extent_root
, path
);
2352 path
->leave_spinning
= 1;
2353 ret
= btrfs_search_slot(trans
, extent_root
, &key
, path
,
2362 struct extent_buffer
*must_clean
= NULL
;
2365 ret
= pin_down_bytes(trans
, root
, path
,
2367 owner_objectid
>= BTRFS_FIRST_FREE_OBJECTID
,
2374 /* block accounting for super block */
2375 spin_lock(&info
->delalloc_lock
);
2376 super_used
= btrfs_super_bytes_used(&info
->super_copy
);
2377 btrfs_set_super_bytes_used(&info
->super_copy
,
2378 super_used
- num_bytes
);
2380 /* block accounting for root item */
2381 root_used
= btrfs_root_used(&root
->root_item
);
2382 btrfs_set_root_used(&root
->root_item
,
2383 root_used
- num_bytes
);
2384 spin_unlock(&info
->delalloc_lock
);
		/*
		 * it is going to be very rare for someone to be waiting
		 * on the block we're freeing.  del_items might need to
		 * schedule, so rather than get fancy, just force it
		 * to blocking here
		 */
2393 btrfs_set_lock_blocking(must_clean
);
2395 ret
= btrfs_del_items(trans
, extent_root
, path
, path
->slots
[0],
2398 btrfs_release_path(extent_root
, path
);
2401 clean_tree_block(NULL
, root
, must_clean
);
2402 btrfs_tree_unlock(must_clean
);
2403 free_extent_buffer(must_clean
);
2406 if (owner_objectid
>= BTRFS_FIRST_FREE_OBJECTID
) {
2407 ret
= btrfs_del_csums(trans
, root
, bytenr
, num_bytes
);
2410 invalidate_mapping_pages(info
->btree_inode
->i_mapping
,
2411 bytenr
>> PAGE_CACHE_SHIFT
,
2412 (bytenr
+ num_bytes
- 1) >> PAGE_CACHE_SHIFT
);
2415 ret
= update_block_group(trans
, root
, bytenr
, num_bytes
, 0,
2419 btrfs_free_path(path
);
/*
 * remove an extent from the root, returns 0 on success
 */
2426 static int __btrfs_free_extent(struct btrfs_trans_handle
*trans
,
2427 struct btrfs_root
*root
,
2428 u64 bytenr
, u64 num_bytes
, u64 parent
,
2429 u64 root_objectid
, u64 ref_generation
,
2430 u64 owner_objectid
, int pin
,
2433 WARN_ON(num_bytes
< root
->sectorsize
);
2436 * if metadata always pin
2437 * if data pin when any transaction has committed this
2439 if (owner_objectid
< BTRFS_FIRST_FREE_OBJECTID
||
2440 ref_generation
!= trans
->transid
)
2443 if (ref_generation
!= trans
->transid
)
2446 return __free_extent(trans
, root
, bytenr
, num_bytes
, parent
,
2447 root_objectid
, ref_generation
,
2448 owner_objectid
, pin
, pin
== 0, refs_to_drop
);
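/*
 * Illustrative note (not from the original source): the checks above force
 * pinning in two cases.  A metadata extent (owner_objectid below
 * BTRFS_FIRST_FREE_OBJECTID) is always pinned, and any extent whose
 * reference was created in an earlier transaction (ref_generation !=
 * trans->transid) is pinned as well, so its space cannot be reused until the
 * current transaction commits.  Only a data extent freed in the same
 * transaction that allocated it keeps whatever pin value the caller passed
 * in, which lets that space be handed out again immediately.
 */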
/*
 * when we free an extent, it is possible (and likely) that we free the last
 * delayed ref for that extent as well.  This searches the delayed ref tree
 * for a given extent, and if there are no other delayed refs to be processed,
 * it removes it from the tree.
 */
static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root, u64 bytenr)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	struct rb_node *node;
	int ret;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (!head)
		goto out;

	node = rb_prev(&head->node.rb_node);
	if (!node)
		goto out;

	ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);

	/* there are still entries for this ref, we can't drop it */
	if (ref->bytenr == bytenr)
		goto out;

	/*
	 * waiting for the lock here would deadlock.  If someone else has it
	 * locked they are already in the process of dropping it anyway
	 */
	if (!mutex_trylock(&head->mutex))
		goto out;

	/*
	 * at this point we have a head with no other entries.  Go
	 * ahead and process it.
	 */
	head->node.in_tree = 0;
	rb_erase(&head->node.rb_node, &delayed_refs->root);

	delayed_refs->num_entries--;

	/*
	 * we don't take a ref on the node because we're removing it from the
	 * tree, so we just steal the ref the tree was holding.
	 */
	delayed_refs->num_heads--;
	if (list_empty(&head->cluster))
		delayed_refs->num_heads_ready--;

	list_del_init(&head->cluster);
	spin_unlock(&delayed_refs->lock);

	ret = run_one_delayed_ref(trans, root->fs_info->tree_root,
				  &head->node, head->must_insert_reserved);
	BUG_ON(ret);
	btrfs_put_delayed_ref(&head->node);
	return 0;
out:
	spin_unlock(&delayed_refs->lock);
	return 0;
}
int btrfs_free_extent(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root,
		      u64 bytenr, u64 num_bytes, u64 parent,
		      u64 root_objectid, u64 ref_generation,
		      u64 owner_objectid, int pin)
{
	int ret;

	/*
	 * tree log blocks never actually go into the extent allocation
	 * tree, just update pinning info and exit early.
	 *
	 * data extents referenced by the tree log do need to have
	 * their reference counts bumped.
	 */
	if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID &&
	    owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
		/* unlocks the pinned mutex */
		btrfs_update_pinned_extents(root, bytenr, num_bytes, 1);
		update_reserved_extents(root, bytenr, num_bytes, 0);
		ret = 0;
	} else {
		ret = btrfs_add_delayed_ref(trans, bytenr, num_bytes, parent,
					    root_objectid, ref_generation,
					    owner_objectid,
					    BTRFS_DROP_DELAYED_REF, 1);
		BUG_ON(ret);
		ret = check_ref_cleanup(trans, root, bytenr);
		BUG_ON(ret);
	}
	return ret;
}
static u64 stripe_align(struct btrfs_root *root, u64 val)
{
	u64 mask = ((u64)root->stripesize - 1);
	u64 ret = (val + mask) & ~mask;
	return ret;
}
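/*
 * Illustrative note (not from the original source): stripe_align() is the
 * usual round-up-to-a-power-of-two computation, assuming stripesize is a
 * power of two.  For example, with root->stripesize == 4096:
 *
 *	mask = 4095 (0x0fff)
 *	val  = 12289 -> (12289 + 4095) & ~4095 = 16384
 *	val  = 16384 -> already aligned, stays 16384
 */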
/*
 * walks the btree of allocated extents and find a hole of a given size.
 * The key ins is changed to record the hole:
 * ins->objectid == block start
 * ins->flags = BTRFS_EXTENT_ITEM_KEY
 * ins->offset == number of blocks
 * Any available blocks before search_start are skipped.
 */
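/*
 * Illustrative example (not from the original source, numbers invented): a
 * successful search for a 16K hole might hand back
 *
 *	ins->objectid = 136708096;	start of the free region
 *	ins->offset   = 16384;		size of the reservation
 *
 * with the key type set to BTRFS_EXTENT_ITEM_KEY via btrfs_set_key_type(),
 * as done at the top of the function.  The caller then feeds this key to the
 * delayed-ref and reserved-extent helpers further down in this file.
 */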
2567 static noinline
int find_free_extent(struct btrfs_trans_handle
*trans
,
2568 struct btrfs_root
*orig_root
,
2569 u64 num_bytes
, u64 empty_size
,
2570 u64 search_start
, u64 search_end
,
2571 u64 hint_byte
, struct btrfs_key
*ins
,
2572 u64 exclude_start
, u64 exclude_nr
,
2576 struct btrfs_root
*root
= orig_root
->fs_info
->extent_root
;
2577 struct btrfs_free_cluster
*last_ptr
= NULL
;
2578 struct btrfs_block_group_cache
*block_group
= NULL
;
2579 int empty_cluster
= 2 * 1024 * 1024;
2580 int allowed_chunk_alloc
= 0;
2581 struct btrfs_space_info
*space_info
;
2582 int last_ptr_loop
= 0;
2585 WARN_ON(num_bytes
< root
->sectorsize
);
2586 btrfs_set_key_type(ins
, BTRFS_EXTENT_ITEM_KEY
);
2590 space_info
= __find_space_info(root
->fs_info
, data
);
2592 if (orig_root
->ref_cows
|| empty_size
)
2593 allowed_chunk_alloc
= 1;
2595 if (data
& BTRFS_BLOCK_GROUP_METADATA
) {
2596 last_ptr
= &root
->fs_info
->meta_alloc_cluster
;
2597 if (!btrfs_test_opt(root
, SSD
))
2598 empty_cluster
= 64 * 1024;
2601 if ((data
& BTRFS_BLOCK_GROUP_DATA
) && btrfs_test_opt(root
, SSD
)) {
2602 last_ptr
= &root
->fs_info
->data_alloc_cluster
;
2606 spin_lock(&last_ptr
->lock
);
2607 if (last_ptr
->block_group
)
2608 hint_byte
= last_ptr
->window_start
;
2609 spin_unlock(&last_ptr
->lock
);
2612 search_start
= max(search_start
, first_logical_byte(root
, 0));
2613 search_start
= max(search_start
, hint_byte
);
2620 if (search_start
== hint_byte
) {
2621 block_group
= btrfs_lookup_block_group(root
->fs_info
,
2623 if (block_group
&& block_group_bits(block_group
, data
)) {
2624 down_read(&space_info
->groups_sem
);
2625 if (list_empty(&block_group
->list
) ||
2628 * someone is removing this block group,
2629 * we can't jump into the have_block_group
2630 * target because our list pointers are not
2633 btrfs_put_block_group(block_group
);
2634 up_read(&space_info
->groups_sem
);
2636 goto have_block_group
;
2637 } else if (block_group
) {
2638 btrfs_put_block_group(block_group
);
2643 down_read(&space_info
->groups_sem
);
2644 list_for_each_entry(block_group
, &space_info
->block_groups
, list
) {
2647 atomic_inc(&block_group
->count
);
2648 search_start
= block_group
->key
.objectid
;
2651 if (unlikely(!block_group
->cached
)) {
2652 mutex_lock(&block_group
->cache_mutex
);
2653 ret
= cache_block_group(root
, block_group
);
2654 mutex_unlock(&block_group
->cache_mutex
);
2656 btrfs_put_block_group(block_group
);
2661 if (unlikely(block_group
->ro
))
2666 * the refill lock keeps out other
2667 * people trying to start a new cluster
2669 spin_lock(&last_ptr
->refill_lock
);
2670 if (last_ptr
->block_group
&&
2671 (last_ptr
->block_group
->ro
||
2672 !block_group_bits(last_ptr
->block_group
, data
))) {
2674 goto refill_cluster
;
2677 offset
= btrfs_alloc_from_cluster(block_group
, last_ptr
,
2678 num_bytes
, search_start
);
2680 /* we have a block, we're done */
2681 spin_unlock(&last_ptr
->refill_lock
);
2685 spin_lock(&last_ptr
->lock
);
2687 * whoops, this cluster doesn't actually point to
2688 * this block group. Get a ref on the block
2689 * group is does point to and try again
2691 if (!last_ptr_loop
&& last_ptr
->block_group
&&
2692 last_ptr
->block_group
!= block_group
) {
2694 btrfs_put_block_group(block_group
);
2695 block_group
= last_ptr
->block_group
;
2696 atomic_inc(&block_group
->count
);
2697 spin_unlock(&last_ptr
->lock
);
2698 spin_unlock(&last_ptr
->refill_lock
);
2701 search_start
= block_group
->key
.objectid
;
2703 * we know this block group is properly
2704 * in the list because
2705 * btrfs_remove_block_group, drops the
2706 * cluster before it removes the block
2707 * group from the list
2709 goto have_block_group
;
2711 spin_unlock(&last_ptr
->lock
);
2714 * this cluster didn't work out, free it and
2717 btrfs_return_cluster_to_free_space(NULL
, last_ptr
);
2721 /* allocate a cluster in this block group */
2722 ret
= btrfs_find_space_cluster(trans
,
2723 block_group
, last_ptr
,
2725 empty_cluster
+ empty_size
);
2728 * now pull our allocation out of this
2731 offset
= btrfs_alloc_from_cluster(block_group
,
2732 last_ptr
, num_bytes
,
2735 /* we found one, proceed */
2736 spin_unlock(&last_ptr
->refill_lock
);
2741 * at this point we either didn't find a cluster
2742 * or we weren't able to allocate a block from our
2743 * cluster. Free the cluster we've been trying
2744 * to use, and go to the next block group
2747 btrfs_return_cluster_to_free_space(NULL
,
2749 spin_unlock(&last_ptr
->refill_lock
);
2752 spin_unlock(&last_ptr
->refill_lock
);
2755 offset
= btrfs_find_space_for_alloc(block_group
, search_start
,
2756 num_bytes
, empty_size
);
2760 search_start
= stripe_align(root
, offset
);
2762 /* move on to the next group */
2763 if (search_start
+ num_bytes
>= search_end
) {
2764 btrfs_add_free_space(block_group
, offset
, num_bytes
);
2768 /* move on to the next group */
2769 if (search_start
+ num_bytes
>
2770 block_group
->key
.objectid
+ block_group
->key
.offset
) {
2771 btrfs_add_free_space(block_group
, offset
, num_bytes
);
2775 if (exclude_nr
> 0 &&
2776 (search_start
+ num_bytes
> exclude_start
&&
2777 search_start
< exclude_start
+ exclude_nr
)) {
2778 search_start
= exclude_start
+ exclude_nr
;
2780 btrfs_add_free_space(block_group
, offset
, num_bytes
);
2782 * if search_start is still in this block group
2783 * then we just re-search this block group
2785 if (search_start
>= block_group
->key
.objectid
&&
2786 search_start
< (block_group
->key
.objectid
+
2787 block_group
->key
.offset
))
2788 goto have_block_group
;
2792 ins
->objectid
= search_start
;
2793 ins
->offset
= num_bytes
;
2795 if (offset
< search_start
)
2796 btrfs_add_free_space(block_group
, offset
,
2797 search_start
- offset
);
2798 BUG_ON(offset
> search_start
);
2800 /* we are all good, lets return */
2803 btrfs_put_block_group(block_group
);
2805 up_read(&space_info
->groups_sem
);
	/* loop == 0, try to find a clustered alloc in every block group
	 * loop == 1, try again after forcing a chunk allocation
	 * loop == 2, set empty_size and empty_cluster to 0 and try again
	 */
2811 if (!ins
->objectid
&& loop
< 3 &&
2812 (empty_size
|| empty_cluster
|| allowed_chunk_alloc
)) {
2818 if (allowed_chunk_alloc
) {
2819 ret
= do_chunk_alloc(trans
, root
, num_bytes
+
2820 2 * 1024 * 1024, data
, 1);
2821 allowed_chunk_alloc
= 0;
2823 space_info
->force_alloc
= 1;
2831 } else if (!ins
->objectid
) {
2835 /* we found what we needed */
2836 if (ins
->objectid
) {
2837 if (!(data
& BTRFS_BLOCK_GROUP_DATA
))
2838 trans
->block_group
= block_group
->key
.objectid
;
2840 btrfs_put_block_group(block_group
);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes)
{
	struct btrfs_block_group_cache *cache;

	printk(KERN_INFO "space_info has %llu free, is %sfull\n",
	       (unsigned long long)(info->total_bytes - info->bytes_used -
				    info->bytes_pinned - info->bytes_reserved),
	       (info->full) ? "" : "not ");
	printk(KERN_INFO "space_info total=%llu, pinned=%llu, delalloc=%llu,"
	       " may_use=%llu, used=%llu\n",
	       (unsigned long long)info->total_bytes,
	       (unsigned long long)info->bytes_pinned,
	       (unsigned long long)info->bytes_delalloc,
	       (unsigned long long)info->bytes_may_use,
	       (unsigned long long)info->bytes_used);

	down_read(&info->groups_sem);
	list_for_each_entry(cache, &info->block_groups, list) {
		spin_lock(&cache->lock);
		printk(KERN_INFO "block group %llu has %llu bytes, %llu used "
		       "%llu pinned %llu reserved\n",
		       (unsigned long long)cache->key.objectid,
		       (unsigned long long)cache->key.offset,
		       (unsigned long long)btrfs_block_group_used(&cache->item),
		       (unsigned long long)cache->pinned,
		       (unsigned long long)cache->reserved);
		btrfs_dump_free_space(cache, bytes);
		spin_unlock(&cache->lock);
	}
	up_read(&info->groups_sem);
}
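/*
 * Illustrative note (not from the original source): the printks above
 * produce dmesg lines of roughly this shape; every number below is invented
 * purely for the example:
 *
 *	space_info has 1048576 free, is not full
 *	space_info total=8388608, pinned=0, delalloc=0, may_use=0, used=7340032
 *	block group 12582912 has 8388608 bytes, 7340032 used 0 pinned 0 reserved
 */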
2879 static int __btrfs_reserve_extent(struct btrfs_trans_handle
*trans
,
2880 struct btrfs_root
*root
,
2881 u64 num_bytes
, u64 min_alloc_size
,
2882 u64 empty_size
, u64 hint_byte
,
2883 u64 search_end
, struct btrfs_key
*ins
,
2887 u64 search_start
= 0;
2888 struct btrfs_fs_info
*info
= root
->fs_info
;
2890 data
= btrfs_get_alloc_profile(root
, data
);
2893 * the only place that sets empty_size is btrfs_realloc_node, which
2894 * is not called recursively on allocations
2896 if (empty_size
|| root
->ref_cows
) {
2897 if (!(data
& BTRFS_BLOCK_GROUP_METADATA
)) {
2898 ret
= do_chunk_alloc(trans
, root
->fs_info
->extent_root
,
2900 BTRFS_BLOCK_GROUP_METADATA
|
2901 (info
->metadata_alloc_profile
&
2902 info
->avail_metadata_alloc_bits
), 0);
2904 ret
= do_chunk_alloc(trans
, root
->fs_info
->extent_root
,
2905 num_bytes
+ 2 * 1024 * 1024, data
, 0);
2908 WARN_ON(num_bytes
< root
->sectorsize
);
2909 ret
= find_free_extent(trans
, root
, num_bytes
, empty_size
,
2910 search_start
, search_end
, hint_byte
, ins
,
2911 trans
->alloc_exclude_start
,
2912 trans
->alloc_exclude_nr
, data
);
2914 if (ret
== -ENOSPC
&& num_bytes
> min_alloc_size
) {
2915 num_bytes
= num_bytes
>> 1;
2916 num_bytes
= num_bytes
& ~(root
->sectorsize
- 1);
2917 num_bytes
= max(num_bytes
, min_alloc_size
);
2918 do_chunk_alloc(trans
, root
->fs_info
->extent_root
,
2919 num_bytes
, data
, 1);
2923 struct btrfs_space_info
*sinfo
;
2925 sinfo
= __find_space_info(root
->fs_info
, data
);
2926 printk(KERN_ERR
"btrfs allocation failed flags %llu, "
2927 "wanted %llu\n", (unsigned long long)data
,
2928 (unsigned long long)num_bytes
);
2929 dump_space_info(sinfo
, num_bytes
);
int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len)
{
	struct btrfs_block_group_cache *cache;
	int ret = 0;

	cache = btrfs_lookup_block_group(root->fs_info, start);
	if (!cache) {
		printk(KERN_ERR "Unable to find block group for %llu\n",
		       (unsigned long long)start);
		return -ENOSPC;
	}

	ret = btrfs_discard_extent(root, start, len);

	btrfs_add_free_space(cache, start, len);
	btrfs_put_block_group(cache);
	update_reserved_extents(root, start, len, 0);

	return ret;
}
int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 u64 num_bytes, u64 min_alloc_size,
			 u64 empty_size, u64 hint_byte,
			 u64 search_end, struct btrfs_key *ins,
			 u64 data)
{
	int ret;

	ret = __btrfs_reserve_extent(trans, root, num_bytes, min_alloc_size,
				     empty_size, hint_byte, search_end, ins,
				     data);
	update_reserved_extents(root, ins->objectid, ins->offset, 1);
	return ret;
}
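/*
 * Illustrative usage sketch (not from the original source): a typical data
 * allocation path reserves space first and only later turns it into a real
 * extent item, releasing the reservation if it gives up along the way:
 *
 *	struct btrfs_key ins;
 *	int ret;
 *
 *	ret = btrfs_reserve_extent(trans, root, num_bytes, root->sectorsize,
 *				   0, 0, (u64)-1, &ins,
 *				   BTRFS_BLOCK_GROUP_DATA);
 *	if (!ret && need_to_abort)
 *		btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
 *
 * 'need_to_abort' is a placeholder for whatever error handling the caller
 * needs; the real callers live in the data write-out path.
 */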
2972 static int __btrfs_alloc_reserved_extent(struct btrfs_trans_handle
*trans
,
2973 struct btrfs_root
*root
, u64 parent
,
2974 u64 root_objectid
, u64 ref_generation
,
2975 u64 owner
, struct btrfs_key
*ins
,
2981 u64 num_bytes
= ins
->offset
;
2983 struct btrfs_fs_info
*info
= root
->fs_info
;
2984 struct btrfs_root
*extent_root
= info
->extent_root
;
2985 struct btrfs_extent_item
*extent_item
;
2986 struct btrfs_extent_ref
*ref
;
2987 struct btrfs_path
*path
;
2988 struct btrfs_key keys
[2];
2991 parent
= ins
->objectid
;
2993 /* block accounting for super block */
2994 spin_lock(&info
->delalloc_lock
);
2995 super_used
= btrfs_super_bytes_used(&info
->super_copy
);
2996 btrfs_set_super_bytes_used(&info
->super_copy
, super_used
+ num_bytes
);
2998 /* block accounting for root item */
2999 root_used
= btrfs_root_used(&root
->root_item
);
3000 btrfs_set_root_used(&root
->root_item
, root_used
+ num_bytes
);
3001 spin_unlock(&info
->delalloc_lock
);
3003 memcpy(&keys
[0], ins
, sizeof(*ins
));
3004 keys
[1].objectid
= ins
->objectid
;
3005 keys
[1].type
= BTRFS_EXTENT_REF_KEY
;
3006 keys
[1].offset
= parent
;
3007 sizes
[0] = sizeof(*extent_item
);
3008 sizes
[1] = sizeof(*ref
);
3010 path
= btrfs_alloc_path();
3013 path
->leave_spinning
= 1;
3014 ret
= btrfs_insert_empty_items(trans
, extent_root
, path
, keys
,
3018 extent_item
= btrfs_item_ptr(path
->nodes
[0], path
->slots
[0],
3019 struct btrfs_extent_item
);
3020 btrfs_set_extent_refs(path
->nodes
[0], extent_item
, ref_mod
);
3021 ref
= btrfs_item_ptr(path
->nodes
[0], path
->slots
[0] + 1,
3022 struct btrfs_extent_ref
);
3024 btrfs_set_ref_root(path
->nodes
[0], ref
, root_objectid
);
3025 btrfs_set_ref_generation(path
->nodes
[0], ref
, ref_generation
);
3026 btrfs_set_ref_objectid(path
->nodes
[0], ref
, owner
);
3027 btrfs_set_ref_num_refs(path
->nodes
[0], ref
, ref_mod
);
3029 btrfs_mark_buffer_dirty(path
->nodes
[0]);
3031 trans
->alloc_exclude_start
= 0;
3032 trans
->alloc_exclude_nr
= 0;
3033 btrfs_free_path(path
);
3038 ret
= update_block_group(trans
, root
, ins
->objectid
,
3041 printk(KERN_ERR
"btrfs update block group failed for %llu "
3042 "%llu\n", (unsigned long long)ins
->objectid
,
3043 (unsigned long long)ins
->offset
);
int btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, u64 parent,
				u64 root_objectid, u64 ref_generation,
				u64 owner, struct btrfs_key *ins)
{
	int ret;

	if (root_objectid == BTRFS_TREE_LOG_OBJECTID)
		return 0;

	ret = btrfs_add_delayed_ref(trans, ins->objectid,
				    ins->offset, parent, root_objectid,
				    ref_generation, owner,
				    BTRFS_ADD_DELAYED_EXTENT, 0);
	return ret;
}
/*
 * this is used by the tree logging recovery code.  It records that
 * an extent has been allocated and makes sure to clear the free
 * space cache bits as well
 */
int btrfs_alloc_logged_extent(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, u64 parent,
				u64 root_objectid, u64 ref_generation,
				u64 owner, struct btrfs_key *ins)
{
	int ret;
	struct btrfs_block_group_cache *block_group;

	block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
	mutex_lock(&block_group->cache_mutex);
	cache_block_group(root, block_group);
	mutex_unlock(&block_group->cache_mutex);

	ret = btrfs_remove_free_space(block_group, ins->objectid,
				      ins->offset);
	BUG_ON(ret);
	btrfs_put_block_group(block_group);
	ret = __btrfs_alloc_reserved_extent(trans, root, parent, root_objectid,
					    ref_generation, owner, ins, 1);
	return ret;
}
/*
 * finds a free extent and does all the dirty work required for allocation
 * returns the key for the extent through ins, and a tree buffer for
 * the first block of the extent through buf.
 *
 * returns 0 if everything worked, non-zero otherwise.
 */
int btrfs_alloc_extent(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root,
		       u64 num_bytes, u64 parent, u64 min_alloc_size,
		       u64 root_objectid, u64 ref_generation,
		       u64 owner_objectid, u64 empty_size, u64 hint_byte,
		       u64 search_end, struct btrfs_key *ins, u64 data)
{
	int ret;

	ret = __btrfs_reserve_extent(trans, root, num_bytes,
				     min_alloc_size, empty_size, hint_byte,
				     search_end, ins, data);
	BUG_ON(ret);
	if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
		ret = btrfs_add_delayed_ref(trans, ins->objectid,
					    ins->offset, parent, root_objectid,
					    ref_generation, owner_objectid,
					    BTRFS_ADD_DELAYED_EXTENT, 0);
		BUG_ON(ret);
	}
	update_reserved_extents(root, ins->objectid, ins->offset, 1);
	return ret;
}
struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    u64 bytenr, u32 blocksize,
					    int level)
{
	struct extent_buffer *buf;

	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
	if (!buf)
		return ERR_PTR(-ENOMEM);
	btrfs_set_header_generation(buf, trans->transid);
	btrfs_set_buffer_lockdep_class(buf, level);
	btrfs_tree_lock(buf);
	clean_tree_block(trans, root, buf);

	btrfs_set_lock_blocking(buf);
	btrfs_set_buffer_uptodate(buf);

	if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
		set_extent_dirty(&root->dirty_log_pages, buf->start,
				 buf->start + buf->len - 1, GFP_NOFS);
	} else {
		set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
				 buf->start + buf->len - 1, GFP_NOFS);
	}
	trans->blocks_used++;
	/* this returns a buffer locked for blocking */
	return buf;
}
/*
 * helper function to allocate a block for a given tree
 * returns the tree buffer or NULL.
 */
struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     u32 blocksize, u64 parent,
					     u64 root_objectid,
					     u64 ref_generation,
					     int level,
					     u64 hint,
					     u64 empty_size)
{
	struct btrfs_key ins;
	int ret;
	struct extent_buffer *buf;

	ret = btrfs_alloc_extent(trans, root, blocksize, parent, blocksize,
				 root_objectid, ref_generation, level,
				 empty_size, hint, (u64)-1, &ins, 0);
	if (ret) {
		BUG_ON(ret > 0);
		return ERR_PTR(ret);
	}

	buf = btrfs_init_new_buffer(trans, root, ins.objectid,
				    blocksize, level);
	return buf;
}
3185 int btrfs_drop_leaf_ref(struct btrfs_trans_handle
*trans
,
3186 struct btrfs_root
*root
, struct extent_buffer
*leaf
)
3189 u64 leaf_generation
;
3190 struct refsort
*sorted
;
3191 struct btrfs_key key
;
3192 struct btrfs_file_extent_item
*fi
;
3199 BUG_ON(!btrfs_is_leaf(leaf
));
3200 nritems
= btrfs_header_nritems(leaf
);
3201 leaf_owner
= btrfs_header_owner(leaf
);
3202 leaf_generation
= btrfs_header_generation(leaf
);
3204 sorted
= kmalloc(sizeof(*sorted
) * nritems
, GFP_NOFS
);
3205 /* we do this loop twice. The first time we build a list
3206 * of the extents we have a reference on, then we sort the list
3207 * by bytenr. The second time around we actually do the
3210 for (i
= 0; i
< nritems
; i
++) {
3214 btrfs_item_key_to_cpu(leaf
, &key
, i
);
3216 /* only extents have references, skip everything else */
3217 if (btrfs_key_type(&key
) != BTRFS_EXTENT_DATA_KEY
)
3220 fi
= btrfs_item_ptr(leaf
, i
, struct btrfs_file_extent_item
);
3222 /* inline extents live in the btree, they don't have refs */
3223 if (btrfs_file_extent_type(leaf
, fi
) ==
3224 BTRFS_FILE_EXTENT_INLINE
)
3227 disk_bytenr
= btrfs_file_extent_disk_bytenr(leaf
, fi
);
3229 /* holes don't have refs */
3230 if (disk_bytenr
== 0)
3233 sorted
[refi
].bytenr
= disk_bytenr
;
3234 sorted
[refi
].slot
= i
;
3241 sort(sorted
, refi
, sizeof(struct refsort
), refsort_cmp
, NULL
);
3243 for (i
= 0; i
< refi
; i
++) {
3246 disk_bytenr
= sorted
[i
].bytenr
;
3247 slot
= sorted
[i
].slot
;
3251 btrfs_item_key_to_cpu(leaf
, &key
, slot
);
3252 if (btrfs_key_type(&key
) != BTRFS_EXTENT_DATA_KEY
)
3255 fi
= btrfs_item_ptr(leaf
, slot
, struct btrfs_file_extent_item
);
3257 ret
= btrfs_free_extent(trans
, root
, disk_bytenr
,
3258 btrfs_file_extent_disk_num_bytes(leaf
, fi
),
3259 leaf
->start
, leaf_owner
, leaf_generation
,
3263 atomic_inc(&root
->fs_info
->throttle_gen
);
3264 wake_up(&root
->fs_info
->transaction_throttle
);
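/*
 * Illustrative sketch (not from the original source): the sort() calls above
 * use refsort_cmp, defined earlier in this file, to order the refsort
 * records by bytenr.  A comparator of that shape looks roughly like this,
 * assuming only the two fields (bytenr, slot) that the loops above rely on:
 *
 *	static int refsort_cmp(const void *a_void, const void *b_void)
 *	{
 *		const struct refsort *a = a_void;
 *		const struct refsort *b = b_void;
 *
 *		if (a->bytenr < b->bytenr)
 *			return -1;
 *		if (a->bytenr > b->bytenr)
 *			return 1;
 *		return 0;
 *	}
 *
 * Sorting by bytenr means the frees walk the extent allocation tree in
 * roughly ascending disk order instead of random leaf order.
 */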
3272 static noinline
int cache_drop_leaf_ref(struct btrfs_trans_handle
*trans
,
3273 struct btrfs_root
*root
,
3274 struct btrfs_leaf_ref
*ref
)
3278 struct btrfs_extent_info
*info
;
3279 struct refsort
*sorted
;
3281 if (ref
->nritems
== 0)
3284 sorted
= kmalloc(sizeof(*sorted
) * ref
->nritems
, GFP_NOFS
);
3285 for (i
= 0; i
< ref
->nritems
; i
++) {
3286 sorted
[i
].bytenr
= ref
->extents
[i
].bytenr
;
3289 sort(sorted
, ref
->nritems
, sizeof(struct refsort
), refsort_cmp
, NULL
);
3292 * the items in the ref were sorted when the ref was inserted
3293 * into the ref cache, so this is already in order
3295 for (i
= 0; i
< ref
->nritems
; i
++) {
3296 info
= ref
->extents
+ sorted
[i
].slot
;
3297 ret
= btrfs_free_extent(trans
, root
, info
->bytenr
,
3298 info
->num_bytes
, ref
->bytenr
,
3299 ref
->owner
, ref
->generation
,
3302 atomic_inc(&root
->fs_info
->throttle_gen
);
3303 wake_up(&root
->fs_info
->transaction_throttle
);
3314 static int drop_snap_lookup_refcount(struct btrfs_trans_handle
*trans
,
3315 struct btrfs_root
*root
, u64 start
,
3320 ret
= btrfs_lookup_extent_ref(trans
, root
, start
, len
, refs
);
3323 #if 0 /* some debugging code in case we see problems here */
3324 /* if the refs count is one, it won't get increased again. But
3325 * if the ref count is > 1, someone may be decreasing it at
3326 * the same time we are.
3329 struct extent_buffer
*eb
= NULL
;
3330 eb
= btrfs_find_create_tree_block(root
, start
, len
);
3332 btrfs_tree_lock(eb
);
3334 mutex_lock(&root
->fs_info
->alloc_mutex
);
3335 ret
= lookup_extent_ref(NULL
, root
, start
, len
, refs
);
3337 mutex_unlock(&root
->fs_info
->alloc_mutex
);
3340 btrfs_tree_unlock(eb
);
3341 free_extent_buffer(eb
);
3344 printk(KERN_ERR
"btrfs block %llu went down to one "
3345 "during drop_snap\n", (unsigned long long)start
);
/*
 * this is used while deleting old snapshots, and it drops the refs
 * on a whole subtree starting from a level 1 node.
 *
 * The idea is to sort all the leaf pointers, and then drop the
 * ref on all the leaves in order.  Most of the time the leaves
 * will have ref cache entries, so no leaf IOs will be required to
 * find the extents they have references on.
 *
 * For each leaf, any references it has are also dropped in order
 *
 * This ends up dropping the references in something close to optimal
 * order for reading and modifying the extent allocation tree.
 */
3369 static noinline
int drop_level_one_refs(struct btrfs_trans_handle
*trans
,
3370 struct btrfs_root
*root
,
3371 struct btrfs_path
*path
)
3376 struct extent_buffer
*eb
= path
->nodes
[1];
3377 struct extent_buffer
*leaf
;
3378 struct btrfs_leaf_ref
*ref
;
3379 struct refsort
*sorted
= NULL
;
3380 int nritems
= btrfs_header_nritems(eb
);
3384 int slot
= path
->slots
[1];
3385 u32 blocksize
= btrfs_level_size(root
, 0);
3391 root_owner
= btrfs_header_owner(eb
);
3392 root_gen
= btrfs_header_generation(eb
);
3393 sorted
= kmalloc(sizeof(*sorted
) * nritems
, GFP_NOFS
);
3396 * step one, sort all the leaf pointers so we don't scribble
3397 * randomly into the extent allocation tree
3399 for (i
= slot
; i
< nritems
; i
++) {
3400 sorted
[refi
].bytenr
= btrfs_node_blockptr(eb
, i
);
3401 sorted
[refi
].slot
= i
;
3406 * nritems won't be zero, but if we're picking up drop_snapshot
3407 * after a crash, slot might be > 0, so double check things
3413 sort(sorted
, refi
, sizeof(struct refsort
), refsort_cmp
, NULL
);
3416 * the first loop frees everything the leaves point to
3418 for (i
= 0; i
< refi
; i
++) {
3421 bytenr
= sorted
[i
].bytenr
;
3424 * check the reference count on this leaf. If it is > 1
3425 * we just decrement it below and don't update any
3426 * of the refs the leaf points to.
3428 ret
= drop_snap_lookup_refcount(trans
, root
, bytenr
,
3434 ptr_gen
= btrfs_node_ptr_generation(eb
, sorted
[i
].slot
);
3437 * the leaf only had one reference, which means the
3438 * only thing pointing to this leaf is the snapshot
3439 * we're deleting. It isn't possible for the reference
3440 * count to increase again later
3442 * The reference cache is checked for the leaf,
3443 * and if found we'll be able to drop any refs held by
3444 * the leaf without needing to read it in.
3446 ref
= btrfs_lookup_leaf_ref(root
, bytenr
);
3447 if (ref
&& ref
->generation
!= ptr_gen
) {
3448 btrfs_free_leaf_ref(root
, ref
);
3452 ret
= cache_drop_leaf_ref(trans
, root
, ref
);
3454 btrfs_remove_leaf_ref(root
, ref
);
3455 btrfs_free_leaf_ref(root
, ref
);
3458 * the leaf wasn't in the reference cache, so
3459 * we have to read it.
3461 leaf
= read_tree_block(root
, bytenr
, blocksize
,
3463 ret
= btrfs_drop_leaf_ref(trans
, root
, leaf
);
3465 free_extent_buffer(leaf
);
3467 atomic_inc(&root
->fs_info
->throttle_gen
);
3468 wake_up(&root
->fs_info
->transaction_throttle
);
3473 * run through the loop again to free the refs on the leaves.
3474 * This is faster than doing it in the loop above because
3475 * the leaves are likely to be clustered together. We end up
3476 * working in nice chunks on the extent allocation tree.
3478 for (i
= 0; i
< refi
; i
++) {
3479 bytenr
= sorted
[i
].bytenr
;
3480 ret
= btrfs_free_extent(trans
, root
, bytenr
,
3481 blocksize
, eb
->start
,
3482 root_owner
, root_gen
, 0, 1);
3485 atomic_inc(&root
->fs_info
->throttle_gen
);
3486 wake_up(&root
->fs_info
->transaction_throttle
);
3493 * update the path to show we've processed the entire level 1
3494 * node. This will get saved into the root's drop_snapshot_progress
3495 * field so these drops are not repeated again if this transaction
3498 path
->slots
[1] = nritems
;
3503 * helper function for drop_snapshot, this walks down the tree dropping ref
3504 * counts as it goes.
3506 static noinline
int walk_down_tree(struct btrfs_trans_handle
*trans
,
3507 struct btrfs_root
*root
,
3508 struct btrfs_path
*path
, int *level
)
3514 struct extent_buffer
*next
;
3515 struct extent_buffer
*cur
;
3516 struct extent_buffer
*parent
;
3521 WARN_ON(*level
< 0);
3522 WARN_ON(*level
>= BTRFS_MAX_LEVEL
);
3523 ret
= drop_snap_lookup_refcount(trans
, root
, path
->nodes
[*level
]->start
,
3524 path
->nodes
[*level
]->len
, &refs
);
3530 * walk down to the last node level and free all the leaves
3532 while (*level
>= 0) {
3533 WARN_ON(*level
< 0);
3534 WARN_ON(*level
>= BTRFS_MAX_LEVEL
);
3535 cur
= path
->nodes
[*level
];
3537 if (btrfs_header_level(cur
) != *level
)
3540 if (path
->slots
[*level
] >=
3541 btrfs_header_nritems(cur
))
3544 /* the new code goes down to level 1 and does all the
3545 * leaves pointed to that node in bulk. So, this check
3546 * for level 0 will always be false.
3548 * But, the disk format allows the drop_snapshot_progress
3549 * field in the root to leave things in a state where
3550 * a leaf will need cleaning up here. If someone crashes
3551 * with the old code and then boots with the new code,
3552 * we might find a leaf here.
3555 ret
= btrfs_drop_leaf_ref(trans
, root
, cur
);
3561 * once we get to level one, process the whole node
3562 * at once, including everything below it.
3565 ret
= drop_level_one_refs(trans
, root
, path
);
3570 bytenr
= btrfs_node_blockptr(cur
, path
->slots
[*level
]);
3571 ptr_gen
= btrfs_node_ptr_generation(cur
, path
->slots
[*level
]);
3572 blocksize
= btrfs_level_size(root
, *level
- 1);
3574 ret
= drop_snap_lookup_refcount(trans
, root
, bytenr
,
3579 * if there is more than one reference, we don't need
3580 * to read that node to drop any references it has. We
3581 * just drop the ref we hold on that node and move on to the
3582 * next slot in this level.
3585 parent
= path
->nodes
[*level
];
3586 root_owner
= btrfs_header_owner(parent
);
3587 root_gen
= btrfs_header_generation(parent
);
3588 path
->slots
[*level
]++;
3590 ret
= btrfs_free_extent(trans
, root
, bytenr
,
3591 blocksize
, parent
->start
,
3592 root_owner
, root_gen
,
3596 atomic_inc(&root
->fs_info
->throttle_gen
);
3597 wake_up(&root
->fs_info
->transaction_throttle
);
3604 * we need to keep freeing things in the next level down.
3605 * read the block and loop around to process it
3607 next
= read_tree_block(root
, bytenr
, blocksize
, ptr_gen
);
3608 WARN_ON(*level
<= 0);
3609 if (path
->nodes
[*level
-1])
3610 free_extent_buffer(path
->nodes
[*level
-1]);
3611 path
->nodes
[*level
-1] = next
;
3612 *level
= btrfs_header_level(next
);
3613 path
->slots
[*level
] = 0;
3617 WARN_ON(*level
< 0);
3618 WARN_ON(*level
>= BTRFS_MAX_LEVEL
);
3620 if (path
->nodes
[*level
] == root
->node
) {
3621 parent
= path
->nodes
[*level
];
3622 bytenr
= path
->nodes
[*level
]->start
;
3624 parent
= path
->nodes
[*level
+ 1];
3625 bytenr
= btrfs_node_blockptr(parent
, path
->slots
[*level
+ 1]);
3628 blocksize
= btrfs_level_size(root
, *level
);
3629 root_owner
= btrfs_header_owner(parent
);
3630 root_gen
= btrfs_header_generation(parent
);
3633 * cleanup and free the reference on the last node
3636 ret
= btrfs_free_extent(trans
, root
, bytenr
, blocksize
,
3637 parent
->start
, root_owner
, root_gen
,
3639 free_extent_buffer(path
->nodes
[*level
]);
3640 path
->nodes
[*level
] = NULL
;
3650 * helper function for drop_subtree, this function is similar to
3651 * walk_down_tree. The main difference is that it checks reference
3652 * counts while tree blocks are locked.
3654 static noinline
int walk_down_subtree(struct btrfs_trans_handle
*trans
,
3655 struct btrfs_root
*root
,
3656 struct btrfs_path
*path
, int *level
)
3658 struct extent_buffer
*next
;
3659 struct extent_buffer
*cur
;
3660 struct extent_buffer
*parent
;
3667 cur
= path
->nodes
[*level
];
3668 ret
= btrfs_lookup_extent_ref(trans
, root
, cur
->start
, cur
->len
,
3674 while (*level
>= 0) {
3675 cur
= path
->nodes
[*level
];
3677 ret
= btrfs_drop_leaf_ref(trans
, root
, cur
);
3679 clean_tree_block(trans
, root
, cur
);
3682 if (path
->slots
[*level
] >= btrfs_header_nritems(cur
)) {
3683 clean_tree_block(trans
, root
, cur
);
3687 bytenr
= btrfs_node_blockptr(cur
, path
->slots
[*level
]);
3688 blocksize
= btrfs_level_size(root
, *level
- 1);
3689 ptr_gen
= btrfs_node_ptr_generation(cur
, path
->slots
[*level
]);
3691 next
= read_tree_block(root
, bytenr
, blocksize
, ptr_gen
);
3692 btrfs_tree_lock(next
);
3693 btrfs_set_lock_blocking(next
);
3695 ret
= btrfs_lookup_extent_ref(trans
, root
, bytenr
, blocksize
,
3699 parent
= path
->nodes
[*level
];
3700 ret
= btrfs_free_extent(trans
, root
, bytenr
,
3701 blocksize
, parent
->start
,
3702 btrfs_header_owner(parent
),
3703 btrfs_header_generation(parent
),
3706 path
->slots
[*level
]++;
3707 btrfs_tree_unlock(next
);
3708 free_extent_buffer(next
);
3712 *level
= btrfs_header_level(next
);
3713 path
->nodes
[*level
] = next
;
3714 path
->slots
[*level
] = 0;
3715 path
->locks
[*level
] = 1;
3719 parent
= path
->nodes
[*level
+ 1];
3720 bytenr
= path
->nodes
[*level
]->start
;
3721 blocksize
= path
->nodes
[*level
]->len
;
3723 ret
= btrfs_free_extent(trans
, root
, bytenr
, blocksize
,
3724 parent
->start
, btrfs_header_owner(parent
),
3725 btrfs_header_generation(parent
), *level
, 1);
3728 if (path
->locks
[*level
]) {
3729 btrfs_tree_unlock(path
->nodes
[*level
]);
3730 path
->locks
[*level
] = 0;
3732 free_extent_buffer(path
->nodes
[*level
]);
3733 path
->nodes
[*level
] = NULL
;
/*
 * helper for dropping snapshots.  This walks back up the tree in the path
 * to find the first node higher up where we haven't yet gone through
 * all the slots
 */
static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 int *level, int max_level)
{
3751 struct btrfs_root_item
*root_item
= &root
->root_item
;
3756 for (i
= *level
; i
< max_level
&& path
->nodes
[i
]; i
++) {
3757 slot
= path
->slots
[i
];
3758 if (slot
< btrfs_header_nritems(path
->nodes
[i
]) - 1) {
3759 struct extent_buffer
*node
;
3760 struct btrfs_disk_key disk_key
;
3763 * there is more work to do in this level.
3764 * Update the drop_progress marker to reflect
3765 * the work we've done so far, and then bump
3768 node
= path
->nodes
[i
];
3771 WARN_ON(*level
== 0);
3772 btrfs_node_key(node
, &disk_key
, path
->slots
[i
]);
3773 memcpy(&root_item
->drop_progress
,
3774 &disk_key
, sizeof(disk_key
));
3775 root_item
->drop_level
= i
;
3778 struct extent_buffer
*parent
;
3781 * this whole node is done, free our reference
3782 * on it and go up one level
3784 if (path
->nodes
[*level
] == root
->node
)
3785 parent
= path
->nodes
[*level
];
3787 parent
= path
->nodes
[*level
+ 1];
3789 root_owner
= btrfs_header_owner(parent
);
3790 root_gen
= btrfs_header_generation(parent
);
3792 clean_tree_block(trans
, root
, path
->nodes
[*level
]);
3793 ret
= btrfs_free_extent(trans
, root
,
3794 path
->nodes
[*level
]->start
,
3795 path
->nodes
[*level
]->len
,
3796 parent
->start
, root_owner
,
3797 root_gen
, *level
, 1);
3799 if (path
->locks
[*level
]) {
3800 btrfs_tree_unlock(path
->nodes
[*level
]);
3801 path
->locks
[*level
] = 0;
3803 free_extent_buffer(path
->nodes
[*level
]);
3804 path
->nodes
[*level
] = NULL
;
/*
 * drop the reference count on the tree rooted at 'snap'.  This traverses
 * the tree freeing any blocks that have a ref count of zero after being
 * decremented.
 */
int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root
			*root)
{
3822 struct btrfs_path
*path
;
3826 struct btrfs_root_item
*root_item
= &root
->root_item
;
3828 WARN_ON(!mutex_is_locked(&root
->fs_info
->drop_mutex
));
3829 path
= btrfs_alloc_path();
3832 level
= btrfs_header_level(root
->node
);
3834 if (btrfs_disk_key_objectid(&root_item
->drop_progress
) == 0) {
3835 path
->nodes
[level
] = root
->node
;
3836 extent_buffer_get(root
->node
);
3837 path
->slots
[level
] = 0;
3839 struct btrfs_key key
;
3840 struct btrfs_disk_key found_key
;
3841 struct extent_buffer
*node
;
3843 btrfs_disk_key_to_cpu(&key
, &root_item
->drop_progress
);
3844 level
= root_item
->drop_level
;
3845 path
->lowest_level
= level
;
3846 wret
= btrfs_search_slot(NULL
, root
, &key
, path
, 0, 0);
3851 node
= path
->nodes
[level
];
3852 btrfs_node_key(node
, &found_key
, path
->slots
[level
]);
3853 WARN_ON(memcmp(&found_key
, &root_item
->drop_progress
,
3854 sizeof(found_key
)));
3856 * unlock our path, this is safe because only this
3857 * function is allowed to delete this snapshot
3859 for (i
= 0; i
< BTRFS_MAX_LEVEL
; i
++) {
3860 if (path
->nodes
[i
] && path
->locks
[i
]) {
3862 btrfs_tree_unlock(path
->nodes
[i
]);
3867 unsigned long update
;
3868 wret
= walk_down_tree(trans
, root
, path
, &level
);
3874 wret
= walk_up_tree(trans
, root
, path
, &level
,
3880 if (trans
->transaction
->in_commit
||
3881 trans
->transaction
->delayed_refs
.flushing
) {
3885 atomic_inc(&root
->fs_info
->throttle_gen
);
3886 wake_up(&root
->fs_info
->transaction_throttle
);
3887 for (update_count
= 0; update_count
< 16; update_count
++) {
3888 update
= trans
->delayed_ref_updates
;
3889 trans
->delayed_ref_updates
= 0;
3891 btrfs_run_delayed_refs(trans
, root
, update
);
3896 for (i
= 0; i
<= orig_level
; i
++) {
3897 if (path
->nodes
[i
]) {
3898 free_extent_buffer(path
->nodes
[i
]);
3899 path
->nodes
[i
] = NULL
;
3903 btrfs_free_path(path
);
3907 int btrfs_drop_subtree(struct btrfs_trans_handle
*trans
,
3908 struct btrfs_root
*root
,
3909 struct extent_buffer
*node
,
3910 struct extent_buffer
*parent
)
3912 struct btrfs_path
*path
;
3918 path
= btrfs_alloc_path();
3921 btrfs_assert_tree_locked(parent
);
3922 parent_level
= btrfs_header_level(parent
);
3923 extent_buffer_get(parent
);
3924 path
->nodes
[parent_level
] = parent
;
3925 path
->slots
[parent_level
] = btrfs_header_nritems(parent
);
3927 btrfs_assert_tree_locked(node
);
3928 level
= btrfs_header_level(node
);
3929 extent_buffer_get(node
);
3930 path
->nodes
[level
] = node
;
3931 path
->slots
[level
] = 0;
3934 wret
= walk_down_subtree(trans
, root
, path
, &level
);
3940 wret
= walk_up_tree(trans
, root
, path
, &level
, parent_level
);
3947 btrfs_free_path(path
);
static unsigned long calc_ra(unsigned long start, unsigned long last,
			     unsigned long nr)
{
	return min(last, start + nr - 1);
}
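/*
 * Illustrative note (not from the original source): calc_ra() just clamps a
 * readahead window of 'nr' pages so it never runs past 'last'.  For example,
 * with start = 100 and nr = 32, a last of 200 gives 131 (start + nr - 1),
 * while a last of 120 gives 120.
 */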
3957 static noinline
int relocate_inode_pages(struct inode
*inode
, u64 start
,
3962 unsigned long first_index
;
3963 unsigned long last_index
;
3966 struct extent_io_tree
*io_tree
= &BTRFS_I(inode
)->io_tree
;
3967 struct file_ra_state
*ra
;
3968 struct btrfs_ordered_extent
*ordered
;
3969 unsigned int total_read
= 0;
3970 unsigned int total_dirty
= 0;
3973 ra
= kzalloc(sizeof(*ra
), GFP_NOFS
);
3975 mutex_lock(&inode
->i_mutex
);
3976 first_index
= start
>> PAGE_CACHE_SHIFT
;
3977 last_index
= (start
+ len
- 1) >> PAGE_CACHE_SHIFT
;
3979 /* make sure the dirty trick played by the caller work */
3980 ret
= invalidate_inode_pages2_range(inode
->i_mapping
,
3981 first_index
, last_index
);
3985 file_ra_state_init(ra
, inode
->i_mapping
);
3987 for (i
= first_index
; i
<= last_index
; i
++) {
3988 if (total_read
% ra
->ra_pages
== 0) {
3989 btrfs_force_ra(inode
->i_mapping
, ra
, NULL
, i
,
3990 calc_ra(i
, last_index
, ra
->ra_pages
));
3994 if (((u64
)i
<< PAGE_CACHE_SHIFT
) > i_size_read(inode
))
3996 page
= grab_cache_page(inode
->i_mapping
, i
);
4001 if (!PageUptodate(page
)) {
4002 btrfs_readpage(NULL
, page
);
4004 if (!PageUptodate(page
)) {
4006 page_cache_release(page
);
4011 wait_on_page_writeback(page
);
4013 page_start
= (u64
)page
->index
<< PAGE_CACHE_SHIFT
;
4014 page_end
= page_start
+ PAGE_CACHE_SIZE
- 1;
4015 lock_extent(io_tree
, page_start
, page_end
, GFP_NOFS
);
4017 ordered
= btrfs_lookup_ordered_extent(inode
, page_start
);
4019 unlock_extent(io_tree
, page_start
, page_end
, GFP_NOFS
);
4021 page_cache_release(page
);
4022 btrfs_start_ordered_extent(inode
, ordered
, 1);
4023 btrfs_put_ordered_extent(ordered
);
4026 set_page_extent_mapped(page
);
4028 if (i
== first_index
)
4029 set_extent_bits(io_tree
, page_start
, page_end
,
4030 EXTENT_BOUNDARY
, GFP_NOFS
);
4031 btrfs_set_extent_delalloc(inode
, page_start
, page_end
);
4033 set_page_dirty(page
);
4036 unlock_extent(io_tree
, page_start
, page_end
, GFP_NOFS
);
4038 page_cache_release(page
);
4043 mutex_unlock(&inode
->i_mutex
);
4044 balance_dirty_pages_ratelimited_nr(inode
->i_mapping
, total_dirty
);
4048 static noinline
int relocate_data_extent(struct inode
*reloc_inode
,
4049 struct btrfs_key
*extent_key
,
4052 struct btrfs_root
*root
= BTRFS_I(reloc_inode
)->root
;
4053 struct extent_map_tree
*em_tree
= &BTRFS_I(reloc_inode
)->extent_tree
;
4054 struct extent_map
*em
;
4055 u64 start
= extent_key
->objectid
- offset
;
4056 u64 end
= start
+ extent_key
->offset
- 1;
4058 em
= alloc_extent_map(GFP_NOFS
);
4059 BUG_ON(!em
|| IS_ERR(em
));
4062 em
->len
= extent_key
->offset
;
4063 em
->block_len
= extent_key
->offset
;
4064 em
->block_start
= extent_key
->objectid
;
4065 em
->bdev
= root
->fs_info
->fs_devices
->latest_bdev
;
4066 set_bit(EXTENT_FLAG_PINNED
, &em
->flags
);
4068 /* setup extent map to cheat btrfs_readpage */
4069 lock_extent(&BTRFS_I(reloc_inode
)->io_tree
, start
, end
, GFP_NOFS
);
4072 spin_lock(&em_tree
->lock
);
4073 ret
= add_extent_mapping(em_tree
, em
);
4074 spin_unlock(&em_tree
->lock
);
4075 if (ret
!= -EEXIST
) {
4076 free_extent_map(em
);
4079 btrfs_drop_extent_cache(reloc_inode
, start
, end
, 0);
4081 unlock_extent(&BTRFS_I(reloc_inode
)->io_tree
, start
, end
, GFP_NOFS
);
4083 return relocate_inode_pages(reloc_inode
, start
, extent_key
->offset
);
4086 struct btrfs_ref_path
{
4088 u64 nodes
[BTRFS_MAX_LEVEL
];
4090 u64 root_generation
;
4097 struct btrfs_key node_keys
[BTRFS_MAX_LEVEL
];
4098 u64 new_nodes
[BTRFS_MAX_LEVEL
];
4101 struct disk_extent
{
static int is_cowonly_root(u64 root_objectid)
{
	if (root_objectid == BTRFS_ROOT_TREE_OBJECTID ||
	    root_objectid == BTRFS_EXTENT_TREE_OBJECTID ||
	    root_objectid == BTRFS_CHUNK_TREE_OBJECTID ||
	    root_objectid == BTRFS_DEV_TREE_OBJECTID ||
	    root_objectid == BTRFS_TREE_LOG_OBJECTID ||
	    root_objectid == BTRFS_CSUM_TREE_OBJECTID)
		return 1;
	return 0;
}
4124 static noinline
int __next_ref_path(struct btrfs_trans_handle
*trans
,
4125 struct btrfs_root
*extent_root
,
4126 struct btrfs_ref_path
*ref_path
,
4129 struct extent_buffer
*leaf
;
4130 struct btrfs_path
*path
;
4131 struct btrfs_extent_ref
*ref
;
4132 struct btrfs_key key
;
4133 struct btrfs_key found_key
;
4139 path
= btrfs_alloc_path();
4144 ref_path
->lowest_level
= -1;
4145 ref_path
->current_level
= -1;
4146 ref_path
->shared_level
= -1;
4150 level
= ref_path
->current_level
- 1;
4151 while (level
>= -1) {
4153 if (level
< ref_path
->lowest_level
)
4157 bytenr
= ref_path
->nodes
[level
];
4159 bytenr
= ref_path
->extent_start
;
4160 BUG_ON(bytenr
== 0);
4162 parent
= ref_path
->nodes
[level
+ 1];
4163 ref_path
->nodes
[level
+ 1] = 0;
4164 ref_path
->current_level
= level
;
4165 BUG_ON(parent
== 0);
4167 key
.objectid
= bytenr
;
4168 key
.offset
= parent
+ 1;
4169 key
.type
= BTRFS_EXTENT_REF_KEY
;
4171 ret
= btrfs_search_slot(trans
, extent_root
, &key
, path
, 0, 0);
4176 leaf
= path
->nodes
[0];
4177 nritems
= btrfs_header_nritems(leaf
);
4178 if (path
->slots
[0] >= nritems
) {
4179 ret
= btrfs_next_leaf(extent_root
, path
);
4184 leaf
= path
->nodes
[0];
4187 btrfs_item_key_to_cpu(leaf
, &found_key
, path
->slots
[0]);
4188 if (found_key
.objectid
== bytenr
&&
4189 found_key
.type
== BTRFS_EXTENT_REF_KEY
) {
4190 if (level
< ref_path
->shared_level
)
4191 ref_path
->shared_level
= level
;
4196 btrfs_release_path(extent_root
, path
);
4199 /* reached lowest level */
4203 level
= ref_path
->current_level
;
4204 while (level
< BTRFS_MAX_LEVEL
- 1) {
4208 bytenr
= ref_path
->nodes
[level
];
4210 bytenr
= ref_path
->extent_start
;
4212 BUG_ON(bytenr
== 0);
4214 key
.objectid
= bytenr
;
4216 key
.type
= BTRFS_EXTENT_REF_KEY
;
4218 ret
= btrfs_search_slot(trans
, extent_root
, &key
, path
, 0, 0);
4222 leaf
= path
->nodes
[0];
4223 nritems
= btrfs_header_nritems(leaf
);
4224 if (path
->slots
[0] >= nritems
) {
4225 ret
= btrfs_next_leaf(extent_root
, path
);
4229 /* the extent was freed by someone */
4230 if (ref_path
->lowest_level
== level
)
4232 btrfs_release_path(extent_root
, path
);
4235 leaf
= path
->nodes
[0];
4238 btrfs_item_key_to_cpu(leaf
, &found_key
, path
->slots
[0]);
4239 if (found_key
.objectid
!= bytenr
||
4240 found_key
.type
!= BTRFS_EXTENT_REF_KEY
) {
4241 /* the extent was freed by someone */
4242 if (ref_path
->lowest_level
== level
) {
4246 btrfs_release_path(extent_root
, path
);
4250 ref
= btrfs_item_ptr(leaf
, path
->slots
[0],
4251 struct btrfs_extent_ref
);
4252 ref_objectid
= btrfs_ref_objectid(leaf
, ref
);
4253 if (ref_objectid
< BTRFS_FIRST_FREE_OBJECTID
) {
4255 level
= (int)ref_objectid
;
4256 BUG_ON(level
>= BTRFS_MAX_LEVEL
);
4257 ref_path
->lowest_level
= level
;
4258 ref_path
->current_level
= level
;
4259 ref_path
->nodes
[level
] = bytenr
;
4261 WARN_ON(ref_objectid
!= level
);
4264 WARN_ON(level
!= -1);
4268 if (ref_path
->lowest_level
== level
) {
4269 ref_path
->owner_objectid
= ref_objectid
;
4270 ref_path
->num_refs
= btrfs_ref_num_refs(leaf
, ref
);
4274 * the block is tree root or the block isn't in reference
4277 if (found_key
.objectid
== found_key
.offset
||
4278 is_cowonly_root(btrfs_ref_root(leaf
, ref
))) {
4279 ref_path
->root_objectid
= btrfs_ref_root(leaf
, ref
);
4280 ref_path
->root_generation
=
4281 btrfs_ref_generation(leaf
, ref
);
4283 /* special reference from the tree log */
4284 ref_path
->nodes
[0] = found_key
.offset
;
4285 ref_path
->current_level
= 0;
4292 BUG_ON(ref_path
->nodes
[level
] != 0);
4293 ref_path
->nodes
[level
] = found_key
.offset
;
4294 ref_path
->current_level
= level
;
4297 * the reference was created in the running transaction,
4298 * no need to continue walking up.
4300 if (btrfs_ref_generation(leaf
, ref
) == trans
->transid
) {
4301 ref_path
->root_objectid
= btrfs_ref_root(leaf
, ref
);
4302 ref_path
->root_generation
=
4303 btrfs_ref_generation(leaf
, ref
);
4308 btrfs_release_path(extent_root
, path
);
4311 /* reached max tree level, but no tree root found. */
4314 btrfs_free_path(path
);
static int btrfs_first_ref_path(struct btrfs_trans_handle *trans,
				struct btrfs_root *extent_root,
				struct btrfs_ref_path *ref_path,
				u64 extent_start)
{
	memset(ref_path, 0, sizeof(*ref_path));
	ref_path->extent_start = extent_start;

	return __next_ref_path(trans, extent_root, ref_path, 1);
}

static int btrfs_next_ref_path(struct btrfs_trans_handle *trans,
			       struct btrfs_root *extent_root,
			       struct btrfs_ref_path *ref_path)
{
	return __next_ref_path(trans, extent_root, ref_path, 0);
}
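/*
 * Illustrative usage sketch (not from the original source): the relocation
 * code walks every reference path of an extent with a first/next loop of
 * roughly this shape (error handling omitted):
 *
 *	ret = btrfs_first_ref_path(trans, extent_root, ref_path,
 *				   extent_key->objectid);
 *	while (ret == 0) {
 *		... process one reference path ...
 *		ret = btrfs_next_ref_path(trans, extent_root, ref_path);
 *	}
 *	if (ret < 0)
 *		... error ...	(a positive return simply means "no more paths")
 *
 * The actual relocation caller checks ret and the ref_path fields more
 * carefully than this simplified loop does.
 */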
4336 static noinline
int get_new_locations(struct inode
*reloc_inode
,
4337 struct btrfs_key
*extent_key
,
4338 u64 offset
, int no_fragment
,
4339 struct disk_extent
**extents
,
4342 struct btrfs_root
*root
= BTRFS_I(reloc_inode
)->root
;
4343 struct btrfs_path
*path
;
4344 struct btrfs_file_extent_item
*fi
;
4345 struct extent_buffer
*leaf
;
4346 struct disk_extent
*exts
= *extents
;
4347 struct btrfs_key found_key
;
4352 int max
= *nr_extents
;
4355 WARN_ON(!no_fragment
&& *extents
);
4358 exts
= kmalloc(sizeof(*exts
) * max
, GFP_NOFS
);
4363 path
= btrfs_alloc_path();
4366 cur_pos
= extent_key
->objectid
- offset
;
4367 last_byte
= extent_key
->objectid
+ extent_key
->offset
;
4368 ret
= btrfs_lookup_file_extent(NULL
, root
, path
, reloc_inode
->i_ino
,
4378 leaf
= path
->nodes
[0];
4379 nritems
= btrfs_header_nritems(leaf
);
4380 if (path
->slots
[0] >= nritems
) {
4381 ret
= btrfs_next_leaf(root
, path
);
4386 leaf
= path
->nodes
[0];
4389 btrfs_item_key_to_cpu(leaf
, &found_key
, path
->slots
[0]);
4390 if (found_key
.offset
!= cur_pos
||
4391 found_key
.type
!= BTRFS_EXTENT_DATA_KEY
||
4392 found_key
.objectid
!= reloc_inode
->i_ino
)
4395 fi
= btrfs_item_ptr(leaf
, path
->slots
[0],
4396 struct btrfs_file_extent_item
);
4397 if (btrfs_file_extent_type(leaf
, fi
) !=
4398 BTRFS_FILE_EXTENT_REG
||
4399 btrfs_file_extent_disk_bytenr(leaf
, fi
) == 0)
4403 struct disk_extent
*old
= exts
;
4405 exts
= kzalloc(sizeof(*exts
) * max
, GFP_NOFS
);
4406 memcpy(exts
, old
, sizeof(*exts
) * nr
);
4407 if (old
!= *extents
)
4411 exts
[nr
].disk_bytenr
=
4412 btrfs_file_extent_disk_bytenr(leaf
, fi
);
4413 exts
[nr
].disk_num_bytes
=
4414 btrfs_file_extent_disk_num_bytes(leaf
, fi
);
4415 exts
[nr
].offset
= btrfs_file_extent_offset(leaf
, fi
);
4416 exts
[nr
].num_bytes
= btrfs_file_extent_num_bytes(leaf
, fi
);
4417 exts
[nr
].ram_bytes
= btrfs_file_extent_ram_bytes(leaf
, fi
);
4418 exts
[nr
].compression
= btrfs_file_extent_compression(leaf
, fi
);
4419 exts
[nr
].encryption
= btrfs_file_extent_encryption(leaf
, fi
);
4420 exts
[nr
].other_encoding
= btrfs_file_extent_other_encoding(leaf
,
4422 BUG_ON(exts
[nr
].offset
> 0);
4423 BUG_ON(exts
[nr
].compression
|| exts
[nr
].encryption
);
4424 BUG_ON(exts
[nr
].num_bytes
!= exts
[nr
].disk_num_bytes
);
4426 cur_pos
+= exts
[nr
].num_bytes
;
4429 if (cur_pos
+ offset
>= last_byte
)
4439 BUG_ON(cur_pos
+ offset
> last_byte
);
4440 if (cur_pos
+ offset
< last_byte
) {
4446 btrfs_free_path(path
);
4448 if (exts
!= *extents
)
4457 static noinline
int replace_one_extent(struct btrfs_trans_handle
*trans
,
4458 struct btrfs_root
*root
,
4459 struct btrfs_path
*path
,
4460 struct btrfs_key
*extent_key
,
4461 struct btrfs_key
*leaf_key
,
4462 struct btrfs_ref_path
*ref_path
,
4463 struct disk_extent
*new_extents
,
4466 struct extent_buffer
*leaf
;
4467 struct btrfs_file_extent_item
*fi
;
4468 struct inode
*inode
= NULL
;
4469 struct btrfs_key key
;
4474 u64 search_end
= (u64
)-1;
4477 int extent_locked
= 0;
4481 memcpy(&key
, leaf_key
, sizeof(key
));
4482 if (ref_path
->owner_objectid
!= BTRFS_MULTIPLE_OBJECTIDS
) {
4483 if (key
.objectid
< ref_path
->owner_objectid
||
4484 (key
.objectid
== ref_path
->owner_objectid
&&
4485 key
.type
< BTRFS_EXTENT_DATA_KEY
)) {
4486 key
.objectid
= ref_path
->owner_objectid
;
4487 key
.type
= BTRFS_EXTENT_DATA_KEY
;
4493 ret
= btrfs_search_slot(trans
, root
, &key
, path
, 0, 1);
4497 leaf
= path
->nodes
[0];
4498 nritems
= btrfs_header_nritems(leaf
);
4500 if (extent_locked
&& ret
> 0) {
4502 * the file extent item was modified by someone
4503 * before the extent got locked.
4505 unlock_extent(&BTRFS_I(inode
)->io_tree
, lock_start
,
4506 lock_end
, GFP_NOFS
);
4510 if (path
->slots
[0] >= nritems
) {
4511 if (++nr_scaned
> 2)
4514 BUG_ON(extent_locked
);
4515 ret
= btrfs_next_leaf(root
, path
);
4520 leaf
= path
->nodes
[0];
4521 nritems
= btrfs_header_nritems(leaf
);
4524 btrfs_item_key_to_cpu(leaf
, &key
, path
->slots
[0]);
4526 if (ref_path
->owner_objectid
!= BTRFS_MULTIPLE_OBJECTIDS
) {
4527 if ((key
.objectid
> ref_path
->owner_objectid
) ||
4528 (key
.objectid
== ref_path
->owner_objectid
&&
4529 key
.type
> BTRFS_EXTENT_DATA_KEY
) ||
4530 key
.offset
>= search_end
)
4534 if (inode
&& key
.objectid
!= inode
->i_ino
) {
4535 BUG_ON(extent_locked
);
4536 btrfs_release_path(root
, path
);
4537 mutex_unlock(&inode
->i_mutex
);
4543 if (key
.type
!= BTRFS_EXTENT_DATA_KEY
) {
4548 fi
= btrfs_item_ptr(leaf
, path
->slots
[0],
4549 struct btrfs_file_extent_item
);
4550 extent_type
= btrfs_file_extent_type(leaf
, fi
);
4551 if ((extent_type
!= BTRFS_FILE_EXTENT_REG
&&
4552 extent_type
!= BTRFS_FILE_EXTENT_PREALLOC
) ||
4553 (btrfs_file_extent_disk_bytenr(leaf
, fi
) !=
4554 extent_key
->objectid
)) {
4560 num_bytes
= btrfs_file_extent_num_bytes(leaf
, fi
);
4561 ext_offset
= btrfs_file_extent_offset(leaf
, fi
);
4563 if (search_end
== (u64
)-1) {
4564 search_end
= key
.offset
- ext_offset
+
4565 btrfs_file_extent_ram_bytes(leaf
, fi
);
4568 if (!extent_locked
) {
4569 lock_start
= key
.offset
;
4570 lock_end
= lock_start
+ num_bytes
- 1;
4572 if (lock_start
> key
.offset
||
4573 lock_end
+ 1 < key
.offset
+ num_bytes
) {
4574 unlock_extent(&BTRFS_I(inode
)->io_tree
,
4575 lock_start
, lock_end
, GFP_NOFS
);
4581 btrfs_release_path(root
, path
);
4583 inode
= btrfs_iget_locked(root
->fs_info
->sb
,
4584 key
.objectid
, root
);
4585 if (inode
->i_state
& I_NEW
) {
4586 BTRFS_I(inode
)->root
= root
;
4587 BTRFS_I(inode
)->location
.objectid
=
4589 BTRFS_I(inode
)->location
.type
=
4590 BTRFS_INODE_ITEM_KEY
;
4591 BTRFS_I(inode
)->location
.offset
= 0;
4592 btrfs_read_locked_inode(inode
);
4593 unlock_new_inode(inode
);
4596 * some code call btrfs_commit_transaction while
4597 * holding the i_mutex, so we can't use mutex_lock
4600 if (is_bad_inode(inode
) ||
4601 !mutex_trylock(&inode
->i_mutex
)) {
4604 key
.offset
= (u64
)-1;
4609 if (!extent_locked
) {
4610 struct btrfs_ordered_extent
*ordered
;
4612 btrfs_release_path(root
, path
);
4614 lock_extent(&BTRFS_I(inode
)->io_tree
, lock_start
,
4615 lock_end
, GFP_NOFS
);
4616 ordered
= btrfs_lookup_first_ordered_extent(inode
,
4619 ordered
->file_offset
<= lock_end
&&
4620 ordered
->file_offset
+ ordered
->len
> lock_start
) {
4621 unlock_extent(&BTRFS_I(inode
)->io_tree
,
4622 lock_start
, lock_end
, GFP_NOFS
);
4623 btrfs_start_ordered_extent(inode
, ordered
, 1);
4624 btrfs_put_ordered_extent(ordered
);
4625 key
.offset
+= num_bytes
;
4629 btrfs_put_ordered_extent(ordered
);
4635 if (nr_extents
== 1) {
4636 /* update extent pointer in place */
4637 btrfs_set_file_extent_disk_bytenr(leaf
, fi
,
4638 new_extents
[0].disk_bytenr
);
4639 btrfs_set_file_extent_disk_num_bytes(leaf
, fi
,
4640 new_extents
[0].disk_num_bytes
);
4641 btrfs_mark_buffer_dirty(leaf
);
4643 btrfs_drop_extent_cache(inode
, key
.offset
,
4644 key
.offset
+ num_bytes
- 1, 0);
4646 ret
= btrfs_inc_extent_ref(trans
, root
,
4647 new_extents
[0].disk_bytenr
,
4648 new_extents
[0].disk_num_bytes
,
4650 root
->root_key
.objectid
,
4655 ret
= btrfs_free_extent(trans
, root
,
4656 extent_key
->objectid
,
4659 btrfs_header_owner(leaf
),
4660 btrfs_header_generation(leaf
),
4664 btrfs_release_path(root
, path
);
4665 key
.offset
+= num_bytes
;
4673 * drop old extent pointer at first, then insert the
4674 * new pointers one bye one
4676 btrfs_release_path(root
, path
);
4677 ret
= btrfs_drop_extents(trans
, root
, inode
, key
.offset
,
4678 key
.offset
+ num_bytes
,
4679 key
.offset
, &alloc_hint
);
4682 for (i
= 0; i
< nr_extents
; i
++) {
4683 if (ext_offset
>= new_extents
[i
].num_bytes
) {
4684 ext_offset
-= new_extents
[i
].num_bytes
;
4687 extent_len
= min(new_extents
[i
].num_bytes
-
4688 ext_offset
, num_bytes
);
4690 ret
= btrfs_insert_empty_item(trans
, root
,
4695 leaf
= path
->nodes
[0];
4696 fi
= btrfs_item_ptr(leaf
, path
->slots
[0],
4697 struct btrfs_file_extent_item
);
4698 btrfs_set_file_extent_generation(leaf
, fi
,
4700 btrfs_set_file_extent_type(leaf
, fi
,
4701 BTRFS_FILE_EXTENT_REG
);
4702 btrfs_set_file_extent_disk_bytenr(leaf
, fi
,
4703 new_extents
[i
].disk_bytenr
);
4704 btrfs_set_file_extent_disk_num_bytes(leaf
, fi
,
4705 new_extents
[i
].disk_num_bytes
);
4706 btrfs_set_file_extent_ram_bytes(leaf
, fi
,
4707 new_extents
[i
].ram_bytes
);
4709 btrfs_set_file_extent_compression(leaf
, fi
,
4710 new_extents
[i
].compression
);
4711 btrfs_set_file_extent_encryption(leaf
, fi
,
4712 new_extents
[i
].encryption
);
4713 btrfs_set_file_extent_other_encoding(leaf
, fi
,
4714 new_extents
[i
].other_encoding
);
4716 btrfs_set_file_extent_num_bytes(leaf
, fi
,
4718 ext_offset
+= new_extents
[i
].offset
;
4719 btrfs_set_file_extent_offset(leaf
, fi
,
4721 btrfs_mark_buffer_dirty(leaf
);
4723 btrfs_drop_extent_cache(inode
, key
.offset
,
4724 key
.offset
+ extent_len
- 1, 0);
4726 ret
= btrfs_inc_extent_ref(trans
, root
,
4727 new_extents
[i
].disk_bytenr
,
4728 new_extents
[i
].disk_num_bytes
,
4730 root
->root_key
.objectid
,
4731 trans
->transid
, key
.objectid
);
4733 btrfs_release_path(root
, path
);
4735 inode_add_bytes(inode
, extent_len
);
4738 num_bytes
-= extent_len
;
4739 key
.offset
+= extent_len
;
4744 BUG_ON(i
>= nr_extents
);
4748 if (extent_locked
) {
4749 unlock_extent(&BTRFS_I(inode
)->io_tree
, lock_start
,
4750 lock_end
, GFP_NOFS
);
4754 if (ref_path
->owner_objectid
!= BTRFS_MULTIPLE_OBJECTIDS
&&
4755 key
.offset
>= search_end
)
4762 btrfs_release_path(root
, path
);
4764 mutex_unlock(&inode
->i_mutex
);
4765 if (extent_locked
) {
4766 unlock_extent(&BTRFS_I(inode
)->io_tree
, lock_start
,
4767 lock_end
, GFP_NOFS
);
int btrfs_reloc_tree_cache_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct extent_buffer *buf, u64 orig_start)
{
	BUG_ON(btrfs_header_generation(buf) != trans->transid);
	BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);

	level = btrfs_header_level(buf);

		struct btrfs_leaf_ref *ref;
		struct btrfs_leaf_ref *orig_ref;

		orig_ref = btrfs_lookup_leaf_ref(root, orig_start);

		ref = btrfs_alloc_leaf_ref(root, orig_ref->nritems);

			btrfs_free_leaf_ref(root, orig_ref);

		ref->nritems = orig_ref->nritems;
		memcpy(ref->extents, orig_ref->extents,
		       sizeof(ref->extents[0]) * ref->nritems);

		btrfs_free_leaf_ref(root, orig_ref);

		ref->root_gen = trans->transid;
		ref->bytenr = buf->start;
		ref->owner = btrfs_header_owner(buf);
		ref->generation = btrfs_header_generation(buf);

		ret = btrfs_add_leaf_ref(root, ref, 0);

		btrfs_free_leaf_ref(root, ref);
static noinline int invalidate_extent_cache(struct btrfs_root *root,
					    struct extent_buffer *leaf,
					    struct btrfs_block_group_cache *group,
					    struct btrfs_root *target_root)
{
	struct btrfs_key key;
	struct inode *inode = NULL;
	struct btrfs_file_extent_item *fi;

	u64 skip_objectid = 0;

	nritems = btrfs_header_nritems(leaf);
	for (i = 0; i < nritems; i++) {
		btrfs_item_key_to_cpu(leaf, &key, i);
		if (key.objectid == skip_objectid ||
		    key.type != BTRFS_EXTENT_DATA_KEY)

		fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
		if (btrfs_file_extent_type(leaf, fi) ==
		    BTRFS_FILE_EXTENT_INLINE)

		if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0)

		if (!inode || inode->i_ino != key.objectid) {

			inode = btrfs_ilookup(target_root->fs_info->sb,
					      key.objectid, target_root, 1);

			skip_objectid = key.objectid;

		num_bytes = btrfs_file_extent_num_bytes(leaf, fi);

		lock_extent(&BTRFS_I(inode)->io_tree, key.offset,
			    key.offset + num_bytes - 1, GFP_NOFS);
		btrfs_drop_extent_cache(inode, key.offset,
					key.offset + num_bytes - 1, 1);
		unlock_extent(&BTRFS_I(inode)->io_tree, key.offset,
			      key.offset + num_bytes - 1, GFP_NOFS);
static noinline int replace_extents_in_leaf(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    struct extent_buffer *leaf,
					    struct btrfs_block_group_cache *group,
					    struct inode *reloc_inode)
{
	struct btrfs_key key;
	struct btrfs_key extent_key;
	struct btrfs_file_extent_item *fi;
	struct btrfs_leaf_ref *ref;
	struct disk_extent *new_extent;

	new_extent = kmalloc(sizeof(*new_extent), GFP_NOFS);
	BUG_ON(!new_extent);

	ref = btrfs_lookup_leaf_ref(root, leaf->start);

	nritems = btrfs_header_nritems(leaf);
	for (i = 0; i < nritems; i++) {
		btrfs_item_key_to_cpu(leaf, &key, i);
		if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)

		fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
		if (btrfs_file_extent_type(leaf, fi) ==
		    BTRFS_FILE_EXTENT_INLINE)

		bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
		num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);

		if (bytenr >= group->key.objectid + group->key.offset ||
		    bytenr + num_bytes <= group->key.objectid)

		extent_key.objectid = bytenr;
		extent_key.offset = num_bytes;
		extent_key.type = BTRFS_EXTENT_ITEM_KEY;

		ret = get_new_locations(reloc_inode, &extent_key,
					group->key.objectid, 1,
					&new_extent, &nr_extent);

		BUG_ON(ref->extents[ext_index].bytenr != bytenr);
		BUG_ON(ref->extents[ext_index].num_bytes != num_bytes);
		ref->extents[ext_index].bytenr = new_extent->disk_bytenr;
		ref->extents[ext_index].num_bytes = new_extent->disk_num_bytes;

		btrfs_set_file_extent_disk_bytenr(leaf, fi,
						  new_extent->disk_bytenr);
		btrfs_set_file_extent_disk_num_bytes(leaf, fi,
						     new_extent->disk_num_bytes);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_inc_extent_ref(trans, root,
					   new_extent->disk_bytenr,
					   new_extent->disk_num_bytes,

					   root->root_key.objectid,
					   trans->transid, key.objectid);

		ret = btrfs_free_extent(trans, root,
					bytenr, num_bytes, leaf->start,
					btrfs_header_owner(leaf),
					btrfs_header_generation(leaf),

	BUG_ON(ext_index + 1 != ref->nritems);
	btrfs_free_leaf_ref(root, ref);
int btrfs_free_reloc_root(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root)
{
	struct btrfs_root *reloc_root;

	if (root->reloc_root) {
		reloc_root = root->reloc_root;
		root->reloc_root = NULL;
		list_add(&reloc_root->dead_list,
			 &root->fs_info->dead_reloc_roots);

		btrfs_set_root_bytenr(&reloc_root->root_item,
				      reloc_root->node->start);
		btrfs_set_root_level(&root->root_item,
				     btrfs_header_level(reloc_root->node));
		memset(&reloc_root->root_item.drop_progress, 0,
		       sizeof(struct btrfs_disk_key));
		reloc_root->root_item.drop_level = 0;

		ret = btrfs_update_root(trans, root->fs_info->tree_root,
					&reloc_root->root_key,
					&reloc_root->root_item);
int btrfs_drop_dead_reloc_roots(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *reloc_root;
	struct btrfs_root *prev_root = NULL;
	struct list_head dead_roots;

	INIT_LIST_HEAD(&dead_roots);
	list_splice_init(&root->fs_info->dead_reloc_roots, &dead_roots);

	while (!list_empty(&dead_roots)) {
		reloc_root = list_entry(dead_roots.prev,
					struct btrfs_root, dead_list);
		list_del_init(&reloc_root->dead_list);

		BUG_ON(reloc_root->commit_root != NULL);

		trans = btrfs_join_transaction(root, 1);

		mutex_lock(&root->fs_info->drop_mutex);
		ret = btrfs_drop_snapshot(trans, reloc_root);

		mutex_unlock(&root->fs_info->drop_mutex);

		nr = trans->blocks_used;
		ret = btrfs_end_transaction(trans, root);

		btrfs_btree_balance_dirty(root, nr);

		free_extent_buffer(reloc_root->node);

		ret = btrfs_del_root(trans, root->fs_info->tree_root,
				     &reloc_root->root_key);

		mutex_unlock(&root->fs_info->drop_mutex);

		nr = trans->blocks_used;
		ret = btrfs_end_transaction(trans, root);

		btrfs_btree_balance_dirty(root, nr);

		prev_root = reloc_root;

	btrfs_remove_leaf_refs(prev_root, (u64)-1, 0);
int btrfs_add_dead_reloc_root(struct btrfs_root *root)
{
	list_add(&root->dead_list, &root->fs_info->dead_reloc_roots);
int btrfs_cleanup_reloc_trees(struct btrfs_root *root)
{
	struct btrfs_root *reloc_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_key location;

	mutex_lock(&root->fs_info->tree_reloc_mutex);
	ret = btrfs_find_dead_roots(root, BTRFS_TREE_RELOC_OBJECTID, NULL);

	found = !list_empty(&root->fs_info->dead_reloc_roots);
	mutex_unlock(&root->fs_info->tree_reloc_mutex);

	trans = btrfs_start_transaction(root, 1);

	ret = btrfs_commit_transaction(trans, root);

	location.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
	location.offset = (u64)-1;
	location.type = BTRFS_ROOT_ITEM_KEY;

	reloc_root = btrfs_read_fs_root_no_name(root->fs_info, &location);
	BUG_ON(!reloc_root);
	btrfs_orphan_cleanup(reloc_root);
static noinline int init_reloc_tree(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root)
{
	struct btrfs_root *reloc_root;
	struct extent_buffer *eb;
	struct btrfs_root_item *root_item;
	struct btrfs_key root_key;

	BUG_ON(!root->ref_cows);
	if (root->reloc_root)

	root_item = kmalloc(sizeof(*root_item), GFP_NOFS);

	ret = btrfs_copy_root(trans, root, root->commit_root,
			      &eb, BTRFS_TREE_RELOC_OBJECTID);

	root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
	root_key.offset = root->root_key.objectid;
	root_key.type = BTRFS_ROOT_ITEM_KEY;

	memcpy(root_item, &root->root_item, sizeof(*root_item));
	btrfs_set_root_refs(root_item, 0);
	btrfs_set_root_bytenr(root_item, eb->start);
	btrfs_set_root_level(root_item, btrfs_header_level(eb));
	btrfs_set_root_generation(root_item, trans->transid);

	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	ret = btrfs_insert_root(trans, root->fs_info->tree_root,
				&root_key, root_item);

	reloc_root = btrfs_read_fs_root_no_radix(root->fs_info->tree_root,
						 &root_key);
	BUG_ON(!reloc_root);
	reloc_root->last_trans = trans->transid;
	reloc_root->commit_root = NULL;
	reloc_root->ref_tree = &root->fs_info->reloc_ref_tree;

	root->reloc_root = reloc_root;
/*
 * Core function of space balance.
 *
 * The idea is to use reloc trees to relocate tree blocks in reference
 * counted roots. There is one reloc tree for each subvol, and all
 * reloc trees share the same root key objectid. Reloc trees are snapshots
 * of the latest committed roots of subvols (root->commit_root).
 *
 * To relocate a tree block referenced by a subvol, there are two steps:
 * COW the block through the subvol's reloc tree, then update the block
 * pointer in the subvol to point to the new block. Since all reloc trees
 * share the same root key objectid, special handling for tree blocks
 * owned by them is easy. Once a tree block has been COWed in one reloc
 * tree, we can use the resulting new block directly when the same block
 * is required to be COWed again through other reloc trees. This way,
 * relocated tree blocks are shared between reloc trees, so they are also
 * shared between snapshots.
 */
static noinline int relocate_one_path(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct btrfs_key *first_key,
				      struct btrfs_ref_path *ref_path,
				      struct btrfs_block_group_cache *group,
				      struct inode *reloc_inode)
{
	struct btrfs_root *reloc_root;
	struct extent_buffer *eb = NULL;
	struct btrfs_key *keys;

	int lowest_level = 0;

	if (ref_path->owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
		lowest_level = ref_path->owner_objectid;

	if (!root->ref_cows) {
		path->lowest_level = lowest_level;
		ret = btrfs_search_slot(trans, root, first_key, path, 0, 1);

		path->lowest_level = 0;
		btrfs_release_path(root, path);

	mutex_lock(&root->fs_info->tree_reloc_mutex);
	ret = init_reloc_tree(trans, root);

	reloc_root = root->reloc_root;

	shared_level = ref_path->shared_level;
	ref_path->shared_level = BTRFS_MAX_LEVEL - 1;

	keys = ref_path->node_keys;
	nodes = ref_path->new_nodes;
	memset(&keys[shared_level + 1], 0,
	       sizeof(*keys) * (BTRFS_MAX_LEVEL - shared_level - 1));
	memset(&nodes[shared_level + 1], 0,
	       sizeof(*nodes) * (BTRFS_MAX_LEVEL - shared_level - 1));

	if (nodes[lowest_level] == 0) {
		path->lowest_level = lowest_level;
		ret = btrfs_search_slot(trans, reloc_root, first_key, path,

		for (level = lowest_level; level < BTRFS_MAX_LEVEL; level++) {
			eb = path->nodes[level];
			if (!eb || eb == reloc_root->node)

			nodes[level] = eb->start;
			if (level == 0)
				btrfs_item_key_to_cpu(eb, &keys[level], 0);
			else
				btrfs_node_key_to_cpu(eb, &keys[level], 0);

		    ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
			eb = path->nodes[0];
			ret = replace_extents_in_leaf(trans, reloc_root, eb,
						      group, reloc_inode);

		btrfs_release_path(reloc_root, path);

		ret = btrfs_merge_path(trans, reloc_root, keys, nodes,

	/*
	 * replace tree blocks in the fs tree with tree blocks in
	 * the reloc tree.
	 */
	ret = btrfs_merge_path(trans, root, keys, nodes, lowest_level);

	if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_search_slot(trans, reloc_root, first_key, path,

		extent_buffer_get(path->nodes[0]);
		eb = path->nodes[0];
		btrfs_release_path(reloc_root, path);
		ret = invalidate_extent_cache(reloc_root, eb, group, root);

		free_extent_buffer(eb);

	mutex_unlock(&root->fs_info->tree_reloc_mutex);
	path->lowest_level = 0;
static noinline int relocate_tree_block(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct btrfs_key *first_key,
					struct btrfs_ref_path *ref_path)
{
	ret = relocate_one_path(trans, root, path, first_key,
				ref_path, NULL, NULL);
static noinline int del_extent_zero(struct btrfs_trans_handle *trans,
				    struct btrfs_root *extent_root,
				    struct btrfs_path *path,
				    struct btrfs_key *extent_key)
{
	ret = btrfs_search_slot(trans, extent_root, extent_key, path, -1, 1);

	ret = btrfs_del_item(trans, extent_root, path);

	btrfs_release_path(extent_root, path);
static noinline struct btrfs_root *read_ref_root(struct btrfs_fs_info *fs_info,
						 struct btrfs_ref_path *ref_path)
{
	struct btrfs_key root_key;

	root_key.objectid = ref_path->root_objectid;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	if (is_cowonly_root(ref_path->root_objectid))
		root_key.offset = 0;
	else
		root_key.offset = (u64)-1;

	return btrfs_read_fs_root_no_name(fs_info, &root_key);
static noinline int relocate_one_extent(struct btrfs_root *extent_root,
					struct btrfs_path *path,
					struct btrfs_key *extent_key,
					struct btrfs_block_group_cache *group,
					struct inode *reloc_inode, int pass)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *found_root;
	struct btrfs_ref_path *ref_path = NULL;
	struct disk_extent *new_extents = NULL;

	struct btrfs_key first_key;

	trans = btrfs_start_transaction(extent_root, 1);

	if (extent_key->objectid == 0) {
		ret = del_extent_zero(trans, extent_root, path, extent_key);

	ref_path = kmalloc(sizeof(*ref_path), GFP_NOFS);

	for (loops = 0; ; loops++) {

			ret = btrfs_first_ref_path(trans, extent_root, ref_path,
						   extent_key->objectid);

			ret = btrfs_next_ref_path(trans, extent_root, ref_path);

		if (ref_path->root_objectid == BTRFS_TREE_LOG_OBJECTID ||
		    ref_path->root_objectid == BTRFS_TREE_RELOC_OBJECTID)

		found_root = read_ref_root(extent_root->fs_info, ref_path);
		BUG_ON(!found_root);
		/*
		 * for reference counted tree, only process reference paths
		 * rooted at the latest committed root.
		 */
		if (found_root->ref_cows &&
		    ref_path->root_generation != found_root->root_key.offset)

		if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
			/*
			 * copy data extents to new locations
			 */
			u64 group_start = group->key.objectid;
			ret = relocate_data_extent(reloc_inode,

		level = ref_path->owner_objectid;

		if (prev_block != ref_path->nodes[level]) {
			struct extent_buffer *eb;
			u64 block_start = ref_path->nodes[level];
			u64 block_size = btrfs_level_size(found_root, level);

			eb = read_tree_block(found_root, block_start,

			btrfs_tree_lock(eb);
			BUG_ON(level != btrfs_header_level(eb));

			if (level == 0)
				btrfs_item_key_to_cpu(eb, &first_key, 0);
			else
				btrfs_node_key_to_cpu(eb, &first_key, 0);

			btrfs_tree_unlock(eb);
			free_extent_buffer(eb);
			prev_block = block_start;

		mutex_lock(&extent_root->fs_info->trans_mutex);
		btrfs_record_root_in_trans(found_root);
		mutex_unlock(&extent_root->fs_info->trans_mutex);
		if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
			/*
			 * try to update data extent references while
			 * keeping metadata shared between snapshots.
			 */
			ret = relocate_one_path(trans, found_root,
						path, &first_key, ref_path,
						group, reloc_inode);

			/*
			 * use fallback method to process the remaining
			 * references.
			 */
			u64 group_start = group->key.objectid;
			new_extents = kmalloc(sizeof(*new_extents),

			ret = get_new_locations(reloc_inode,

			ret = replace_one_extent(trans, found_root,

						 &first_key, ref_path,
						 new_extents, nr_extents);

			ret = relocate_tree_block(trans, found_root, path,
						  &first_key, ref_path);

	btrfs_end_transaction(trans, extent_root);
static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
{
	u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
		BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;

	num_devices = root->fs_info->fs_devices->rw_devices;
	if (num_devices == 1) {
		stripped |= BTRFS_BLOCK_GROUP_DUP;
		stripped = flags & ~stripped;

		/* turn raid0 into single device chunks */
		if (flags & BTRFS_BLOCK_GROUP_RAID0)
			return stripped;

		/* turn mirroring into duplication */
		if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
			     BTRFS_BLOCK_GROUP_RAID10))
			return stripped | BTRFS_BLOCK_GROUP_DUP;

	/* they already had raid on here, just return */
	if (flags & stripped)
		return flags;

	stripped |= BTRFS_BLOCK_GROUP_DUP;
	stripped = flags & ~stripped;

	/* switch duplicated blocks with raid1 */
	if (flags & BTRFS_BLOCK_GROUP_DUP)
		return stripped | BTRFS_BLOCK_GROUP_RAID1;

	/* turn single device chunks into raid0 */
	return stripped | BTRFS_BLOCK_GROUP_RAID0;
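/*
 * Illustrative sketch only (guarded by #if 0, not compiled): a stand-alone
 * user-space rendering of the flag translation that
 * update_block_group_flags() performs when a chunk has to be re-created
 * during relocation.  The BG_* bit values below are stand-ins picked for
 * the example, not the on-disk BTRFS_BLOCK_GROUP_* constants; only the
 * decision logic is meant to mirror the function above: with a single
 * writable device, RAID0 falls back to plain chunks and RAID1/RAID10 fall
 * back to DUP; with multiple devices, DUP is upgraded to RAID1 and plain
 * chunks to RAID0.
 */
#if 0
#include <stdio.h>

#define BG_RAID0  (1ULL << 0)
#define BG_RAID1  (1ULL << 1)
#define BG_DUP    (1ULL << 2)
#define BG_RAID10 (1ULL << 3)

static unsigned long long new_profile(unsigned long long flags,
				      unsigned long long num_devices)
{
	unsigned long long stripped = BG_RAID0 | BG_RAID1 | BG_RAID10;

	if (num_devices == 1) {
		stripped |= BG_DUP;
		stripped = flags & ~stripped;

		if (flags & BG_RAID0)			/* raid0 -> single */
			return stripped;
		if (flags & (BG_RAID1 | BG_RAID10))	/* mirror -> dup */
			return stripped | BG_DUP;
		return flags;
	}

	if (flags & (BG_RAID0 | BG_RAID1 | BG_RAID10))	/* already raid */
		return flags;

	stripped |= BG_DUP;
	stripped = flags & ~stripped;

	if (flags & BG_DUP)				/* dup -> raid1 */
		return stripped | BG_RAID1;
	return stripped | BG_RAID0;			/* single -> raid0 */
}

int main(void)
{
	printf("raid1 on 1 device  -> %#llx\n", new_profile(BG_RAID1, 1));
	printf("dup   on 2 devices -> %#llx\n", new_profile(BG_DUP, 2));
	return 0;
}
#endif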
static int __alloc_chunk_for_shrink(struct btrfs_root *root,
				    struct btrfs_block_group_cache *shrink_block_group,
				    int force)
{
	struct btrfs_trans_handle *trans;
	u64 new_alloc_flags;

	spin_lock(&shrink_block_group->lock);
	if (btrfs_block_group_used(&shrink_block_group->item) > 0) {
		spin_unlock(&shrink_block_group->lock);

		trans = btrfs_start_transaction(root, 1);
		spin_lock(&shrink_block_group->lock);

		new_alloc_flags = update_block_group_flags(root,
						   shrink_block_group->flags);
		if (new_alloc_flags != shrink_block_group->flags) {
			calc =
			     btrfs_block_group_used(&shrink_block_group->item);

			calc = shrink_block_group->key.offset;

		spin_unlock(&shrink_block_group->lock);

		do_chunk_alloc(trans, root->fs_info->extent_root,
			       calc + 2 * 1024 * 1024, new_alloc_flags, force);

		btrfs_end_transaction(trans, root);

		spin_unlock(&shrink_block_group->lock);
static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 u64 objectid, u64 size)
{
	struct btrfs_path *path;
	struct btrfs_inode_item *item;
	struct extent_buffer *leaf;

	path = btrfs_alloc_path();

	path->leave_spinning = 1;
	ret = btrfs_insert_empty_inode(trans, root, path, objectid);

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item);
	memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
	btrfs_set_inode_generation(leaf, item, 1);
	btrfs_set_inode_size(leaf, item, size);
	btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
	btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(root, path);

	btrfs_free_path(path);
static noinline struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
						 struct btrfs_block_group_cache *group)
{
	struct inode *inode = NULL;
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root;
	struct btrfs_key root_key;
	u64 objectid = BTRFS_FIRST_FREE_OBJECTID;

	root_key.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;
	root = btrfs_read_fs_root_no_name(fs_info, &root_key);

		return ERR_CAST(root);

	trans = btrfs_start_transaction(root, 1);

	err = btrfs_find_free_objectid(trans, root, objectid, &objectid);

	err = __insert_orphan_inode(trans, root, objectid, group->key.offset);

	err = btrfs_insert_file_extent(trans, root, objectid, 0, 0, 0,
				       group->key.offset, 0, group->key.offset,

	inode = btrfs_iget_locked(root->fs_info->sb, objectid, root);
	if (inode->i_state & I_NEW) {
		BTRFS_I(inode)->root = root;
		BTRFS_I(inode)->location.objectid = objectid;
		BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
		BTRFS_I(inode)->location.offset = 0;
		btrfs_read_locked_inode(inode);
		unlock_new_inode(inode);
		BUG_ON(is_bad_inode(inode));

	BTRFS_I(inode)->index_cnt = group->key.objectid;

	err = btrfs_orphan_add(trans, inode);

	btrfs_end_transaction(trans, root);

	inode = ERR_PTR(err);
int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len)
{
	struct btrfs_ordered_sum *sums;
	struct btrfs_sector_sum *sector_sum;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct list_head list;

	INIT_LIST_HEAD(&list);

	ordered = btrfs_lookup_ordered_extent(inode, file_pos);
	BUG_ON(ordered->file_offset != file_pos || ordered->len != len);

	disk_bytenr = file_pos + BTRFS_I(inode)->index_cnt;
	ret = btrfs_lookup_csums_range(root->fs_info->csum_root, disk_bytenr,
				       disk_bytenr + len - 1, &list);

	while (!list_empty(&list)) {
		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
		list_del_init(&sums->list);

		sector_sum = sums->sums;
		sums->bytenr = ordered->start;

		while (offset < sums->len) {
			sector_sum->bytenr += ordered->start - disk_bytenr;

			offset += root->sectorsize;

		btrfs_add_ordered_sum(inode, ordered, sums);

	btrfs_put_ordered_extent(ordered);
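/*
 * Illustrative sketch only (guarded by #if 0, not compiled): a stand-alone
 * user-space toy of the rebasing loop in btrfs_reloc_clone_csums() above.
 * The checksums were looked up by the pre-relocation disk bytenr; before
 * they are attached to the new ordered extent, every per-sector entry is
 * shifted by (new extent start - old disk bytenr).  The struct layout,
 * sector size and sample addresses below are invented for the example;
 * only the arithmetic mirrors the code above.
 */
#if 0
#include <stdio.h>

#define SECTORSIZE 4096ULL

struct toy_sector_sum {
	unsigned long long bytenr;	/* disk byte the csum covers */
	unsigned int csum;
};

static void rebase_sums(struct toy_sector_sum *sums, unsigned long long len,
			unsigned long long old_bytenr,
			unsigned long long new_start)
{
	unsigned long long offset = 0;
	struct toy_sector_sum *cur = sums;

	/* one entry per sector; shift each to the relocated extent */
	while (offset < len) {
		cur->bytenr += new_start - old_bytenr;
		cur++;
		offset += SECTORSIZE;
	}
}

int main(void)
{
	struct toy_sector_sum sums[2] = {
		{ .bytenr = 0x10000, .csum = 0xaaaa },
		{ .bytenr = 0x11000, .csum = 0xbbbb },
	};

	/* extent moved from 0x10000 to 0x80000 during relocation */
	rebase_sums(sums, 2 * SECTORSIZE, 0x10000, 0x80000);
	printf("%llx %llx\n", sums[0].bytenr, sums[1].bytenr);
	return 0;
}
#endif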
int btrfs_relocate_block_group(struct btrfs_root *root, u64 group_start)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_fs_info *info = root->fs_info;
	struct extent_buffer *leaf;
	struct inode *reloc_inode;
	struct btrfs_block_group_cache *block_group;
	struct btrfs_key key;

	root = root->fs_info->extent_root;

	block_group = btrfs_lookup_block_group(info, group_start);
	BUG_ON(!block_group);

	printk(KERN_INFO "btrfs relocating block group %llu flags %llu\n",
	       (unsigned long long)block_group->key.objectid,
	       (unsigned long long)block_group->flags);

	path = btrfs_alloc_path();

	reloc_inode = create_reloc_inode(info, block_group);
	BUG_ON(IS_ERR(reloc_inode));

	__alloc_chunk_for_shrink(root, block_group, 1);
	set_block_group_readonly(block_group);

	btrfs_start_delalloc_inodes(info->tree_root);
	btrfs_wait_ordered_extents(info->tree_root, 0);

	key.objectid = block_group->key.objectid;

	cur_byte = key.objectid;

	trans = btrfs_start_transaction(info->tree_root, 1);
	btrfs_commit_transaction(trans, info->tree_root);

	mutex_lock(&root->fs_info->cleaner_mutex);
	btrfs_clean_old_snapshots(info->tree_root);
	btrfs_remove_leaf_refs(info->tree_root, (u64)-1, 1);
	mutex_unlock(&root->fs_info->cleaner_mutex);

	trans = btrfs_start_transaction(info->tree_root, 1);
	btrfs_commit_transaction(trans, info->tree_root);

		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);

		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);

			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

		if (key.objectid >= block_group->key.objectid +
		    block_group->key.offset)

		if (progress && need_resched()) {
			btrfs_release_path(root, path);

		if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY ||
		    key.objectid + key.offset <= cur_byte) {

		cur_byte = key.objectid + key.offset;
		btrfs_release_path(root, path);

		__alloc_chunk_for_shrink(root, block_group, 0);
		ret = relocate_one_extent(root, path, &key, block_group,

		key.objectid = cur_byte;

	btrfs_release_path(root, path);

	btrfs_wait_ordered_range(reloc_inode, 0, (u64)-1);
	invalidate_mapping_pages(reloc_inode->i_mapping, 0, -1);

	if (total_found > 0) {
		printk(KERN_INFO "btrfs found %llu extents in pass %d\n",
		       (unsigned long long)total_found, pass);

		if (total_found == skipped && pass > 2) {

			reloc_inode = create_reloc_inode(info, block_group);

	/* delete reloc_inode */

	/* unpin extents in this range */
	trans = btrfs_start_transaction(info->tree_root, 1);
	btrfs_commit_transaction(trans, info->tree_root);

	spin_lock(&block_group->lock);
	WARN_ON(block_group->pinned > 0);
	WARN_ON(block_group->reserved > 0);
	WARN_ON(btrfs_block_group_used(&block_group->item) > 0);
	spin_unlock(&block_group->lock);
	btrfs_put_block_group(block_group);

	btrfs_free_path(path);
static int find_first_block_group(struct btrfs_root *root,
				  struct btrfs_path *path,
				  struct btrfs_key *key)
{
	struct btrfs_key found_key;
	struct extent_buffer *leaf;

	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);

	slot = path->slots[0];
	leaf = path->nodes[0];
	if (slot >= btrfs_header_nritems(leaf)) {
		ret = btrfs_next_leaf(root, path);

	btrfs_item_key_to_cpu(leaf, &found_key, slot);

	if (found_key.objectid >= key->objectid &&
	    found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
int btrfs_free_block_groups(struct btrfs_fs_info *info)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_space_info *space_info;

	spin_lock(&info->block_group_cache_lock);
	while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
		block_group = rb_entry(n, struct btrfs_block_group_cache,

		rb_erase(&block_group->cache_node,
			 &info->block_group_cache_tree);
		spin_unlock(&info->block_group_cache_lock);

		btrfs_remove_free_space_cache(block_group);
		down_write(&block_group->space_info->groups_sem);
		list_del(&block_group->list);
		up_write(&block_group->space_info->groups_sem);

		WARN_ON(atomic_read(&block_group->count) != 1);

		spin_lock(&info->block_group_cache_lock);

	spin_unlock(&info->block_group_cache_lock);

	/* now that all the block groups are freed, go through and
	 * free all the space_info structs.  This is only called during
	 * the final stages of unmount, and so we know nobody is
	 * using them.  We call synchronize_rcu() once before we start,
	 * just to be on the safe side.
	 */
	while (!list_empty(&info->space_info)) {
		space_info = list_entry(info->space_info.next,
					struct btrfs_space_info,

		list_del(&space_info->list);
int btrfs_read_block_groups(struct btrfs_root *root)
{
	struct btrfs_path *path;

	struct btrfs_block_group_cache *cache;
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_space_info *space_info;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;

	root = info->extent_root;

	btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
	path = btrfs_alloc_path();

		ret = find_first_block_group(root, path, &key);

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		cache = kzalloc(sizeof(*cache), GFP_NOFS);

		atomic_set(&cache->count, 1);
		spin_lock_init(&cache->lock);
		spin_lock_init(&cache->tree_lock);
		mutex_init(&cache->cache_mutex);
		INIT_LIST_HEAD(&cache->list);
		INIT_LIST_HEAD(&cache->cluster_list);
		read_extent_buffer(leaf, &cache->item,
				   btrfs_item_ptr_offset(leaf, path->slots[0]),
				   sizeof(cache->item));
		memcpy(&cache->key, &found_key, sizeof(found_key));

		key.objectid = found_key.objectid + found_key.offset;
		btrfs_release_path(root, path);
		cache->flags = btrfs_block_group_flags(&cache->item);

		ret = update_space_info(info, cache->flags, found_key.offset,
					btrfs_block_group_used(&cache->item),

		cache->space_info = space_info;
		down_write(&space_info->groups_sem);
		list_add_tail(&cache->list, &space_info->block_groups);
		up_write(&space_info->groups_sem);

		ret = btrfs_add_block_group_cache(root->fs_info, cache);

		set_avail_alloc_bits(root->fs_info, cache->flags);
		if (btrfs_chunk_readonly(root, cache->key.objectid))
			set_block_group_readonly(cache);

	btrfs_free_path(path);
int btrfs_make_block_group(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, u64 bytes_used,
			   u64 type, u64 chunk_objectid, u64 chunk_offset,
			   u64 size)
{
	struct btrfs_root *extent_root;
	struct btrfs_block_group_cache *cache;

	extent_root = root->fs_info->extent_root;

	root->fs_info->last_trans_log_full_commit = trans->transid;

	cache = kzalloc(sizeof(*cache), GFP_NOFS);

	cache->key.objectid = chunk_offset;
	cache->key.offset = size;
	cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	atomic_set(&cache->count, 1);
	spin_lock_init(&cache->lock);
	spin_lock_init(&cache->tree_lock);
	mutex_init(&cache->cache_mutex);
	INIT_LIST_HEAD(&cache->list);
	INIT_LIST_HEAD(&cache->cluster_list);

	btrfs_set_block_group_used(&cache->item, bytes_used);
	btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
	cache->flags = type;
	btrfs_set_block_group_flags(&cache->item, type);

	ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
				&cache->space_info);

	down_write(&cache->space_info->groups_sem);
	list_add_tail(&cache->list, &cache->space_info->block_groups);
	up_write(&cache->space_info->groups_sem);

	ret = btrfs_add_block_group_cache(root->fs_info, cache);

	ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
				sizeof(cache->item));

	set_avail_alloc_bits(extent_root->fs_info, type);
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 group_start)
{
	struct btrfs_path *path;
	struct btrfs_block_group_cache *block_group;
	struct btrfs_free_cluster *cluster;
	struct btrfs_key key;

	root = root->fs_info->extent_root;

	block_group = btrfs_lookup_block_group(root->fs_info, group_start);
	BUG_ON(!block_group);
	BUG_ON(!block_group->ro);

	memcpy(&key, &block_group->key, sizeof(key));

	/* make sure this block group isn't part of an allocation cluster */
	cluster = &root->fs_info->data_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	/*
	 * make sure this block group isn't part of a metadata
	 * allocation cluster
	 */
	cluster = &root->fs_info->meta_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	path = btrfs_alloc_path();

	spin_lock(&root->fs_info->block_group_cache_lock);
	rb_erase(&block_group->cache_node,
		 &root->fs_info->block_group_cache_tree);
	spin_unlock(&root->fs_info->block_group_cache_lock);
	btrfs_remove_free_space_cache(block_group);
	down_write(&block_group->space_info->groups_sem);
	/*
	 * we must use list_del_init so people can check to see if they
	 * are still on the list after taking the semaphore
	 */
	list_del_init(&block_group->list);
	up_write(&block_group->space_info->groups_sem);

	spin_lock(&block_group->space_info->lock);
	block_group->space_info->total_bytes -= block_group->key.offset;
	block_group->space_info->bytes_readonly -= block_group->key.offset;
	spin_unlock(&block_group->space_info->lock);
	block_group->space_info->full = 0;

	btrfs_put_block_group(block_group);
	btrfs_put_block_group(block_group);

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);

	ret = btrfs_del_item(trans, root, path);

	btrfs_free_path(path);