/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
21 #include "kerncompat.h"
22 #include "radix-tree.h"
25 #include "print-tree.h"
26 #include "transaction.h"
30 #define BLOCK_GROUP_DATA EXTENT_WRITEBACK
31 #define BLOCK_GROUP_METADATA EXTENT_UPTODATE
32 #define BLOCK_GROUP_SYSTEM EXTENT_NEW
34 #define BLOCK_GROUP_DIRTY EXTENT_DIRTY
36 #define PENDING_EXTENT_INSERT 0
37 #define PENDING_EXTENT_DELETE 1
38 #define PENDING_BACKREF_UPDATE 2
40 struct pending_extent_op
{
45 struct btrfs_disk_key key
;
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     u64 root_objectid, u64 generation,
                                     u64 flags, struct btrfs_disk_key *key,
                                     int level, struct btrfs_key *ins);
static int __free_extent(struct btrfs_trans_handle *trans,
                         struct btrfs_root *root,
                         u64 bytenr, u64 num_bytes, u64 parent,
                         u64 root_objectid, u64 owner_objectid,
                         u64 owner_offset, int refs_to_drop);
static int finish_current_insert(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *extent_root);
static int del_pending_extents(struct btrfs_trans_handle *trans,
                               struct btrfs_root *extent_root);
static int remove_sb_from_cache(struct btrfs_root *root,
                                struct btrfs_block_group_cache *cache)
        struct extent_io_tree *free_space_cache;

        free_space_cache = &root->fs_info->free_space_cache;
        for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
                bytenr = btrfs_sb_offset(i);
                ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
                                       cache->key.objectid, bytenr, 0,
                                       &logical, &nr, &stripe_len);

                clear_extent_dirty(free_space_cache, logical[nr],
                                   logical[nr] + stripe_len - 1, GFP_NOFS);
static int cache_block_group(struct btrfs_root *root,
                             struct btrfs_block_group_cache *block_group)
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct extent_io_tree *free_space_cache;

        root = root->fs_info->extent_root;
        free_space_cache = &root->fs_info->free_space_cache;

        if (block_group->cached)

        path = btrfs_alloc_path();

        last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
        btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);

                leaf = path->nodes[0];
                slot = path->slots[0];
                if (slot >= btrfs_header_nritems(leaf)) {
                        ret = btrfs_next_leaf(root, path);

                btrfs_item_key_to_cpu(leaf, &key, slot);
                if (key.objectid < block_group->key.objectid) {

                if (key.objectid >= block_group->key.objectid +
                    block_group->key.offset) {

                if (btrfs_key_type(&key) == BTRFS_EXTENT_ITEM_KEY) {
                        if (key.objectid > last) {
                                hole_size = key.objectid - last;
                                set_extent_dirty(free_space_cache, last,
                                                 last + hole_size - 1,
                        last = key.objectid + key.offset;

        if (block_group->key.objectid +
            block_group->key.offset > last) {
                hole_size = block_group->key.objectid +
                            block_group->key.offset - last;
                set_extent_dirty(free_space_cache, last,
                                 last + hole_size - 1, GFP_NOFS);
        remove_sb_from_cache(root, block_group);
        block_group->cached = 1;
        btrfs_free_path(path);
struct btrfs_block_group_cache *btrfs_lookup_first_block_group(struct
        struct extent_io_tree *block_group_cache;
        struct btrfs_block_group_cache *block_group = NULL;

        bytenr = max_t(u64, bytenr,
                       BTRFS_SUPER_INFO_OFFSET + BTRFS_SUPER_INFO_SIZE);
        block_group_cache = &info->block_group_cache;
        ret = find_first_extent_bit(block_group_cache,
                                    bytenr, &start, &end,
                                    BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA |

        ret = get_state_private(block_group_cache, start, &ptr);

        block_group = (struct btrfs_block_group_cache *)(unsigned long)ptr;

struct btrfs_block_group_cache *btrfs_lookup_block_group(struct
        struct extent_io_tree *block_group_cache;
        struct btrfs_block_group_cache *block_group = NULL;

        block_group_cache = &info->block_group_cache;
        ret = find_first_extent_bit(block_group_cache,
                                    bytenr, &start, &end,
                                    BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA |

        ret = get_state_private(block_group_cache, start, &ptr);

        block_group = (struct btrfs_block_group_cache *)(unsigned long)ptr;
        if (block_group->key.objectid <= bytenr && bytenr <
            block_group->key.objectid + block_group->key.offset)
static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
        return (cache->flags & bits) == bits;

static int noinline find_search_start(struct btrfs_root *root,
                                      struct btrfs_block_group_cache **cache_ret,
                                      u64 *start_ret, int num, int data)
        struct btrfs_block_group_cache *cache = *cache_ret;
        u64 search_start = *start_ret;

        ret = cache_block_group(root, cache);

        last = max(search_start, cache->key.objectid);
        if (cache->ro || !block_group_bits(cache, data)) {

        ret = find_first_extent_bit(&root->fs_info->free_space_cache,
                                    last, &start, &end, EXTENT_DIRTY);

        start = max(last, start);

        if (last - start < num) {

        if (start + num > cache->key.objectid + cache->key.offset) {

        cache = btrfs_lookup_block_group(root->fs_info, search_start);
                printk("Unable to find block group for %llu\n",
                       (unsigned long long)search_start);

        last = cache->key.objectid + cache->key.offset;

        cache = btrfs_lookup_first_block_group(root->fs_info, last);

        cache = btrfs_find_block_group(root, cache, last, data, 0);
        cache = btrfs_find_block_group(root, cache, last, data, 0);
static u64 div_factor(u64 num, int factor)

static int block_group_state_bits(u64 flags)
        if (flags & BTRFS_BLOCK_GROUP_DATA)
                bits |= BLOCK_GROUP_DATA;
        if (flags & BTRFS_BLOCK_GROUP_METADATA)
                bits |= BLOCK_GROUP_METADATA;
        if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
                bits |= BLOCK_GROUP_SYSTEM;

struct btrfs_block_group_cache *btrfs_find_block_group(struct btrfs_root *root,
                                                 struct btrfs_block_group_cache
                                                 *hint, u64 search_start,
        struct btrfs_block_group_cache *cache;
        struct extent_io_tree *block_group_cache;
        struct btrfs_block_group_cache *found_group = NULL;
        struct btrfs_fs_info *info = root->fs_info;

        block_group_cache = &info->block_group_cache;

        bit = block_group_state_bits(data);

                struct btrfs_block_group_cache *shint;
                shint = btrfs_lookup_block_group(info, search_start);
                if (shint && !shint->ro && block_group_bits(shint, data)) {
                        used = btrfs_block_group_used(&shint->item);
                        if (used + shint->pinned <
                            div_factor(shint->key.offset, factor)) {

        if (hint && !hint->ro && block_group_bits(hint, data)) {
                used = btrfs_block_group_used(&hint->item);
                if (used + hint->pinned <
                    div_factor(hint->key.offset, factor)) {

                last = hint->key.objectid + hint->key.offset;

                hint_last = max(hint->key.objectid, search_start);

                hint_last = search_start;

                ret = find_first_extent_bit(block_group_cache, last,

                ret = get_state_private(block_group_cache, start, &ptr);

                cache = (struct btrfs_block_group_cache *)(unsigned long)ptr;
                last = cache->key.objectid + cache->key.offset;
                used = btrfs_block_group_used(&cache->item);

                if (!cache->ro && block_group_bits(cache, data)) {
                                free_check = cache->key.offset;
                                free_check = div_factor(cache->key.offset,

                        if (used + cache->pinned < free_check) {
/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs. Implicit back refs are optimized
 * for pointers in non-shared tree blocks. For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key. This information allows us to find the block by
 * b-tree searching. Full back refs are for pointers in tree blocks not
 * referenced by their owner trees. The location of the tree block is
 * recorded in the back ref. The full back ref is actually generic and can
 * be used in all the cases where an implicit back ref is used. The major
 * shortcoming of the full back ref is its overhead. Every time a tree
 * block gets COWed, we have to update the back ref entries for all
 * pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it. This means most tree related operations only involve
 * implicit back refs. For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it. So we can detect when
 * a tree block loses its owner tree's reference and do the back refs
 * conversion.
 *
 * When a tree block is COW'd through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree. Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree. In this case, full back refs are used for pointers
 * in the block. Remove these full back refs, add implicit back refs for
 * every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree. In this case, implicit back refs are used for
 * pointers in the block. Add full back refs for every pointer in the
 * block, increase lower level extents' reference counts. The original
 * implicit back refs are inherited by the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree. Add implicit back refs for every pointer in
 * the new block, increase lower level extents' reference count.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent,
 * The key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - Objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 *
 * When a file extent is allocated, the implicit back ref is used and
 * the fields are filled in:
 *
 * (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed by file truncation, we find the
 * corresponding implicit back ref and check the following fields:
 *
 * (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of a key. The key offset for the implicit back refs is
 * the objectid of the block's owner tree. The key offset for the full
 * back refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * level of the tree block is required. This information is stored in the
 * tree block info structure.
 */
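/*
 * Worked example (illustrative only, not part of the original comment):
 * for a data extent at bytenr X that is referenced once by inode 257 at
 * file offset 0 in the subvolume whose root objectid is 5, the implicit
 * back ref would be keyed roughly as
 *
 *   (X, BTRFS_EXTENT_DATA_REF_KEY, hash_extent_data_ref(5, 257, 0))
 *
 * whereas a full back ref recorded against the parent leaf at bytenr P
 * would be keyed as
 *
 *   (X, BTRFS_SHARED_DATA_REF_KEY, P)
 *
 * The subvolume id 5, inode number 257 and offset 0 are made-up values
 * used only to illustrate the key composition described above.
 */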
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root,
                                  struct btrfs_path *path,
                                  u64 owner, u32 extra_size)
        struct btrfs_extent_item *item;
        struct btrfs_extent_item_v0 *ei0;
        struct btrfs_extent_ref_v0 *ref0;
        struct btrfs_tree_block_info *bi;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        struct btrfs_key found_key;
        u32 new_size = sizeof(*item);

        leaf = path->nodes[0];
        BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                             struct btrfs_extent_item_v0);
        refs = btrfs_extent_refs_v0(leaf, ei0);

        if (owner == (u64)-1) {
                        if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                                ret = btrfs_next_leaf(root, path);
                                leaf = path->nodes[0];
                        btrfs_item_key_to_cpu(leaf, &found_key,
                        BUG_ON(key.objectid != found_key.objectid);
                        if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
                        ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                              struct btrfs_extent_ref_v0);
                        owner = btrfs_ref_objectid_v0(leaf, ref0);
        btrfs_release_path(root, path);

        if (owner < BTRFS_FIRST_FREE_OBJECTID)
                new_size += sizeof(*bi);

        new_size -= sizeof(*ei0);
        ret = btrfs_search_slot(trans, root, &key, path, new_size, 1);

        ret = btrfs_extend_item(trans, root, path, new_size);

        leaf = path->nodes[0];
        item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        btrfs_set_extent_refs(leaf, item, refs);
        /* FIXME: get real generation */
        btrfs_set_extent_generation(leaf, item, 0);
        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
                btrfs_set_extent_flags(leaf, item,
                                       BTRFS_EXTENT_FLAG_TREE_BLOCK |
                                       BTRFS_BLOCK_FLAG_FULL_BACKREF);
                bi = (struct btrfs_tree_block_info *)(item + 1);
                /* FIXME: get first key of the block */
                memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
                btrfs_set_tree_block_level(leaf, bi, (int)owner);
                btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
        btrfs_mark_buffer_dirty(leaf);
static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
        u32 high_crc = ~(u32)0;
        u32 low_crc = ~(u32)0;

        lenum = cpu_to_le64(root_objectid);
        high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(owner);
        low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(offset);
        low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));

        return ((u64)high_crc << 31) ^ (u64)low_crc;

static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
                                     struct btrfs_extent_data_ref *ref)
        return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
                                    btrfs_extent_data_ref_objectid(leaf, ref),
                                    btrfs_extent_data_ref_offset(leaf, ref));

static int match_extent_data_ref(struct extent_buffer *leaf,
                                 struct btrfs_extent_data_ref *ref,
                                 u64 root_objectid, u64 owner, u64 offset)
        if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
            btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
            btrfs_extent_data_ref_offset(leaf, ref) != offset)
static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 owner, u64 offset)
        struct btrfs_key key;
        struct btrfs_extent_data_ref *ref;
        struct extent_buffer *leaf;

        key.objectid = bytenr;
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,

        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);

#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                key.type = BTRFS_EXTENT_REF_V0_KEY;
                btrfs_release_path(root, path);
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);
                if (path->slots[0] >= nritems) {
                        ret = btrfs_next_leaf(root, path);
                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                if (key.objectid != bytenr ||
                    key.type != BTRFS_EXTENT_DATA_REF_KEY)

                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);

                if (match_extent_data_ref(leaf, ref, root_objectid,

        btrfs_release_path(root, path);
static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid, u64 owner,
                                           u64 offset, int refs_to_add)
        struct btrfs_key key;
        struct extent_buffer *leaf;

        key.objectid = bytenr;
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                size = sizeof(struct btrfs_shared_data_ref);
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                size = sizeof(struct btrfs_extent_data_ref);

        ret = btrfs_insert_empty_item(trans, root, path, &key, size);
        if (ret && ret != -EEXIST)

        leaf = path->nodes[0];
                struct btrfs_shared_data_ref *ref;
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_shared_data_ref);
                        btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
                        num_refs = btrfs_shared_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
                struct btrfs_extent_data_ref *ref;
                while (ret == -EEXIST) {
                        ref = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_data_ref);
                        if (match_extent_data_ref(leaf, ref, root_objectid,
                        btrfs_release_path(root, path);
                        ret = btrfs_insert_empty_item(trans, root, path, &key,
                        if (ret && ret != -EEXIST)

                        leaf = path->nodes[0];
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);
                        btrfs_set_extent_data_ref_root(leaf, ref,
                        btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
                        btrfs_set_extent_data_ref_offset(leaf, ref, offset);
                        btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
                        num_refs = btrfs_extent_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
        btrfs_mark_buffer_dirty(leaf);
        btrfs_release_path(root, path);
static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
        struct btrfs_key key;
        struct btrfs_extent_data_ref *ref1 = NULL;
        struct btrfs_shared_data_ref *ref2 = NULL;
        struct extent_buffer *leaf;

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

        if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
                ref1 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_data_ref);
                num_refs = btrfs_extent_data_ref_count(leaf, ref1);
        } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
                ref2 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_shared_data_ref);
                num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
                struct btrfs_extent_ref_v0 *ref0;
                ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_ref_v0);
                num_refs = btrfs_ref_count_v0(leaf, ref0);

        BUG_ON(num_refs < refs_to_drop);
        num_refs -= refs_to_drop;

                ret = btrfs_del_item(trans, root, path);
                if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
                        btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
                else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
                        btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                        struct btrfs_extent_ref_v0 *ref0;
                        ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                              struct btrfs_extent_ref_v0);
                        btrfs_set_ref_count_v0(leaf, ref0, num_refs);
                btrfs_mark_buffer_dirty(leaf);
static noinline u32 extent_data_ref_count(struct btrfs_root *root,
                                          struct btrfs_path *path,
                                          struct btrfs_extent_inline_ref *iref)
        struct btrfs_key key;
        struct extent_buffer *leaf;
        struct btrfs_extent_data_ref *ref1;
        struct btrfs_shared_data_ref *ref2;

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                if (btrfs_extent_inline_ref_type(leaf, iref) ==
                    BTRFS_EXTENT_DATA_REF_KEY) {
                        ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
                        num_refs = btrfs_extent_data_ref_count(leaf, ref1);
                        ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
                        num_refs = btrfs_shared_data_ref_count(leaf, ref2);
        } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
                ref1 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_data_ref);
                num_refs = btrfs_extent_data_ref_count(leaf, ref1);
        } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
                ref2 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_shared_data_ref);
                num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
                struct btrfs_extent_ref_v0 *ref0;
                ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_ref_v0);
                num_refs = btrfs_ref_count_v0(leaf, ref0);
static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
                                          struct btrfs_root *root,
                                          struct btrfs_path *path,
                                          u64 bytenr, u64 parent,
        struct btrfs_key key;

        key.objectid = bytenr;
                key.type = BTRFS_SHARED_BLOCK_REF_KEY;
                key.type = BTRFS_TREE_BLOCK_REF_KEY;
                key.offset = root_objectid;

        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        if (ret == -ENOENT && parent) {
                btrfs_release_path(root, path);
                key.type = BTRFS_EXTENT_REF_V0_KEY;
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
                                          struct btrfs_root *root,
                                          struct btrfs_path *path,
                                          u64 bytenr, u64 parent,
        struct btrfs_key key;

        key.objectid = bytenr;
                key.type = BTRFS_SHARED_BLOCK_REF_KEY;
                key.type = BTRFS_TREE_BLOCK_REF_KEY;
                key.offset = root_objectid;

        ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
        btrfs_release_path(root, path);

static inline int extent_ref_type(u64 parent, u64 owner)
        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
                        return BTRFS_SHARED_BLOCK_REF_KEY;
                        return BTRFS_TREE_BLOCK_REF_KEY;
                        return BTRFS_SHARED_DATA_REF_KEY;
                        return BTRFS_EXTENT_DATA_REF_KEY;
static int find_next_key(struct btrfs_path *path, struct btrfs_key *key)
        for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
                if (!path->nodes[level])
                if (path->slots[level] + 1 >=
                    btrfs_header_nritems(path->nodes[level]))
                        btrfs_item_key_to_cpu(path->nodes[level], key,
                                              path->slots[level] + 1);
                        btrfs_node_key_to_cpu(path->nodes[level], key,
                                              path->slots[level] + 1);
static int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
                                        struct btrfs_root *root,
                                        struct btrfs_path *path,
                                        struct btrfs_extent_inline_ref **ref_ret,
                                        u64 bytenr, u64 num_bytes,
                                        u64 parent, u64 root_objectid,
                                        u64 owner, u64 offset, int insert)
        struct btrfs_key key;
        struct extent_buffer *leaf;
        struct btrfs_extent_item *ei;
        struct btrfs_extent_inline_ref *iref;

        key.objectid = bytenr;
        key.type = BTRFS_EXTENT_ITEM_KEY;
        key.offset = num_bytes;

        want = extent_ref_type(parent, owner);
                extra_size = btrfs_extent_inline_ref_size(want);

        ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);

        leaf = path->nodes[0];
        item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        if (item_size < sizeof(*ei)) {
                ret = convert_extent_item_v0(trans, root, path, owner,
                leaf = path->nodes[0];
                item_size = btrfs_item_size_nr(leaf, path->slots[0]);

        BUG_ON(item_size < sizeof(*ei));

        ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        flags = btrfs_extent_flags(leaf, ei);

        ptr = (unsigned long)(ei + 1);
        end = (unsigned long)ei + item_size;

        if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
                ptr += sizeof(struct btrfs_tree_block_info);
                BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));

                iref = (struct btrfs_extent_inline_ref *)ptr;
                type = btrfs_extent_inline_ref_type(leaf, iref);
                        ptr += btrfs_extent_inline_ref_size(type);

                if (type == BTRFS_EXTENT_DATA_REF_KEY) {
                        struct btrfs_extent_data_ref *dref;
                        dref = (struct btrfs_extent_data_ref *)(&iref->offset);
                        if (match_extent_data_ref(leaf, dref, root_objectid,
                        if (hash_extent_data_ref_item(leaf, dref) <
                            hash_extent_data_ref(root_objectid, owner, offset))
                        ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
                                if (parent == ref_offset) {
                                if (ref_offset < parent)
                                if (root_objectid == ref_offset) {
                                if (ref_offset < root_objectid)
                ptr += btrfs_extent_inline_ref_size(type);
        if (err == -ENOENT && insert) {
                if (item_size + extra_size >=
                    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
                /*
                 * To add new inline back ref, we have to make sure
                 * there is no corresponding back ref item.
                 * For simplicity, we just do not add new inline back
                 * ref if there is any back ref item.
                 */
                if (find_next_key(path, &key) == 0 && key.objectid == bytenr &&
                    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
        *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
static int setup_inline_extent_backref(struct btrfs_trans_handle *trans,
                                       struct btrfs_root *root,
                                       struct btrfs_path *path,
                                       struct btrfs_extent_inline_ref *iref,
                                       u64 parent, u64 root_objectid,
                                       u64 owner, u64 offset, int refs_to_add)
        struct extent_buffer *leaf;
        struct btrfs_extent_item *ei;
        unsigned long item_offset;

        leaf = path->nodes[0];
        ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        item_offset = (unsigned long)iref - (unsigned long)ei;

        type = extent_ref_type(parent, owner);
        size = btrfs_extent_inline_ref_size(type);

        ret = btrfs_extend_item(trans, root, path, size);

        ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        refs = btrfs_extent_refs(leaf, ei);
        refs += refs_to_add;
        btrfs_set_extent_refs(leaf, ei, refs);

        ptr = (unsigned long)ei + item_offset;
        end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
        if (ptr < end - size)
                memmove_extent_buffer(leaf, ptr + size, ptr,

        iref = (struct btrfs_extent_inline_ref *)ptr;
        btrfs_set_extent_inline_ref_type(leaf, iref, type);
        if (type == BTRFS_EXTENT_DATA_REF_KEY) {
                struct btrfs_extent_data_ref *dref;
                dref = (struct btrfs_extent_data_ref *)(&iref->offset);
                btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
                btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
                btrfs_set_extent_data_ref_offset(leaf, dref, offset);
                btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
        } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
                struct btrfs_shared_data_ref *sref;
                sref = (struct btrfs_shared_data_ref *)(iref + 1);
                btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
                btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
        } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
                btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
                btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
        btrfs_mark_buffer_dirty(leaf);
static int lookup_extent_backref(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root,
                                 struct btrfs_path *path,
                                 struct btrfs_extent_inline_ref **ref_ret,
                                 u64 bytenr, u64 num_bytes, u64 parent,
                                 u64 root_objectid, u64 owner, u64 offset)
        ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
                                           bytenr, num_bytes, parent,
                                           root_objectid, owner, offset, 0);

        btrfs_release_path(root, path);

        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
                ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
                ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
                                             root_objectid, owner, offset);
static int update_inline_extent_backref(struct btrfs_trans_handle *trans,
                                        struct btrfs_root *root,
                                        struct btrfs_path *path,
                                        struct btrfs_extent_inline_ref *iref,
        struct extent_buffer *leaf;
        struct btrfs_extent_item *ei;
        struct btrfs_extent_data_ref *dref = NULL;
        struct btrfs_shared_data_ref *sref = NULL;

        leaf = path->nodes[0];
        ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        refs = btrfs_extent_refs(leaf, ei);
        WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
        refs += refs_to_mod;
        btrfs_set_extent_refs(leaf, ei, refs);

        type = btrfs_extent_inline_ref_type(leaf, iref);

        if (type == BTRFS_EXTENT_DATA_REF_KEY) {
                dref = (struct btrfs_extent_data_ref *)(&iref->offset);
                refs = btrfs_extent_data_ref_count(leaf, dref);
        } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
                sref = (struct btrfs_shared_data_ref *)(iref + 1);
                refs = btrfs_shared_data_ref_count(leaf, sref);
                BUG_ON(refs_to_mod != -1);

        BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
        refs += refs_to_mod;

                if (type == BTRFS_EXTENT_DATA_REF_KEY)
                        btrfs_set_extent_data_ref_count(leaf, dref, refs);
                        btrfs_set_shared_data_ref_count(leaf, sref, refs);
                size = btrfs_extent_inline_ref_size(type);
                item_size = btrfs_item_size_nr(leaf, path->slots[0]);
                ptr = (unsigned long)iref;
                end = (unsigned long)ei + item_size;
                if (ptr + size < end)
                        memmove_extent_buffer(leaf, ptr, ptr + size,
                ret = btrfs_truncate_item(trans, root, path, item_size, 1);
        btrfs_mark_buffer_dirty(leaf);
static int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
                                        struct btrfs_root *root,
                                        struct btrfs_path *path,
                                        u64 bytenr, u64 num_bytes, u64 parent,
                                        u64 root_objectid, u64 owner,
                                        u64 offset, int refs_to_add)
        struct btrfs_extent_inline_ref *iref;

        ret = lookup_inline_extent_backref(trans, root, path, &iref,
                                           bytenr, num_bytes, parent,
                                           root_objectid, owner, offset, 1);
                BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
                ret = update_inline_extent_backref(trans, root, path, iref,
        } else if (ret == -ENOENT) {
                ret = setup_inline_extent_backref(trans, root, path, iref,
                                                  parent, root_objectid,
                                                  owner, offset, refs_to_add);
static int insert_extent_backref(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root,
                                 struct btrfs_path *path,
                                 u64 bytenr, u64 parent, u64 root_objectid,
                                 u64 owner, u64 offset, int refs_to_add)
        if (owner >= BTRFS_FIRST_FREE_OBJECTID) {
                ret = insert_extent_data_ref(trans, root, path, bytenr,
                                             parent, root_objectid,
                                             owner, offset, refs_to_add);
                BUG_ON(refs_to_add != 1);
                ret = insert_tree_block_ref(trans, root, path, bytenr,
                                            parent, root_objectid);

static int remove_extent_backref(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root,
                                 struct btrfs_path *path,
                                 struct btrfs_extent_inline_ref *iref,
                                 int refs_to_drop, int is_data)
        BUG_ON(!is_data && refs_to_drop != 1);
                ret = update_inline_extent_backref(trans, root, path, iref,
        } else if (is_data) {
                ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
                ret = btrfs_del_item(trans, root, path);
int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
                         struct btrfs_root *root,
                         u64 bytenr, u64 num_bytes, u64 parent,
                         u64 root_objectid, u64 owner, u64 offset)
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_extent_item *item;

        path = btrfs_alloc_path();

        path->leave_spinning = 1;
        ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
                                           path, bytenr, num_bytes, parent,
                                           root_objectid, owner, offset, 1);
        if (ret != -EAGAIN) {

        leaf = path->nodes[0];
        item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        refs = btrfs_extent_refs(leaf, item);
        btrfs_set_extent_refs(leaf, item, refs + 1);

        btrfs_mark_buffer_dirty(leaf);
        btrfs_release_path(root->fs_info->extent_root, path);

        path->leave_spinning = 1;

        /* now insert the actual backref */
        ret = insert_extent_backref(trans, root->fs_info->extent_root,
                                    path, bytenr, parent, root_objectid,

        btrfs_free_path(path);
        finish_current_insert(trans, root->fs_info->extent_root);
        del_pending_extents(trans, root->fs_info->extent_root);
int btrfs_extent_post_op(struct btrfs_trans_handle *trans,
                         struct btrfs_root *root)
        finish_current_insert(trans, root->fs_info->extent_root);
        del_pending_extents(trans, root->fs_info->extent_root);

int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root, u64 bytenr,
                             u64 num_bytes, u64 *refs, u64 *flags)
        struct btrfs_path *path;
        struct btrfs_key key;
        struct extent_buffer *l;
        struct btrfs_extent_item *item;

        WARN_ON(num_bytes < root->sectorsize);
        path = btrfs_alloc_path();
        key.objectid = bytenr;
        key.offset = num_bytes;
        btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
        ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
                btrfs_print_leaf(root, path->nodes[0]);
                printk("failed to find block number %Lu\n", bytenr);

        item_size = btrfs_item_size_nr(l, path->slots[0]);
        if (item_size >= sizeof(*item)) {
                item = btrfs_item_ptr(l, path->slots[0],
                                      struct btrfs_extent_item);
                num_refs = btrfs_extent_refs(l, item);
                extent_flags = btrfs_extent_flags(l, item);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                struct btrfs_extent_item_v0 *ei0;
                BUG_ON(item_size != sizeof(*ei0));
                ei0 = btrfs_item_ptr(l, path->slots[0],
                                     struct btrfs_extent_item_v0);
                num_refs = btrfs_extent_refs_v0(l, ei0);
                /* FIXME: this isn't correct for data */
                extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;

        BUG_ON(num_refs == 0);
        item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
                *flags = extent_flags;
        btrfs_free_path(path);
int btrfs_set_block_flags(struct btrfs_trans_handle *trans,
                          struct btrfs_root *root,
                          u64 bytenr, u64 num_bytes, u64 flags)
        struct btrfs_path *path;
        struct btrfs_key key;
        struct extent_buffer *l;
        struct btrfs_extent_item *item;

        WARN_ON(num_bytes < root->sectorsize);
        path = btrfs_alloc_path();
        key.objectid = bytenr;
        key.offset = num_bytes;
        btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
        ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
                btrfs_print_leaf(root, path->nodes[0]);
                printk("failed to find block number %Lu\n",
                       (unsigned long long)bytenr);

        item_size = btrfs_item_size_nr(l, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        if (item_size < sizeof(*item)) {
                ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
                item_size = btrfs_item_size_nr(l, path->slots[0]);

        BUG_ON(item_size < sizeof(*item));
        item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
        flags |= btrfs_extent_flags(l, item);
        btrfs_set_extent_flags(l, item, flags);

        btrfs_free_path(path);
        finish_current_insert(trans, root->fs_info->extent_root);
        del_pending_extents(trans, root->fs_info->extent_root);
static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
                           struct btrfs_root *root,
                           struct extent_buffer *buf,
                           int record_parent, int inc)
        struct btrfs_key key;
        struct btrfs_file_extent_item *fi;
        int (*process_func)(struct btrfs_trans_handle *trans,
                            struct btrfs_root *root,
                            u64, u64, u64, u64, u64, u64);

        ref_root = btrfs_header_owner(buf);
        nritems = btrfs_header_nritems(buf);
        level = btrfs_header_level(buf);

        if (!root->ref_cows && level == 0)

                process_func = btrfs_inc_extent_ref;
                process_func = btrfs_free_extent;

                parent = buf->start;

        for (i = 0; i < nritems; i++) {
                        btrfs_item_key_to_cpu(buf, &key, i);
                        if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
                        fi = btrfs_item_ptr(buf, i,
                                            struct btrfs_file_extent_item);
                        if (btrfs_file_extent_type(buf, fi) ==
                            BTRFS_FILE_EXTENT_INLINE)
                        bytenr = btrfs_file_extent_disk_bytenr(buf, fi);

                        num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
                        key.offset -= btrfs_file_extent_offset(buf, fi);
                        ret = process_func(trans, root, bytenr, num_bytes,
                                           parent, ref_root, key.objectid,

                        bytenr = btrfs_node_blockptr(buf, i);
                        num_bytes = btrfs_level_size(root, level - 1);
                        ret = process_func(trans, root, bytenr, num_bytes,
                                           parent, ref_root, level - 1, 0);

        for (i = 0; i < faili; i++) {
                        btrfs_item_key_to_cpu(buf, &key, i);
                        if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
                        fi = btrfs_item_ptr(buf, i,
                                            struct btrfs_file_extent_item);
                        if (btrfs_file_extent_type(buf, fi) ==
                            BTRFS_FILE_EXTENT_INLINE)
                        disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
                        if (disk_bytenr == 0)
                        err = btrfs_free_extent(trans, root, disk_bytenr,
                                      btrfs_file_extent_disk_num_bytes(buf,
                        bytenr = btrfs_node_blockptr(buf, i);
                        err = btrfs_free_extent(trans, root, bytenr,
                                      btrfs_level_size(root, level - 1), 0);

int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                  struct extent_buffer *buf, int record_parent)
        return __btrfs_mod_ref(trans, root, buf, record_parent, 1);

int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                  struct extent_buffer *buf, int record_parent)
        return __btrfs_mod_ref(trans, root, buf, record_parent, 0);
static int write_one_cache_group(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root,
                                 struct btrfs_path *path,
                                 struct btrfs_block_group_cache *cache)
        struct btrfs_root *extent_root = root->fs_info->extent_root;
        struct extent_buffer *leaf;

        ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);

        leaf = path->nodes[0];
        bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
        write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
        btrfs_mark_buffer_dirty(leaf);
        btrfs_release_path(extent_root, path);

        finish_current_insert(trans, extent_root);
        pending_ret = del_pending_extents(trans, extent_root);

int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root)
        struct extent_io_tree *block_group_cache;
        struct btrfs_block_group_cache *cache;
        struct btrfs_path *path;

        block_group_cache = &root->fs_info->block_group_cache;
        path = btrfs_alloc_path();

                ret = find_first_extent_bit(block_group_cache, last,
                                            &start, &end, BLOCK_GROUP_DIRTY);

                ret = get_state_private(block_group_cache, start, &ptr);

                clear_extent_bits(block_group_cache, start, end,
                                  BLOCK_GROUP_DIRTY, GFP_NOFS);

                cache = (struct btrfs_block_group_cache *)(unsigned long)ptr;
                ret = write_one_cache_group(trans, root, path, cache);
        btrfs_free_path(path);
static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
        struct list_head *head = &info->space_info;
        struct list_head *cur;
        struct btrfs_space_info *found;
        list_for_each(cur, head) {
                found = list_entry(cur, struct btrfs_space_info, list);
                if (found->flags == flags)

static int update_space_info(struct btrfs_fs_info *info, u64 flags,
                             u64 total_bytes, u64 bytes_used,
                             struct btrfs_space_info **space_info)
        struct btrfs_space_info *found;

        found = __find_space_info(info, flags);
                found->total_bytes += total_bytes;
                found->bytes_used += bytes_used;
                WARN_ON(found->total_bytes < found->bytes_used);
                *space_info = found;

        found = kmalloc(sizeof(*found), GFP_NOFS);

        list_add(&found->list, &info->space_info);
        found->flags = flags;
        found->total_bytes = total_bytes;
        found->bytes_used = bytes_used;
        found->bytes_pinned = 0;
        *space_info = found;

static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
        u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
                                   BTRFS_BLOCK_GROUP_RAID1 |
                                   BTRFS_BLOCK_GROUP_DUP);
        if (flags & BTRFS_BLOCK_GROUP_DATA)
                fs_info->avail_data_alloc_bits |= extra_flags;
        if (flags & BTRFS_BLOCK_GROUP_METADATA)
                fs_info->avail_metadata_alloc_bits |= extra_flags;
        if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
                fs_info->avail_system_alloc_bits |= extra_flags;
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
                          struct btrfs_root *extent_root, u64 alloc_bytes,
        struct btrfs_space_info *space_info;

        space_info = __find_space_info(extent_root->fs_info, flags);
                ret = update_space_info(extent_root->fs_info, flags,
        BUG_ON(!space_info);

        if (space_info->full)

        thresh = div_factor(space_info->total_bytes, 7);
        if ((space_info->bytes_used + space_info->bytes_pinned + alloc_bytes) <

        ret = btrfs_alloc_chunk(trans, extent_root, &start, &num_bytes, flags);
        if (ret == -ENOSPC) {
                space_info->full = 1;

        ret = btrfs_make_block_group(trans, extent_root, 0, flags,
                     BTRFS_FIRST_CHUNK_TREE_OBJECTID, start, num_bytes);

static int update_block_group(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root,
                              u64 bytenr, u64 num_bytes, int alloc,
        struct btrfs_block_group_cache *cache;
        struct btrfs_fs_info *info = root->fs_info;
        u64 total = num_bytes;

        /* block accounting for super block */
        old_val = btrfs_super_bytes_used(&info->super_copy);
                old_val += num_bytes;
                old_val -= num_bytes;
        btrfs_set_super_bytes_used(&info->super_copy, old_val);

        /* block accounting for root item */
        old_val = btrfs_root_used(&root->root_item);
                old_val += num_bytes;
                old_val -= num_bytes;
        btrfs_set_root_used(&root->root_item, old_val);

                cache = btrfs_lookup_block_group(info, bytenr);

                byte_in_group = bytenr - cache->key.objectid;
                WARN_ON(byte_in_group > cache->key.offset);
                start = cache->key.objectid;
                end = start + cache->key.offset - 1;
                set_extent_bits(&info->block_group_cache, start, end,
                                BLOCK_GROUP_DIRTY, GFP_NOFS);

                old_val = btrfs_block_group_used(&cache->item);
                num_bytes = min(total, cache->key.offset - byte_in_group);
                        old_val += num_bytes;
                        cache->space_info->bytes_used += num_bytes;
                        old_val -= num_bytes;
                        cache->space_info->bytes_used -= num_bytes;
                                set_extent_dirty(&info->free_space_cache,
                                                 bytenr, bytenr + num_bytes - 1,
                btrfs_set_block_group_used(&cache->item, old_val);
                bytenr += num_bytes;
static int update_pinned_extents(struct btrfs_root *root,
                                 u64 bytenr, u64 num, int pin)
        struct btrfs_block_group_cache *cache;
        struct btrfs_fs_info *fs_info = root->fs_info;

                set_extent_dirty(&fs_info->pinned_extents,
                                 bytenr, bytenr + num - 1, GFP_NOFS);
                clear_extent_dirty(&fs_info->pinned_extents,
                                   bytenr, bytenr + num - 1, GFP_NOFS);

                cache = btrfs_lookup_block_group(fs_info, bytenr);
                len = min(num, cache->key.offset -
                          (bytenr - cache->key.objectid));
                        cache->pinned += len;
                        cache->space_info->bytes_pinned += len;
                        fs_info->total_pinned += len;
                        cache->pinned -= len;
                        cache->space_info->bytes_pinned -= len;
                        fs_info->total_pinned -= len;

int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy)
        struct extent_io_tree *pinned_extents = &root->fs_info->pinned_extents;

                ret = find_first_extent_bit(pinned_extents, last,
                                            &start, &end, EXTENT_DIRTY);
                set_extent_dirty(copy, start, end, GFP_NOFS);

int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root,
                               struct extent_io_tree *unpin)
        struct extent_io_tree *free_space_cache;
        free_space_cache = &root->fs_info->free_space_cache;

                ret = find_first_extent_bit(unpin, 0, &start, &end,

                update_pinned_extents(root, start, end + 1 - start, 0);
                clear_extent_dirty(unpin, start, end, GFP_NOFS);
                set_extent_dirty(free_space_cache, start, end, GFP_NOFS);
static int finish_current_insert(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *extent_root)
        struct btrfs_fs_info *info = extent_root->fs_info;
        struct btrfs_path *path;
        struct pending_extent_op *extent_op;
        struct btrfs_key key;

        path = btrfs_alloc_path();

                ret = find_first_extent_bit(&info->extent_ins, 0, &start,
                                            &end, EXTENT_LOCKED);

                ret = get_state_private(&info->extent_ins, start, &priv);

                extent_op = (struct pending_extent_op *)(unsigned long)priv;

                if (extent_op->type == PENDING_EXTENT_INSERT) {
                        key.objectid = start;
                        key.offset = end + 1 - start;
                        key.type = BTRFS_EXTENT_ITEM_KEY;
                        ret = alloc_reserved_tree_block(trans, extent_root,
                                        extent_root->root_key.objectid,
                                        extent_op->level, &key);

                clear_extent_bits(&info->extent_ins, start, end, EXTENT_LOCKED,

        btrfs_free_path(path);

static int pin_down_bytes(struct btrfs_trans_handle *trans,
                          struct btrfs_root *root,
                          u64 bytenr, u64 num_bytes, int is_data)
        struct extent_buffer *buf;

        buf = btrfs_find_tree_block(root, bytenr, num_bytes);

        /* we can reuse a block if it hasn't been written
         * and it is from this transaction.  We can't
         * reuse anything from the tree log root because
         * it has tiny sub-transactions.
         */
        if (btrfs_buffer_uptodate(buf, 0)) {
                u64 header_owner = btrfs_header_owner(buf);
                u64 header_transid = btrfs_header_generation(buf);
                if (header_owner != BTRFS_TREE_LOG_OBJECTID &&
                    header_transid == trans->transid &&
                    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
                        clean_tree_block(NULL, root, buf);
                        free_extent_buffer(buf);
        free_extent_buffer(buf);

        update_pinned_extents(root, bytenr, num_bytes, 1);
/*
 * remove an extent from the root, returns 0 on success
 */
static int __free_extent(struct btrfs_trans_handle *trans,
                         struct btrfs_root *root,
                         u64 bytenr, u64 num_bytes, u64 parent,
                         u64 root_objectid, u64 owner_objectid,
                         u64 owner_offset, int refs_to_drop)
        struct btrfs_key key;
        struct btrfs_path *path;
        struct btrfs_extent_ops *ops = root->fs_info->extent_ops;
        struct btrfs_root *extent_root = root->fs_info->extent_root;
        struct extent_buffer *leaf;
        struct btrfs_extent_item *ei;
        struct btrfs_extent_inline_ref *iref;
        int extent_slot = 0;
        int found_extent = 0;

        path = btrfs_alloc_path();

        path->leave_spinning = 1;

        is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
        BUG_ON(!is_data && refs_to_drop != 1);

        ret = lookup_extent_backref(trans, extent_root, path, &iref,
                                    bytenr, num_bytes, parent,
                                    root_objectid, owner_objectid,
                extent_slot = path->slots[0];
                while (extent_slot >= 0) {
                        btrfs_item_key_to_cpu(path->nodes[0], &key,
                        if (key.objectid != bytenr)
                        if (key.type == BTRFS_EXTENT_ITEM_KEY &&
                            key.offset == num_bytes) {
                        if (path->slots[0] - extent_slot > 5)
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
                if (found_extent && item_size < sizeof(*ei))
                if (!found_extent) {
                        ret = remove_extent_backref(trans, extent_root, path,
                        btrfs_release_path(extent_root, path);
                        path->leave_spinning = 1;

                        key.objectid = bytenr;
                        key.type = BTRFS_EXTENT_ITEM_KEY;
                        key.offset = num_bytes;

                        ret = btrfs_search_slot(trans, extent_root,
                                printk(KERN_ERR "umm, got %d back from search"
                                       ", was looking for %llu\n", ret,
                                       (unsigned long long)bytenr);
                                btrfs_print_leaf(extent_root, path->nodes[0]);
                        extent_slot = path->slots[0];
                btrfs_print_leaf(extent_root, path->nodes[0]);
                printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
                       "parent %llu root %llu owner %llu offset %llu\n",
                       (unsigned long long)bytenr,
                       (unsigned long long)parent,
                       (unsigned long long)root_objectid,
                       (unsigned long long)owner_objectid,
                       (unsigned long long)owner_offset);

        leaf = path->nodes[0];
        item_size = btrfs_item_size_nr(leaf, extent_slot);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        if (item_size < sizeof(*ei)) {
                BUG_ON(found_extent || extent_slot != path->slots[0]);
                ret = convert_extent_item_v0(trans, extent_root, path,

                btrfs_release_path(extent_root, path);
                path->leave_spinning = 1;

                key.objectid = bytenr;
                key.type = BTRFS_EXTENT_ITEM_KEY;
                key.offset = num_bytes;

                ret = btrfs_search_slot(trans, extent_root, &key, path,
                        printk(KERN_ERR "umm, got %d back from search"
                               ", was looking for %llu\n", ret,
                               (unsigned long long)bytenr);
                        btrfs_print_leaf(extent_root, path->nodes[0]);
                extent_slot = path->slots[0];
                leaf = path->nodes[0];
                item_size = btrfs_item_size_nr(leaf, extent_slot);

        BUG_ON(item_size < sizeof(*ei));
        ei = btrfs_item_ptr(leaf, extent_slot,
                            struct btrfs_extent_item);
        if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
                struct btrfs_tree_block_info *bi;
                BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
                bi = (struct btrfs_tree_block_info *)(ei + 1);
                WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));

        refs = btrfs_extent_refs(leaf, ei);
        BUG_ON(refs < refs_to_drop);
        refs -= refs_to_drop;

                /*
                 * In the case of inline back ref, reference count will
                 * be updated by remove_extent_backref
                 */
                        BUG_ON(!found_extent);
                        btrfs_set_extent_refs(leaf, ei, refs);
                        btrfs_mark_buffer_dirty(leaf);
                        ret = remove_extent_backref(trans, extent_root, path,

                        BUG_ON(is_data && refs_to_drop !=
                               extent_data_ref_count(root, path, iref));
                                BUG_ON(path->slots[0] != extent_slot);
                                BUG_ON(path->slots[0] != extent_slot + 1);
                                path->slots[0] = extent_slot;

                if (ops && ops->free_extent) {
                        ret = ops->free_extent(root, bytenr, num_bytes);

                        ret = pin_down_bytes(trans, root, bytenr, num_bytes,

                ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
                btrfs_release_path(extent_root, path);

                        ret = btrfs_del_csums(trans, root, bytenr, num_bytes);

                ret = update_block_group(trans, root, bytenr, num_bytes, 0,
        btrfs_free_path(path);
        finish_current_insert(trans, extent_root);
/*
 * find all the blocks marked as pending in the radix tree and remove
 * them from the extent map
 */
static int del_pending_extents(struct btrfs_trans_handle *trans,
                               struct btrfs_root *extent_root)
        struct extent_io_tree *pending_del;
        struct extent_io_tree *extent_ins;
        struct pending_extent_op *extent_op;

        extent_ins = &extent_root->fs_info->extent_ins;
        pending_del = &extent_root->fs_info->pending_del;

                ret = find_first_extent_bit(pending_del, 0, &start, &end,

                ret = get_state_private(pending_del, start, &priv);

                extent_op = (struct pending_extent_op *)(unsigned long)priv;

                clear_extent_bits(pending_del, start, end, EXTENT_LOCKED,

                if (!test_range_bit(extent_ins, start, end,
                                    EXTENT_LOCKED, 0)) {
                        ret = __free_extent(trans, extent_root,
                                            start, end + 1 - start, 0,
                                            extent_root->root_key.objectid,
                                            extent_op->level, 0, 1);
                        ret = get_state_private(extent_ins, start, &priv);
                        extent_op = (struct pending_extent_op *)
                                                        (unsigned long)priv;

                        clear_extent_bits(extent_ins, start, end,
                                          EXTENT_LOCKED, GFP_NOFS);

                        if (extent_op->type == PENDING_BACKREF_UPDATE)
/*
 * remove an extent from the root, returns 0 on success
 */
int btrfs_free_extent(struct btrfs_trans_handle *trans,
                      struct btrfs_root *root,
                      u64 bytenr, u64 num_bytes, u64 parent,
                      u64 root_objectid, u64 owner, u64 offset)
        struct btrfs_root *extent_root = root->fs_info->extent_root;

        WARN_ON(num_bytes < root->sectorsize);
        if (root == extent_root) {
                struct pending_extent_op *extent_op;

                extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);

                extent_op->type = PENDING_EXTENT_DELETE;
                extent_op->bytenr = bytenr;
                extent_op->num_bytes = num_bytes;
                extent_op->level = (int)owner;

                set_extent_bits(&root->fs_info->pending_del,
                                bytenr, bytenr + num_bytes - 1,
                                EXTENT_LOCKED, GFP_NOFS);
                set_state_private(&root->fs_info->pending_del,
                                  bytenr, (unsigned long)extent_op);
        ret = __free_extent(trans, root, bytenr, num_bytes, parent,
                            root_objectid, owner, offset, 1);
        pending_ret = del_pending_extents(trans, root->fs_info->extent_root);
        return ret ? ret : pending_ret;
static u64 stripe_align(struct btrfs_root *root, u64 val)
        u64 mask = ((u64)root->stripesize - 1);
        u64 ret = (val + mask) & ~mask;

/*
 * walks the btree of allocated extents and find a hole of a given size.
 * The key ins is changed to record the hole:
 * ins->objectid == block start
 * ins->flags = BTRFS_EXTENT_ITEM_KEY
 * ins->offset == number of blocks
 * Any available blocks before search_start are skipped.
 */
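/*
 * Illustrative example (values invented for this sketch, not taken from
 * the original source): after a successful search for a 16KiB hole found
 * at byte 1048576, the caller would see
 *
 *   ins->objectid == 1048576
 *   ins->flags    == BTRFS_EXTENT_ITEM_KEY
 *   ins->offset   == 16384
 */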
2371 static int noinline find_free_extent(struct btrfs_trans_handle *trans,
2372 struct btrfs_root *orig_root,
2373 u64 num_bytes, u64 empty_size,
2374 u64 search_start, u64 search_end,
2375 u64 hint_byte, struct btrfs_key *ins,
2376 u64 exclude_start, u64 exclude_nr,
2380 u64 orig_search_start = search_start;
2381 struct btrfs_root * root = orig_root->fs_info->extent_root;
2382 struct btrfs_fs_info *info = root->fs_info;
2383 u64 total_needed = num_bytes;
2384 struct btrfs_block_group_cache *block_group;
2388 WARN_ON(num_bytes < root->sectorsize);
2389 btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
2392 block_group = btrfs_lookup_first_block_group(info, hint_byte);
2394 hint_byte = search_start;
2395 block_group = btrfs_find_block_group(root, block_group,
2396 hint_byte, data, 1);
2398 block_group = btrfs_find_block_group(root,
2400 search_start, data, 1);
2403 total_needed += empty_size;
2407 block_group = btrfs_lookup_first_block_group(info,
2410 block_group = btrfs_lookup_first_block_group(info,
2413 ret = find_search_start(root, &block_group, &search_start,
2414 total_needed, data);
2418 search_start = stripe_align(root, search_start);
2419 ins->objectid = search_start;
2420 ins->offset = num_bytes;
2422 if (ins->objectid + num_bytes >
2423 block_group->key.objectid + block_group->key.offset) {
2424 search_start = block_group->key.objectid +
2425 block_group->key.offset;
2429 if (test_range_bit(&info->extent_ins, ins->objectid,
2430 ins->objectid + num_bytes -1, EXTENT_LOCKED, 0)) {
2431 search_start = ins->objectid + num_bytes;
2435 if (test_range_bit(&info->pinned_extents, ins->objectid,
2436 ins->objectid + num_bytes -1, EXTENT_DIRTY, 0)) {
2437 search_start = ins->objectid + num_bytes;
2441 if (exclude_nr > 0 && (ins->objectid + num_bytes > exclude_start &&
2442 ins->objectid < exclude_start + exclude_nr)) {
2443 search_start = exclude_start + exclude_nr;
2447 if (!(data & BTRFS_BLOCK_GROUP_DATA)) {
2448 block_group = btrfs_lookup_block_group(info, ins->objectid);
2450 trans->block_group = block_group;
2452 ins->offset = num_bytes;
2456 block_group = btrfs_lookup_first_block_group(info, search_start);
2458 search_start = orig_search_start;
2465 total_needed -= empty_size;
2471 block_group = btrfs_find_block_group(root, block_group,
2472 search_start, data, 0);
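/*
 * Roughly, find_free_extent() is a hinted linear scan: start from the block
 * group covering hint_byte, ask find_search_start() for a candidate offset,
 * stripe-align it, then reject the candidate if it runs past the end of the
 * block group, overlaps a pending insert in extent_ins, overlaps a pinned
 * extent, or falls inside the caller's exclude window.  Every rejection
 * bumps search_start and retries; once a group is exhausted the search
 * moves on, eventually restarting from orig_search_start with empty_size
 * dropped before giving up.
 */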
2479 static int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
2480 struct btrfs_root *root,
2481 u64 num_bytes, u64 empty_size,
2482 u64 hint_byte, u64 search_end,
2483 struct btrfs_key *ins, int data)
2486 u64 search_start = 0;
2488 struct btrfs_fs_info *info = root->fs_info;
2490 if (info->extent_ops) {
2491 struct btrfs_extent_ops *ops = info->extent_ops;
2492 ret = ops->alloc_extent(root, num_bytes, hint_byte, ins);
2498 alloc_profile = info->avail_data_alloc_bits &
2499 info->data_alloc_profile;
2500 data = BTRFS_BLOCK_GROUP_DATA | alloc_profile;
2501 } else if ((info->system_allocs > 0 || root == info->chunk_root) &&
2502 info->system_allocs >= 0) {
2503 alloc_profile = info->avail_system_alloc_bits &
2504 info->system_alloc_profile;
2505 data = BTRFS_BLOCK_GROUP_SYSTEM | alloc_profile;
2507 alloc_profile = info->avail_metadata_alloc_bits &
2508 info->metadata_alloc_profile;
2509 data = BTRFS_BLOCK_GROUP_METADATA | alloc_profile;
2512 if (root->ref_cows) {
2513 if (!(data & BTRFS_BLOCK_GROUP_METADATA)) {
2514 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
2516 BTRFS_BLOCK_GROUP_METADATA);
2519 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
2520 num_bytes + 2 * 1024 * 1024, data);
2524 WARN_ON(num_bytes < root->sectorsize);
2525 ret = find_free_extent(trans, root, num_bytes, empty_size,
2526 search_start, search_end, hint_byte, ins,
2527 trans->alloc_exclude_start,
2528 trans->alloc_exclude_nr, data);
2531 clear_extent_dirty(&root->fs_info->free_space_cache,
2532 ins->objectid, ins->objectid + ins->offset - 1,
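/*
 * Rough flow of btrfs_reserve_extent(): if the caller installed custom
 * extent_ops, the allocation is delegated to ops->alloc_extent().
 * Otherwise an allocation profile is chosen for data, system or metadata
 * from the avail_*_alloc_bits masks, a chunk may be pre-allocated (with a
 * 2MB cushion on top of num_bytes) for ref-counted roots, find_free_extent()
 * does the actual search, and the winning range is finally cleared from the
 * free_space_cache.
 */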
2537 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
2538 struct btrfs_root *root,
2539 u64 root_objectid, u64 generation,
2540 u64 flags, struct btrfs_disk_key *key,
2541 int level, struct btrfs_key *ins)
2544 struct btrfs_fs_info *fs_info = root->fs_info;
2545 struct btrfs_extent_item *extent_item;
2546 struct btrfs_tree_block_info *block_info;
2547 struct btrfs_extent_inline_ref *iref;
2548 struct btrfs_path *path;
2549 struct extent_buffer *leaf;
2550 u32 size = sizeof(*extent_item) + sizeof(*block_info) + sizeof(*iref);
2552 path = btrfs_alloc_path();
2555 path->leave_spinning = 1;
2556 ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
2560 leaf = path->nodes[0];
2561 extent_item = btrfs_item_ptr(leaf, path->slots[0],
2562 struct btrfs_extent_item);
2563 btrfs_set_extent_refs(leaf, extent_item, 1);
2564 btrfs_set_extent_generation(leaf, extent_item, generation);
2565 btrfs_set_extent_flags(leaf, extent_item,
2566 flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
2567 block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
2569 btrfs_set_tree_block_key(leaf, block_info, key);
2570 btrfs_set_tree_block_level(leaf, block_info, level);
2572 iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
2573 btrfs_set_extent_inline_ref_type(leaf, iref, BTRFS_TREE_BLOCK_REF_KEY);
2574 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
2576 btrfs_mark_buffer_dirty(leaf);
2577 btrfs_free_path(path);
2579 ret = update_block_group(trans, root, ins->objectid, ins->offset,
2582 printk(KERN_ERR "btrfs update block group failed for %llu "
2583 "%llu\n", (unsigned long long)ins->objectid,
2584 (unsigned long long)ins->offset);
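/*
 * The item written above packs three structures back to back inside one
 * extent item:
 *
 *   struct btrfs_extent_item         refs, generation, TREE_BLOCK flag
 *   struct btrfs_tree_block_info     first key and level of the new block
 *   struct btrfs_extent_inline_ref   TREE_BLOCK_REF pointing at root_objectid
 *
 * which is why size is computed as the sum of the three sizeofs before
 * btrfs_insert_empty_item() is called.
 */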
2590 static int alloc_tree_block(struct btrfs_trans_handle *trans,
2591 struct btrfs_root *root, u64 num_bytes,
2592 u64 root_objectid, u64 generation,
2593 u64 flags, struct btrfs_disk_key *key,
2594 int level, u64 empty_size, u64 hint_byte,
2595 u64 search_end, struct btrfs_key *ins)
2598 ret = btrfs_reserve_extent(trans, root, num_bytes, empty_size,
2599 hint_byte, search_end, ins, 0);
2602 if (root_objectid == BTRFS_EXTENT_TREE_OBJECTID) {
2603 struct pending_extent_op *extent_op;
2605 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
2608 extent_op->type = PENDING_EXTENT_INSERT;
2609 extent_op->bytenr = ins->objectid;
2610 extent_op->num_bytes = ins->offset;
2611 extent_op->level = level;
2612 extent_op->flags = flags;
2613 memcpy(&extent_op->key, key, sizeof(*key));
2615 set_extent_bits(&root->fs_info->extent_ins, ins->objectid,
2616 ins->objectid + ins->offset - 1,
2617 EXTENT_LOCKED, GFP_NOFS);
2618 set_state_private(&root->fs_info->extent_ins,
2619 ins->objectid, (unsigned long)extent_op);
2621 ret = alloc_reserved_tree_block(trans, root, root_objectid,
2624 finish_current_insert(trans, root->fs_info->extent_root);
2625 del_pending_extents(trans, root->fs_info->extent_root);
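/*
 * As with frees, the extent item for a block that belongs to the extent
 * tree itself cannot be inserted while that tree is being modified, so the
 * reservation details are copied into a PENDING_EXTENT_INSERT op hung off
 * the extent_ins tree and replayed by finish_current_insert().  Blocks for
 * every other tree get their extent item written immediately through
 * alloc_reserved_tree_block().
 */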
2631 * helper function to allocate a block for a given tree
2632 * returns the tree buffer or NULL.
2634 struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
2635 struct btrfs_root *root,
2636 u32 blocksize, u64 root_objectid,
2637 struct btrfs_disk_key *key, int level,
2638 u64 hint, u64 empty_size)
2640 struct btrfs_key ins;
2642 struct extent_buffer *buf;
2644 ret = alloc_tree_block(trans, root, blocksize, root_objectid,
2645 trans->transid, 0, key, level,
2646 empty_size, hint, (u64)-1, &ins);
2649 return ERR_PTR(ret);
2652 buf = btrfs_find_create_tree_block(root, ins.objectid, blocksize);
2654 btrfs_free_extent(trans, root, ins.objectid, ins.offset,
2655 0, root->root_key.objectid, level, 0);
2657 return ERR_PTR(-ENOMEM);
2659 btrfs_set_buffer_uptodate(buf);
2660 trans->blocks_used++;
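/*
 * A typical caller (illustrative, with made-up locals) would look roughly
 * like this when COWing a node of a subvolume tree:
 *
 *   struct extent_buffer *cow;
 *
 *   cow = btrfs_alloc_free_block(trans, root, buf->len,
 *                                root->root_key.objectid,
 *                                &disk_key, level, buf->start, 0);
 *   if (IS_ERR(cow))
 *       return PTR_ERR(cow);
 *
 * If the in-memory buffer for the freshly reserved block cannot be created,
 * the reservation is undone with btrfs_free_extent() and ERR_PTR(-ENOMEM)
 * is returned.
 */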
2667 static int noinline drop_leaf_ref(struct btrfs_trans_handle *trans,
2668 struct btrfs_root *root,
2669 struct extent_buffer *leaf)
2672 u64 leaf_generation;
2673 struct btrfs_key key;
2674 struct btrfs_file_extent_item *fi;
2679 BUG_ON(!btrfs_is_leaf(leaf));
2680 nritems = btrfs_header_nritems(leaf);
2681 leaf_owner = btrfs_header_owner(leaf);
2682 leaf_generation = btrfs_header_generation(leaf);
2684 for (i = 0; i < nritems; i++) {
2687 btrfs_item_key_to_cpu(leaf, &key, i);
2688 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2690 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
2691 if (btrfs_file_extent_type(leaf, fi) ==
2692 BTRFS_FILE_EXTENT_INLINE)
2695 * FIXME make sure to insert a trans record that
2696 * repeats the snapshot del on crash
2698 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
2699 if (disk_bytenr == 0)
2701 ret = btrfs_free_extent(trans, root, disk_bytenr,
2702 btrfs_file_extent_disk_num_bytes(leaf, fi),
2703 leaf->start, leaf_owner, leaf_generation,
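/*
 * drop_leaf_ref() scans every item in a leaf that is about to be released
 * and drops one reference on the data extent behind each regular
 * (non-inline) file extent item.  A disk_bytenr of 0 marks a hole and is
 * skipped; the leaf's owner and generation are passed along so the data
 * extent's backref can be found.
 */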
2710 static void noinline reada_walk_down(struct btrfs_root *root,
2711 struct extent_buffer *node,
2724 nritems = btrfs_header_nritems(node);
2725 level = btrfs_header_level(node);
2729 for (i = slot; i < nritems && skipped < 32; i++) {
2730 bytenr = btrfs_node_blockptr(node, i);
2731 if (last && ((bytenr > last && bytenr - last > 32 * 1024) ||
2732 (last > bytenr && last - bytenr > 32 * 1024))) {
2736 blocksize = btrfs_level_size(root, level - 1);
2738 ret = btrfs_lookup_extent_ref(NULL, root, bytenr,
2746 mutex_unlock(&root->fs_info->fs_mutex);
2747 ret = readahead_tree_block(root, bytenr, blocksize,
2748 btrfs_node_ptr_generation(node, i));
2749 last = bytenr + blocksize;
2751 mutex_lock(&root->fs_info->fs_mutex);
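/*
 * The readahead above appears deliberately conservative: child pointers are
 * only prefetched while they stay within 32K of the previously read block,
 * and the loop stops once 32 pointers have been skipped, on the assumption
 * that the children are too scattered for readahead to pay off.  fs_mutex
 * is dropped around the readahead_tree_block() calls themselves.
 */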
2758 * helper function for drop_snapshot, this walks down the tree dropping ref
2759 * counts as it goes.
2761 static int noinline walk_down_tree(struct btrfs_trans_handle *trans,
2762 struct btrfs_root *root,
2763 struct btrfs_path *path, int *level)
2769 struct extent_buffer *next;
2770 struct extent_buffer *cur;
2771 struct extent_buffer *parent;
2776 WARN_ON(*level < 0);
2777 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2778 ret = btrfs_lookup_extent_ref(trans, root,
2779 path->nodes[*level]->start,
2780 path->nodes[*level]->len, &refs);
2786 * walk down to the last node level and free all the leaves
2788 while(*level >= 0) {
2789 WARN_ON(*level < 0);
2790 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2791 cur = path->nodes[*level];
2793 if (btrfs_header_level(cur) != *level)
2796 if (path->slots[*level] >=
2797 btrfs_header_nritems(cur))
2800 ret = drop_leaf_ref(trans, root, cur);
2804 bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
2805 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
2806 blocksize = btrfs_level_size(root, *level - 1);
2807 ret = btrfs_lookup_extent_ref(trans, root, bytenr, blocksize,
2811 parent = path->nodes[*level];
2812 root_owner = btrfs_header_owner(parent);
2813 root_gen = btrfs_header_generation(parent);
2814 path->slots[*level]++;
2815 ret = btrfs_free_extent(trans, root, bytenr, blocksize,
2816 parent->start, root_owner,
2817 root_gen, *level - 1, 1);
2821 next = btrfs_find_tree_block(root, bytenr, blocksize);
2822 if (!next || !btrfs_buffer_uptodate(next, ptr_gen)) {
2823 free_extent_buffer(next);
2824 reada_walk_down(root, cur, path->slots[*level]);
2825 mutex_unlock(&root->fs_info->fs_mutex);
2826 next = read_tree_block(root, bytenr, blocksize,
2828 mutex_lock(&root->fs_info->fs_mutex);
2830 WARN_ON(*level <= 0);
2831 if (path->nodes[*level-1])
2832 free_extent_buffer(path->nodes[*level-1]);
2833 path->nodes[*level-1] = next;
2834 *level = btrfs_header_level(next);
2835 path->slots[*level] = 0;
2838 WARN_ON(*level < 0);
2839 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2841 if (path->nodes[*level] == root->node) {
2842 root_owner = root->root_key.objectid;
2843 parent = path->nodes[*level];
2845 parent = path->nodes[*level + 1];
2846 root_owner = btrfs_header_owner(parent);
2849 root_gen = btrfs_header_generation(parent);
2850 ret = btrfs_free_extent(trans, root, path->nodes[*level]->start,
2851 path->nodes[*level]->len, parent->start,
2852 root_owner, root_gen, *level, 1);
2853 free_extent_buffer(path->nodes[*level]);
2854 path->nodes[*level] = NULL;
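/*
 * Roughly, walk_down_tree() descends one level at a time.  Before following
 * a child pointer it looks up the child's reference count; a shared child
 * only needs its reference dropped here, while a child owned solely by this
 * tree is read in (with readahead) and descended into so its own children
 * are freed first.  When the bottom of a subtree is reached the current
 * node's extent is freed and the walk pops back up a level.
 */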
2861 * helper for dropping snapshots. This walks back up the tree in the path
2862 * to find the first node higher up where we haven't yet gone through
2865 static int noinline walk_up_tree(struct btrfs_trans_handle *trans,
2866 struct btrfs_root *root,
2867 struct btrfs_path *path, int *level)
2871 struct btrfs_root_item *root_item = &root->root_item;
2876 for(i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
2877 slot = path->slots[i];
2878 if (slot < btrfs_header_nritems(path->nodes[i]) - 1) {
2879 struct extent_buffer *node;
2880 struct btrfs_disk_key disk_key;
2881 node = path->nodes[i];
2884 WARN_ON(*level == 0);
2885 btrfs_node_key(node, &disk_key, path->slots[i]);
2886 memcpy(&root_item->drop_progress,
2887 &disk_key, sizeof(disk_key));
2888 root_item->drop_level = i;
2891 struct extent_buffer *parent;
2892 if (path->nodes[*level] == root->node)
2893 parent = path->nodes[*level];
2895 parent = path->nodes[*level + 1];
2897 root_owner = btrfs_header_owner(parent);
2898 root_gen = btrfs_header_generation(parent);
2899 ret = btrfs_free_extent(trans, root,
2900 path->nodes[*level]->start,
2901 path->nodes[*level]->len,
2902 parent->start, root_owner,
2903 root_gen, *level, 1);
2905 free_extent_buffer(path->nodes[*level]);
2906 path->nodes[*level] = NULL;
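/*
 * On the way back up, the first ancestor that still has unvisited slots is
 * where the walk will resume; its key is saved in root_item->drop_progress
 * together with drop_level so an interrupted snapshot deletion can be
 * restarted from the same point.  Nodes that have been fully consumed are
 * freed with btrfs_free_extent() as the walk climbs past them.
 */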
2914 * drop the reference count on the tree rooted at 'snap'. This traverses
2915 * the tree freeing any blocks that have a ref count of zero after being
2918 int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root
2924 struct btrfs_path *path;
2927 struct btrfs_root_item *root_item = &root->root_item;
2929 path = btrfs_alloc_path();
2932 level = btrfs_header_level(root->node);
2934 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
2935 path->nodes[level] = root->node;
2936 extent_buffer_get(root->node);
2937 path->slots[level] = 0;
2939 struct btrfs_key key;
2940 struct btrfs_disk_key found_key;
2941 struct extent_buffer *node;
2943 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
2944 level = root_item->drop_level;
2945 path->lowest_level = level;
2946 wret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2951 node = path->nodes[level];
2952 btrfs_node_key(node, &found_key, path->slots[level]);
2953 WARN_ON(memcmp(&found_key, &root_item->drop_progress,
2954 sizeof(found_key)));
2957 wret = walk_down_tree(trans, root, path, &level);
2963 wret = walk_up_tree(trans, root, path, &level);
2973 for (i = 0; i <= orig_level; i++) {
2974 if (path->nodes[i]) {
2975 free_extent_buffer(path->nodes[i]);
2976 path->nodes[i] = NULL;
2980 btrfs_free_path(path);
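/*
 * btrfs_drop_snapshot() is therefore restartable: a zero drop_progress key
 * means "start from the root node", otherwise the saved key and drop_level
 * are used to re-seek to where a previous pass stopped (the WARN_ON checks
 * that the key found there still matches).  The walk then alternates
 * walk_down_tree() and walk_up_tree() until the tree is consumed, and any
 * extent buffers still held by the path are released at the end.
 */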
2986 int btrfs_free_block_groups(struct btrfs_fs_info *info)
2993 ret = find_first_extent_bit(&info->block_group_cache, 0,
2994 &start, &end, (unsigned int)-1);
2997 ret = get_state_private(&info->block_group_cache, start, &ptr);
2999 kfree((void *)(unsigned long)ptr);
3000 clear_extent_bits(&info->block_group_cache, start,
3001 end, (unsigned int)-1, GFP_NOFS);
3004 ret = find_first_extent_bit(&info->free_space_cache, 0,
3005 &start, &end, EXTENT_DIRTY);
3008 clear_extent_dirty(&info->free_space_cache, start,
3014 int find_first_block_group(struct btrfs_root *root, struct btrfs_path *path,
3015 struct btrfs_key *key)
3018 struct btrfs_key found_key;
3019 struct extent_buffer *leaf;
3022 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
3026 slot = path->slots[0];
3027 leaf = path->nodes[0];
3028 if (slot >= btrfs_header_nritems(leaf)) {
3029 ret = btrfs_next_leaf(root, path);
3036 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3038 if (found_key.objectid >= key->objectid &&
3039 found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY)
3048 int btrfs_read_block_groups(struct btrfs_root *root)
3050 struct btrfs_path *path;
3053 struct btrfs_block_group_cache *cache;
3054 struct btrfs_fs_info *info = root->fs_info;
3055 struct btrfs_space_info *space_info;
3056 struct extent_io_tree *block_group_cache;
3057 struct btrfs_key key;
3058 struct btrfs_key found_key;
3059 struct extent_buffer *leaf;
3061 block_group_cache = &info->block_group_cache;
3063 root = info->extent_root;
3066 btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
3067 path = btrfs_alloc_path();
3072 ret = find_first_block_group(root, path, &key);
3080 leaf = path->nodes[0];
3081 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3082 cache = kzalloc(sizeof(*cache), GFP_NOFS);
3088 read_extent_buffer(leaf, &cache->item,
3089 btrfs_item_ptr_offset(leaf, path->slots[0]),
3090 sizeof(cache->item));
3091 memcpy(&cache->key, &found_key, sizeof(found_key));
3094 key.objectid = found_key.objectid + found_key.offset;
3095 btrfs_release_path(root, path);
3096 cache->flags = btrfs_block_group_flags(&cache->item);
3098 if (cache->flags & BTRFS_BLOCK_GROUP_DATA) {
3099 bit = BLOCK_GROUP_DATA;
3100 } else if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
3101 bit = BLOCK_GROUP_SYSTEM;
3102 } else if (cache->flags & BTRFS_BLOCK_GROUP_METADATA) {
3103 bit = BLOCK_GROUP_METADATA;
3105 set_avail_alloc_bits(info, cache->flags);
3106 if (btrfs_chunk_readonly(root, cache->key.objectid))
3109 ret = update_space_info(info, cache->flags, found_key.offset,
3110 btrfs_block_group_used(&cache->item),
3113 cache->space_info = space_info;
3115 /* use EXTENT_LOCKED to prevent merging */
3116 set_extent_bits(block_group_cache, found_key.objectid,
3117 found_key.objectid + found_key.offset - 1,
3118 bit | EXTENT_LOCKED, GFP_NOFS);
3119 set_state_private(block_group_cache, found_key.objectid,
3120 (unsigned long)cache);
3124 btrfs_free_path(path);
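/*
 * Block groups are cached in info->block_group_cache, an extent_io_tree in
 * which each group's byte range carries one of the BLOCK_GROUP_* bits for
 * its type plus EXTENT_LOCKED (so adjacent ranges are not merged), and the
 * struct btrfs_block_group_cache pointer is stored as the range's state
 * private.  Later lookups such as btrfs_lookup_block_group() resolve a
 * bytenr through this tree.
 */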
3128 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
3129 struct btrfs_root *root, u64 bytes_used,
3130 u64 type, u64 chunk_objectid, u64 chunk_offset,
3135 struct btrfs_root *extent_root;
3136 struct btrfs_block_group_cache *cache;
3137 struct extent_io_tree *block_group_cache;
3139 extent_root = root->fs_info->extent_root;
3140 block_group_cache = &root->fs_info->block_group_cache;
3142 cache = kzalloc(sizeof(*cache), GFP_NOFS);
3144 cache->key.objectid = chunk_offset;
3145 cache->key.offset = size;
3147 btrfs_set_key_type(&cache->key, BTRFS_BLOCK_GROUP_ITEM_KEY);
3148 btrfs_set_block_group_used(&cache->item, bytes_used);
3149 btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
3150 cache->flags = type;
3151 btrfs_set_block_group_flags(&cache->item, type);
3153 ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
3154 &cache->space_info);
3157 bit = block_group_state_bits(type);
3158 set_extent_bits(block_group_cache, chunk_offset,
3159 chunk_offset + size - 1,
3160 bit | EXTENT_LOCKED, GFP_NOFS);
3162 set_state_private(block_group_cache, chunk_offset,
3163 (unsigned long)cache);
3164 ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
3165 sizeof(cache->item));
3168 finish_current_insert(trans, extent_root);
3169 ret = del_pending_extents(trans, extent_root);
3171 set_avail_alloc_bits(extent_root->fs_info, type);
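/*
 * btrfs_make_block_group() does for a brand new chunk what
 * btrfs_read_block_groups() does for existing ones: build the in-memory
 * cache entry, account the space in the matching space_info, tag the byte
 * range in block_group_cache, and only then insert the block group item
 * into the extent tree, flushing any pending extent work afterwards.
 */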
3176 * This is for converter use only.
3178 * In that case, we don't know where the free blocks are located.
3179 * Therefore all block group cache entries must be setup properly
3180 * before doing any block allocation.
3182 int btrfs_make_block_groups(struct btrfs_trans_handle *trans,
3183 struct btrfs_root *root)
3191 u64 total_metadata = 0;
3195 struct btrfs_root *extent_root;
3196 struct btrfs_block_group_cache *cache;
3197 struct extent_io_tree *block_group_cache;
3199 extent_root = root->fs_info->extent_root;
3200 block_group_cache = &root->fs_info->block_group_cache;
3201 chunk_objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3202 total_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
3203 group_align = 64 * root->sectorsize;
3206 while (cur_start < total_bytes) {
3207 group_size = total_bytes / 12;
3208 group_size = min_t(u64, group_size, total_bytes - cur_start);
3209 if (cur_start == 0) {
3210 bit = BLOCK_GROUP_SYSTEM;
3211 group_type = BTRFS_BLOCK_GROUP_SYSTEM;
3213 group_size &= ~(group_align - 1);
3214 group_size = max_t(u64, group_size, 8 * 1024 * 1024);
3215 group_size = min_t(u64, group_size, 32 * 1024 * 1024);
3217 group_size &= ~(group_align - 1);
3218 if (total_data >= total_metadata * 2) {
3219 group_type = BTRFS_BLOCK_GROUP_METADATA;
3220 group_size = min_t(u64, group_size,
3221 1ULL * 1024 * 1024 * 1024);
3222 total_metadata += group_size;
3224 group_type = BTRFS_BLOCK_GROUP_DATA;
3225 group_size = min_t(u64, group_size,
3226 5ULL * 1024 * 1024 * 1024);
3227 total_data += group_size;
3229 if ((total_bytes - cur_start) * 4 < group_size * 5)
3230 group_size = total_bytes - cur_start;
3233 cache = kzalloc(sizeof(*cache), GFP_NOFS);
3236 cache->key.objectid = cur_start;
3237 cache->key.offset = group_size;
3238 btrfs_set_key_type(&cache->key, BTRFS_BLOCK_GROUP_ITEM_KEY);
3240 btrfs_set_block_group_used(&cache->item, 0);
3241 btrfs_set_block_group_chunk_objectid(&cache->item,
3243 btrfs_set_block_group_flags(&cache->item, group_type);
3245 cache->flags = group_type;
3247 ret = update_space_info(root->fs_info, group_type, group_size,
3248 0, &cache->space_info);
3250 set_avail_alloc_bits(extent_root->fs_info, group_type);
3252 set_extent_bits(block_group_cache, cur_start,
3253 cur_start + group_size - 1,
3254 bit | EXTENT_LOCKED, GFP_NOFS);
3255 set_state_private(block_group_cache, cur_start,
3256 (unsigned long)cache);
3257 cur_start += group_size;
3259 /* then insert all the items */
3261 while(cur_start < total_bytes) {
3262 cache = btrfs_lookup_block_group(root->fs_info, cur_start);
3265 ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
3266 sizeof(cache->item));
3269 finish_current_insert(trans, extent_root);
3270 ret = del_pending_extents(trans, extent_root);
3273 cur_start = cache->key.objectid + cache->key.offset;
3278 int btrfs_update_block_group(struct btrfs_trans_handle *trans,
3279 struct btrfs_root *root,
3280 u64 bytenr, u64 num_bytes, int alloc,
3283 return update_block_group(trans, root, bytenr, num_bytes,