/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
23 #include "kerncompat.h"
24 #include "radix-tree.h"
27 #include "print-tree.h"
28 #include "transaction.h"
31 #include "free-space-cache.h"
34 #define PENDING_EXTENT_INSERT 0
35 #define PENDING_EXTENT_DELETE 1
36 #define PENDING_BACKREF_UPDATE 2
struct pending_extent_op {
	int type;
	u64 bytenr;
	u64 num_bytes;
	int level;
	struct btrfs_disk_key key;
};
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     u64 root_objectid, u64 generation,
				     u64 flags, struct btrfs_disk_key *key,
				     int level, struct btrfs_key *ins);
static int __free_extent(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 u64 bytenr, u64 num_bytes, u64 parent,
			 u64 root_objectid, u64 owner_objectid,
			 u64 owner_offset, int refs_to_drop);
static int finish_current_insert(struct btrfs_trans_handle *trans, struct
				 btrfs_root *extent_root);
static int del_pending_extents(struct btrfs_trans_handle *trans, struct
			       btrfs_root *extent_root);
static struct btrfs_block_group_cache *
btrfs_find_block_group(struct btrfs_root *root, struct btrfs_block_group_cache
		       *hint, u64 search_start, int data, int owner);
static int remove_sb_from_cache(struct btrfs_root *root,
				struct btrfs_block_group_cache *cache)
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_io_tree *free_space_cache;

	free_space_cache = &fs_info->free_space_cache;
	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		ret = btrfs_rmap_block(fs_info, cache->key.objectid, bytenr, 0,
				       &logical, &nr, &stripe_len);
		clear_extent_dirty(free_space_cache, logical[nr],
				   logical[nr] + stripe_len - 1);
static int cache_block_group(struct btrfs_root *root,
			     struct btrfs_block_group_cache *block_group)
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct extent_io_tree *free_space_cache;

	root = root->fs_info->extent_root;
	free_space_cache = &root->fs_info->free_space_cache;
	if (block_group->cached)
	path = btrfs_alloc_path();
	last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid < block_group->key.objectid) {
		if (key.objectid >= block_group->key.objectid +
		    block_group->key.offset) {
		if (key.type == BTRFS_EXTENT_ITEM_KEY ||
		    key.type == BTRFS_METADATA_ITEM_KEY) {
			if (key.objectid > last) {
				hole_size = key.objectid - last;
				set_extent_dirty(free_space_cache, last,
						 last + hole_size - 1);
			if (key.type == BTRFS_METADATA_ITEM_KEY)
				last = key.objectid + root->fs_info->nodesize;
				last = key.objectid + key.offset;
	if (block_group->key.objectid +
	    block_group->key.offset > last) {
		hole_size = block_group->key.objectid +
			    block_group->key.offset - last;
		set_extent_dirty(free_space_cache, last, last + hole_size - 1);
	remove_sb_from_cache(root, block_group);
	block_group->cached = 1;
	btrfs_free_path(path);
struct btrfs_block_group_cache *btrfs_lookup_first_block_group(struct
						btrfs_fs_info *info, u64 bytenr)
	struct extent_io_tree *block_group_cache;
	struct btrfs_block_group_cache *block_group = NULL;

	bytenr = max_t(u64, bytenr,
		       BTRFS_SUPER_INFO_OFFSET + BTRFS_SUPER_INFO_SIZE);
	block_group_cache = &info->block_group_cache;
	ret = find_first_extent_bit(block_group_cache,
				    bytenr, &start, &end,
				    BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA |
	ret = get_state_private(block_group_cache, start, &ptr);
	block_group = (struct btrfs_block_group_cache *)(unsigned long)ptr;
struct btrfs_block_group_cache *btrfs_lookup_block_group(struct
						btrfs_fs_info *info, u64 bytenr)
	struct extent_io_tree *block_group_cache;
	struct btrfs_block_group_cache *block_group = NULL;

	block_group_cache = &info->block_group_cache;
	ret = find_first_extent_bit(block_group_cache,
				    bytenr, &start, &end,
				    BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA |
	ret = get_state_private(block_group_cache, start, &ptr);
	block_group = (struct btrfs_block_group_cache *)(unsigned long)ptr;
	if (block_group->key.objectid <= bytenr && bytenr <
	    block_group->key.objectid + block_group->key.offset)
static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
	return (cache->flags & bits) == bits;
static int noinline find_search_start(struct btrfs_root *root,
				      struct btrfs_block_group_cache **cache_ret,
				      u64 *start_ret, int num, int data)
	struct btrfs_block_group_cache *cache = *cache_ret;
	u64 last = *start_ret;
	u64 search_start = *start_ret;

	ret = cache_block_group(root, cache);
	last = max(search_start, cache->key.objectid);
	if (cache->ro || !block_group_bits(cache, data))
	ret = find_first_extent_bit(&root->fs_info->free_space_cache,
				    last, &start, &end, EXTENT_DIRTY);
	start = max(last, start);
	if (last - start < num) {
	if (start + num > cache->key.objectid + cache->key.offset) {
	cache = btrfs_lookup_block_group(root->fs_info, search_start);
		printk("Unable to find block group for %llu\n",
		       (unsigned long long)search_start);
	last = cache->key.objectid + cache->key.offset;
	cache = btrfs_lookup_first_block_group(root->fs_info, last);
static int block_group_state_bits(u64 flags)
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		bits |= BLOCK_GROUP_DATA;
	if (flags & BTRFS_BLOCK_GROUP_METADATA)
		bits |= BLOCK_GROUP_METADATA;
	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		bits |= BLOCK_GROUP_SYSTEM;
static struct btrfs_block_group_cache *
btrfs_find_block_group(struct btrfs_root *root, struct btrfs_block_group_cache
		       *hint, u64 search_start, int data, int owner)
	struct btrfs_block_group_cache *cache;
	struct extent_io_tree *block_group_cache;
	struct btrfs_block_group_cache *found_group = NULL;
	struct btrfs_fs_info *info = root->fs_info;

	block_group_cache = &info->block_group_cache;
	bit = block_group_state_bits(data);
		struct btrfs_block_group_cache *shint;
		shint = btrfs_lookup_block_group(info, search_start);
		if (shint && !shint->ro && block_group_bits(shint, data)) {
			used = btrfs_block_group_used(&shint->item);
			if (used + shint->pinned <
			    div_factor(shint->key.offset, factor)) {
	if (hint && !hint->ro && block_group_bits(hint, data)) {
		used = btrfs_block_group_used(&hint->item);
		if (used + hint->pinned <
		    div_factor(hint->key.offset, factor)) {
		last = hint->key.objectid + hint->key.offset;
		hint_last = max(hint->key.objectid, search_start);
		hint_last = search_start;
		ret = find_first_extent_bit(block_group_cache, last,
		ret = get_state_private(block_group_cache, start, &ptr);
		cache = (struct btrfs_block_group_cache *)(unsigned long)ptr;
		last = cache->key.objectid + cache->key.offset;
		used = btrfs_block_group_used(&cache->item);
		if (!cache->ro && block_group_bits(cache, data)) {
				free_check = cache->key.offset;
				free_check = div_factor(cache->key.offset,
			if (used + cache->pinned < free_check) {
/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs. The implicit back refs are optimized
 * for pointers in non-shared tree blocks. For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key. This information allows us to find the block by
 * b-tree searching. The full back refs are for pointers in tree blocks not
 * referenced by their owner trees. The location of the tree block is recorded
 * in the back refs. Actually the full back refs are generic, and can be
 * used in all cases where the implicit back refs are used. The major
 * shortcoming of the full back refs is their overhead. Every time a tree
 * block gets COWed, we have to update the back refs entry for all pointers
 * in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it. This means most tree related operations only involve
 * implicit back refs. For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it. So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree. Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree. In this case, full back refs are used for pointers
 * in the block. Remove these full back refs, add implicit back refs for
 * every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree. In this case, implicit back refs are used for
 * pointers in the block. Add full back refs for every pointer in the
 * block, increase lower level extents' reference counts. The original
 * implicit back refs are entailed to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree. Add implicit back refs for every pointer in
 * the new block, increase lower level extents' reference count.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent,
 * the key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - Objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, the implicit back refs are used.
 * The fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed by file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of a key. The key offset for the implicit back refs is the
 * objectid of the block's owner tree. The key offset for the full back refs
 * is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * level of the tree block are required. This information is stored in
 * the tree block info structure.
 */
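/*
 * Illustrative sketch, not part of the original source: how an implicit
 * data back ref key would be composed under the rules above.  The helper
 * name example_data_ref_key() is hypothetical; it only wraps
 * hash_extent_data_ref(), which is defined later in this file, so the
 * sketch is kept compiled out.
 */
#if 0
static void example_data_ref_key(struct btrfs_key *key, u64 extent_bytenr,
			u64 root_objectid, u64 inode_objectid, u64 file_offset)
{
	/* objectid: first byte of the extent being referenced */
	key->objectid = extent_bytenr;
	/* type: implicit (keyed) back ref for a file extent */
	key->type = BTRFS_EXTENT_DATA_REF_KEY;
	/* offset: hash of (owner root, inode objectid, original file offset) */
	key->offset = hash_extent_data_ref(root_objectid, inode_objectid,
					   file_offset);
}
#endif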
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  u64 owner, u32 extra_size)
	struct btrfs_extent_item *item;
	struct btrfs_extent_item_v0 *ei0;
	struct btrfs_extent_ref_v0 *ref0;
	struct btrfs_tree_block_info *bi;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u32 new_size = sizeof(*item);

	leaf = path->nodes[0];
	BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	ei0 = btrfs_item_ptr(leaf, path->slots[0],
			     struct btrfs_extent_item_v0);
	refs = btrfs_extent_refs_v0(leaf, ei0);

	if (owner == (u64)-1) {
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key,
		BUG_ON(key.objectid != found_key.objectid);
		if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		owner = btrfs_ref_objectid_v0(leaf, ref0);
	btrfs_release_path(path);

	if (owner < BTRFS_FIRST_FREE_OBJECTID)
		new_size += sizeof(*bi);

	new_size -= sizeof(*ei0);
	ret = btrfs_search_slot(trans, root, &key, path, new_size, 1);

	ret = btrfs_extend_item(root, path, new_size);

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, item, refs);
	/* FIXME: get real generation */
	btrfs_set_extent_generation(leaf, item, 0);
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		btrfs_set_extent_flags(leaf, item,
				       BTRFS_EXTENT_FLAG_TREE_BLOCK |
				       BTRFS_BLOCK_FLAG_FULL_BACKREF);
		bi = (struct btrfs_tree_block_info *)(item + 1);
		/* FIXME: get first key of the block */
		memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
		btrfs_set_tree_block_level(leaf, bi, (int)owner);
		btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
	btrfs_mark_buffer_dirty(leaf);
u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
	u32 high_crc = ~(u32)0;
	u32 low_crc = ~(u32)0;

	lenum = cpu_to_le64(root_objectid);
	high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(owner);
	low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(offset);
	low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));

	return ((u64)high_crc << 31) ^ (u64)low_crc;
static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
				     struct btrfs_extent_data_ref *ref)
	return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
				    btrfs_extent_data_ref_objectid(leaf, ref),
				    btrfs_extent_data_ref_offset(leaf, ref));
static int match_extent_data_ref(struct extent_buffer *leaf,
				 struct btrfs_extent_data_ref *ref,
				 u64 root_objectid, u64 owner, u64 offset)
	if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 owner, u64 offset)
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref;
	struct extent_buffer *leaf;

	key.objectid = bytenr;
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		btrfs_release_path(path);
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != bytenr ||
		    key.type != BTRFS_EXTENT_DATA_REF_KEY)
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);
		if (match_extent_data_ref(leaf, ref, root_objectid,
	btrfs_release_path(path);
static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid, u64 owner,
					   u64 offset, int refs_to_add)
	struct btrfs_key key;
	struct extent_buffer *leaf;

	key.objectid = bytenr;
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		size = sizeof(struct btrfs_shared_data_ref);
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
		size = sizeof(struct btrfs_extent_data_ref);

	ret = btrfs_insert_empty_item(trans, root, path, &key, size);
	if (ret && ret != -EEXIST)

	leaf = path->nodes[0];
		struct btrfs_shared_data_ref *ref;
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_shared_data_ref);
			btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
			num_refs = btrfs_shared_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
		struct btrfs_extent_data_ref *ref;
		while (ret == -EEXIST) {
			ref = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_data_ref);
			if (match_extent_data_ref(leaf, ref, root_objectid,
			btrfs_release_path(path);
			ret = btrfs_insert_empty_item(trans, root, path, &key,
			if (ret && ret != -EEXIST)

			leaf = path->nodes[0];
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);
			btrfs_set_extent_data_ref_root(leaf, ref,
			btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
			btrfs_set_extent_data_ref_offset(leaf, ref, offset);
			btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
			num_refs = btrfs_extent_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);
static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref1 = NULL;
	struct btrfs_shared_data_ref *ref2 = NULL;
	struct extent_buffer *leaf;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);

	BUG_ON(num_refs < refs_to_drop);
	num_refs -= refs_to_drop;

		ret = btrfs_del_item(trans, root, path);
		if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
		else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
			btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
			struct btrfs_extent_ref_v0 *ref0;
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			btrfs_set_ref_count_v0(leaf, ref0, num_refs);
		btrfs_mark_buffer_dirty(leaf);
static noinline u32 extent_data_ref_count(struct btrfs_path *path,
					  struct btrfs_extent_inline_ref *iref)
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref1;
	struct btrfs_shared_data_ref *ref2;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (btrfs_extent_inline_ref_type(leaf, iref) ==
		    BTRFS_EXTENT_DATA_REF_KEY) {
			ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
			num_refs = btrfs_extent_data_ref_count(leaf, ref1);
			ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
			num_refs = btrfs_shared_data_ref_count(leaf, ref2);
	} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
	struct btrfs_key key;

	key.objectid = bytenr;
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (ret == -ENOENT && parent) {
		btrfs_release_path(path);
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
	struct btrfs_key key;

	key.objectid = bytenr;
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
	btrfs_release_path(path);
static inline int extent_ref_type(u64 parent, u64 owner)
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
			type = BTRFS_SHARED_BLOCK_REF_KEY;
			type = BTRFS_TREE_BLOCK_REF_KEY;
			type = BTRFS_SHARED_DATA_REF_KEY;
			type = BTRFS_EXTENT_DATA_REF_KEY;
static int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct btrfs_extent_inline_ref **ref_ret,
					u64 bytenr, u64 num_bytes,
					u64 parent, u64 root_objectid,
					u64 owner, u64 offset, int insert)
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	int skinny_metadata =
		btrfs_fs_incompat(root->fs_info, SKINNY_METADATA);

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;

	want = extent_ref_type(parent, owner);
		extra_size = btrfs_extent_inline_ref_size(want);

	if (owner < BTRFS_FIRST_FREE_OBJECTID && skinny_metadata) {
		skinny_metadata = 1;
		key.type = BTRFS_METADATA_ITEM_KEY;
	} else if (skinny_metadata) {
		skinny_metadata = 0;

	ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);

	/*
	 * We may be a newly converted file system which still has the old fat
	 * extent entries for metadata, so try and see if we have one of those.
	 */
	if (ret > 0 && skinny_metadata) {
		skinny_metadata = 0;
		if (path->slots[0]) {
			btrfs_item_key_to_cpu(path->nodes[0], &key,
			if (key.objectid == bytenr &&
			    key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == num_bytes)
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = num_bytes;
		btrfs_release_path(path);

		printf("Failed to find [%llu, %u, %llu]\n", key.objectid, key.type, key.offset);

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		ret = convert_extent_item_v0(trans, root, path, owner,
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);

	if (item_size < sizeof(*ei)) {
		printf("Size is %u, needs to be %u, slot %d\n",
		       (unsigned)item_size,
		       (unsigned)sizeof(*ei), path->slots[0]);
		btrfs_print_leaf(root, leaf);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
		ptr += sizeof(struct btrfs_tree_block_info);
	} else if (!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) {
		if (!(flags & BTRFS_EXTENT_FLAG_DATA)) {

		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_extent_inline_ref_type(leaf, iref);
			ptr += btrfs_extent_inline_ref_size(type);

		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			struct btrfs_extent_data_ref *dref;
			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			if (match_extent_data_ref(leaf, dref, root_objectid,
			if (hash_extent_data_ref_item(leaf, dref) <
			    hash_extent_data_ref(root_objectid, owner, offset))
			ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
				if (parent == ref_offset) {
				if (ref_offset < parent)
				if (root_objectid == ref_offset) {
				if (ref_offset < root_objectid)
		ptr += btrfs_extent_inline_ref_size(type);
	if (err == -ENOENT && insert) {
		if (item_size + extra_size >=
		    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
		/*
		 * To add new inline back ref, we have to make sure
		 * there is no corresponding back ref item.
		 * For simplicity, we just do not add new inline back
		 * ref if there is any back ref item.
		 */
		if (find_next_key(path, &key) == 0 && key.objectid == bytenr &&
		    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
	*ref_ret = (struct btrfs_extent_inline_ref *)ptr;
static int setup_inline_extent_backref(struct btrfs_root *root,
				       struct btrfs_path *path,
				       struct btrfs_extent_inline_ref *iref,
				       u64 parent, u64 root_objectid,
				       u64 owner, u64 offset, int refs_to_add)
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	unsigned long item_offset;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	item_offset = (unsigned long)iref - (unsigned long)ei;

	type = extent_ref_type(parent, owner);
	size = btrfs_extent_inline_ref_size(type);

	ret = btrfs_extend_item(root, path, size);

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	refs += refs_to_add;
	btrfs_set_extent_refs(leaf, ei, refs);

	ptr = (unsigned long)ei + item_offset;
	end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
	if (ptr < end - size)
		memmove_extent_buffer(leaf, ptr + size, ptr,

	iref = (struct btrfs_extent_inline_ref *)ptr;
	btrfs_set_extent_inline_ref_type(leaf, iref, type);
	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		struct btrfs_extent_data_ref *dref;
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
		btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
		btrfs_set_extent_data_ref_offset(leaf, dref, offset);
		btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		struct btrfs_shared_data_ref *sref;
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
	btrfs_mark_buffer_dirty(leaf);
static int lookup_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner, u64 offset)
	ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 0);

	btrfs_release_path(path);

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
		ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
					     root_objectid, owner, offset);
static int update_inline_extent_backref(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct btrfs_extent_inline_ref *iref,
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_data_ref *dref = NULL;
	struct btrfs_shared_data_ref *sref = NULL;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
	refs += refs_to_mod;
	btrfs_set_extent_refs(leaf, ei, refs);

	type = btrfs_extent_inline_ref_type(leaf, iref);

	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		refs = btrfs_extent_data_ref_count(leaf, dref);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		refs = btrfs_shared_data_ref_count(leaf, sref);
		BUG_ON(refs_to_mod != -1);

	BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
	refs += refs_to_mod;

		if (type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, dref, refs);
			btrfs_set_shared_data_ref_count(leaf, sref, refs);
		size = btrfs_extent_inline_ref_size(type);
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		ptr = (unsigned long)iref;
		end = (unsigned long)ei + item_size;
		if (ptr + size < end)
			memmove_extent_buffer(leaf, ptr, ptr + size,
		ret = btrfs_truncate_item(root, path, item_size, 1);
	btrfs_mark_buffer_dirty(leaf);
static int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					u64 bytenr, u64 num_bytes, u64 parent,
					u64 root_objectid, u64 owner,
					u64 offset, int refs_to_add)
	struct btrfs_extent_inline_ref *iref;

	ret = lookup_inline_extent_backref(trans, root, path, &iref,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 1);
		BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
		ret = update_inline_extent_backref(trans, root, path, iref,
	} else if (ret == -ENOENT) {
		ret = setup_inline_extent_backref(root, path, iref,
						  parent, root_objectid,
						  owner, offset, refs_to_add);
static int insert_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 bytenr, u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int refs_to_add)
	if (owner >= BTRFS_FIRST_FREE_OBJECTID) {
		ret = insert_extent_data_ref(trans, root, path, bytenr,
					     parent, root_objectid,
					     owner, offset, refs_to_add);
		BUG_ON(refs_to_add != 1);
		ret = insert_tree_block_ref(trans, root, path, bytenr,
					    parent, root_objectid);
static int remove_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 int refs_to_drop, int is_data)
	BUG_ON(!is_data && refs_to_drop != 1);
		ret = update_inline_extent_backref(trans, root, path, iref,
	} else if (is_data) {
		ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
		ret = btrfs_del_item(trans, root, path);
int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 u64 bytenr, u64 num_bytes, u64 parent,
			 u64 root_objectid, u64 owner, u64 offset)
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *item;

	path = btrfs_alloc_path();

	ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
					   path, bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 1);
	if (ret != -EAGAIN) {

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, item);
	btrfs_set_extent_refs(leaf, item, refs + 1);

	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	/* now insert the actual backref */
	ret = insert_extent_backref(trans, root->fs_info->extent_root,
				    path, bytenr, parent, root_objectid,

	btrfs_free_path(path);
	finish_current_insert(trans, root->fs_info->extent_root);
	del_pending_extents(trans, root->fs_info->extent_root);
int btrfs_extent_post_op(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root)
	finish_current_insert(trans, root->fs_info->extent_root);
	del_pending_extents(trans, root->fs_info->extent_root);
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 bytenr,
			     u64 offset, int metadata, u64 *refs, u64 *flags)
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_extent_item *item;

	    !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA)) {
		offset = root->fs_info->nodesize;

	path = btrfs_alloc_path();

	key.objectid = bytenr;
	key.offset = offset;
		key.type = BTRFS_METADATA_ITEM_KEY;
		key.type = BTRFS_EXTENT_ITEM_KEY;

	ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,

	/*
	 * Deal with the fact that we may have mixed SKINNY and normal refs. If
	 * we didn't find what we wanted check and see if we have a normal ref
	 * right next to us, or re-search if we are on the edge of the leaf just
	 * in case.
	 */
	if (ret > 0 && metadata) {
		if (path->slots[0]) {
			btrfs_item_key_to_cpu(path->nodes[0], &key,
			if (key.objectid == bytenr &&
			    key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == root->fs_info->nodesize)
		btrfs_release_path(path);
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = root->fs_info->nodesize;

	item_size = btrfs_item_size_nr(l, path->slots[0]);
	if (item_size >= sizeof(*item)) {
		item = btrfs_item_ptr(l, path->slots[0],
				      struct btrfs_extent_item);
		num_refs = btrfs_extent_refs(l, item);
		extent_flags = btrfs_extent_flags(l, item);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		struct btrfs_extent_item_v0 *ei0;
		BUG_ON(item_size != sizeof(*ei0));
		ei0 = btrfs_item_ptr(l, path->slots[0],
				     struct btrfs_extent_item_v0);
		num_refs = btrfs_extent_refs_v0(l, ei0);
		/* FIXME: this isn't correct for data */
		extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;

	item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
		*flags = extent_flags;
	btrfs_free_path(path);
int btrfs_set_block_flags(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root,
			  u64 bytenr, int level, u64 flags)
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_extent_item *item;
	int skinny_metadata =
		btrfs_fs_incompat(root->fs_info, SKINNY_METADATA);

	path = btrfs_alloc_path();

	key.objectid = bytenr;
	if (skinny_metadata) {
		key.type = BTRFS_METADATA_ITEM_KEY;
		key.offset = root->fs_info->nodesize;
		key.type = BTRFS_EXTENT_ITEM_KEY;

	ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,

	if (ret > 0 && skinny_metadata) {
		skinny_metadata = 0;
		if (path->slots[0]) {
			btrfs_item_key_to_cpu(path->nodes[0], &key,
			if (key.objectid == bytenr &&
			    key.offset == root->fs_info->nodesize &&
			    key.type == BTRFS_EXTENT_ITEM_KEY)
		btrfs_release_path(path);
		key.offset = root->fs_info->nodesize;
		key.type = BTRFS_EXTENT_ITEM_KEY;

		btrfs_print_leaf(root, path->nodes[0]);
		printk("failed to find block number %Lu\n",
		       (unsigned long long)bytenr);

	item_size = btrfs_item_size_nr(l, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*item)) {
		ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
		item_size = btrfs_item_size_nr(l, path->slots[0]);

	BUG_ON(item_size < sizeof(*item));
	item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
	flags |= btrfs_extent_flags(l, item);
	btrfs_set_extent_flags(l, item, flags);

	btrfs_free_path(path);
	finish_current_insert(trans, root->fs_info->extent_root);
	del_pending_extents(trans, root->fs_info->extent_root);
static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct extent_buffer *buf,
			   int record_parent, int inc)
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int (*process_func)(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    u64, u64, u64, u64, u64, u64);

	ref_root = btrfs_header_owner(buf);
	nritems = btrfs_header_nritems(buf);
	level = btrfs_header_level(buf);

	if (!root->ref_cows && level == 0)

		process_func = btrfs_inc_extent_ref;
		process_func = btrfs_free_extent;

		parent = buf->start;

	for (i = 0; i < nritems; i++) {
			btrfs_item_key_to_cpu(buf, &key, i);
			if (key.type != BTRFS_EXTENT_DATA_KEY)
			fi = btrfs_item_ptr(buf, i,
					    struct btrfs_file_extent_item);
			if (btrfs_file_extent_type(buf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE)
			bytenr = btrfs_file_extent_disk_bytenr(buf, fi);

			num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
			key.offset -= btrfs_file_extent_offset(buf, fi);
			ret = process_func(trans, root, bytenr, num_bytes,
					   parent, ref_root, key.objectid,

			bytenr = btrfs_node_blockptr(buf, i);
			num_bytes = root->fs_info->nodesize;
			ret = process_func(trans, root, bytenr, num_bytes,
					   parent, ref_root, level - 1, 0);
int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		  struct extent_buffer *buf, int record_parent)
	return __btrfs_mod_ref(trans, root, buf, record_parent, 1);

int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		  struct extent_buffer *buf, int record_parent)
	return __btrfs_mod_ref(trans, root, buf, record_parent, 0);
static int write_one_cache_group(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_block_group_cache *cache)
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	struct extent_buffer *leaf;

	ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);

	leaf = path->nodes[0];
	bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
	write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	finish_current_insert(trans, extent_root);
	pending_ret = del_pending_extents(trans, extent_root);
int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root)
	struct extent_io_tree *block_group_cache;
	struct btrfs_block_group_cache *cache;
	struct btrfs_path *path;

	block_group_cache = &root->fs_info->block_group_cache;
	path = btrfs_alloc_path();

		ret = find_first_extent_bit(block_group_cache, last,
					    &start, &end, BLOCK_GROUP_DIRTY);

		ret = get_state_private(block_group_cache, start, &ptr);
		clear_extent_bits(block_group_cache, start, end,
		cache = (struct btrfs_block_group_cache *)(unsigned long)ptr;
		ret = write_one_cache_group(trans, root, path, cache);
	btrfs_free_path(path);
static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
	struct btrfs_space_info *found;

	flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;
	list_for_each_entry(found, &info->space_info, list) {
		if (found->flags & flags)
static int free_space_info(struct btrfs_fs_info *fs_info, u64 flags,
			   u64 total_bytes, u64 bytes_used,
			   struct btrfs_space_info **space_info)
	struct btrfs_space_info *found;

	/* only support free block group which is empty */
	found = __find_space_info(fs_info, flags);
	if (found->total_bytes < total_bytes) {
			"WARNING: bad space info to free %llu only have %llu\n",
			total_bytes, found->total_bytes);
	found->total_bytes -= total_bytes;
	*space_info = found;
static int update_space_info(struct btrfs_fs_info *info, u64 flags,
			     u64 total_bytes, u64 bytes_used,
			     struct btrfs_space_info **space_info)
	struct btrfs_space_info *found;

	found = __find_space_info(info, flags);
		found->total_bytes += total_bytes;
		found->bytes_used += bytes_used;
		if (found->total_bytes < found->bytes_used) {
			fprintf(stderr, "warning, bad space info total_bytes "
				(unsigned long long)found->total_bytes,
				(unsigned long long)found->bytes_used);
		*space_info = found;

	found = kmalloc(sizeof(*found), GFP_NOFS);

	list_add(&found->list, &info->space_info);
	found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
	found->total_bytes = total_bytes;
	found->bytes_used = bytes_used;
	found->bytes_pinned = 0;
	*space_info = found;
static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
	u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
				   BTRFS_BLOCK_GROUP_RAID1 |
				   BTRFS_BLOCK_GROUP_RAID10 |
				   BTRFS_BLOCK_GROUP_RAID5 |
				   BTRFS_BLOCK_GROUP_RAID6 |
				   BTRFS_BLOCK_GROUP_DUP);

	if (flags & BTRFS_BLOCK_GROUP_DATA)
		fs_info->avail_data_alloc_bits |= extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_METADATA)
		fs_info->avail_metadata_alloc_bits |= extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		fs_info->avail_system_alloc_bits |= extra_flags;
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
			  struct btrfs_fs_info *fs_info, u64 alloc_bytes,
	struct btrfs_space_info *space_info;

	space_info = __find_space_info(fs_info, flags);
		ret = update_space_info(fs_info, flags, 0, 0, &space_info);
	BUG_ON(!space_info);

	if (space_info->full)

	thresh = div_factor(space_info->total_bytes, 7);
	if ((space_info->bytes_used + space_info->bytes_pinned + alloc_bytes) <

	/*
	 * Avoid allocating given chunk type
	 */
	if (fs_info->avoid_meta_chunk_alloc &&
	    (flags & BTRFS_BLOCK_GROUP_METADATA))
	if (fs_info->avoid_sys_chunk_alloc &&
	    (flags & BTRFS_BLOCK_GROUP_SYSTEM))

	ret = btrfs_alloc_chunk(trans, fs_info, &start, &num_bytes,
	if (ret == -ENOSPC) {
		space_info->full = 1;

	ret = btrfs_make_block_group(trans, fs_info, 0, space_info->flags,
				     BTRFS_FIRST_CHUNK_TREE_OBJECTID, start, num_bytes);
static int update_block_group(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      u64 bytenr, u64 num_bytes, int alloc,
	struct btrfs_block_group_cache *cache;
	struct btrfs_fs_info *info = root->fs_info;
	u64 total = num_bytes;

	/* block accounting for super block */
	old_val = btrfs_super_bytes_used(info->super_copy);
		old_val += num_bytes;
		old_val -= num_bytes;
	btrfs_set_super_bytes_used(info->super_copy, old_val);

	/* block accounting for root item */
	old_val = btrfs_root_used(&root->root_item);
		old_val += num_bytes;
		old_val -= num_bytes;
	btrfs_set_root_used(&root->root_item, old_val);

		cache = btrfs_lookup_block_group(info, bytenr);

		byte_in_group = bytenr - cache->key.objectid;
		WARN_ON(byte_in_group > cache->key.offset);
		start = cache->key.objectid;
		end = start + cache->key.offset - 1;
		set_extent_bits(&info->block_group_cache, start, end,

		old_val = btrfs_block_group_used(&cache->item);
		num_bytes = min(total, cache->key.offset - byte_in_group);

			old_val += num_bytes;
			cache->space_info->bytes_used += num_bytes;
			old_val -= num_bytes;
			cache->space_info->bytes_used -= num_bytes;
				set_extent_dirty(&info->free_space_cache,
						 bytenr, bytenr + num_bytes - 1);
		btrfs_set_block_group_used(&cache->item, old_val);
		bytenr += num_bytes;
static int update_pinned_extents(struct btrfs_root *root,
				 u64 bytenr, u64 num, int pin)
	struct btrfs_block_group_cache *cache;
	struct btrfs_fs_info *fs_info = root->fs_info;

		set_extent_dirty(&fs_info->pinned_extents,
				 bytenr, bytenr + num - 1);
		clear_extent_dirty(&fs_info->pinned_extents,
				   bytenr, bytenr + num - 1);

		cache = btrfs_lookup_block_group(fs_info, bytenr);
			len = min((u64)fs_info->sectorsize, num);
			len = min(num, cache->key.offset -
				  (bytenr - cache->key.objectid));
			cache->pinned += len;
			cache->space_info->bytes_pinned += len;
			fs_info->total_pinned += len;
			cache->pinned -= len;
			cache->space_info->bytes_pinned -= len;
			fs_info->total_pinned -= len;
int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct extent_io_tree *unpin)
	struct extent_io_tree *free_space_cache;
	free_space_cache = &root->fs_info->free_space_cache;

		ret = find_first_extent_bit(unpin, 0, &start, &end,
		update_pinned_extents(root, start, end + 1 - start, 0);
		clear_extent_dirty(unpin, start, end);
		set_extent_dirty(free_space_cache, start, end);
static int extent_root_pending_ops(struct btrfs_fs_info *info)
	ret = find_first_extent_bit(&info->extent_ins, 0, &start,
				    &end, EXTENT_LOCKED);
	ret = find_first_extent_bit(&info->pending_del, 0, &start, &end,
static int finish_current_insert(struct btrfs_trans_handle *trans,
				 struct btrfs_root *extent_root)
	struct btrfs_fs_info *info = extent_root->fs_info;
	struct pending_extent_op *extent_op;
	struct btrfs_key key;
	int skinny_metadata =
		btrfs_fs_incompat(extent_root->fs_info, SKINNY_METADATA);

		ret = find_first_extent_bit(&info->extent_ins, 0, &start,
					    &end, EXTENT_LOCKED);

		ret = get_state_private(&info->extent_ins, start, &priv);
		extent_op = (struct pending_extent_op *)(unsigned long)priv;

		if (extent_op->type == PENDING_EXTENT_INSERT) {
			key.objectid = start;
			if (skinny_metadata) {
				key.offset = extent_op->level;
				key.type = BTRFS_METADATA_ITEM_KEY;
				key.offset = extent_op->num_bytes;
				key.type = BTRFS_EXTENT_ITEM_KEY;
			ret = alloc_reserved_tree_block(trans, extent_root,
						extent_root->root_key.objectid,
						extent_op->level, &key);

		clear_extent_bits(&info->extent_ins, start, end, EXTENT_LOCKED);
static int pin_down_bytes(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root,
			  u64 bytenr, u64 num_bytes, int is_data)
	struct extent_buffer *buf;

	buf = btrfs_find_tree_block(root->fs_info, bytenr, num_bytes);

	/* we can reuse a block if it hasn't been written
	 * and it is from this transaction.  We can't
	 * reuse anything from the tree log root because
	 * it has tiny sub-transactions.
	 */
	if (btrfs_buffer_uptodate(buf, 0)) {
		u64 header_owner = btrfs_header_owner(buf);
		u64 header_transid = btrfs_header_generation(buf);
		if (header_owner != BTRFS_TREE_LOG_OBJECTID &&
		    header_transid == trans->transid &&
		    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
			clean_tree_block(NULL, root, buf);
			free_extent_buffer(buf);
	free_extent_buffer(buf);

	update_pinned_extents(root, bytenr, num_bytes, 1);
void btrfs_pin_extent(struct btrfs_fs_info *fs_info,
		      u64 bytenr, u64 num_bytes)
	update_pinned_extents(fs_info->extent_root, bytenr, num_bytes, 1);

void btrfs_unpin_extent(struct btrfs_fs_info *fs_info,
			u64 bytenr, u64 num_bytes)
	update_pinned_extents(fs_info->extent_root, bytenr, num_bytes, 0);
/*
 * remove an extent from the root, returns 0 on success
 */
static int __free_extent(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 u64 bytenr, u64 num_bytes, u64 parent,
			 u64 root_objectid, u64 owner_objectid,
			 u64 owner_offset, int refs_to_drop)
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	int extent_slot = 0;
	int found_extent = 0;
	int skinny_metadata =
		btrfs_fs_incompat(extent_root->fs_info, SKINNY_METADATA);

	if (root->fs_info->free_extent_hook) {
		root->fs_info->free_extent_hook(trans, root, bytenr, num_bytes,
						parent, root_objectid, owner_objectid,
						owner_offset, refs_to_drop);

	path = btrfs_alloc_path();

	is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
		skinny_metadata = 0;
	BUG_ON(!is_data && refs_to_drop != 1);

	ret = lookup_extent_backref(trans, extent_root, path, &iref,
				    bytenr, num_bytes, parent,
				    root_objectid, owner_objectid,
		extent_slot = path->slots[0];
		while (extent_slot >= 0) {
			btrfs_item_key_to_cpu(path->nodes[0], &key,
			if (key.objectid != bytenr)
			if (key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == num_bytes) {
			if (key.type == BTRFS_METADATA_ITEM_KEY &&
			    key.offset == owner_objectid) {
			if (path->slots[0] - extent_slot > 5)
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
		if (found_extent && item_size < sizeof(*ei))

		if (!found_extent) {
			ret = remove_extent_backref(trans, extent_root, path,
			btrfs_release_path(path);

			key.objectid = bytenr;

			if (skinny_metadata) {
				key.type = BTRFS_METADATA_ITEM_KEY;
				key.offset = owner_objectid;
				key.type = BTRFS_EXTENT_ITEM_KEY;
				key.offset = num_bytes;

			ret = btrfs_search_slot(trans, extent_root,
			if (ret > 0 && skinny_metadata && path->slots[0]) {
				btrfs_item_key_to_cpu(path->nodes[0],
				if (key.objectid == bytenr &&
				    key.type == BTRFS_EXTENT_ITEM_KEY &&
				    key.offset == num_bytes)

			if (ret > 0 && skinny_metadata) {
				skinny_metadata = 0;
				btrfs_release_path(path);
				key.type = BTRFS_EXTENT_ITEM_KEY;
				key.offset = num_bytes;
				ret = btrfs_search_slot(trans, extent_root,

				printk(KERN_ERR "umm, got %d back from search"
				       ", was looking for %llu\n", ret,
				       (unsigned long long)bytenr);
				btrfs_print_leaf(extent_root, path->nodes[0]);
			extent_slot = path->slots[0];

		printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
		       "parent %llu root %llu owner %llu offset %llu\n",
		       (unsigned long long)bytenr,
		       (unsigned long long)parent,
		       (unsigned long long)root_objectid,
		       (unsigned long long)owner_objectid,
		       (unsigned long long)owner_offset);

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, extent_slot);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		BUG_ON(found_extent || extent_slot != path->slots[0]);
		ret = convert_extent_item_v0(trans, extent_root, path,

		btrfs_release_path(path);

		key.objectid = bytenr;
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = num_bytes;

		ret = btrfs_search_slot(trans, extent_root, &key, path,
			printk(KERN_ERR "umm, got %d back from search"
			       ", was looking for %llu\n", ret,
			       (unsigned long long)bytenr);
			btrfs_print_leaf(extent_root, path->nodes[0]);
		extent_slot = path->slots[0];
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, extent_slot);

	BUG_ON(item_size < sizeof(*ei));
	ei = btrfs_item_ptr(leaf, extent_slot,
			    struct btrfs_extent_item);
	if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
	    key.type == BTRFS_EXTENT_ITEM_KEY) {
		struct btrfs_tree_block_info *bi;
		BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));

	refs = btrfs_extent_refs(leaf, ei);
	BUG_ON(refs < refs_to_drop);
	refs -= refs_to_drop;

		/*
		 * In the case of inline back ref, reference count will
		 * be updated by remove_extent_backref
		 */
			BUG_ON(!found_extent);
			btrfs_set_extent_refs(leaf, ei, refs);
			btrfs_mark_buffer_dirty(leaf);

			ret = remove_extent_backref(trans, extent_root, path,

			BUG_ON(is_data && refs_to_drop !=
			       extent_data_ref_count(path, iref));
				BUG_ON(path->slots[0] != extent_slot);
				BUG_ON(path->slots[0] != extent_slot + 1);
				path->slots[0] = extent_slot;

			ret = pin_down_bytes(trans, root, bytenr, num_bytes,

		ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
		btrfs_release_path(path);

			ret = btrfs_del_csums(trans, root, bytenr, num_bytes);

		update_block_group(trans, root, bytenr, num_bytes, 0, mark_free);

	btrfs_free_path(path);
	finish_current_insert(trans, extent_root);
/*
 * find all the blocks marked as pending in the radix tree and remove
 * them from the extent map
 */
static int del_pending_extents(struct btrfs_trans_handle *trans, struct
			       btrfs_root *extent_root)
	struct extent_io_tree *pending_del;
	struct extent_io_tree *extent_ins;
	struct pending_extent_op *extent_op;

	extent_ins = &extent_root->fs_info->extent_ins;
	pending_del = &extent_root->fs_info->pending_del;

		ret = find_first_extent_bit(pending_del, 0, &start, &end,
		ret = get_state_private(pending_del, start, &priv);
		extent_op = (struct pending_extent_op *)(unsigned long)priv;

		clear_extent_bits(pending_del, start, end, EXTENT_LOCKED);

		if (!test_range_bit(extent_ins, start, end,
				    EXTENT_LOCKED, 0)) {
			ret = __free_extent(trans, extent_root,
					    start, end + 1 - start, 0,
					    extent_root->root_key.objectid,
					    extent_op->level, 0, 1);
			ret = get_state_private(extent_ins, start, &priv);
			extent_op = (struct pending_extent_op *)
							(unsigned long)priv;

			clear_extent_bits(extent_ins, start, end,

			if (extent_op->type == PENDING_BACKREF_UPDATE)
2458 int btrfs_free_tree_block(struct btrfs_trans_handle
*trans
,
2459 struct btrfs_root
*root
,
2460 struct extent_buffer
*buf
,
2461 u64 parent
, int last_ref
)
2463 return btrfs_free_extent(trans
, root
, buf
->start
, buf
->len
, parent
,
2464 root
->root_key
.objectid
,
2465 btrfs_header_level(buf
), 0);
2469 * remove an extent from the root, returns 0 on success
2472 int btrfs_free_extent(struct btrfs_trans_handle
*trans
,
2473 struct btrfs_root
*root
,
2474 u64 bytenr
, u64 num_bytes
, u64 parent
,
2475 u64 root_objectid
, u64 owner
, u64 offset
)
2477 struct btrfs_root
*extent_root
= root
->fs_info
->extent_root
;
2481 WARN_ON(num_bytes
< root
->fs_info
->sectorsize
);
2482 if (root
== extent_root
) {
2483 struct pending_extent_op
*extent_op
;
2485 extent_op
= kmalloc(sizeof(*extent_op
), GFP_NOFS
);
2488 extent_op
->type
= PENDING_EXTENT_DELETE
;
2489 extent_op
->bytenr
= bytenr
;
2490 extent_op
->num_bytes
= num_bytes
;
2491 extent_op
->level
= (int)owner
;
2493 set_extent_bits(&root
->fs_info
->pending_del
,
2494 bytenr
, bytenr
+ num_bytes
- 1,
2496 set_state_private(&root
->fs_info
->pending_del
,
2497 bytenr
, (unsigned long)extent_op
);
2500 ret
= __free_extent(trans
, root
, bytenr
, num_bytes
, parent
,
2501 root_objectid
, owner
, offset
, 1);
2502 pending_ret
= del_pending_extents(trans
, root
->fs_info
->extent_root
);
2503 return ret
? ret
: pending_ret
;
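/*
 * Illustrative sketch, not part of the original source: how a caller is
 * expected to drop one reference on a data extent with the function above.
 * The argument values are made-up examples; only the signature comes from
 * this file.
 *
 *    ret = btrfs_free_extent(trans, root,
 *                            disk_bytenr, disk_num_bytes,  // extent to drop
 *                            leaf->start,                  // parent block, 0 if none
 *                            root->root_key.objectid,      // owning tree
 *                            objectid, file_offset);       // owner/offset for data
 *
 * When root happens to be the extent root itself, the call only queues a
 * PENDING_EXTENT_DELETE op; the real work is done later by
 * del_pending_extents().
 */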
static u64 stripe_align(struct btrfs_root *root, u64 val)

    return round_up(val, (u64)root->fs_info->stripesize);

/*
 * walks the btree of allocated extents and finds a hole of a given size.
 * The key ins is changed to record the hole:
 * ins->objectid == block start
 * ins->flags = BTRFS_EXTENT_ITEM_KEY
 * ins->offset == number of blocks
 * Any available blocks before search_start are skipped.
 */
static int noinline find_free_extent(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *orig_root,
                                     u64 num_bytes, u64 empty_size,
                                     u64 search_start, u64 search_end,
                                     u64 hint_byte, struct btrfs_key *ins,
                                     u64 exclude_start, u64 exclude_nr,

    u64 orig_search_start = search_start;
    struct btrfs_root *root = orig_root->fs_info->extent_root;
    struct btrfs_fs_info *info = root->fs_info;
    u64 total_needed = num_bytes;
    struct btrfs_block_group_cache *block_group;

    WARN_ON(num_bytes < info->sectorsize);
    ins->type = BTRFS_EXTENT_ITEM_KEY;

    search_start = stripe_align(root, search_start);

        block_group = btrfs_lookup_first_block_group(info, hint_byte);

            hint_byte = search_start;
        block_group = btrfs_find_block_group(root, block_group,
                                             hint_byte, data, 1);
        block_group = btrfs_find_block_group(root,
                                             search_start, data, 1);

    total_needed += empty_size;

        search_start = stripe_align(root, search_start);
        block_group = btrfs_lookup_first_block_group(info,
        block_group = btrfs_lookup_first_block_group(info,

    ret = find_search_start(root, &block_group, &search_start,
                            total_needed, data);

    ins->objectid = search_start;
    ins->offset = num_bytes;

    if (ins->objectid + num_bytes >
        block_group->key.objectid + block_group->key.offset) {
        search_start = block_group->key.objectid +
                       block_group->key.offset;

    if (test_range_bit(&info->extent_ins, ins->objectid,
                       ins->objectid + num_bytes - 1, EXTENT_LOCKED, 0)) {
        search_start = ins->objectid + num_bytes;

    if (test_range_bit(&info->pinned_extents, ins->objectid,
                       ins->objectid + num_bytes - 1, EXTENT_DIRTY, 0)) {
        search_start = ins->objectid + num_bytes;

    if (info->excluded_extents &&
        test_range_bit(info->excluded_extents, ins->objectid,
                       ins->objectid + num_bytes - 1, EXTENT_DIRTY, 0)) {
        search_start = ins->objectid + num_bytes;

    if (exclude_nr > 0 && (ins->objectid + num_bytes > exclude_start &&
        ins->objectid < exclude_start + exclude_nr)) {
        search_start = exclude_start + exclude_nr;

    if (!(data & BTRFS_BLOCK_GROUP_DATA)) {
        if (check_crossing_stripes(info, ins->objectid, num_bytes)) {
            struct btrfs_block_group_cache *bg_cache;

            bg_cache = btrfs_lookup_block_group(info, ins->objectid);

            bg_offset = ins->objectid - bg_cache->key.objectid;

            search_start = round_up(
                bg_offset + num_bytes, BTRFS_STRIPE_LEN) +
                bg_cache->key.objectid;

    block_group = btrfs_lookup_block_group(info, ins->objectid);

        trans->block_group = block_group;

    ins->offset = num_bytes;

    block_group = btrfs_lookup_first_block_group(info, search_start);

        search_start = orig_search_start;

        total_needed -= empty_size;

    block_group = btrfs_find_block_group(root, block_group,
                                         search_start, data, 0);
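/*
 * Illustrative sketch, not part of the original source: what a caller gets
 * back from find_free_extent() on success, per the comment above.  The
 * numbers are hypothetical, and the trailing "data" argument is assumed from
 * the elided tail of the prototype.
 *
 *    struct btrfs_key ins;
 *
 *    ret = find_free_extent(trans, root, 16384, 0, 0, (u64)-1, 0, &ins,
 *                           0, 0, BTRFS_BLOCK_GROUP_METADATA);
 *    // ins.objectid == start of the reserved range (e.g. 30408704)
 *    // ins.offset   == number of bytes reserved    (e.g. 16384)
 *    // ins.type     == BTRFS_EXTENT_ITEM_KEY
 */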
int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
                         struct btrfs_root *root,
                         u64 num_bytes, u64 empty_size,
                         u64 hint_byte, u64 search_end,
                         struct btrfs_key *ins, int data)

    u64 search_start = 0;

    struct btrfs_fs_info *info = root->fs_info;

        alloc_profile = info->avail_data_alloc_bits &
                        info->data_alloc_profile;
        data = BTRFS_BLOCK_GROUP_DATA | alloc_profile;
    } else if (info->system_allocs == 1 || root == info->chunk_root) {
        alloc_profile = info->avail_system_alloc_bits &
                        info->system_alloc_profile;
        data = BTRFS_BLOCK_GROUP_SYSTEM | alloc_profile;

        alloc_profile = info->avail_metadata_alloc_bits &
                        info->metadata_alloc_profile;
        data = BTRFS_BLOCK_GROUP_METADATA | alloc_profile;

    if (root->ref_cows) {
        if (!(data & BTRFS_BLOCK_GROUP_METADATA)) {
            ret = do_chunk_alloc(trans, info,
                                 BTRFS_BLOCK_GROUP_METADATA);

        ret = do_chunk_alloc(trans, info,
                             num_bytes + SZ_2M, data);

    WARN_ON(num_bytes < info->sectorsize);
    ret = find_free_extent(trans, root, num_bytes, empty_size,
                           search_start, search_end, hint_byte, ins,
                           trans->alloc_exclude_start,
                           trans->alloc_exclude_nr, data);

    clear_extent_dirty(&info->free_space_cache,
                       ins->objectid, ins->objectid + ins->offset - 1);
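/*
 * Illustrative sketch, not part of the original source: a typical data
 * reservation as the convert/mkfs code might issue it.  All argument values
 * are examples.
 *
 *    struct btrfs_key ins;
 *
 *    ret = btrfs_reserve_extent(trans, root, root->fs_info->sectorsize, 0,
 *                               0, (u64)-1, &ins, 1);
 *    if (!ret) {
 *        // ins now describes [ins.objectid, ins.objectid + ins.offset)
 *        // and that range has been cleared from the free space cache.
 *    }
 */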
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     u64 root_objectid, u64 generation,
                                     u64 flags, struct btrfs_disk_key *key,
                                     int level, struct btrfs_key *ins)

    struct btrfs_fs_info *fs_info = root->fs_info;
    struct btrfs_extent_item *extent_item;
    struct btrfs_tree_block_info *block_info;
    struct btrfs_extent_inline_ref *iref;
    struct btrfs_path *path;
    struct extent_buffer *leaf;
    u32 size = sizeof(*extent_item) + sizeof(*iref);
    int skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);

    if (!skinny_metadata)
        size += sizeof(*block_info);

    path = btrfs_alloc_path();

    ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,

    leaf = path->nodes[0];
    extent_item = btrfs_item_ptr(leaf, path->slots[0],
                                 struct btrfs_extent_item);
    btrfs_set_extent_refs(leaf, extent_item, 1);
    btrfs_set_extent_generation(leaf, extent_item, generation);
    btrfs_set_extent_flags(leaf, extent_item,
                           flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);

    if (skinny_metadata) {
        iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);

        block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
        btrfs_set_tree_block_key(leaf, block_info, key);
        btrfs_set_tree_block_level(leaf, block_info, level);
        iref = (struct btrfs_extent_inline_ref *)(block_info + 1);

    btrfs_set_extent_inline_ref_type(leaf, iref, BTRFS_TREE_BLOCK_REF_KEY);
    btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);

    btrfs_mark_buffer_dirty(leaf);
    btrfs_free_path(path);

    ret = update_block_group(trans, root, ins->objectid, fs_info->nodesize,
static int alloc_tree_block(struct btrfs_trans_handle *trans,
                            struct btrfs_root *root, u64 num_bytes,
                            u64 root_objectid, u64 generation,
                            u64 flags, struct btrfs_disk_key *key,
                            int level, u64 empty_size, u64 hint_byte,
                            u64 search_end, struct btrfs_key *ins)

    ret = btrfs_reserve_extent(trans, root, num_bytes, empty_size,
                               hint_byte, search_end, ins, 0);

    if (root_objectid == BTRFS_EXTENT_TREE_OBJECTID) {
        struct pending_extent_op *extent_op;

        extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);

        extent_op->type = PENDING_EXTENT_INSERT;
        extent_op->bytenr = ins->objectid;
        extent_op->num_bytes = ins->offset;
        extent_op->level = level;
        extent_op->flags = flags;
        memcpy(&extent_op->key, key, sizeof(*key));

        set_extent_bits(&root->fs_info->extent_ins, ins->objectid,
                        ins->objectid + ins->offset - 1,
        set_state_private(&root->fs_info->extent_ins,
                          ins->objectid, (unsigned long)extent_op);

        if (btrfs_fs_incompat(root->fs_info, SKINNY_METADATA)) {
            ins->offset = level;
            ins->type = BTRFS_METADATA_ITEM_KEY;

        ret = alloc_reserved_tree_block(trans, root, root_objectid,

        finish_current_insert(trans, root->fs_info->extent_root);
        del_pending_extents(trans, root->fs_info->extent_root);
/*
 * helper function to allocate a block for a given tree
 * returns the tree buffer or NULL.
 */
struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
                                             struct btrfs_root *root,
                                             u32 blocksize, u64 root_objectid,
                                             struct btrfs_disk_key *key, int level,
                                             u64 hint, u64 empty_size)

    struct btrfs_key ins;

    struct extent_buffer *buf;

    ret = alloc_tree_block(trans, root, blocksize, root_objectid,
                           trans->transid, 0, key, level,
                           empty_size, hint, (u64)-1, &ins);

        return ERR_PTR(ret);

    buf = btrfs_find_create_tree_block(root->fs_info, ins.objectid);

        btrfs_free_extent(trans, root, ins.objectid, ins.offset,
                          0, root->root_key.objectid, level, 0);

        return ERR_PTR(-ENOMEM);

    btrfs_set_buffer_uptodate(buf);
    trans->blocks_used++;
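/*
 * Illustrative sketch, not part of the original source: how the btree code
 * typically obtains a fresh node, for example during a COW step.  Everything
 * except the btrfs_alloc_free_block() signature is a placeholder.
 *
 *    struct extent_buffer *cow;
 *
 *    cow = btrfs_alloc_free_block(trans, root, root->fs_info->nodesize,
 *                                 root->root_key.objectid, &disk_key,
 *                                 level, buf->start, 0);
 *    if (IS_ERR(cow))
 *        return PTR_ERR(cow);
 */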
static int noinline drop_leaf_ref(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root,
                                  struct extent_buffer *leaf)

    u64 leaf_generation;
    struct btrfs_key key;
    struct btrfs_file_extent_item *fi;

    BUG_ON(!btrfs_is_leaf(leaf));
    nritems = btrfs_header_nritems(leaf);
    leaf_owner = btrfs_header_owner(leaf);
    leaf_generation = btrfs_header_generation(leaf);

    for (i = 0; i < nritems; i++) {

        btrfs_item_key_to_cpu(leaf, &key, i);
        if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)

        fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
        if (btrfs_file_extent_type(leaf, fi) ==
            BTRFS_FILE_EXTENT_INLINE)

        /*
         * FIXME make sure to insert a trans record that
         * repeats the snapshot del on crash
         */
        disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
        if (disk_bytenr == 0)

        ret = btrfs_free_extent(trans, root, disk_bytenr,
                        btrfs_file_extent_disk_num_bytes(leaf, fi),
                        leaf->start, leaf_owner, leaf_generation,
static void noinline reada_walk_down(struct btrfs_root *root,
                                     struct extent_buffer *node,

    nritems = btrfs_header_nritems(node);
    level = btrfs_header_level(node);

    for (i = slot; i < nritems && skipped < 32; i++) {
        bytenr = btrfs_node_blockptr(node, i);
        if (last && ((bytenr > last && bytenr - last > SZ_32K) ||
                     (last > bytenr && last - bytenr > SZ_32K))) {

        blocksize = btrfs_level_size(root, level - 1);

        ret = btrfs_lookup_extent_ref(NULL, root, bytenr,

        mutex_unlock(&root->fs_info->fs_mutex);
        ret = readahead_tree_block(root, bytenr, blocksize,
                                   btrfs_node_ptr_generation(node, i));
        last = bytenr + blocksize;

        mutex_lock(&root->fs_info->fs_mutex);
/*
 * helper function for drop_snapshot, this walks down the tree dropping ref
 * counts as it goes.
 */
static int noinline walk_down_tree(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root,
                                   struct btrfs_path *path, int *level)

    struct extent_buffer *next;
    struct extent_buffer *cur;
    struct extent_buffer *parent;

    WARN_ON(*level < 0);
    WARN_ON(*level >= BTRFS_MAX_LEVEL);
    ret = btrfs_lookup_extent_ref(trans, root,
                                  path->nodes[*level]->start,
                                  path->nodes[*level]->len, &refs);

    /*
     * walk down to the last node level and free all the leaves
     */
    while (*level >= 0) {
        WARN_ON(*level < 0);
        WARN_ON(*level >= BTRFS_MAX_LEVEL);
        cur = path->nodes[*level];

        if (btrfs_header_level(cur) != *level)

        if (path->slots[*level] >=
            btrfs_header_nritems(cur))

            ret = drop_leaf_ref(trans, root, cur);

        bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
        ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
        blocksize = btrfs_level_size(root, *level - 1);
        ret = btrfs_lookup_extent_ref(trans, root, bytenr, blocksize,

            parent = path->nodes[*level];
            root_owner = btrfs_header_owner(parent);
            root_gen = btrfs_header_generation(parent);
            path->slots[*level]++;
            ret = btrfs_free_extent(trans, root, bytenr, blocksize,
                                    parent->start, root_owner,
                                    root_gen, *level - 1, 1);

        next = btrfs_find_tree_block(root, bytenr, blocksize);
        if (!next || !btrfs_buffer_uptodate(next, ptr_gen)) {
            free_extent_buffer(next);
            reada_walk_down(root, cur, path->slots[*level]);
            mutex_unlock(&root->fs_info->fs_mutex);
            next = read_tree_block(root, bytenr, blocksize,

            mutex_lock(&root->fs_info->fs_mutex);
            if (!extent_buffer_uptodate(next)) {
                ret = PTR_ERR(next);

        WARN_ON(*level <= 0);
        if (path->nodes[*level - 1])
            free_extent_buffer(path->nodes[*level - 1]);
        path->nodes[*level - 1] = next;
        *level = btrfs_header_level(next);
        path->slots[*level] = 0;

    WARN_ON(*level < 0);
    WARN_ON(*level >= BTRFS_MAX_LEVEL);

    if (path->nodes[*level] == root->node) {
        root_owner = root->root_key.objectid;
        parent = path->nodes[*level];

        parent = path->nodes[*level + 1];
        root_owner = btrfs_header_owner(parent);

    root_gen = btrfs_header_generation(parent);
    ret = btrfs_free_extent(trans, root, path->nodes[*level]->start,
                            path->nodes[*level]->len, parent->start,
                            root_owner, root_gen, *level, 1);
    free_extent_buffer(path->nodes[*level]);
    path->nodes[*level] = NULL;
/*
 * helper for dropping snapshots. This walks back up the tree in the path
 * to find the first node higher up where we haven't yet gone through
 */
static int noinline walk_up_tree(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root,
                                 struct btrfs_path *path, int *level)

    struct btrfs_root_item *root_item = &root->root_item;

    for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
        slot = path->slots[i];
        if (slot < btrfs_header_nritems(path->nodes[i]) - 1) {
            struct extent_buffer *node;
            struct btrfs_disk_key disk_key;
            node = path->nodes[i];

            WARN_ON(*level == 0);
            btrfs_node_key(node, &disk_key, path->slots[i]);
            memcpy(&root_item->drop_progress,
                   &disk_key, sizeof(disk_key));
            root_item->drop_level = i;

            struct extent_buffer *parent;
            if (path->nodes[*level] == root->node)
                parent = path->nodes[*level];

                parent = path->nodes[*level + 1];

            root_owner = btrfs_header_owner(parent);
            root_gen = btrfs_header_generation(parent);
            ret = btrfs_free_extent(trans, root,
                                    path->nodes[*level]->start,
                                    path->nodes[*level]->len,
                                    parent->start, root_owner,
                                    root_gen, *level, 1);

            free_extent_buffer(path->nodes[*level]);
            path->nodes[*level] = NULL;
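/*
 * Illustrative sketch, not part of the original source: the walk_down/walk_up
 * pair above is meant to be driven from a drop-snapshot style loop, roughly:
 *
 *    while (1) {
 *        ret = walk_down_tree(trans, root, path, &level);
 *        if (ret > 0)
 *            break;        // nothing left to descend into
 *        ret = walk_up_tree(trans, root, path, &level);
 *        if (ret > 0)
 *            break;        // reached the root, the tree is gone
 *    }
 */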
int btrfs_free_block_groups(struct btrfs_fs_info *info)

    struct btrfs_space_info *sinfo;
    struct btrfs_block_group_cache *cache;

        ret = find_first_extent_bit(&info->block_group_cache, 0,
                                    &start, &end, (unsigned int)-1);

        ret = get_state_private(&info->block_group_cache, start, &ptr);

        cache = u64_to_ptr(ptr);
        if (cache->free_space_ctl) {
            btrfs_remove_free_space_cache(cache);
            kfree(cache->free_space_ctl);

        clear_extent_bits(&info->block_group_cache, start,
                          end, (unsigned int)-1);

        ret = find_first_extent_bit(&info->free_space_cache, 0,
                                    &start, &end, EXTENT_DIRTY);

        clear_extent_dirty(&info->free_space_cache, start, end);

    while (!list_empty(&info->space_info)) {
        sinfo = list_entry(info->space_info.next,
                           struct btrfs_space_info, list);
        list_del_init(&sinfo->list);
static int find_first_block_group(struct btrfs_root *root,
                                  struct btrfs_path *path, struct btrfs_key *key)

    struct btrfs_key found_key;
    struct extent_buffer *leaf;

    ret = btrfs_search_slot(NULL, root, key, path, 0, 0);

        slot = path->slots[0];
        leaf = path->nodes[0];
        if (slot >= btrfs_header_nritems(leaf)) {
            ret = btrfs_next_leaf(root, path);

        btrfs_item_key_to_cpu(leaf, &found_key, slot);

        if (found_key.objectid >= key->objectid &&
            found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY)
static void account_super_bytes(struct btrfs_fs_info *fs_info,
                                struct btrfs_block_group_cache *cache)

    if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
        stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
        cache->bytes_super += stripe_len;

    for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
        bytenr = btrfs_sb_offset(i);
        ret = btrfs_rmap_block(fs_info,
                               cache->key.objectid, bytenr,
                               0, &logical, &nr, &stripe_len);

            if (logical[nr] > cache->key.objectid +

            if (logical[nr] + stripe_len <= cache->key.objectid)

            start = logical[nr];
            if (start < cache->key.objectid) {
                start = cache->key.objectid;
                len = (logical[nr] + stripe_len) - start;

                len = min_t(u64, stripe_len,
                            cache->key.objectid +
                            cache->key.offset - start);

            cache->bytes_super += len;
int btrfs_read_block_groups(struct btrfs_root *root)

    struct btrfs_path *path;

    struct btrfs_block_group_cache *cache;
    struct btrfs_fs_info *info = root->fs_info;
    struct btrfs_space_info *space_info;
    struct extent_io_tree *block_group_cache;
    struct btrfs_key key;
    struct btrfs_key found_key;
    struct extent_buffer *leaf;

    block_group_cache = &info->block_group_cache;

    root = info->extent_root;

    key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
    path = btrfs_alloc_path();

        ret = find_first_block_group(root, path, &key);

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

        cache = kzalloc(sizeof(*cache), GFP_NOFS);

        read_extent_buffer(leaf, &cache->item,
                           btrfs_item_ptr_offset(leaf, path->slots[0]),
                           sizeof(cache->item));
        memcpy(&cache->key, &found_key, sizeof(found_key));

        key.objectid = found_key.objectid + found_key.offset;
        if (found_key.offset == 0)

        btrfs_release_path(path);

        /*
         * Skip 0 sized block groups and don't insert them into the block
         * group cache tree; as their length is 0 they would never get
         * freed at close_ctree() time.
         */
        if (found_key.offset == 0) {

        cache->flags = btrfs_block_group_flags(&cache->item);

        if (cache->flags & BTRFS_BLOCK_GROUP_DATA) {
            bit = BLOCK_GROUP_DATA;
        } else if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
            bit = BLOCK_GROUP_SYSTEM;
        } else if (cache->flags & BTRFS_BLOCK_GROUP_METADATA) {
            bit = BLOCK_GROUP_METADATA;

        set_avail_alloc_bits(info, cache->flags);
        if (btrfs_chunk_readonly(info, cache->key.objectid))

        account_super_bytes(info, cache);

        ret = update_space_info(info, cache->flags, found_key.offset,
                                btrfs_block_group_used(&cache->item),

        cache->space_info = space_info;

        /* use EXTENT_LOCKED to prevent merging */
        set_extent_bits(block_group_cache, found_key.objectid,
                        found_key.objectid + found_key.offset - 1,
                        bit | EXTENT_LOCKED);
        set_state_private(block_group_cache, found_key.objectid,
                          (unsigned long)cache);

    btrfs_free_path(path);
struct btrfs_block_group_cache *
btrfs_add_block_group(struct btrfs_fs_info *fs_info, u64 bytes_used, u64 type,
                      u64 chunk_objectid, u64 chunk_offset, u64 size)

    struct btrfs_block_group_cache *cache;
    struct extent_io_tree *block_group_cache;

    block_group_cache = &fs_info->block_group_cache;

    cache = kzalloc(sizeof(*cache), GFP_NOFS);

    cache->key.objectid = chunk_offset;
    cache->key.offset = size;

    cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
    btrfs_set_block_group_used(&cache->item, bytes_used);
    btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
    cache->flags = type;
    btrfs_set_block_group_flags(&cache->item, type);

    account_super_bytes(fs_info, cache);
    ret = update_space_info(fs_info, cache->flags, size, bytes_used,
                            &cache->space_info);

    bit = block_group_state_bits(type);
    ret = set_extent_bits(block_group_cache, chunk_offset,
                          chunk_offset + size - 1,
                          bit | EXTENT_LOCKED);

    ret = set_state_private(block_group_cache, chunk_offset,
                            (unsigned long)cache);

    set_avail_alloc_bits(fs_info, type);
int btrfs_make_block_group(struct btrfs_trans_handle *trans,
                           struct btrfs_fs_info *fs_info, u64 bytes_used,
                           u64 type, u64 chunk_objectid, u64 chunk_offset,

    struct btrfs_root *extent_root = fs_info->extent_root;
    struct btrfs_block_group_cache *cache;

    cache = btrfs_add_block_group(fs_info, bytes_used, type,
                                  chunk_objectid, chunk_offset, size);
    ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
                            sizeof(cache->item));

    ret = finish_current_insert(trans, extent_root);

    ret = del_pending_extents(trans, extent_root);
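/*
 * Illustrative sketch, not part of the original source: mkfs-style creation
 * of a metadata block group right after allocating its chunk.  The chunk
 * start and size values are hypothetical, and the trailing "size" parameter
 * is assumed from the elided tail of the prototype above.
 *
 *    ret = btrfs_make_block_group(trans, fs_info, 0,
 *                                 BTRFS_BLOCK_GROUP_METADATA,
 *                                 BTRFS_FIRST_CHUNK_TREE_OBJECTID,
 *                                 chunk_start, chunk_size);
 */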
/*
 * This is for converter use only.
 *
 * In that case, we don't know where the free blocks are located.
 * Therefore all block group cache entries must be setup properly
 * before doing any block allocation.
 */
int btrfs_make_block_groups(struct btrfs_trans_handle *trans,
                            struct btrfs_fs_info *fs_info)

    u64 total_metadata = 0;

    struct btrfs_root *extent_root = fs_info->extent_root;
    struct btrfs_block_group_cache *cache;
    struct extent_io_tree *block_group_cache;

    block_group_cache = &fs_info->block_group_cache;
    chunk_objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
    total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
    group_align = 64 * fs_info->sectorsize;

    while (cur_start < total_bytes) {
        group_size = total_bytes / 12;
        group_size = min_t(u64, group_size, total_bytes - cur_start);
        if (cur_start == 0) {
            bit = BLOCK_GROUP_SYSTEM;
            group_type = BTRFS_BLOCK_GROUP_SYSTEM;

            group_size &= ~(group_align - 1);
            group_size = max_t(u64, group_size, SZ_8M);
            group_size = min_t(u64, group_size, SZ_32M);

            group_size &= ~(group_align - 1);
            if (total_data >= total_metadata * 2) {
                group_type = BTRFS_BLOCK_GROUP_METADATA;
                group_size = min_t(u64, group_size, SZ_1G);
                total_metadata += group_size;

                group_type = BTRFS_BLOCK_GROUP_DATA;
                group_size = min_t(u64, group_size,

                total_data += group_size;

            if ((total_bytes - cur_start) * 4 < group_size * 5)
                group_size = total_bytes - cur_start;

        cache = kzalloc(sizeof(*cache), GFP_NOFS);

        cache->key.objectid = cur_start;
        cache->key.offset = group_size;
        cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;

        btrfs_set_block_group_used(&cache->item, 0);
        btrfs_set_block_group_chunk_objectid(&cache->item,

        btrfs_set_block_group_flags(&cache->item, group_type);

        cache->flags = group_type;

        ret = update_space_info(fs_info, group_type, group_size,
                                0, &cache->space_info);

        set_avail_alloc_bits(fs_info, group_type);

        set_extent_bits(block_group_cache, cur_start,
                        cur_start + group_size - 1,
                        bit | EXTENT_LOCKED);
        set_state_private(block_group_cache, cur_start,
                          (unsigned long)cache);
        cur_start += group_size;

    /* then insert all the items */
    while (cur_start < total_bytes) {
        cache = btrfs_lookup_block_group(fs_info, cur_start);

        ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
                                sizeof(cache->item));

        finish_current_insert(trans, extent_root);
        ret = del_pending_extents(trans, extent_root);

        cur_start = cache->key.objectid + cache->key.offset;
int btrfs_update_block_group(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root,
                             u64 bytenr, u64 num_bytes, int alloc,

    return update_block_group(trans, root, bytenr, num_bytes,
/*
 * Just remove a block group item in extent tree
 * Caller should ensure the block group is empty and all space is pinned.
 * Or new tree block/data may be allocated into it.
 */
static int free_block_group_item(struct btrfs_trans_handle *trans,
                                 struct btrfs_fs_info *fs_info,
                                 u64 bytenr, u64 len)

    struct btrfs_path *path;
    struct btrfs_key key;
    struct btrfs_root *root = fs_info->extent_root;

    key.objectid = bytenr;

    key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;

    path = btrfs_alloc_path();

    ret = btrfs_search_slot(trans, root, &key, path, -1, 1);

    ret = btrfs_del_item(trans, root, path);

    btrfs_free_path(path);
static int free_dev_extent_item(struct btrfs_trans_handle *trans,
                                struct btrfs_fs_info *fs_info,
                                u64 devid, u64 dev_offset)

    struct btrfs_root *root = fs_info->dev_root;
    struct btrfs_path *path;
    struct btrfs_key key;

    path = btrfs_alloc_path();

    key.objectid = devid;
    key.type = BTRFS_DEV_EXTENT_KEY;
    key.offset = dev_offset;

    ret = btrfs_search_slot(trans, root, &key, path, -1, 1);

    ret = btrfs_del_item(trans, root, path);

    btrfs_free_path(path);
static int free_chunk_dev_extent_items(struct btrfs_trans_handle *trans,
                                       struct btrfs_fs_info *fs_info,

    struct btrfs_chunk *chunk = NULL;
    struct btrfs_root *root = fs_info->chunk_root;
    struct btrfs_path *path;
    struct btrfs_key key;

    path = btrfs_alloc_path();

    key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
    key.type = BTRFS_CHUNK_ITEM_KEY;
    key.offset = chunk_offset;

    ret = btrfs_search_slot(trans, root, &key, path, 0, 0);

    chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
                           struct btrfs_chunk);
    num_stripes = btrfs_chunk_num_stripes(path->nodes[0], chunk);
    for (i = 0; i < num_stripes; i++) {
        ret = free_dev_extent_item(trans, fs_info,
                btrfs_stripe_devid_nr(path->nodes[0], chunk, i),
                btrfs_stripe_offset_nr(path->nodes[0], chunk, i));

    btrfs_free_path(path);
static int free_system_chunk_item(struct btrfs_super_block *super,
                                  struct btrfs_key *key)

    struct btrfs_disk_key *disk_key;
    struct btrfs_key cpu_key;
    u32 array_size = btrfs_super_sys_array_size(super);
    char *ptr = (char *)super->sys_chunk_array;

    while (cur < btrfs_super_sys_array_size(super)) {
        struct btrfs_chunk *chunk;

        disk_key = (struct btrfs_disk_key *)(ptr + cur);
        btrfs_disk_key_to_cpu(&cpu_key, disk_key);
        if (cpu_key.type != BTRFS_CHUNK_ITEM_KEY) {

        chunk = (struct btrfs_chunk *)(ptr + cur + sizeof(*disk_key));
        num_stripes = btrfs_stack_chunk_num_stripes(chunk);
        chunk_len = btrfs_chunk_item_size(num_stripes) +

        if (key->objectid == cpu_key.objectid &&
            key->offset == cpu_key.offset &&
            key->type == cpu_key.type) {
            memmove(ptr + cur, ptr + cur + chunk_len,
                    array_size - cur - chunk_len);
            array_size -= chunk_len;
            btrfs_set_super_sys_array_size(super, array_size);
static int free_chunk_item(struct btrfs_trans_handle *trans,
                           struct btrfs_fs_info *fs_info,

    struct btrfs_path *path;
    struct btrfs_key key;
    struct btrfs_root *root = fs_info->chunk_root;
    struct btrfs_chunk *chunk;

    key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
    key.offset = bytenr;
    key.type = BTRFS_CHUNK_ITEM_KEY;

    path = btrfs_alloc_path();

    ret = btrfs_search_slot(trans, root, &key, path, -1, 1);

    chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
                           struct btrfs_chunk);
    chunk_type = btrfs_chunk_type(path->nodes[0], chunk);

    ret = btrfs_del_item(trans, root, path);

    if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
        ret = free_system_chunk_item(fs_info->super_copy, &key);

    btrfs_free_path(path);
static u64 get_dev_extent_len(struct map_lookup *map)

    switch (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
    case 0: /* Single */
    case BTRFS_BLOCK_GROUP_DUP:
    case BTRFS_BLOCK_GROUP_RAID1:

    case BTRFS_BLOCK_GROUP_RAID5:
        div = (map->num_stripes - 1);

    case BTRFS_BLOCK_GROUP_RAID6:
        div = (map->num_stripes - 2);

    case BTRFS_BLOCK_GROUP_RAID10:
        div = (map->num_stripes / map->sub_stripes);

        /* normally, the read chunk security hook should have handled it */

    return map->ce.size / div;
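/*
 * Worked example (not in the original source) for the divisor above: a
 * RAID10 chunk with num_stripes = 4 and sub_stripes = 2 gets div = 4 / 2 = 2,
 * so each device extent is ce.size / 2 bytes long; a 4-stripe RAID5 chunk
 * gives ce.size / 3 and a 4-stripe RAID6 chunk ce.size / 2, while
 * single/DUP/RAID1 keep the full ce.size per device extent.
 */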
/* free block group/chunk related caches */
static int free_block_group_cache(struct btrfs_trans_handle *trans,
                                  struct btrfs_fs_info *fs_info,
                                  u64 bytenr, u64 len)

    struct btrfs_block_group_cache *cache;
    struct cache_extent *ce;
    struct map_lookup *map;

    /* Free block group cache first */
    cache = btrfs_lookup_block_group(fs_info, bytenr);

    flags = cache->flags;
    if (cache->free_space_ctl) {
        btrfs_remove_free_space_cache(cache);
        kfree(cache->free_space_ctl);

    clear_extent_bits(&fs_info->block_group_cache, bytenr, bytenr + len - 1,

    ret = free_space_info(fs_info, flags, len, 0, NULL);

    /* Then free mapping info and dev usage info */
    ce = search_cache_extent(&fs_info->mapping_tree.cache_tree, bytenr);
    if (!ce || ce->start != bytenr) {

    map = container_of(ce, struct map_lookup, ce);
    for (i = 0; i < map->num_stripes; i++) {
        struct btrfs_device *device;

        device = map->stripes[i].dev;
        device->bytes_used -= get_dev_extent_len(map);
        ret = btrfs_update_device(trans, device);

    remove_cache_extent(&fs_info->mapping_tree.cache_tree, ce);
int btrfs_free_block_group(struct btrfs_trans_handle *trans,
                           struct btrfs_fs_info *fs_info, u64 bytenr, u64 len)

    struct btrfs_root *extent_root = fs_info->extent_root;
    struct btrfs_path *path;
    struct btrfs_block_group_item *bgi;
    struct btrfs_key key;

    path = btrfs_alloc_path();

    key.objectid = bytenr;
    key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;

    /* Double check the block group to ensure it's empty */
    ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);

    bgi = btrfs_item_ptr(path->nodes[0], path->slots[0],
                         struct btrfs_block_group_item);
    if (btrfs_disk_block_group_used(path->nodes[0], bgi)) {
            "WARNING: block group [%llu,%llu) is not empty\n",
            bytenr, bytenr + len);

    btrfs_release_path(path);

    /*
     * Now pin all space in the block group, to prevent further
     * transactions from allocating space out of it.
     * Every operation that needs a transaction must stay inside this range.
     */
    btrfs_pin_extent(fs_info, bytenr, len);

    /* delete block group item and chunk item */
    ret = free_block_group_item(trans, fs_info, bytenr, len);
            "failed to free block group item for [%llu,%llu)\n",
            bytenr, bytenr + len);
        btrfs_unpin_extent(fs_info, bytenr, len);

    ret = free_chunk_dev_extent_items(trans, fs_info, bytenr);
            "failed to free dev extents belonging to [%llu,%llu)\n",
            bytenr, bytenr + len);
        btrfs_unpin_extent(fs_info, bytenr, len);

    ret = free_chunk_item(trans, fs_info, bytenr);
            "failed to free chunk for [%llu,%llu)\n",
            bytenr, bytenr + len);
        btrfs_unpin_extent(fs_info, bytenr, len);

    /* Now release the block_group_cache */
    ret = free_block_group_cache(trans, fs_info, bytenr, len);
    btrfs_unpin_extent(fs_info, bytenr, len);

    btrfs_free_path(path);
/*
 * Fixup block accounting. The initial block accounting created by
 * make_block_groups isn't accurate in this case.
 */
int btrfs_fix_block_accounting(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root)

    struct btrfs_path path;
    struct btrfs_key key;
    struct extent_buffer *leaf;
    struct btrfs_block_group_cache *cache;
    struct btrfs_fs_info *fs_info = root->fs_info;

    root = root->fs_info->extent_root;

    while (extent_root_pending_ops(fs_info)) {
        ret = finish_current_insert(trans, root);

        ret = del_pending_extents(trans, root);

        cache = btrfs_lookup_first_block_group(fs_info, start);

        start = cache->key.objectid + cache->key.offset;
        btrfs_set_block_group_used(&cache->item, 0);
        cache->space_info->bytes_used = 0;
        set_extent_bits(&root->fs_info->block_group_cache,
                        cache->key.objectid,
                        cache->key.objectid + cache->key.offset - 1,

    btrfs_init_path(&path);

    key.type = BTRFS_EXTENT_ITEM_KEY;
    ret = btrfs_search_slot(trans, root->fs_info->extent_root,

        leaf = path.nodes[0];
        slot = path.slots[0];
        if (slot >= btrfs_header_nritems(leaf)) {
            ret = btrfs_next_leaf(root, &path);

            leaf = path.nodes[0];
            slot = path.slots[0];

        btrfs_item_key_to_cpu(leaf, &key, slot);
        if (key.type == BTRFS_EXTENT_ITEM_KEY) {
            bytes_used += key.offset;
            ret = btrfs_update_block_group(trans, root,
                    key.objectid, key.offset, 1, 0);

        } else if (key.type == BTRFS_METADATA_ITEM_KEY) {
            bytes_used += fs_info->nodesize;
            ret = btrfs_update_block_group(trans, root,
                    key.objectid, fs_info->nodesize, 1, 0);

    btrfs_set_super_bytes_used(root->fs_info->super_copy, bytes_used);

    btrfs_release_path(&path);
static void __get_extent_size(struct btrfs_root *root, struct btrfs_path *path,
                              u64 *start, u64 *len)

    struct btrfs_key key;

    btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
    BUG_ON(!(key.type == BTRFS_EXTENT_ITEM_KEY ||
             key.type == BTRFS_METADATA_ITEM_KEY));
    *start = key.objectid;
    if (key.type == BTRFS_EXTENT_ITEM_KEY)

        *len = root->fs_info->nodesize;
/*
 * Find first overlap extent for range [bytenr, bytenr + len)
 * Return 0 for found and point path to it.
 * Return >0 for not found.
 */
int btrfs_search_overlap_extent(struct btrfs_root *root,
                                struct btrfs_path *path, u64 bytenr, u64 len)

    struct btrfs_key key;

    key.objectid = bytenr;
    key.type = BTRFS_EXTENT_DATA_KEY;
    key.offset = (u64)-1;

    ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);

    ret = btrfs_previous_extent_item(root, path, 0);

        /* no previous, check next extent */

    __get_extent_size(root, path, &cur_start, &cur_len);

    if (cur_start + cur_len > bytenr)

    ret = btrfs_next_extent_item(root, path, bytenr + len);

        /* No next, prev already checked, no overlap */

    __get_extent_size(root, path, &cur_start, &cur_len);

    if (cur_start < bytenr + len)
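/*
 * Illustrative sketch, not part of the original source: how the overlap
 * search above is consumed.  The numbers are hypothetical.
 *
 *    ret = btrfs_search_overlap_extent(extent_root, path, 1048576, 65536);
 *    if (ret == 0) {
 *        // path now points at an EXTENT_ITEM/METADATA_ITEM whose range
 *        // intersects [1048576, 1048576 + 65536); __get_extent_size()
 *        // can then be used to read its start and length.
 *    } else if (ret > 0) {
 *        // no existing extent overlaps the range
 *    }
 */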
static int __btrfs_record_file_extent(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root, u64 objectid,
                                      struct btrfs_inode_item *inode,
                                      u64 file_pos, u64 disk_bytenr,

    struct btrfs_fs_info *info = root->fs_info;
    struct btrfs_root *extent_root = info->extent_root;
    struct extent_buffer *leaf;
    struct btrfs_file_extent_item *fi;
    struct btrfs_key ins_key;
    struct btrfs_path *path;
    struct btrfs_extent_item *ei;

    u64 extent_num_bytes;

    u64 num_bytes = *ret_num_bytes;

    /*
     * All supported file systems should not use their 0 extent.
     *
     * And a hole extent has no size limit, so there is no need to loop.
     */
    if (disk_bytenr == 0) {
        ret = btrfs_insert_file_extent(trans, root, objectid,
                                       file_pos, disk_bytenr,
                                       num_bytes, num_bytes);

    num_bytes = min_t(u64, num_bytes, BTRFS_MAX_EXTENT_SIZE);

    path = btrfs_alloc_path();

    /* First to check extent overlap */
    ret = btrfs_search_overlap_extent(extent_root, path, disk_bytenr,

        __get_extent_size(extent_root, path, &cur_start, &cur_len);
        /*
         * For convert case, this extent should be a subset of
         */
        BUG_ON(disk_bytenr < cur_start);

        extent_bytenr = cur_start;
        extent_num_bytes = cur_len;
        extent_offset = disk_bytenr - extent_bytenr;

        /* No overlap, create new extent */
        btrfs_release_path(path);
        ins_key.objectid = disk_bytenr;
        ins_key.offset = num_bytes;
        ins_key.type = BTRFS_EXTENT_ITEM_KEY;

        ret = btrfs_insert_empty_item(trans, extent_root, path,
                                      &ins_key, sizeof(*ei));

            leaf = path->nodes[0];
            ei = btrfs_item_ptr(leaf, path->slots[0],
                                struct btrfs_extent_item);

            btrfs_set_extent_refs(leaf, ei, 0);
            btrfs_set_extent_generation(leaf, ei, 0);
            btrfs_set_extent_flags(leaf, ei,
                                   BTRFS_EXTENT_FLAG_DATA);
            btrfs_mark_buffer_dirty(leaf);

            ret = btrfs_update_block_group(trans, root, disk_bytenr,

        } else if (ret != -EEXIST) {

        btrfs_extent_post_op(trans, extent_root);
        extent_bytenr = disk_bytenr;
        extent_num_bytes = num_bytes;

    btrfs_release_path(path);
    ins_key.objectid = objectid;
    ins_key.offset = file_pos;
    ins_key.type = BTRFS_EXTENT_DATA_KEY;
    ret = btrfs_insert_empty_item(trans, root, path, &ins_key,

    leaf = path->nodes[0];
    fi = btrfs_item_ptr(leaf, path->slots[0],
                        struct btrfs_file_extent_item);
    btrfs_set_file_extent_generation(leaf, fi, trans->transid);
    btrfs_set_file_extent_type(leaf, fi, BTRFS_FILE_EXTENT_REG);
    btrfs_set_file_extent_disk_bytenr(leaf, fi, extent_bytenr);
    btrfs_set_file_extent_disk_num_bytes(leaf, fi, extent_num_bytes);
    btrfs_set_file_extent_offset(leaf, fi, extent_offset);
    btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
    btrfs_set_file_extent_ram_bytes(leaf, fi, extent_num_bytes);
    btrfs_set_file_extent_compression(leaf, fi, 0);
    btrfs_set_file_extent_encryption(leaf, fi, 0);
    btrfs_set_file_extent_other_encoding(leaf, fi, 0);
    btrfs_mark_buffer_dirty(leaf);

    nbytes = btrfs_stack_inode_nbytes(inode) + num_bytes;
    btrfs_set_stack_inode_nbytes(inode, nbytes);
    btrfs_release_path(path);

    ret = btrfs_inc_extent_ref(trans, root, extent_bytenr, extent_num_bytes,
                               0, root->root_key.objectid, objectid,
                               file_pos - extent_offset);

    *ret_num_bytes = min(extent_num_bytes - extent_offset, num_bytes);

    btrfs_free_path(path);
/*
 * Record a file extent. Do all the required work, such as inserting the
 * file extent item, inserting the extent item and backref item into the
 * extent tree, and updating block accounting.
 */
int btrfs_record_file_extent(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root, u64 objectid,
                             struct btrfs_inode_item *inode,
                             u64 file_pos, u64 disk_bytenr,

    u64 cur_disk_bytenr = disk_bytenr;
    u64 cur_file_pos = file_pos;
    u64 cur_num_bytes = num_bytes;

    while (num_bytes > 0) {
        ret = __btrfs_record_file_extent(trans, root, objectid,
                                         inode, cur_file_pos,

        cur_disk_bytenr += cur_num_bytes;
        cur_file_pos += cur_num_bytes;
        num_bytes -= cur_num_bytes;
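/*
 * Illustrative sketch, not part of the original source: how convert might
 * record a mapped file range with the function above.  The inode item,
 * objectid and offsets are example values.
 *
 *    ret = btrfs_record_file_extent(trans, root, objectid, &inode_item,
 *                                   file_pos, disk_bytenr, num_bytes);
 *
 * Internally this loops over __btrfs_record_file_extent(), which may split
 * the range so that each piece fits inside one extent item already present
 * in (or newly inserted into) the extent tree.
 */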
static int add_excluded_extent(struct btrfs_root *root,
                               u64 start, u64 num_bytes)

    u64 end = start + num_bytes - 1;
    set_extent_bits(&root->fs_info->pinned_extents,
                    start, end, EXTENT_UPTODATE);
void free_excluded_extents(struct btrfs_root *root,
                           struct btrfs_block_group_cache *cache)

    start = cache->key.objectid;
    end = start + cache->key.offset - 1;

    clear_extent_bits(&root->fs_info->pinned_extents,
                      start, end, EXTENT_UPTODATE);
int exclude_super_stripes(struct btrfs_root *root,
                          struct btrfs_block_group_cache *cache)

    if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
        stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
        cache->bytes_super += stripe_len;
        ret = add_excluded_extent(root, cache->key.objectid,

    for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
        bytenr = btrfs_sb_offset(i);
        ret = btrfs_rmap_block(root->fs_info,
                               cache->key.objectid, bytenr,
                               0, &logical, &nr, &stripe_len);

            if (logical[nr] > cache->key.objectid +

            if (logical[nr] + stripe_len <= cache->key.objectid)

            start = logical[nr];
            if (start < cache->key.objectid) {
                start = cache->key.objectid;
                len = (logical[nr] + stripe_len) - start;

                len = min_t(u64, stripe_len,
                            cache->key.objectid +
                            cache->key.offset - start);

            cache->bytes_super += len;
            ret = add_excluded_extent(root, start, len);
u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
                       struct btrfs_fs_info *info, u64 start, u64 end)

    u64 extent_start, extent_end, size, total_added = 0;

    while (start < end) {
        ret = find_first_extent_bit(&info->pinned_extents, start,
                                    &extent_start, &extent_end,
                                    EXTENT_DIRTY | EXTENT_UPTODATE);

        if (extent_start <= start) {
            start = extent_end + 1;
        } else if (extent_start > start && extent_start < end) {
            size = extent_start - start;
            total_added += size;
            ret = btrfs_add_free_space(block_group->free_space_ctl,

            BUG_ON(ret); /* -ENOMEM or logic error */
            start = extent_end + 1;

        total_added += size;
        ret = btrfs_add_free_space(block_group->free_space_ctl, start,

        BUG_ON(ret); /* -ENOMEM or logic error */