/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include "kerncompat.h"
#include "radix-tree.h"
#include "print-tree.h"
#include "transaction.h"
#include "free-space-cache.h"
#define PENDING_EXTENT_INSERT 0
#define PENDING_EXTENT_DELETE 1
#define PENDING_BACKREF_UPDATE 2

struct pending_extent_op {
	struct btrfs_disk_key key;
};
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     u64 root_objectid, u64 generation,
				     u64 flags, struct btrfs_disk_key *key,
				     int level, struct btrfs_key *ins);
static int __free_extent(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 u64 bytenr, u64 num_bytes, u64 parent,
			 u64 root_objectid, u64 owner_objectid,
			 u64 owner_offset, int refs_to_drop);
static int finish_current_insert(struct btrfs_trans_handle *trans, struct
				 btrfs_root *extent_root);
static int del_pending_extents(struct btrfs_trans_handle *trans, struct
			       btrfs_root *extent_root);
static struct btrfs_block_group_cache *
btrfs_find_block_group(struct btrfs_root *root, struct btrfs_block_group_cache
		       *hint, u64 search_start, int data, int owner);

static int remove_sb_from_cache(struct btrfs_root *root,
				struct btrfs_block_group_cache *cache)
	struct extent_io_tree *free_space_cache;

	free_space_cache = &root->fs_info->free_space_cache;
	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
				       cache->key.objectid, bytenr, 0,
				       &logical, &nr, &stripe_len);
			clear_extent_dirty(free_space_cache, logical[nr],
					   logical[nr] + stripe_len - 1, GFP_NOFS);
static int cache_block_group(struct btrfs_root *root,
			     struct btrfs_block_group_cache *block_group)
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct extent_io_tree *free_space_cache;

	root = root->fs_info->extent_root;
	free_space_cache = &root->fs_info->free_space_cache;

	if (block_group->cached)
	path = btrfs_alloc_path();
	last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid < block_group->key.objectid) {
		if (key.objectid >= block_group->key.objectid +
		    block_group->key.offset) {
		if (key.type == BTRFS_EXTENT_ITEM_KEY ||
		    key.type == BTRFS_METADATA_ITEM_KEY) {
			if (key.objectid > last) {
				hole_size = key.objectid - last;
				set_extent_dirty(free_space_cache, last,
						 last + hole_size - 1,
			if (key.type == BTRFS_METADATA_ITEM_KEY)
				last = key.objectid + root->leafsize;
				last = key.objectid + key.offset;
	if (block_group->key.objectid +
	    block_group->key.offset > last) {
		hole_size = block_group->key.objectid +
			    block_group->key.offset - last;
		set_extent_dirty(free_space_cache, last,
				 last + hole_size - 1, GFP_NOFS);
	remove_sb_from_cache(root, block_group);
	block_group->cached = 1;
	btrfs_free_path(path);
struct btrfs_block_group_cache *btrfs_lookup_first_block_group(struct
	struct extent_io_tree *block_group_cache;
	struct btrfs_block_group_cache *block_group = NULL;

	bytenr = max_t(u64, bytenr,
		       BTRFS_SUPER_INFO_OFFSET + BTRFS_SUPER_INFO_SIZE);
	block_group_cache = &info->block_group_cache;
	ret = find_first_extent_bit(block_group_cache,
				    bytenr, &start, &end,
				    BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA |
	ret = get_state_private(block_group_cache, start, &ptr);
	block_group = (struct btrfs_block_group_cache *)(unsigned long)ptr;

struct btrfs_block_group_cache *btrfs_lookup_block_group(struct
	struct extent_io_tree *block_group_cache;
	struct btrfs_block_group_cache *block_group = NULL;

	block_group_cache = &info->block_group_cache;
	ret = find_first_extent_bit(block_group_cache,
				    bytenr, &start, &end,
				    BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA |
	ret = get_state_private(block_group_cache, start, &ptr);
	block_group = (struct btrfs_block_group_cache *)(unsigned long)ptr;
	if (block_group->key.objectid <= bytenr && bytenr <
	    block_group->key.objectid + block_group->key.offset)
static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
	return (cache->flags & bits) == bits;

static int noinline find_search_start(struct btrfs_root *root,
				      struct btrfs_block_group_cache **cache_ret,
				      u64 *start_ret, int num, int data)
	struct btrfs_block_group_cache *cache = *cache_ret;
	u64 last = *start_ret;
	u64 search_start = *start_ret;

	ret = cache_block_group(root, cache);
	last = max(search_start, cache->key.objectid);
	if (cache->ro || !block_group_bits(cache, data))
	ret = find_first_extent_bit(&root->fs_info->free_space_cache,
				    last, &start, &end, EXTENT_DIRTY);
	start = max(last, start);
	if (last - start < num) {
	if (start + num > cache->key.objectid + cache->key.offset) {
	cache = btrfs_lookup_block_group(root->fs_info, search_start);
		printk("Unable to find block group for %llu\n",
		       (unsigned long long)search_start);
	last = cache->key.objectid + cache->key.offset;
	cache = btrfs_lookup_first_block_group(root->fs_info, last);

static int block_group_state_bits(u64 flags)
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		bits |= BLOCK_GROUP_DATA;
	if (flags & BTRFS_BLOCK_GROUP_METADATA)
		bits |= BLOCK_GROUP_METADATA;
	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		bits |= BLOCK_GROUP_SYSTEM;
static struct btrfs_block_group_cache *
btrfs_find_block_group(struct btrfs_root *root, struct btrfs_block_group_cache
		       *hint, u64 search_start, int data, int owner)
	struct btrfs_block_group_cache *cache;
	struct extent_io_tree *block_group_cache;
	struct btrfs_block_group_cache *found_group = NULL;
	struct btrfs_fs_info *info = root->fs_info;

	block_group_cache = &info->block_group_cache;
	bit = block_group_state_bits(data);
		struct btrfs_block_group_cache *shint;
		shint = btrfs_lookup_block_group(info, search_start);
		if (shint && !shint->ro && block_group_bits(shint, data)) {
			used = btrfs_block_group_used(&shint->item);
			if (used + shint->pinned <
			    div_factor(shint->key.offset, factor)) {
	if (hint && !hint->ro && block_group_bits(hint, data)) {
		used = btrfs_block_group_used(&hint->item);
		if (used + hint->pinned <
		    div_factor(hint->key.offset, factor)) {
		last = hint->key.objectid + hint->key.offset;
			hint_last = max(hint->key.objectid, search_start);
			hint_last = search_start;
		ret = find_first_extent_bit(block_group_cache, last,
		ret = get_state_private(block_group_cache, start, &ptr);
		cache = (struct btrfs_block_group_cache *)(unsigned long)ptr;
		last = cache->key.objectid + cache->key.offset;
		used = btrfs_block_group_used(&cache->item);

		if (!cache->ro && block_group_bits(cache, data)) {
				free_check = cache->key.offset;
				free_check = div_factor(cache->key.offset,
			if (used + cache->pinned < free_check) {
/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs.  Implicit back refs are optimized
 * for pointers in non-shared tree blocks.  For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key.  This information allows us to find the block by
 * searching the b-tree.  Full back refs are for pointers in tree blocks not
 * referenced by their owner trees.  The location of the tree block is
 * recorded in the back refs.  The full back ref is generic and can be used
 * in all cases where the implicit back ref is used.  The major shortcoming
 * of the full back ref is its overhead: every time a tree block gets COWed,
 * we have to update the back ref entries for all pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it.  This means most tree related operations only involve
 * implicit back refs.  For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it.  So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COW'd through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree.  Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree.  In this case, full back refs are used for pointers
 * in the block.  Remove these full back refs, add implicit back refs for
 * every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree.  In this case, implicit back refs are used for
 * pointers in the block.  Add full back refs for every pointer in the
 * block, increase lower level extents' reference counts.  The original
 * implicit back refs are entailed to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree.  Add implicit back refs for every pointer in
 * the new block, increase lower level extents' reference count.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent,
 * the key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - Objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is a hash of the first
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 *
 * When a file extent is allocated, the implicit back refs are used and
 * the fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed during file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of a key.  The key offset for the implicit back refs is the
 * objectid of the block's owner tree.  The key offset for the full back
 * refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * level of the tree block is required.  This information is stored in the
 * tree block info structure.
 */
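/*
 * A minimal illustration of the key composition described above.  The names
 * extent_bytenr, root_key, inode_objectid and file_offset are only examples;
 * hash_extent_data_ref() is the helper defined later in this file:
 *
 *	struct btrfs_key ref_key;
 *
 *	ref_key.objectid = extent_bytenr;		(first byte of the extent)
 *	ref_key.type = BTRFS_EXTENT_DATA_REF_KEY;	(implicit data back ref)
 *	ref_key.offset = hash_extent_data_ref(root_key.objectid,
 *					      inode_objectid, file_offset);
 *
 * A full (shared) back ref instead uses BTRFS_SHARED_DATA_REF_KEY or
 * BTRFS_SHARED_BLOCK_REF_KEY with the parent block's byte number as the
 * key offset.
 */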
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  u64 owner, u32 extra_size)
	struct btrfs_extent_item *item;
	struct btrfs_extent_item_v0 *ei0;
	struct btrfs_extent_ref_v0 *ref0;
	struct btrfs_tree_block_info *bi;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u32 new_size = sizeof(*item);

	leaf = path->nodes[0];
	BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	ei0 = btrfs_item_ptr(leaf, path->slots[0],
			     struct btrfs_extent_item_v0);
	refs = btrfs_extent_refs_v0(leaf, ei0);

	if (owner == (u64)-1) {
			if (path->slots[0] >= btrfs_header_nritems(leaf)) {
				ret = btrfs_next_leaf(root, path);
				leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &found_key,
			BUG_ON(key.objectid != found_key.objectid);
			if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			owner = btrfs_ref_objectid_v0(leaf, ref0);
	btrfs_release_path(path);

	if (owner < BTRFS_FIRST_FREE_OBJECTID)
		new_size += sizeof(*bi);

	new_size -= sizeof(*ei0);
	ret = btrfs_search_slot(trans, root, &key, path, new_size, 1);

	ret = btrfs_extend_item(trans, root, path, new_size);

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, item, refs);
	/* FIXME: get real generation */
	btrfs_set_extent_generation(leaf, item, 0);
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		btrfs_set_extent_flags(leaf, item,
				       BTRFS_EXTENT_FLAG_TREE_BLOCK |
				       BTRFS_BLOCK_FLAG_FULL_BACKREF);
		bi = (struct btrfs_tree_block_info *)(item + 1);
		/* FIXME: get first key of the block */
		memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
		btrfs_set_tree_block_level(leaf, bi, (int)owner);
		btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
	btrfs_mark_buffer_dirty(leaf);
static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
	u32 high_crc = ~(u32)0;
	u32 low_crc = ~(u32)0;

	lenum = cpu_to_le64(root_objectid);
	high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(owner);
	low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(offset);
	low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));

	return ((u64)high_crc << 31) ^ (u64)low_crc;
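/*
 * The value computed above becomes the key offset of implicit data back ref
 * items: lookup_extent_data_ref() and insert_extent_data_ref() below both set
 *
 *	key.type = BTRFS_EXTENT_DATA_REF_KEY;
 *	key.offset = hash_extent_data_ref(root_objectid, owner, offset);
 *
 * Note that the high CRC is shifted by 31 bits, exactly as written above.
 */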
static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
				     struct btrfs_extent_data_ref *ref)
	return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
				    btrfs_extent_data_ref_objectid(leaf, ref),
				    btrfs_extent_data_ref_offset(leaf, ref));

static int match_extent_data_ref(struct extent_buffer *leaf,
				 struct btrfs_extent_data_ref *ref,
				 u64 root_objectid, u64 owner, u64 offset)
	if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 owner, u64 offset)
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref;
	struct extent_buffer *leaf;

	key.objectid = bytenr;
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		btrfs_release_path(path);
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != bytenr ||
		    key.type != BTRFS_EXTENT_DATA_REF_KEY)

		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);

		if (match_extent_data_ref(leaf, ref, root_objectid,
	btrfs_release_path(path);
static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid, u64 owner,
					   u64 offset, int refs_to_add)
	struct btrfs_key key;
	struct extent_buffer *leaf;

	key.objectid = bytenr;
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		size = sizeof(struct btrfs_shared_data_ref);
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
		size = sizeof(struct btrfs_extent_data_ref);

	ret = btrfs_insert_empty_item(trans, root, path, &key, size);
	if (ret && ret != -EEXIST)

	leaf = path->nodes[0];
		struct btrfs_shared_data_ref *ref;
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_shared_data_ref);
			btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
			num_refs = btrfs_shared_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
		struct btrfs_extent_data_ref *ref;
		while (ret == -EEXIST) {
			ref = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_data_ref);
			if (match_extent_data_ref(leaf, ref, root_objectid,
			btrfs_release_path(path);
			ret = btrfs_insert_empty_item(trans, root, path, &key,
			if (ret && ret != -EEXIST)

			leaf = path->nodes[0];
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);
			btrfs_set_extent_data_ref_root(leaf, ref,
			btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
			btrfs_set_extent_data_ref_offset(leaf, ref, offset);
			btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
			num_refs = btrfs_extent_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);
static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref1 = NULL;
	struct btrfs_shared_data_ref *ref2 = NULL;
	struct extent_buffer *leaf;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);

	BUG_ON(num_refs < refs_to_drop);
	num_refs -= refs_to_drop;
		ret = btrfs_del_item(trans, root, path);
		if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
		else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
			btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
			struct btrfs_extent_ref_v0 *ref0;
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			btrfs_set_ref_count_v0(leaf, ref0, num_refs);
		btrfs_mark_buffer_dirty(leaf);
static noinline u32 extent_data_ref_count(struct btrfs_root *root,
					  struct btrfs_path *path,
					  struct btrfs_extent_inline_ref *iref)
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref1;
	struct btrfs_shared_data_ref *ref2;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (btrfs_extent_inline_ref_type(leaf, iref) ==
		    BTRFS_EXTENT_DATA_REF_KEY) {
			ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
			num_refs = btrfs_extent_data_ref_count(leaf, ref1);
			ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
			num_refs = btrfs_shared_data_ref_count(leaf, ref2);
	} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
	struct btrfs_key key;

	key.objectid = bytenr;
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (ret == -ENOENT && parent) {
		btrfs_release_path(path);
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);

static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
	struct btrfs_key key;

	key.objectid = bytenr;
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
	btrfs_release_path(path);
static inline int extent_ref_type(u64 parent, u64 owner)
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
			type = BTRFS_SHARED_BLOCK_REF_KEY;
			type = BTRFS_TREE_BLOCK_REF_KEY;
			type = BTRFS_SHARED_DATA_REF_KEY;
			type = BTRFS_EXTENT_DATA_REF_KEY;
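/*
 * In short, extent_ref_type() picks the back ref key type as follows
 * (owner < BTRFS_FIRST_FREE_OBJECTID means the extent is a tree block):
 *
 *	tree block, parent set   -> BTRFS_SHARED_BLOCK_REF_KEY (full back ref)
 *	tree block, no parent    -> BTRFS_TREE_BLOCK_REF_KEY   (implicit)
 *	data extent, parent set  -> BTRFS_SHARED_DATA_REF_KEY  (full back ref)
 *	data extent, no parent   -> BTRFS_EXTENT_DATA_REF_KEY  (implicit)
 */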
static int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct btrfs_extent_inline_ref **ref_ret,
					u64 bytenr, u64 num_bytes,
					u64 parent, u64 root_objectid,
					u64 owner, u64 offset, int insert)
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	int skinny_metadata =
		btrfs_fs_incompat(root->fs_info,
				  BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA);

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;

	want = extent_ref_type(parent, owner);
		extra_size = btrfs_extent_inline_ref_size(want);

	if (owner < BTRFS_FIRST_FREE_OBJECTID && skinny_metadata) {
		skinny_metadata = 1;
		key.type = BTRFS_METADATA_ITEM_KEY;
	} else if (skinny_metadata) {
		skinny_metadata = 0;

	ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);

	/*
	 * We may be a newly converted file system which still has the old fat
	 * extent entries for metadata, so try and see if we have one of those.
	 */
	if (ret > 0 && skinny_metadata) {
		skinny_metadata = 0;
		if (path->slots[0]) {
			btrfs_item_key_to_cpu(path->nodes[0], &key,
			if (key.objectid == bytenr &&
			    key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == num_bytes)
			key.type = BTRFS_EXTENT_ITEM_KEY;
			key.offset = num_bytes;
			btrfs_release_path(path);

		printf("Failed to find [%llu, %u, %llu]\n", key.objectid, key.type, key.offset);

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		ret = convert_extent_item_v0(trans, root, path, owner,
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);

	if (item_size < sizeof(*ei)) {
		printf("Size is %u, needs to be %u, slot %d\n",
		       (unsigned)item_size,
		       (unsigned)sizeof(*ei), path->slots[0]);
		btrfs_print_leaf(root, leaf);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
		ptr += sizeof(struct btrfs_tree_block_info);
	} else if (!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) {
		if (!(flags & BTRFS_EXTENT_FLAG_DATA)) {

		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_extent_inline_ref_type(leaf, iref);
			ptr += btrfs_extent_inline_ref_size(type);

		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			struct btrfs_extent_data_ref *dref;
			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			if (match_extent_data_ref(leaf, dref, root_objectid,
			if (hash_extent_data_ref_item(leaf, dref) <
			    hash_extent_data_ref(root_objectid, owner, offset))
			ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
				if (parent == ref_offset) {
				if (ref_offset < parent)
				if (root_objectid == ref_offset) {
				if (ref_offset < root_objectid)
		ptr += btrfs_extent_inline_ref_size(type);
	if (err == -ENOENT && insert) {
		if (item_size + extra_size >=
		    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
		/*
		 * To add new inline back ref, we have to make sure
		 * there is no corresponding back ref item.
		 * For simplicity, we just do not add new inline back
		 * ref if there is any back ref item.
		 */
		if (find_next_key(path, &key) == 0 && key.objectid == bytenr &&
		    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
	*ref_ret = (struct btrfs_extent_inline_ref *)ptr;
static int setup_inline_extent_backref(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_path *path,
				       struct btrfs_extent_inline_ref *iref,
				       u64 parent, u64 root_objectid,
				       u64 owner, u64 offset, int refs_to_add)
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	unsigned long item_offset;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	item_offset = (unsigned long)iref - (unsigned long)ei;

	type = extent_ref_type(parent, owner);
	size = btrfs_extent_inline_ref_size(type);

	ret = btrfs_extend_item(trans, root, path, size);

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	refs += refs_to_add;
	btrfs_set_extent_refs(leaf, ei, refs);

	ptr = (unsigned long)ei + item_offset;
	end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
	if (ptr < end - size)
		memmove_extent_buffer(leaf, ptr + size, ptr,

	iref = (struct btrfs_extent_inline_ref *)ptr;
	btrfs_set_extent_inline_ref_type(leaf, iref, type);
	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		struct btrfs_extent_data_ref *dref;
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
		btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
		btrfs_set_extent_data_ref_offset(leaf, dref, offset);
		btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		struct btrfs_shared_data_ref *sref;
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
	btrfs_mark_buffer_dirty(leaf);
static int lookup_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner, u64 offset)
	ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 0);
	btrfs_release_path(path);

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
		ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
					     root_objectid, owner, offset);
static int update_inline_extent_backref(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct btrfs_extent_inline_ref *iref,
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_data_ref *dref = NULL;
	struct btrfs_shared_data_ref *sref = NULL;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
	refs += refs_to_mod;
	btrfs_set_extent_refs(leaf, ei, refs);

	type = btrfs_extent_inline_ref_type(leaf, iref);

	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		refs = btrfs_extent_data_ref_count(leaf, dref);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		refs = btrfs_shared_data_ref_count(leaf, sref);
		BUG_ON(refs_to_mod != -1);

	BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
	refs += refs_to_mod;

		if (type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, dref, refs);
			btrfs_set_shared_data_ref_count(leaf, sref, refs);
		size = btrfs_extent_inline_ref_size(type);
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		ptr = (unsigned long)iref;
		end = (unsigned long)ei + item_size;
		if (ptr + size < end)
			memmove_extent_buffer(leaf, ptr, ptr + size,
		ret = btrfs_truncate_item(trans, root, path, item_size, 1);
	btrfs_mark_buffer_dirty(leaf);
static int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					u64 bytenr, u64 num_bytes, u64 parent,
					u64 root_objectid, u64 owner,
					u64 offset, int refs_to_add)
	struct btrfs_extent_inline_ref *iref;

	ret = lookup_inline_extent_backref(trans, root, path, &iref,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 1);
		BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
		ret = update_inline_extent_backref(trans, root, path, iref,
	} else if (ret == -ENOENT) {
		ret = setup_inline_extent_backref(trans, root, path, iref,
						  parent, root_objectid,
						  owner, offset, refs_to_add);

static int insert_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 bytenr, u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int refs_to_add)
	if (owner >= BTRFS_FIRST_FREE_OBJECTID) {
		ret = insert_extent_data_ref(trans, root, path, bytenr,
					     parent, root_objectid,
					     owner, offset, refs_to_add);
		BUG_ON(refs_to_add != 1);
		ret = insert_tree_block_ref(trans, root, path, bytenr,
					    parent, root_objectid);
static int remove_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 int refs_to_drop, int is_data)
	BUG_ON(!is_data && refs_to_drop != 1);
		ret = update_inline_extent_backref(trans, root, path, iref,
	} else if (is_data) {
		ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
		ret = btrfs_del_item(trans, root, path);
int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 u64 bytenr, u64 num_bytes, u64 parent,
			 u64 root_objectid, u64 owner, u64 offset)
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *item;

	path = btrfs_alloc_path();

	ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
					   path, bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 1);
	if (ret != -EAGAIN) {

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, item);
	btrfs_set_extent_refs(leaf, item, refs + 1);

	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	/* now insert the actual backref */
	ret = insert_extent_backref(trans, root->fs_info->extent_root,
				    path, bytenr, parent, root_objectid,

	btrfs_free_path(path);
	finish_current_insert(trans, root->fs_info->extent_root);
	del_pending_extents(trans, root->fs_info->extent_root);
int btrfs_extent_post_op(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root)
	finish_current_insert(trans, root->fs_info->extent_root);
	del_pending_extents(trans, root->fs_info->extent_root);
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 bytenr,
			     u64 offset, int metadata, u64 *refs, u64 *flags)
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_extent_item *item;

	    !btrfs_fs_incompat(root->fs_info,
			       BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)) {
		offset = root->leafsize;

	path = btrfs_alloc_path();

	key.objectid = bytenr;
	key.offset = offset;
		key.type = BTRFS_METADATA_ITEM_KEY;
		key.type = BTRFS_EXTENT_ITEM_KEY;

	ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,

	/*
	 * Deal with the fact that we may have mixed SKINNY and normal refs. If
	 * we didn't find what we wanted check and see if we have a normal ref
	 * right next to us, or re-search if we are on the edge of the leaf just
	 */
	if (ret > 0 && metadata) {
		if (path->slots[0]) {
			btrfs_item_key_to_cpu(path->nodes[0], &key,
			if (key.objectid == bytenr &&
			    key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == root->leafsize)

		btrfs_release_path(path);
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = root->leafsize;

	item_size = btrfs_item_size_nr(l, path->slots[0]);
	if (item_size >= sizeof(*item)) {
		item = btrfs_item_ptr(l, path->slots[0],
				      struct btrfs_extent_item);
		num_refs = btrfs_extent_refs(l, item);
		extent_flags = btrfs_extent_flags(l, item);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		struct btrfs_extent_item_v0 *ei0;
		BUG_ON(item_size != sizeof(*ei0));
		ei0 = btrfs_item_ptr(l, path->slots[0],
				     struct btrfs_extent_item_v0);
		num_refs = btrfs_extent_refs_v0(l, ei0);
		/* FIXME: this isn't correct for data */
		extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;

	item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
		*flags = extent_flags;
	btrfs_free_path(path);
int btrfs_set_block_flags(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root,
			  u64 bytenr, int level, u64 flags)
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_extent_item *item;
	int skinny_metadata =
		btrfs_fs_incompat(root->fs_info,
				  BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA);

	path = btrfs_alloc_path();

	key.objectid = bytenr;
	if (skinny_metadata) {
		key.type = BTRFS_METADATA_ITEM_KEY;
		key.offset = root->leafsize;
		key.type = BTRFS_EXTENT_ITEM_KEY;

	ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,

	if (ret > 0 && skinny_metadata) {
		skinny_metadata = 0;
		if (path->slots[0]) {
			btrfs_item_key_to_cpu(path->nodes[0], &key,
			if (key.objectid == bytenr &&
			    key.offset == root->leafsize &&
			    key.type == BTRFS_EXTENT_ITEM_KEY)
		btrfs_release_path(path);
		key.offset = root->leafsize;
		key.type = BTRFS_EXTENT_ITEM_KEY;

		btrfs_print_leaf(root, path->nodes[0]);
		printk("failed to find block number %Lu\n",
		       (unsigned long long)bytenr);

	item_size = btrfs_item_size_nr(l, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*item)) {
		ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
		item_size = btrfs_item_size_nr(l, path->slots[0]);

	BUG_ON(item_size < sizeof(*item));
	item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
	flags |= btrfs_extent_flags(l, item);
	btrfs_set_extent_flags(l, item, flags);

	btrfs_free_path(path);
	finish_current_insert(trans, root->fs_info->extent_root);
	del_pending_extents(trans, root->fs_info->extent_root);
static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct extent_buffer *buf,
			   int record_parent, int inc)
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int (*process_func)(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    u64, u64, u64, u64, u64, u64);

	ref_root = btrfs_header_owner(buf);
	nritems = btrfs_header_nritems(buf);
	level = btrfs_header_level(buf);

	if (!root->ref_cows && level == 0)

		process_func = btrfs_inc_extent_ref;
		process_func = btrfs_free_extent;

		parent = buf->start;

	for (i = 0; i < nritems; i++) {
			btrfs_item_key_to_cpu(buf, &key, i);
			if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
			fi = btrfs_item_ptr(buf, i,
					    struct btrfs_file_extent_item);
			if (btrfs_file_extent_type(buf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE)
			bytenr = btrfs_file_extent_disk_bytenr(buf, fi);

			num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
			key.offset -= btrfs_file_extent_offset(buf, fi);
			ret = process_func(trans, root, bytenr, num_bytes,
					   parent, ref_root, key.objectid,

			bytenr = btrfs_node_blockptr(buf, i);
			num_bytes = btrfs_level_size(root, level - 1);
			ret = process_func(trans, root, bytenr, num_bytes,
					   parent, ref_root, level - 1, 0);

int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		  struct extent_buffer *buf, int record_parent)
	return __btrfs_mod_ref(trans, root, buf, record_parent, 1);

int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		  struct extent_buffer *buf, int record_parent)
	return __btrfs_mod_ref(trans, root, buf, record_parent, 0);
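/*
 * btrfs_inc_ref() and btrfs_dec_ref() are thin wrappers around
 * __btrfs_mod_ref(): for a leaf it applies btrfs_inc_extent_ref() or
 * btrfs_free_extent() to the disk extent behind every regular file extent
 * item, and for a node it applies the same function to every child block
 * pointer, with buf->start used as the parent when record_parent asks for a
 * full back ref.
 */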
static int write_one_cache_group(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_block_group_cache *cache)
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	struct extent_buffer *leaf;

	ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);

	leaf = path->nodes[0];
	bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
	write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	finish_current_insert(trans, extent_root);
	pending_ret = del_pending_extents(trans, extent_root);

int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root)
	struct extent_io_tree *block_group_cache;
	struct btrfs_block_group_cache *cache;
	struct btrfs_path *path;

	block_group_cache = &root->fs_info->block_group_cache;
	path = btrfs_alloc_path();

		ret = find_first_extent_bit(block_group_cache, last,
					    &start, &end, BLOCK_GROUP_DIRTY);

		ret = get_state_private(block_group_cache, start, &ptr);

		clear_extent_bits(block_group_cache, start, end,
				  BLOCK_GROUP_DIRTY, GFP_NOFS);

		cache = (struct btrfs_block_group_cache *)(unsigned long)ptr;
		ret = write_one_cache_group(trans, root, path, cache);
	btrfs_free_path(path);
static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
	struct btrfs_space_info *found;

	flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

	list_for_each_entry(found, &info->space_info, list) {
		if (found->flags & flags)

static int free_space_info(struct btrfs_fs_info *fs_info, u64 flags,
			   u64 total_bytes, u64 bytes_used,
			   struct btrfs_space_info **space_info)
	struct btrfs_space_info *found;

	/* only support free block group which is empty */

	found = __find_space_info(fs_info, flags);
	if (found->total_bytes < total_bytes) {
			"WARNING: bad space info to free %llu only have %llu\n",
			total_bytes, found->total_bytes);
	found->total_bytes -= total_bytes;
	*space_info = found;

static int update_space_info(struct btrfs_fs_info *info, u64 flags,
			     u64 total_bytes, u64 bytes_used,
			     struct btrfs_space_info **space_info)
	struct btrfs_space_info *found;

	found = __find_space_info(info, flags);
		found->total_bytes += total_bytes;
		found->bytes_used += bytes_used;
		if (found->total_bytes < found->bytes_used) {
			fprintf(stderr, "warning, bad space info total_bytes "
				(unsigned long long)found->total_bytes,
				(unsigned long long)found->bytes_used);
		*space_info = found;

	found = kmalloc(sizeof(*found), GFP_NOFS);

	list_add(&found->list, &info->space_info);
	found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
	found->total_bytes = total_bytes;
	found->bytes_used = bytes_used;
	found->bytes_pinned = 0;
	*space_info = found;
static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
	u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
				   BTRFS_BLOCK_GROUP_RAID1 |
				   BTRFS_BLOCK_GROUP_RAID10 |
				   BTRFS_BLOCK_GROUP_RAID5 |
				   BTRFS_BLOCK_GROUP_RAID6 |
				   BTRFS_BLOCK_GROUP_DUP);

	if (flags & BTRFS_BLOCK_GROUP_DATA)
		fs_info->avail_data_alloc_bits |= extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_METADATA)
		fs_info->avail_metadata_alloc_bits |= extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		fs_info->avail_system_alloc_bits |= extra_flags;
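/*
 * set_avail_alloc_bits() only records which RAID/DUP profile bits have been
 * seen for each block group type (data, metadata, system); presumably later
 * chunk allocations consult these avail_*_alloc_bits to keep using the same
 * profiles.
 */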
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
			  struct btrfs_root *extent_root, u64 alloc_bytes,
	struct btrfs_space_info *space_info;

	space_info = __find_space_info(extent_root->fs_info, flags);
		ret = update_space_info(extent_root->fs_info, flags,
	BUG_ON(!space_info);

	if (space_info->full)

	thresh = div_factor(space_info->total_bytes, 7);
	if ((space_info->bytes_used + space_info->bytes_pinned + alloc_bytes) <

	ret = btrfs_alloc_chunk(trans, extent_root, &start, &num_bytes,
	if (ret == -ENOSPC) {
		space_info->full = 1;

	ret = btrfs_make_block_group(trans, extent_root, 0, space_info->flags,
				     BTRFS_FIRST_CHUNK_TREE_OBJECTID, start,
				     num_bytes);
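/*
 * Rough arithmetic behind the threshold above, assuming div_factor(x, f)
 * computes x * f / 10 as it does elsewhere in btrfs: with f = 7 a new chunk
 * is only allocated once used + pinned + the requested bytes exceed about
 * 70% of the space_info's total_bytes.  For example, with a 10 GiB
 * space_info the allocator starts growing the space once that sum passes
 * roughly 7 GiB.
 */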
static int update_block_group(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      u64 bytenr, u64 num_bytes, int alloc,
	struct btrfs_block_group_cache *cache;
	struct btrfs_fs_info *info = root->fs_info;
	u64 total = num_bytes;

	/* block accounting for super block */
	old_val = btrfs_super_bytes_used(info->super_copy);
		old_val += num_bytes;
		old_val -= num_bytes;
	btrfs_set_super_bytes_used(info->super_copy, old_val);

	/* block accounting for root item */
	old_val = btrfs_root_used(&root->root_item);
		old_val += num_bytes;
		old_val -= num_bytes;
	btrfs_set_root_used(&root->root_item, old_val);

		cache = btrfs_lookup_block_group(info, bytenr);

		byte_in_group = bytenr - cache->key.objectid;
		WARN_ON(byte_in_group > cache->key.offset);
		start = cache->key.objectid;
		end = start + cache->key.offset - 1;
		set_extent_bits(&info->block_group_cache, start, end,
				BLOCK_GROUP_DIRTY, GFP_NOFS);

		old_val = btrfs_block_group_used(&cache->item);
		num_bytes = min(total, cache->key.offset - byte_in_group);
			old_val += num_bytes;
			cache->space_info->bytes_used += num_bytes;
			old_val -= num_bytes;
			cache->space_info->bytes_used -= num_bytes;
				set_extent_dirty(&info->free_space_cache,
						 bytenr, bytenr + num_bytes - 1,
		btrfs_set_block_group_used(&cache->item, old_val);
		bytenr += num_bytes;
static int update_pinned_extents(struct btrfs_root *root,
				 u64 bytenr, u64 num, int pin)
	struct btrfs_block_group_cache *cache;
	struct btrfs_fs_info *fs_info = root->fs_info;

		set_extent_dirty(&fs_info->pinned_extents,
				 bytenr, bytenr + num - 1, GFP_NOFS);
		clear_extent_dirty(&fs_info->pinned_extents,
				   bytenr, bytenr + num - 1, GFP_NOFS);

		cache = btrfs_lookup_block_group(fs_info, bytenr);
			len = min((u64)root->sectorsize, num);
			len = min(num, cache->key.offset -
				  (bytenr - cache->key.objectid));
			cache->pinned += len;
			cache->space_info->bytes_pinned += len;
			fs_info->total_pinned += len;
			cache->pinned -= len;
			cache->space_info->bytes_pinned -= len;
			fs_info->total_pinned -= len;
int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct extent_io_tree *unpin)
	struct extent_io_tree *free_space_cache;

	free_space_cache = &root->fs_info->free_space_cache;

		ret = find_first_extent_bit(unpin, 0, &start, &end,
		update_pinned_extents(root, start, end + 1 - start, 0);
		clear_extent_dirty(unpin, start, end, GFP_NOFS);
		set_extent_dirty(free_space_cache, start, end, GFP_NOFS);

static int extent_root_pending_ops(struct btrfs_fs_info *info)
	ret = find_first_extent_bit(&info->extent_ins, 0, &start,
				    &end, EXTENT_LOCKED);
	ret = find_first_extent_bit(&info->pending_del, 0, &start, &end,
2055 ret
= find_first_extent_bit(&info
->pending_del
, 0, &start
, &end
,
2061 static int finish_current_insert(struct btrfs_trans_handle
*trans
,
2062 struct btrfs_root
*extent_root
)
2067 struct btrfs_fs_info
*info
= extent_root
->fs_info
;
2068 struct pending_extent_op
*extent_op
;
2069 struct btrfs_key key
;
2071 int skinny_metadata
=
2072 btrfs_fs_incompat(extent_root
->fs_info
,
2073 BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA
);
2076 ret
= find_first_extent_bit(&info
->extent_ins
, 0, &start
,
2077 &end
, EXTENT_LOCKED
);
2081 ret
= get_state_private(&info
->extent_ins
, start
, &priv
);
2083 extent_op
= (struct pending_extent_op
*)(unsigned long)priv
;
2085 if (extent_op
->type
== PENDING_EXTENT_INSERT
) {
2086 key
.objectid
= start
;
2087 if (skinny_metadata
) {
2088 key
.offset
= extent_op
->level
;
2089 key
.type
= BTRFS_METADATA_ITEM_KEY
;
2091 key
.offset
= extent_op
->num_bytes
;
2092 key
.type
= BTRFS_EXTENT_ITEM_KEY
;
2094 ret
= alloc_reserved_tree_block(trans
, extent_root
,
2095 extent_root
->root_key
.objectid
,
2099 extent_op
->level
, &key
);
2105 clear_extent_bits(&info
->extent_ins
, start
, end
, EXTENT_LOCKED
,
static int pin_down_bytes(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root,
			  u64 bytenr, u64 num_bytes, int is_data)
	struct extent_buffer *buf;

	buf = btrfs_find_tree_block(root, bytenr, num_bytes);

	/* we can reuse a block if it hasn't been written
	 * and it is from this transaction.  We can't
	 * reuse anything from the tree log root because
	 * it has tiny sub-transactions.
	 */
	if (btrfs_buffer_uptodate(buf, 0)) {
		u64 header_owner = btrfs_header_owner(buf);
		u64 header_transid = btrfs_header_generation(buf);
		if (header_owner != BTRFS_TREE_LOG_OBJECTID &&
		    header_transid == trans->transid &&
		    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
			clean_tree_block(NULL, root, buf);
			free_extent_buffer(buf);
	free_extent_buffer(buf);

	update_pinned_extents(root, bytenr, num_bytes, 1);

void btrfs_pin_extent(struct btrfs_fs_info *fs_info,
		      u64 bytenr, u64 num_bytes)
	update_pinned_extents(fs_info->extent_root, bytenr, num_bytes, 1);

void btrfs_unpin_extent(struct btrfs_fs_info *fs_info,
			u64 bytenr, u64 num_bytes)
	update_pinned_extents(fs_info->extent_root, bytenr, num_bytes, 0);
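/*
 * btrfs_pin_extent() and btrfs_unpin_extent() are the public wrappers around
 * update_pinned_extents().  Pinned ranges are tracked in
 * fs_info->pinned_extents; at commit time btrfs_finish_extent_commit() above
 * walks the unpin tree, clears the pinned accounting and returns the ranges
 * to the free_space_cache.
 */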
/*
 * remove an extent from the root, returns 0 on success
 */
static int __free_extent(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 u64 bytenr, u64 num_bytes, u64 parent,
			 u64 root_objectid, u64 owner_objectid,
			 u64 owner_offset, int refs_to_drop)
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_extent_ops *ops = root->fs_info->extent_ops;
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	int extent_slot = 0;
	int found_extent = 0;
	int skinny_metadata =
		btrfs_fs_incompat(extent_root->fs_info,
				  BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA);

	if (root->fs_info->free_extent_hook) {
		root->fs_info->free_extent_hook(trans, root, bytenr, num_bytes,
						parent, root_objectid,
						owner_objectid,
						owner_offset, refs_to_drop);

	path = btrfs_alloc_path();

	is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
		skinny_metadata = 0;
	BUG_ON(!is_data && refs_to_drop != 1);

	ret = lookup_extent_backref(trans, extent_root, path, &iref,
				    bytenr, num_bytes, parent,
				    root_objectid, owner_objectid,
		extent_slot = path->slots[0];
		while (extent_slot >= 0) {
			btrfs_item_key_to_cpu(path->nodes[0], &key,
			if (key.objectid != bytenr)
			if (key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == num_bytes) {
			if (key.type == BTRFS_METADATA_ITEM_KEY &&
			    key.offset == owner_objectid) {
			if (path->slots[0] - extent_slot > 5)
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
		if (found_extent && item_size < sizeof(*ei))
		if (!found_extent) {
			ret = remove_extent_backref(trans, extent_root, path,
			btrfs_release_path(path);

			key.objectid = bytenr;

			if (skinny_metadata) {
				key.type = BTRFS_METADATA_ITEM_KEY;
				key.offset = owner_objectid;
				key.type = BTRFS_EXTENT_ITEM_KEY;
				key.offset = num_bytes;

			ret = btrfs_search_slot(trans, extent_root,
			if (ret > 0 && skinny_metadata && path->slots[0]) {
				btrfs_item_key_to_cpu(path->nodes[0],
				if (key.objectid == bytenr &&
				    key.type == BTRFS_EXTENT_ITEM_KEY &&
				    key.offset == num_bytes)
			if (ret > 0 && skinny_metadata) {
				skinny_metadata = 0;
				btrfs_release_path(path);
				key.type = BTRFS_EXTENT_ITEM_KEY;
				key.offset = num_bytes;
				ret = btrfs_search_slot(trans, extent_root,
				printk(KERN_ERR "umm, got %d back from search"
				       ", was looking for %llu\n", ret,
				       (unsigned long long)bytenr);
				btrfs_print_leaf(extent_root, path->nodes[0]);
			extent_slot = path->slots[0];
		printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
		       "parent %llu root %llu owner %llu offset %llu\n",
		       (unsigned long long)bytenr,
		       (unsigned long long)parent,
		       (unsigned long long)root_objectid,
		       (unsigned long long)owner_objectid,
		       (unsigned long long)owner_offset);
= path
->nodes
[0];
2299 item_size
= btrfs_item_size_nr(leaf
, extent_slot
);
2300 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2301 if (item_size
< sizeof(*ei
)) {
2302 BUG_ON(found_extent
|| extent_slot
!= path
->slots
[0]);
2303 ret
= convert_extent_item_v0(trans
, extent_root
, path
,
2307 btrfs_release_path(path
);
2309 key
.objectid
= bytenr
;
2310 key
.type
= BTRFS_EXTENT_ITEM_KEY
;
2311 key
.offset
= num_bytes
;
2313 ret
= btrfs_search_slot(trans
, extent_root
, &key
, path
,
2316 printk(KERN_ERR
"umm, got %d back from search"
2317 ", was looking for %llu\n", ret
,
2318 (unsigned long long)bytenr
);
2319 btrfs_print_leaf(extent_root
, path
->nodes
[0]);
2322 extent_slot
= path
->slots
[0];
2323 leaf
= path
->nodes
[0];
2324 item_size
= btrfs_item_size_nr(leaf
, extent_slot
);
2327 BUG_ON(item_size
< sizeof(*ei
));
2328 ei
= btrfs_item_ptr(leaf
, extent_slot
,
2329 struct btrfs_extent_item
);
2330 if (owner_objectid
< BTRFS_FIRST_FREE_OBJECTID
&&
2331 key
.type
== BTRFS_EXTENT_ITEM_KEY
) {
2332 struct btrfs_tree_block_info
*bi
;
2333 BUG_ON(item_size
< sizeof(*ei
) + sizeof(*bi
));
2334 bi
= (struct btrfs_tree_block_info
*)(ei
+ 1);
2335 WARN_ON(owner_objectid
!= btrfs_tree_block_level(leaf
, bi
));
2338 refs
= btrfs_extent_refs(leaf
, ei
);
2339 BUG_ON(refs
< refs_to_drop
);
2340 refs
-= refs_to_drop
;
2344 * In the case of inline back ref, reference count will
2345 * be updated by remove_extent_backref
2348 BUG_ON(!found_extent
);
2350 btrfs_set_extent_refs(leaf
, ei
, refs
);
2351 btrfs_mark_buffer_dirty(leaf
);
2354 ret
= remove_extent_backref(trans
, extent_root
, path
,
2364 BUG_ON(is_data
&& refs_to_drop
!=
2365 extent_data_ref_count(root
, path
, iref
));
2367 BUG_ON(path
->slots
[0] != extent_slot
);
2369 BUG_ON(path
->slots
[0] != extent_slot
+ 1);
2370 path
->slots
[0] = extent_slot
;
2375 if (ops
&& ops
->free_extent
) {
2376 ret
= ops
->free_extent(root
, bytenr
, num_bytes
);
2384 ret
= pin_down_bytes(trans
, root
, bytenr
, num_bytes
,
2391 ret
= btrfs_del_items(trans
, extent_root
, path
, path
->slots
[0],
2394 btrfs_release_path(path
);
2397 ret
= btrfs_del_csums(trans
, root
, bytenr
, num_bytes
);
2401 update_block_group(trans
, root
, bytenr
, num_bytes
, 0, mark_free
);
2404 btrfs_free_path(path
);
2405 finish_current_insert(trans
, extent_root
);
/*
 * find all the blocks marked as pending in the radix tree and remove
 * them from the extent map
 */
static int del_pending_extents(struct btrfs_trans_handle *trans, struct
			       btrfs_root *extent_root)
	struct extent_io_tree *pending_del;
	struct extent_io_tree *extent_ins;
	struct pending_extent_op *extent_op;

	extent_ins = &extent_root->fs_info->extent_ins;
	pending_del = &extent_root->fs_info->pending_del;

		ret = find_first_extent_bit(pending_del, 0, &start, &end,
		ret = get_state_private(pending_del, start, &priv);
		extent_op = (struct pending_extent_op *)(unsigned long)priv;

		clear_extent_bits(pending_del, start, end, EXTENT_LOCKED,

		if (!test_range_bit(extent_ins, start, end,
				    EXTENT_LOCKED, 0)) {
			ret = __free_extent(trans, extent_root,
					    start, end + 1 - start, 0,
					    extent_root->root_key.objectid,
					    extent_op->level, 0, 1);
			ret = get_state_private(extent_ins, start, &priv);
			extent_op = (struct pending_extent_op *)
				    (unsigned long)priv;
			clear_extent_bits(extent_ins, start, end,
					  EXTENT_LOCKED, GFP_NOFS);
			if (extent_op->type == PENDING_BACKREF_UPDATE)
/*
 * remove an extent from the root, returns 0 on success
 */
int btrfs_free_extent(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root,
		      u64 bytenr, u64 num_bytes, u64 parent,
		      u64 root_objectid, u64 owner, u64 offset)
{
	struct btrfs_root *extent_root = root->fs_info->extent_root;

	WARN_ON(num_bytes < root->sectorsize);
	if (root == extent_root) {
		struct pending_extent_op *extent_op;

		extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
		/* ... */

		extent_op->type = PENDING_EXTENT_DELETE;
		extent_op->bytenr = bytenr;
		extent_op->num_bytes = num_bytes;
		extent_op->level = (int)owner;

		set_extent_bits(&root->fs_info->pending_del,
				bytenr, bytenr + num_bytes - 1,
				EXTENT_LOCKED, GFP_NOFS);
		set_state_private(&root->fs_info->pending_del,
				  bytenr, (unsigned long)extent_op);
		return 0;
	}

	ret = __free_extent(trans, root, bytenr, num_bytes, parent,
			    root_objectid, owner, offset, 1);
	pending_ret = del_pending_extents(trans, root->fs_info->extent_root);
	return ret ? ret : pending_ret;
}
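
/*
 * Note on the deferred path above (descriptive sketch): frees issued while
 * modifying the extent tree itself cannot recurse into __free_extent(), so
 * the range is only tagged EXTENT_LOCKED in fs_info->pending_del and a
 * pending_extent_op of type PENDING_EXTENT_DELETE is attached to it via
 * set_state_private().  del_pending_extents() later walks those tagged
 * ranges and performs the real __free_extent() calls once it is safe to
 * touch the extent tree again.
 */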
static u64 stripe_align(struct btrfs_root *root, u64 val)
{
	u64 mask = ((u64)root->stripesize - 1);
	u64 ret = (val + mask) & ~mask;
	return ret;
}
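
/*
 * Worked example of the round-up above (illustrative numbers only, not part
 * of the original source): with root->stripesize == 65536, mask == 0xffff,
 * so
 *
 *	stripe_align(root, 100000) == (100000 + 65535) & ~65535 == 131072
 *
 * i.e. any value is rounded up to the next multiple of the stripe size, and
 * values that are already aligned (such as 131072) are returned unchanged.
 */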
/*
 * walks the btree of allocated extents and finds a hole of a given size.
 * The key ins is changed to record the hole:
 * ins->objectid == block start
 * ins->flags = BTRFS_EXTENT_ITEM_KEY
 * ins->offset == number of blocks
 * Any available blocks before search_start are skipped.
 */
static int noinline find_free_extent(struct btrfs_trans_handle *trans,
				     struct btrfs_root *orig_root,
				     u64 num_bytes, u64 empty_size,
				     u64 search_start, u64 search_end,
				     u64 hint_byte, struct btrfs_key *ins,
				     u64 exclude_start, u64 exclude_nr,
				     int data)
{
	u64 orig_search_start = search_start;
	struct btrfs_root *root = orig_root->fs_info->extent_root;
	struct btrfs_fs_info *info = root->fs_info;
	u64 total_needed = num_bytes;
	struct btrfs_block_group_cache *block_group;

	WARN_ON(num_bytes < root->sectorsize);
	btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);

	search_start = stripe_align(root, search_start);

	if (hint_byte) {
		block_group = btrfs_lookup_first_block_group(info, hint_byte);
		if (!block_group)
			hint_byte = search_start;
		block_group = btrfs_find_block_group(root, block_group,
						     hint_byte, data, 1);
	} else {
		block_group = btrfs_find_block_group(root,
						     trans->block_group,
						     search_start, data, 1);
	}

	total_needed += empty_size;

check_failed:
	search_start = stripe_align(root, search_start);
	if (!block_group) {
		block_group = btrfs_lookup_first_block_group(info,
							     search_start);
		if (!block_group)
			block_group = btrfs_lookup_first_block_group(info,
							     orig_search_start);
	}
	ret = find_search_start(root, &block_group, &search_start,
				total_needed, data);
	if (ret)
		goto new_group;

	ins->objectid = search_start;
	ins->offset = num_bytes;

	if (ins->objectid + num_bytes >
	    block_group->key.objectid + block_group->key.offset) {
		search_start = block_group->key.objectid +
			block_group->key.offset;
		goto new_group;
	}

	if (test_range_bit(&info->extent_ins, ins->objectid,
			   ins->objectid + num_bytes - 1, EXTENT_LOCKED, 0)) {
		search_start = ins->objectid + num_bytes;
		goto new_group;
	}

	if (test_range_bit(&info->pinned_extents, ins->objectid,
			   ins->objectid + num_bytes - 1, EXTENT_DIRTY, 0)) {
		search_start = ins->objectid + num_bytes;
		goto new_group;
	}

	if (info->excluded_extents &&
	    test_range_bit(info->excluded_extents, ins->objectid,
			   ins->objectid + num_bytes - 1, EXTENT_DIRTY, 0)) {
		search_start = ins->objectid + num_bytes;
		goto new_group;
	}

	if (exclude_nr > 0 && (ins->objectid + num_bytes > exclude_start &&
	    ins->objectid < exclude_start + exclude_nr)) {
		search_start = exclude_start + exclude_nr;
		goto new_group;
	}

	if (!(data & BTRFS_BLOCK_GROUP_DATA)) {
		if (check_crossing_stripes(ins->objectid, num_bytes)) {
			search_start = round_down(ins->objectid + num_bytes,
						  /* ... */);
			goto new_group;
		}
		block_group = btrfs_lookup_block_group(info, ins->objectid);
		if (block_group)
			trans->block_group = block_group;
	}
	ins->offset = num_bytes;
	return 0;

new_group:
	block_group = btrfs_lookup_first_block_group(info, search_start);
	if (!block_group) {
		search_start = orig_search_start;
		/* ... */
		total_needed -= empty_size;
		/* ... */
	}
	block_group = btrfs_find_block_group(root, block_group,
					     search_start, data, 0);
	goto check_failed;
}
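
/*
 * Example of the candidate checks above (illustrative numbers only): if the
 * allocator proposes ins->objectid == 1 MiB for a 128 KiB allocation but the
 * range [1 MiB, 1 MiB + 128 KiB) is already tagged in info->pinned_extents,
 * search_start is bumped to 1 MiB + 128 KiB and the search is retried from
 * there.  The same bump-and-retry is applied to ranges with pending inserts
 * (info->extent_ins), explicitly excluded ranges, and candidates that would
 * spill past the end of the chosen block group.
 */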
int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 u64 num_bytes, u64 empty_size,
			 u64 hint_byte, u64 search_end,
			 struct btrfs_key *ins, int data)
{
	u64 search_start = 0;
	struct btrfs_fs_info *info = root->fs_info;

	if (info->extent_ops) {
		struct btrfs_extent_ops *ops = info->extent_ops;
		ret = ops->alloc_extent(root, num_bytes, hint_byte, ins, !data);
		/* ... */
	}

	if (data) {
		alloc_profile = info->avail_data_alloc_bits &
				info->data_alloc_profile;
		data = BTRFS_BLOCK_GROUP_DATA | alloc_profile;
	} else if ((info->system_allocs > 0 || root == info->chunk_root) &&
		   info->system_allocs >= 0) {
		alloc_profile = info->avail_system_alloc_bits &
				info->system_alloc_profile;
		data = BTRFS_BLOCK_GROUP_SYSTEM | alloc_profile;
	} else {
		alloc_profile = info->avail_metadata_alloc_bits &
				info->metadata_alloc_profile;
		data = BTRFS_BLOCK_GROUP_METADATA | alloc_profile;
	}

	if (root->ref_cows) {
		if (!(data & BTRFS_BLOCK_GROUP_METADATA)) {
			ret = do_chunk_alloc(trans, root->fs_info->extent_root,
					     /* ... */
					     BTRFS_BLOCK_GROUP_METADATA);
		}
		ret = do_chunk_alloc(trans, root->fs_info->extent_root,
				     num_bytes + 2 * 1024 * 1024, data);
	}

	WARN_ON(num_bytes < root->sectorsize);
	ret = find_free_extent(trans, root, num_bytes, empty_size,
			       search_start, search_end, hint_byte, ins,
			       trans->alloc_exclude_start,
			       trans->alloc_exclude_nr, data);

	clear_extent_dirty(&root->fs_info->free_space_cache,
			   ins->objectid, ins->objectid + ins->offset - 1,
			   GFP_NOFS);
	return ret;
}
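
/*
 * Illustrative use of the reservation path above (a sketch only; the sizes
 * and the surrounding error handling are assumptions, not code from this
 * file):
 *
 *	struct btrfs_key ins;
 *	int ret;
 *
 *	ret = btrfs_reserve_extent(trans, root, 4096, 0, 0, (u64)-1,
 *				   &ins, 1);
 *	if (ret == 0) {
 *		ins.objectid holds the start of the reserved range and
 *		ins.offset its length in bytes.
 *	}
 *
 * The data flag selects between the data, system and metadata allocation
 * profiles computed above before find_free_extent() is asked for a hole.
 */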
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     u64 root_objectid, u64 generation,
				     u64 flags, struct btrfs_disk_key *key,
				     int level, struct btrfs_key *ins)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_extent_item *extent_item;
	struct btrfs_tree_block_info *block_info;
	struct btrfs_extent_inline_ref *iref;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	u32 size = sizeof(*extent_item) + sizeof(*iref);
	int skinny_metadata =
		btrfs_fs_incompat(fs_info,
				  BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA);

	if (!skinny_metadata)
		size += sizeof(*block_info);

	path = btrfs_alloc_path();
	/* ... */

	ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
				      ins, size);
	/* ... */

	leaf = path->nodes[0];
	extent_item = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, extent_item, 1);
	btrfs_set_extent_generation(leaf, extent_item, generation);
	btrfs_set_extent_flags(leaf, extent_item,
			       flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);

	if (skinny_metadata) {
		iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
	} else {
		block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
		btrfs_set_tree_block_key(leaf, block_info, key);
		btrfs_set_tree_block_level(leaf, block_info, level);
		iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
	}

	btrfs_set_extent_inline_ref_type(leaf, iref, BTRFS_TREE_BLOCK_REF_KEY);
	btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);

	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);

	ret = update_block_group(trans, root, ins->objectid, root->leafsize,
				 /* ... */);
	return ret;
}
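
/*
 * On-disk layouts produced above (descriptive sketch): with skinny metadata
 * enabled the item body is just
 *
 *	btrfs_extent_item | btrfs_extent_inline_ref
 *
 * keyed as (bytenr, BTRFS_METADATA_ITEM_KEY, level), while the classic
 * layout also carries the tree block info between the two:
 *
 *	btrfs_extent_item | btrfs_tree_block_info | btrfs_extent_inline_ref
 *
 * which is why size only grows by sizeof(*block_info) when the incompat
 * flag is not set.
 */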
static int alloc_tree_block(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root, u64 num_bytes,
			    u64 root_objectid, u64 generation,
			    u64 flags, struct btrfs_disk_key *key,
			    int level, u64 empty_size, u64 hint_byte,
			    u64 search_end, struct btrfs_key *ins)
{
	ret = btrfs_reserve_extent(trans, root, num_bytes, empty_size,
				   hint_byte, search_end, ins, 0);
	/* ... */

	if (root_objectid == BTRFS_EXTENT_TREE_OBJECTID) {
		struct pending_extent_op *extent_op;

		extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
		/* ... */

		extent_op->type = PENDING_EXTENT_INSERT;
		extent_op->bytenr = ins->objectid;
		extent_op->num_bytes = ins->offset;
		extent_op->level = level;
		extent_op->flags = flags;
		memcpy(&extent_op->key, key, sizeof(*key));

		set_extent_bits(&root->fs_info->extent_ins, ins->objectid,
				ins->objectid + ins->offset - 1,
				EXTENT_LOCKED, GFP_NOFS);
		set_state_private(&root->fs_info->extent_ins,
				  ins->objectid, (unsigned long)extent_op);
	} else {
		if (btrfs_fs_incompat(root->fs_info,
				      BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)) {
			ins->offset = level;
			ins->type = BTRFS_METADATA_ITEM_KEY;
		}
		ret = alloc_reserved_tree_block(trans, root, root_objectid,
						generation, flags, key,
						level, ins);
		finish_current_insert(trans, root->fs_info->extent_root);
		del_pending_extents(trans, root->fs_info->extent_root);
	}
	return ret;
}
/*
 * helper function to allocate a block for a given tree
 * returns the tree buffer or NULL.
 */
struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     u32 blocksize, u64 root_objectid,
					     struct btrfs_disk_key *key,
					     int level, u64 hint,
					     u64 empty_size)
{
	struct btrfs_key ins;
	int ret;
	struct extent_buffer *buf;

	ret = alloc_tree_block(trans, root, blocksize, root_objectid,
			       trans->transid, 0, key, level,
			       empty_size, hint, (u64)-1, &ins);
	if (ret)
		return ERR_PTR(ret);

	buf = btrfs_find_create_tree_block(root, ins.objectid, blocksize);
	if (!buf) {
		btrfs_free_extent(trans, root, ins.objectid, ins.offset,
				  0, root->root_key.objectid, level, 0);
		return ERR_PTR(-ENOMEM);
	}
	btrfs_set_buffer_uptodate(buf);
	trans->blocks_used++;

	return buf;
}
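
/*
 * Typical use (a sketch; the caller shown here is hypothetical and the
 * disk_key/level/hint variables are assumed to be in scope, not taken from
 * this file): allocating a fresh tree block while COWing a node:
 *
 *	struct extent_buffer *eb;
 *
 *	eb = btrfs_alloc_free_block(trans, root, root->leafsize,
 *				    root->root_key.objectid, &disk_key,
 *				    level, hint, 0);
 *	if (IS_ERR(eb))
 *		return PTR_ERR(eb);
 *
 * On failure an ERR_PTR() is returned, which is why callers must test the
 * result with IS_ERR() rather than comparing against NULL.
 */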
static int noinline drop_leaf_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct extent_buffer *leaf)
{
	u64 leaf_generation;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;

	BUG_ON(!btrfs_is_leaf(leaf));
	nritems = btrfs_header_nritems(leaf);
	leaf_owner = btrfs_header_owner(leaf);
	leaf_generation = btrfs_header_generation(leaf);

	for (i = 0; i < nritems; i++) {
		btrfs_item_key_to_cpu(leaf, &key, i);
		if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
		if (btrfs_file_extent_type(leaf, fi) ==
		    BTRFS_FILE_EXTENT_INLINE)
			continue;
		/*
		 * FIXME make sure to insert a trans record that
		 * repeats the snapshot del on crash
		 */
		disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
		if (disk_bytenr == 0)
			continue;
		ret = btrfs_free_extent(trans, root, disk_bytenr,
				btrfs_file_extent_disk_num_bytes(leaf, fi),
				leaf->start, leaf_owner, leaf_generation,
				/* ... */);
	}
	return 0;
}
static void noinline reada_walk_down(struct btrfs_root *root,
				     struct extent_buffer *node,
				     int slot)
{
	nritems = btrfs_header_nritems(node);
	level = btrfs_header_level(node);
	/* ... */

	for (i = slot; i < nritems && skipped < 32; i++) {
		bytenr = btrfs_node_blockptr(node, i);
		if (last && ((bytenr > last && bytenr - last > 32 * 1024) ||
			     (last > bytenr && last - bytenr > 32 * 1024))) {
			skipped++;
			continue;
		}
		blocksize = btrfs_level_size(root, level - 1);
		ret = btrfs_lookup_extent_ref(NULL, root, bytenr,
					      /* ... */);
		/* ... */
		mutex_unlock(&root->fs_info->fs_mutex);
		ret = readahead_tree_block(root, bytenr, blocksize,
					   btrfs_node_ptr_generation(node, i));
		last = bytenr + blocksize;
		/* ... */
		mutex_lock(&root->fs_info->fs_mutex);
	}
}
/*
 * helper function for drop_snapshot, this walks down the tree dropping ref
 * counts as it goes.
 */
static int noinline walk_down_tree(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path, int *level)
{
	struct extent_buffer *next;
	struct extent_buffer *cur;
	struct extent_buffer *parent;

	WARN_ON(*level < 0);
	WARN_ON(*level >= BTRFS_MAX_LEVEL);
	ret = btrfs_lookup_extent_ref(trans, root,
				      path->nodes[*level]->start,
				      path->nodes[*level]->len, &refs);
	/* ... */

	/*
	 * walk down to the last node level and free all the leaves
	 */
	while (*level >= 0) {
		WARN_ON(*level < 0);
		WARN_ON(*level >= BTRFS_MAX_LEVEL);
		cur = path->nodes[*level];

		if (btrfs_header_level(cur) != *level)
			WARN_ON(1);

		if (path->slots[*level] >=
		    btrfs_header_nritems(cur))
			break;
		if (*level == 0) {
			ret = drop_leaf_ref(trans, root, cur);
			break;
		}
		bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
		ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
		blocksize = btrfs_level_size(root, *level - 1);
		ret = btrfs_lookup_extent_ref(trans, root, bytenr, blocksize,
					      &refs);
		if (refs != 1) {
			parent = path->nodes[*level];
			root_owner = btrfs_header_owner(parent);
			root_gen = btrfs_header_generation(parent);
			path->slots[*level]++;
			ret = btrfs_free_extent(trans, root, bytenr, blocksize,
						parent->start, root_owner,
						root_gen, *level - 1, 1);
			continue;
		}
		next = btrfs_find_tree_block(root, bytenr, blocksize);
		if (!next || !btrfs_buffer_uptodate(next, ptr_gen)) {
			free_extent_buffer(next);
			reada_walk_down(root, cur, path->slots[*level]);
			mutex_unlock(&root->fs_info->fs_mutex);
			next = read_tree_block(root, bytenr, blocksize,
					       ptr_gen);
			mutex_lock(&root->fs_info->fs_mutex);
			if (!extent_buffer_uptodate(next)) {
				ret = PTR_ERR(next);
				/* ... */
			}
		}
		WARN_ON(*level <= 0);
		if (path->nodes[*level - 1])
			free_extent_buffer(path->nodes[*level - 1]);
		path->nodes[*level - 1] = next;
		*level = btrfs_header_level(next);
		path->slots[*level] = 0;
	}

	WARN_ON(*level < 0);
	WARN_ON(*level >= BTRFS_MAX_LEVEL);

	if (path->nodes[*level] == root->node) {
		root_owner = root->root_key.objectid;
		parent = path->nodes[*level];
	} else {
		parent = path->nodes[*level + 1];
		root_owner = btrfs_header_owner(parent);
	}

	root_gen = btrfs_header_generation(parent);
	ret = btrfs_free_extent(trans, root, path->nodes[*level]->start,
				path->nodes[*level]->len, parent->start,
				root_owner, root_gen, *level, 1);
	free_extent_buffer(path->nodes[*level]);
	path->nodes[*level] = NULL;
	/* ... */
	return 0;
}
/*
 * helper for dropping snapshots. This walks back up the tree in the path
 * to find the first node higher up that we haven't yet gone through.
 */
static int noinline walk_up_tree(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path, int *level)
{
	struct btrfs_root_item *root_item = &root->root_item;

	for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
		slot = path->slots[i];
		if (slot < btrfs_header_nritems(path->nodes[i]) - 1) {
			struct extent_buffer *node;
			struct btrfs_disk_key disk_key;
			node = path->nodes[i];
			/* ... */
			WARN_ON(*level == 0);
			btrfs_node_key(node, &disk_key, path->slots[i]);
			memcpy(&root_item->drop_progress,
			       &disk_key, sizeof(disk_key));
			root_item->drop_level = i;
			return 0;
		} else {
			struct extent_buffer *parent;
			if (path->nodes[*level] == root->node)
				parent = path->nodes[*level];
			else
				parent = path->nodes[*level + 1];

			root_owner = btrfs_header_owner(parent);
			root_gen = btrfs_header_generation(parent);
			ret = btrfs_free_extent(trans, root,
						path->nodes[*level]->start,
						path->nodes[*level]->len,
						parent->start, root_owner,
						root_gen, *level, 1);
			free_extent_buffer(path->nodes[*level]);
			path->nodes[*level] = NULL;
			/* ... */
		}
	}
	return 1;
}
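
/*
 * Illustrative resume pattern (a sketch; the caller shown is hypothetical,
 * not code from this file): because walk_up_tree() records the next key in
 * root_item->drop_progress and the level in root_item->drop_level before
 * returning, an interrupted snapshot drop can be resumed roughly like
 *
 *	btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
 *	level = root_item->drop_level;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *
 * and the walk then continues from the first node that was not yet freed.
 */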
int btrfs_free_block_groups(struct btrfs_fs_info *info)
{
	struct btrfs_space_info *sinfo;
	struct btrfs_block_group_cache *cache;

	while (1) {
		ret = find_first_extent_bit(&info->block_group_cache, 0,
					    &start, &end, (unsigned int)-1);
		if (ret)
			break;
		ret = get_state_private(&info->block_group_cache, start, &ptr);
		cache = u64_to_ptr(ptr);
		if (cache->free_space_ctl) {
			btrfs_remove_free_space_cache(cache);
			kfree(cache->free_space_ctl);
		}
		kfree(cache);
		clear_extent_bits(&info->block_group_cache, start,
				  end, (unsigned int)-1, GFP_NOFS);
	}

	while (1) {
		ret = find_first_extent_bit(&info->free_space_cache, 0,
					    &start, &end, EXTENT_DIRTY);
		if (ret)
			break;
		clear_extent_dirty(&info->free_space_cache, start,
				   end, GFP_NOFS);
	}

	while (!list_empty(&info->space_info)) {
		sinfo = list_entry(info->space_info.next,
				   struct btrfs_space_info, list);
		list_del_init(&sinfo->list);
		kfree(sinfo);
	}
	return 0;
}
static int find_first_block_group(struct btrfs_root *root,
				  struct btrfs_path *path,
				  struct btrfs_key *key)
{
	struct btrfs_key found_key;
	struct extent_buffer *leaf;

	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	/* ... */
	while (1) {
		slot = path->slots[0];
		leaf = path->nodes[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret)
				break;
			/* ... */
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.objectid >= key->objectid &&
		    found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY)
			return 0;
		path->slots[0]++;
	}
	/* ... */
	return ret;
}
static void account_super_bytes(struct btrfs_fs_info *fs_info,
				struct btrfs_block_group_cache *cache)
{
	if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
		stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
		cache->bytes_super += stripe_len;
	}

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		ret = btrfs_rmap_block(&fs_info->mapping_tree,
				       cache->key.objectid, bytenr,
				       0, &logical, &nr, &stripe_len);
		/* ... */

		while (nr--) {
			if (logical[nr] > cache->key.objectid +
			    cache->key.offset)
				continue;

			if (logical[nr] + stripe_len <= cache->key.objectid)
				continue;

			start = logical[nr];
			if (start < cache->key.objectid) {
				start = cache->key.objectid;
				len = (logical[nr] + stripe_len) - start;
			} else {
				len = min_t(u64, stripe_len,
					    cache->key.objectid +
					    cache->key.offset - start);
			}

			cache->bytes_super += len;
		}
		/* ... */
	}
}
int btrfs_read_block_groups(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_block_group_cache *cache;
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_space_info *space_info;
	struct extent_io_tree *block_group_cache;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;

	block_group_cache = &info->block_group_cache;

	root = info->extent_root;
	key.objectid = 0;
	key.offset = 0;
	btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
	path = btrfs_alloc_path();
	/* ... */

	while (1) {
		ret = find_first_block_group(root, path, &key);
		if (ret)
			break;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		cache = kzalloc(sizeof(*cache), GFP_NOFS);
		/* ... */

		read_extent_buffer(leaf, &cache->item,
				   btrfs_item_ptr_offset(leaf, path->slots[0]),
				   sizeof(cache->item));
		memcpy(&cache->key, &found_key, sizeof(found_key));

		key.objectid = found_key.objectid + found_key.offset;
		btrfs_release_path(path);
		cache->flags = btrfs_block_group_flags(&cache->item);

		if (cache->flags & BTRFS_BLOCK_GROUP_DATA) {
			bit = BLOCK_GROUP_DATA;
		} else if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
			bit = BLOCK_GROUP_SYSTEM;
		} else if (cache->flags & BTRFS_BLOCK_GROUP_METADATA) {
			bit = BLOCK_GROUP_METADATA;
		}
		set_avail_alloc_bits(info, cache->flags);
		if (btrfs_chunk_readonly(root, cache->key.objectid))
			cache->ro = 1;

		account_super_bytes(info, cache);

		ret = update_space_info(info, cache->flags, found_key.offset,
					btrfs_block_group_used(&cache->item),
					&space_info);
		cache->space_info = space_info;

		/* use EXTENT_LOCKED to prevent merging */
		set_extent_bits(block_group_cache, found_key.objectid,
				found_key.objectid + found_key.offset - 1,
				bit | EXTENT_LOCKED, GFP_NOFS);
		set_state_private(block_group_cache, found_key.objectid,
				  (unsigned long)cache);
	}
	btrfs_free_path(path);
	return ret;
}
struct btrfs_block_group_cache *
btrfs_add_block_group(struct btrfs_fs_info *fs_info, u64 bytes_used, u64 type,
		      u64 chunk_objectid, u64 chunk_offset, u64 size)
{
	struct btrfs_block_group_cache *cache;
	struct extent_io_tree *block_group_cache;

	block_group_cache = &fs_info->block_group_cache;

	cache = kzalloc(sizeof(*cache), GFP_NOFS);
	/* ... */
	cache->key.objectid = chunk_offset;
	cache->key.offset = size;

	btrfs_set_key_type(&cache->key, BTRFS_BLOCK_GROUP_ITEM_KEY);
	btrfs_set_block_group_used(&cache->item, bytes_used);
	btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
	cache->flags = type;
	btrfs_set_block_group_flags(&cache->item, type);

	account_super_bytes(fs_info, cache);
	ret = update_space_info(fs_info, cache->flags, size, bytes_used,
				&cache->space_info);
	/* ... */

	bit = block_group_state_bits(type);
	ret = set_extent_bits(block_group_cache, chunk_offset,
			      chunk_offset + size - 1,
			      bit | EXTENT_LOCKED, GFP_NOFS);
	/* ... */
	ret = set_state_private(block_group_cache, chunk_offset,
				(unsigned long)cache);
	set_avail_alloc_bits(fs_info, type);

	return cache;
}
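
/*
 * Illustrative call sequence (a sketch, not code from this file): after a
 * new chunk has been allocated at chunk_offset with length size, its block
 * group is registered in memory and then written out by the caller, e.g.
 *
 *	cache = btrfs_add_block_group(fs_info, 0, BTRFS_BLOCK_GROUP_DATA,
 *				      BTRFS_FIRST_CHUNK_TREE_OBJECTID,
 *				      chunk_offset, size);
 *	ret = btrfs_insert_item(trans, extent_root, &cache->key,
 *				&cache->item, sizeof(cache->item));
 *
 * btrfs_make_block_group() just below wraps exactly this pair of steps.
 */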
int btrfs_make_block_group(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, u64 bytes_used,
			   u64 type, u64 chunk_objectid, u64 chunk_offset,
			   u64 size)
{
	int ret;
	struct btrfs_root *extent_root;
	struct btrfs_block_group_cache *cache;

	cache = btrfs_add_block_group(root->fs_info, bytes_used, type,
				      chunk_objectid, chunk_offset, size);
	extent_root = root->fs_info->extent_root;
	ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
				sizeof(cache->item));
	/* ... */
	ret = finish_current_insert(trans, extent_root);
	/* ... */
	ret = del_pending_extents(trans, extent_root);
	/* ... */
	return ret;
}
/*
 * This is for converter use only.
 *
 * In that case, we don't know where the free blocks are located.
 * Therefore all block group cache entries must be set up properly
 * before doing any block allocation.
 */
int btrfs_make_block_groups(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	u64 total_metadata = 0;
	struct btrfs_root *extent_root;
	struct btrfs_block_group_cache *cache;
	struct extent_io_tree *block_group_cache;

	extent_root = root->fs_info->extent_root;
	block_group_cache = &root->fs_info->block_group_cache;
	chunk_objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	total_bytes = btrfs_super_total_bytes(root->fs_info->super_copy);
	group_align = 64 * root->sectorsize;

	while (cur_start < total_bytes) {
		group_size = total_bytes / 12;
		group_size = min_t(u64, group_size, total_bytes - cur_start);
		if (cur_start == 0) {
			bit = BLOCK_GROUP_SYSTEM;
			group_type = BTRFS_BLOCK_GROUP_SYSTEM;
			/* ... */
			group_size &= ~(group_align - 1);
			group_size = max_t(u64, group_size, 8 * 1024 * 1024);
			group_size = min_t(u64, group_size, 32 * 1024 * 1024);
		} else {
			group_size &= ~(group_align - 1);
			if (total_data >= total_metadata * 2) {
				group_type = BTRFS_BLOCK_GROUP_METADATA;
				group_size = min_t(u64, group_size,
						   1ULL * 1024 * 1024 * 1024);
				total_metadata += group_size;
			} else {
				group_type = BTRFS_BLOCK_GROUP_DATA;
				group_size = min_t(u64, group_size,
						   5ULL * 1024 * 1024 * 1024);
				total_data += group_size;
			}
			if ((total_bytes - cur_start) * 4 < group_size * 5)
				group_size = total_bytes - cur_start;
		}

		cache = kzalloc(sizeof(*cache), GFP_NOFS);
		/* ... */
		cache->key.objectid = cur_start;
		cache->key.offset = group_size;
		btrfs_set_key_type(&cache->key, BTRFS_BLOCK_GROUP_ITEM_KEY);

		btrfs_set_block_group_used(&cache->item, 0);
		btrfs_set_block_group_chunk_objectid(&cache->item,
						     chunk_objectid);
		btrfs_set_block_group_flags(&cache->item, group_type);

		cache->flags = group_type;

		ret = update_space_info(root->fs_info, group_type, group_size,
					0, &cache->space_info);
		/* ... */
		set_avail_alloc_bits(extent_root->fs_info, group_type);

		set_extent_bits(block_group_cache, cur_start,
				cur_start + group_size - 1,
				bit | EXTENT_LOCKED, GFP_NOFS);
		set_state_private(block_group_cache, cur_start,
				  (unsigned long)cache);
		cur_start += group_size;
	}

	/* then insert all the items */
	cur_start = 0;
	while (cur_start < total_bytes) {
		cache = btrfs_lookup_block_group(root->fs_info, cur_start);
		/* ... */
		ret = btrfs_insert_item(trans, extent_root, &cache->key,
					&cache->item, sizeof(cache->item));
		/* ... */
		finish_current_insert(trans, extent_root);
		ret = del_pending_extents(trans, extent_root);
		/* ... */
		cur_start = cache->key.objectid + cache->key.offset;
	}
	return 0;
}
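
/*
 * Worked example of the sizing heuristic above (illustrative numbers only):
 * for a 12 GiB filesystem, group_size starts at total_bytes / 12 = 1 GiB.
 * The very first group (cur_start == 0) becomes the system group and is
 * clamped to the 8 MiB .. 32 MiB range, so it ends up at 32 MiB.  Later
 * groups alternate by the total_data >= 2 * total_metadata rule: metadata
 * groups are capped at 1 GiB and data groups at 5 GiB, and the final group
 * is stretched to the end of the device once less than 1.25 * group_size
 * remains, which is what (total_bytes - cur_start) * 4 < group_size * 5
 * tests for.
 */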
int btrfs_update_block_group(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     u64 bytenr, u64 num_bytes, int alloc,
			     int mark_free)
{
	return update_block_group(trans, root, bytenr, num_bytes,
				  alloc, mark_free);
}
/*
 * Just remove a block group item in the extent tree.
 * The caller should ensure the block group is empty and all its space is
 * pinned, or new tree blocks/data may be allocated into it.
 */
static int free_block_group_item(struct btrfs_trans_handle *trans,
				 struct btrfs_fs_info *fs_info,
				 u64 bytenr, u64 len)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_root *root = fs_info->extent_root;

	key.objectid = bytenr;
	key.offset = len;
	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;

	path = btrfs_alloc_path();
	/* ... */
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	/* ... */
	ret = btrfs_del_item(trans, root, path);
	/* ... */
	btrfs_free_path(path);
	return ret;
}
static int free_dev_extent_item(struct btrfs_trans_handle *trans,
				struct btrfs_fs_info *fs_info,
				u64 devid, u64 dev_offset)
{
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	/* ... */
	key.objectid = devid;
	key.type = BTRFS_DEV_EXTENT_KEY;
	key.offset = dev_offset;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	/* ... */
	ret = btrfs_del_item(trans, root, path);
	/* ... */
	btrfs_free_path(path);
	return ret;
}
static int free_chunk_dev_extent_items(struct btrfs_trans_handle *trans,
				       struct btrfs_fs_info *fs_info,
				       u64 chunk_offset)
{
	struct btrfs_chunk *chunk = NULL;
	struct btrfs_root *root = fs_info->chunk_root;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	/* ... */
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	key.offset = chunk_offset;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
	/* ... */
	chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
			       struct btrfs_chunk);
	num_stripes = btrfs_chunk_num_stripes(path->nodes[0], chunk);
	for (i = 0; i < num_stripes; i++) {
		ret = free_dev_extent_item(trans, fs_info,
			btrfs_stripe_devid_nr(path->nodes[0], chunk, i),
			btrfs_stripe_offset_nr(path->nodes[0], chunk, i));
		/* ... */
	}
	btrfs_free_path(path);
	return ret;
}
static int free_system_chunk_item(struct btrfs_super_block *super,
				  struct btrfs_key *key)
{
	struct btrfs_disk_key *disk_key;
	struct btrfs_key cpu_key;
	u32 array_size = btrfs_super_sys_array_size(super);
	char *ptr = (char *)super->sys_chunk_array;

	while (cur < btrfs_super_sys_array_size(super)) {
		struct btrfs_chunk *chunk;

		disk_key = (struct btrfs_disk_key *)(ptr + cur);
		btrfs_disk_key_to_cpu(&cpu_key, disk_key);
		if (cpu_key.type != BTRFS_CHUNK_ITEM_KEY) {
			/* ... */
			break;
		}

		chunk = (struct btrfs_chunk *)(ptr + cur + sizeof(*disk_key));
		num_stripes = btrfs_stack_chunk_num_stripes(chunk);
		chunk_len = btrfs_chunk_item_size(num_stripes) +
			    sizeof(*disk_key);

		if (key->objectid == cpu_key.objectid &&
		    key->offset == cpu_key.offset &&
		    key->type == cpu_key.type) {
			memmove(ptr + cur, ptr + cur + chunk_len,
				array_size - cur - chunk_len);
			array_size -= chunk_len;
			btrfs_set_super_sys_array_size(super, array_size);
			/* ... */
			break;
		}

		cur += chunk_len;
	}
	return ret;
}
static int free_chunk_item(struct btrfs_trans_handle *trans,
			   struct btrfs_fs_info *fs_info,
			   u64 bytenr, u64 len)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_root *root = fs_info->chunk_root;
	struct btrfs_chunk *chunk;

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = bytenr;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	path = btrfs_alloc_path();
	/* ... */
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	/* ... */
	chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
			       struct btrfs_chunk);
	chunk_type = btrfs_chunk_type(path->nodes[0], chunk);

	ret = btrfs_del_item(trans, root, path);
	/* ... */
	if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
		ret = free_system_chunk_item(fs_info->super_copy, &key);

	btrfs_free_path(path);
	return ret;
}
static u64 get_dev_extent_len(struct map_lookup *map)
{
	switch (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
	case 0: /* Single */
	case BTRFS_BLOCK_GROUP_DUP:
	case BTRFS_BLOCK_GROUP_RAID1:
		div = 1;
		break;
	case BTRFS_BLOCK_GROUP_RAID5:
		div = (map->num_stripes - 1);
		break;
	case BTRFS_BLOCK_GROUP_RAID6:
		div = (map->num_stripes - 2);
		break;
	case BTRFS_BLOCK_GROUP_RAID10:
		div = (map->num_stripes / map->sub_stripes);
		break;
	default:
		/* normally, the read chunk security hook should have handled it */
		BUG_ON(1);
	}
	return map->ce.size / div;
}
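
/*
 * Worked example (illustrative numbers only): a RAID10 chunk striped over
 * map->num_stripes == 4 devices with map->sub_stripes == 2 gives
 * div == 4 / 2 == 2, so a 2 GiB chunk (map->ce.size) occupies a 1 GiB dev
 * extent on each device.  For RAID5 over 3 devices div == 2, and for
 * single/DUP/RAID1 each dev extent is as large as the chunk itself.
 */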
/* free block group/chunk related caches */
static int free_block_group_cache(struct btrfs_trans_handle *trans,
				  struct btrfs_fs_info *fs_info,
				  u64 bytenr, u64 len)
{
	struct btrfs_block_group_cache *cache;
	struct cache_extent *ce;
	struct map_lookup *map;

	/* Free block group cache first */
	cache = btrfs_lookup_block_group(fs_info, bytenr);
	/* ... */
	flags = cache->flags;
	if (cache->free_space_ctl) {
		btrfs_remove_free_space_cache(cache);
		kfree(cache->free_space_ctl);
	}
	clear_extent_bits(&fs_info->block_group_cache, bytenr, bytenr + len,
			  (unsigned int)-1, GFP_NOFS);
	ret = free_space_info(fs_info, flags, len, 0, NULL);
	/* ... */

	/* Then free mapping info and dev usage info */
	ce = search_cache_extent(&fs_info->mapping_tree.cache_tree, bytenr);
	if (!ce || ce->start != bytenr) {
		/* ... */
	}
	map = container_of(ce, struct map_lookup, ce);
	for (i = 0; i < map->num_stripes; i++) {
		struct btrfs_device *device;

		device = map->stripes[i].dev;
		device->bytes_used -= get_dev_extent_len(map);
		ret = btrfs_update_device(trans, device);
		/* ... */
	}

	remove_cache_extent(&fs_info->mapping_tree.cache_tree, ce);
	/* ... */
	return ret;
}
int btrfs_free_block_group(struct btrfs_trans_handle *trans,
			   struct btrfs_fs_info *fs_info, u64 bytenr, u64 len)
{
	struct btrfs_root *extent_root = fs_info->extent_root;
	struct btrfs_path *path;
	struct btrfs_block_group_item *bgi;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	/* ... */
	key.objectid = bytenr;
	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	key.offset = len;

	/* Double check the block group to ensure it's empty */
	ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
	/* ... */
	bgi = btrfs_item_ptr(path->nodes[0], path->slots[0],
			     struct btrfs_block_group_item);
	if (btrfs_disk_block_group_used(path->nodes[0], bgi)) {
		fprintf(stderr,
			"WARNING: block group [%llu,%llu) is not empty\n",
			bytenr, bytenr + len);
		/* ... */
		goto out;
	}
	btrfs_release_path(path);

	/*
	 * Now pin all space in the block group, to prevent further
	 * transactions from allocating space out of it.
	 * Every operation that needs a transaction must respect the
	 * pinned range.
	 */
	btrfs_pin_extent(fs_info, bytenr, len);

	/* delete the block group item and the chunk item */
	ret = free_block_group_item(trans, fs_info, bytenr, len);
	if (ret < 0) {
		fprintf(stderr,
			"failed to free block group item for [%llu,%llu)\n",
			bytenr, bytenr + len);
		btrfs_unpin_extent(fs_info, bytenr, len);
		goto out;
	}

	ret = free_chunk_dev_extent_items(trans, fs_info, bytenr);
	if (ret < 0) {
		fprintf(stderr,
			"failed to free dev extents belonging to [%llu,%llu)\n",
			bytenr, bytenr + len);
		btrfs_unpin_extent(fs_info, bytenr, len);
		goto out;
	}

	ret = free_chunk_item(trans, fs_info, bytenr, len);
	if (ret < 0) {
		fprintf(stderr,
			"failed to free chunk for [%llu,%llu)\n",
			bytenr, bytenr + len);
		btrfs_unpin_extent(fs_info, bytenr, len);
		goto out;
	}

	/* Now release the block_group_cache */
	ret = free_block_group_cache(trans, fs_info, bytenr, len);
	btrfs_unpin_extent(fs_info, bytenr, len);
out:
	btrfs_free_path(path);
	return ret;
}
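
/*
 * Summary of the teardown order used above (descriptive note): the block
 * group is first verified to be empty and pinned, then the block group
 * item, the per-device dev extent items and the chunk item (plus its copy
 * in the super block sys_chunk_array for system chunks) are removed, and
 * only then are the in-memory caches dropped and the range unpinned.
 * Pinning first keeps the allocator from handing out space from a
 * half-deleted group.
 */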
/*
 * Fixup block accounting. The initial block accounting created by
 * make_block_groups isn't accurate in this case.
 */
int btrfs_fix_block_accounting(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	struct btrfs_path path;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_block_group_cache *cache;
	struct btrfs_fs_info *fs_info = root->fs_info;

	root = root->fs_info->extent_root;

	while (extent_root_pending_ops(fs_info)) {
		ret = finish_current_insert(trans, root);
		/* ... */
		ret = del_pending_extents(trans, root);
		/* ... */
	}

	while (1) {
		cache = btrfs_lookup_first_block_group(fs_info, start);
		if (!cache)
			break;
		start = cache->key.objectid + cache->key.offset;
		btrfs_set_block_group_used(&cache->item, 0);
		cache->space_info->bytes_used = 0;
		set_extent_bits(&root->fs_info->block_group_cache,
				cache->key.objectid,
				cache->key.objectid + cache->key.offset - 1,
				BLOCK_GROUP_DIRTY, GFP_NOFS);
	}

	btrfs_init_path(&path);
	key.objectid = 0;
	key.offset = 0;
	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
	ret = btrfs_search_slot(trans, root->fs_info->extent_root,
				&key, &path, 0, 0);
	/* ... */
	while (1) {
		leaf = path.nodes[0];
		slot = path.slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, &path);
			if (ret)
				break;
			leaf = path.nodes[0];
			slot = path.slots[0];
		}
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.type == BTRFS_EXTENT_ITEM_KEY) {
			bytes_used += key.offset;
			ret = btrfs_update_block_group(trans, root,
					key.objectid, key.offset, 1, 0);
			/* ... */
		} else if (key.type == BTRFS_METADATA_ITEM_KEY) {
			bytes_used += root->leafsize;
			ret = btrfs_update_block_group(trans, root,
					key.objectid, root->leafsize, 1, 0);
			/* ... */
		}
		path.slots[0]++;
	}
	btrfs_set_super_bytes_used(root->fs_info->super_copy, bytes_used);
	btrfs_release_path(&path);
	return 0;
}
/*
 * Record a file extent. Do all the required work, such as inserting the
 * file extent item, inserting the extent item and backref item into the
 * extent tree and updating block accounting.
 */
int btrfs_record_file_extent(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 objectid,
			     struct btrfs_inode_item *inode,
			     u64 file_pos, u64 disk_bytenr,
			     u64 num_bytes)
{
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_root *extent_root = info->extent_root;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key ins_key;
	struct btrfs_path path;
	struct btrfs_extent_item *ei;

	if (disk_bytenr == 0) {
		ret = btrfs_insert_file_extent(trans, root, objectid,
					       file_pos, disk_bytenr,
					       num_bytes, num_bytes);
		return ret;
	}

	btrfs_init_path(&path);

	ins_key.objectid = objectid;
	ins_key.offset = file_pos;
	btrfs_set_key_type(&ins_key, BTRFS_EXTENT_DATA_KEY);
	ret = btrfs_insert_empty_item(trans, root, &path, &ins_key,
				      sizeof(*fi));
	/* ... */
	leaf = path.nodes[0];
	fi = btrfs_item_ptr(leaf, path.slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, fi, trans->transid);
	btrfs_set_file_extent_type(leaf, fi, BTRFS_FILE_EXTENT_REG);
	btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
	btrfs_set_file_extent_disk_num_bytes(leaf, fi, num_bytes);
	btrfs_set_file_extent_offset(leaf, fi, 0);
	btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
	btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
	btrfs_set_file_extent_compression(leaf, fi, 0);
	btrfs_set_file_extent_encryption(leaf, fi, 0);
	btrfs_set_file_extent_other_encoding(leaf, fi, 0);
	btrfs_mark_buffer_dirty(leaf);

	nbytes = btrfs_stack_inode_nbytes(inode) + num_bytes;
	btrfs_set_stack_inode_nbytes(inode, nbytes);

	btrfs_release_path(&path);

	ins_key.objectid = disk_bytenr;
	ins_key.offset = num_bytes;
	ins_key.type = BTRFS_EXTENT_ITEM_KEY;

	ret = btrfs_insert_empty_item(trans, extent_root, &path,
				      &ins_key, sizeof(*ei));
	if (ret == 0) {
		leaf = path.nodes[0];
		ei = btrfs_item_ptr(leaf, path.slots[0],
				    struct btrfs_extent_item);

		btrfs_set_extent_refs(leaf, ei, 0);
		btrfs_set_extent_generation(leaf, ei, 0);
		btrfs_set_extent_flags(leaf, ei, BTRFS_EXTENT_FLAG_DATA);

		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_update_block_group(trans, root, disk_bytenr,
					       num_bytes, 1, 0);
		/* ... */
	} else if (ret != -EEXIST) {
		/* ... */
	}
	btrfs_extent_post_op(trans, extent_root);

	ret = btrfs_inc_extent_ref(trans, root, disk_bytenr, num_bytes, 0,
				   root->root_key.objectid,
				   objectid, file_pos);
	/* ... */
	btrfs_release_path(&path);
	return ret;
}