// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/percpu_counter.h>
#include <linux/lockdep.h>
#include <linux/crc32c.h>
#include "print-tree.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "ref-verify.h"
#include "space-info.h"
#include "block-rsv.h"
#include "delalloc-space.h"
#include "block-group.h"

#undef SCRAMBLE_DELAYED_REFS
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
			       struct btrfs_delayed_ref_node *node, u64 parent,
			       u64 root_objectid, u64 owner_objectid,
			       u64 owner_offset, int refs_to_drop,
			       struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				      u64 parent, u64 root_objectid,
				      u64 flags, u64 owner, u64 offset,
				      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_delayed_ref_node *node,
				     struct btrfs_delayed_extent_op *extent_op);
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key);
static int block_group_bits(struct btrfs_block_group *cache, u64 bits)
{
	return (cache->flags & bits) == bits;
}
int btrfs_add_excluded_extent(struct btrfs_fs_info *fs_info,
			      u64 start, u64 num_bytes)
{
	u64 end = start + num_bytes - 1;

	set_extent_bits(&fs_info->excluded_extents, start, end,
			EXTENT_UPTODATE);
	return 0;
}
void btrfs_free_excluded_extents(struct btrfs_block_group *cache)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	u64 start, end;

	start = cache->start;
	end = start + cache->length - 1;

	clear_extent_bits(&fs_info->excluded_extents, start, end,
			  EXTENT_UPTODATE);
}
static u64 generic_ref_to_space_flags(struct btrfs_ref *ref)
{
	if (ref->type == BTRFS_REF_METADATA) {
		if (ref->tree_ref.root == BTRFS_CHUNK_TREE_OBJECTID)
			return BTRFS_BLOCK_GROUP_SYSTEM;
		return BTRFS_BLOCK_GROUP_METADATA;
	}
	return BTRFS_BLOCK_GROUP_DATA;
}
static void add_pinned_bytes(struct btrfs_fs_info *fs_info,
			     struct btrfs_ref *ref)
{
	struct btrfs_space_info *space_info;
	u64 flags = generic_ref_to_space_flags(ref);

	space_info = btrfs_find_space_info(fs_info, flags);
	ASSERT(space_info);
	percpu_counter_add_batch(&space_info->total_bytes_pinned, ref->len,
				 BTRFS_TOTAL_BYTES_PINNED_BATCH);
}
static void sub_pinned_bytes(struct btrfs_fs_info *fs_info,
			     struct btrfs_ref *ref)
{
	struct btrfs_space_info *space_info;
	u64 flags = generic_ref_to_space_flags(ref);

	space_info = btrfs_find_space_info(fs_info, flags);
	ASSERT(space_info);
	percpu_counter_add_batch(&space_info->total_bytes_pinned, -ref->len,
				 BTRFS_TOTAL_BYTES_PINNED_BATCH);
}
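
/*
 * The two helpers above keep space_info->total_bytes_pinned in step with the
 * queued delayed refs: bytes that are expected to be freed once the current
 * transaction commits are added here, and subtracted again if the extent
 * gains a reference before the free actually happens.
 */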
/* simple helper to search for an existing data extent at a given offset */
int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = start;
	key.offset = len;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
	btrfs_free_path(path);
	return ret;
}
/*
 * helper function to look up the reference count and flags of a tree block.
 *
 * the head node for a delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree. the head
 * node may also store the extent flags to set. This way you can check
 * to see what the reference count and extent flags would be once all of
 * the delayed refs are processed.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info, u64 bytenr,
			     u64 offset, int metadata, u64 *refs, u64 *flags)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u32 item_size;
	u64 num_refs;
	u64 extent_flags;
	int ret;

	/*
	 * If we don't have skinny metadata, don't bother doing anything
	 * different
	 */
	if (metadata && !btrfs_fs_incompat(fs_info, SKINNY_METADATA)) {
		offset = fs_info->nodesize;
		metadata = 0;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (!trans) {
		path->skip_locking = 1;
		path->search_commit_root = 1;
	}

search_again:
	key.objectid = bytenr;
	key.offset = offset;
	if (metadata)
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;

	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out_free;

	if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
		if (path->slots[0]) {
			path->slots[0]--;
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
			if (key.objectid == bytenr &&
			    key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == fs_info->nodesize)
				ret = 0;
		}
	}

	if (ret == 0) {
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		if (item_size >= sizeof(*ei)) {
			ei = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_extent_item);
			num_refs = btrfs_extent_refs(leaf, ei);
			extent_flags = btrfs_extent_flags(leaf, ei);
		} else {
			ret = -EINVAL;
			btrfs_print_v0_err(fs_info);
			if (trans)
				btrfs_abort_transaction(trans, ret);
			else
				btrfs_handle_fs_error(fs_info, ret, NULL);

			goto out_free;
		}

		BUG_ON(num_refs == 0);
	} else {
		num_refs = 0;
		extent_flags = 0;
		ret = 0;
	}

	if (!trans)
		goto out;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
	if (head) {
		if (!mutex_trylock(&head->mutex)) {
			refcount_inc(&head->refs);
			spin_unlock(&delayed_refs->lock);

			btrfs_release_path(path);

			/*
			 * Mutex was contended, block until it's released and try
			 * again
			 */
			mutex_lock(&head->mutex);
			mutex_unlock(&head->mutex);
			btrfs_put_delayed_ref_head(head);
			goto search_again;
		}
		spin_lock(&head->lock);
		if (head->extent_op && head->extent_op->update_flags)
			extent_flags |= head->extent_op->flags_to_set;
		else
			BUG_ON(num_refs == 0);

		num_refs += head->ref_mod;
		spin_unlock(&head->lock);
		mutex_unlock(&head->mutex);
	}
	spin_unlock(&delayed_refs->lock);
out:
	WARN_ON(num_refs == 0);
	if (refs)
		*refs = num_refs;
	if (flags)
		*flags = extent_flags;
out_free:
	btrfs_free_path(path);
	return ret;
}
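
/*
 * Example of the combined view above: an extent item with 2 references on
 * disk plus a delayed ref head whose ref_mod is -1 is reported by
 * btrfs_lookup_extent_info() as having 1 reference, i.e. the count as it
 * will be once the queued delayed refs are run.
 */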
/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs. Implicit back refs are optimized
 * for pointers in non-shared tree blocks. For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key. This information allows us to find the block by
 * b-tree searching. Full back refs are for pointers in tree blocks not
 * referenced by their owner trees. The location of the tree block is recorded
 * in the back refs. Actually the full back ref is generic, and can be
 * used in all cases the implicit back ref is used. The major shortcoming
 * of the full back ref is its overhead. Every time a tree block gets
 * COWed, we have to update the back ref entries for all pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it. This means most tree related operations only involve
 * implicit back refs. For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it. So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree. Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree. In this case, full back refs are used for pointers
 * in the block. Remove these full back refs, add implicit back refs for
 * every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree. In this case, implicit back refs are used for
 * pointers in the block. Add full back refs for every pointer in the
 * block, increase lower level extents' reference counts. The original
 * implicit back refs carry over to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree. Add implicit back refs for every pointer in
 * the new block, increase lower level extents' reference count.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent,
 * the key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, the implicit back refs are used
 * and the fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed by file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of a key. The key offset for the implicit back refs is the
 * objectid of the block's owner tree. The key offset for the full back refs
 * is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * level of the tree block are required. This information is stored in the
 * tree block info structure.
 */
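
/*
 * Illustrative example (hypothetical values): a file extent at bytenr
 * 136708096 referenced by inode 257 at file offset 0 in subvolume 5 gets an
 * implicit back ref keyed
 *
 *     (136708096, BTRFS_EXTENT_DATA_REF_KEY, hash(5, 257, 0))
 *
 * while the same extent shared via a relocated leaf at bytenr P would use a
 * full back ref keyed
 *
 *     (136708096, BTRFS_SHARED_DATA_REF_KEY, P)
 */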
/*
 * is_data == BTRFS_REF_TYPE_BLOCK, tree block type is required,
 * is_data == BTRFS_REF_TYPE_DATA, data type is required,
 * is_data == BTRFS_REF_TYPE_ANY, either type is OK.
 */
int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb,
				     struct btrfs_extent_inline_ref *iref,
				     enum btrfs_inline_ref_type is_data)
{
	int type = btrfs_extent_inline_ref_type(eb, iref);
	u64 offset = btrfs_extent_inline_ref_offset(eb, iref);

	if (type == BTRFS_TREE_BLOCK_REF_KEY ||
	    type == BTRFS_SHARED_BLOCK_REF_KEY ||
	    type == BTRFS_SHARED_DATA_REF_KEY ||
	    type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (is_data == BTRFS_REF_TYPE_BLOCK) {
			if (type == BTRFS_TREE_BLOCK_REF_KEY)
				return type;
			if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
				ASSERT(eb->fs_info);
				/*
				 * Every shared one has parent tree
				 * block, which must be aligned to
				 * nodesize.
				 */
				if (offset &&
				    IS_ALIGNED(offset, eb->fs_info->nodesize))
					return type;
			}
		} else if (is_data == BTRFS_REF_TYPE_DATA) {
			if (type == BTRFS_EXTENT_DATA_REF_KEY)
				return type;
			if (type == BTRFS_SHARED_DATA_REF_KEY) {
				ASSERT(eb->fs_info);
				/*
				 * Every shared one has parent tree
				 * block, which must be aligned to
				 * nodesize.
				 */
				if (offset &&
				    IS_ALIGNED(offset, eb->fs_info->nodesize))
					return type;
			}
		} else {
			ASSERT(is_data == BTRFS_REF_TYPE_ANY);
			return type;
		}
	}

	btrfs_print_leaf((struct extent_buffer *)eb);
	btrfs_err(eb->fs_info, "eb %llu invalid extent inline ref type %d",
		  eb->start, type);
	WARN_ON(1);

	return BTRFS_REF_TYPE_INVALID;
}
static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
	u32 high_crc = ~(u32)0;
	u32 low_crc = ~(u32)0;
	__le64 lenum;

	lenum = cpu_to_le64(root_objectid);
	high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(owner);
	low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(offset);
	low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));

	return ((u64)high_crc << 31) ^ (u64)low_crc;
}
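
/*
 * Note the 31-bit (not 32-bit) shift above: the two CRCs overlap by one
 * bit. Whatever its origin, the resulting value is part of the on-disk key
 * for implicit data back refs, so the formula cannot be changed without
 * breaking existing filesystems.
 */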
static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
				     struct btrfs_extent_data_ref *ref)
{
	return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
				    btrfs_extent_data_ref_objectid(leaf, ref),
				    btrfs_extent_data_ref_offset(leaf, ref));
}
static int match_extent_data_ref(struct extent_buffer *leaf,
				 struct btrfs_extent_data_ref *ref,
				 u64 root_objectid, u64 owner, u64 offset)
{
	if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		return 0;
	return 1;
}
static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid,
					   u64 owner, u64 offset)
{
	struct btrfs_root *root = trans->fs_info->extent_root;
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;
	int recow;
	int err = -ENOENT;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
	}
again:
	recow = 0;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0) {
		err = ret;
		goto fail;
	}

	if (parent) {
		if (!ret)
			return 0;
		goto fail;
	}

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);
	while (1) {
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				err = ret;
			if (ret)
				goto fail;

			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != bytenr ||
		    key.type != BTRFS_EXTENT_DATA_REF_KEY)
			goto fail;

		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);

		if (match_extent_data_ref(leaf, ref, root_objectid,
					  owner, offset)) {
			if (recow) {
				btrfs_release_path(path);
				goto again;
			}
			err = 0;
			break;
		}
		path->slots[0]++;
	}
fail:
	return err;
}
static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid, u64 owner,
					   u64 offset, int refs_to_add)
{
	struct btrfs_root *root = trans->fs_info->extent_root;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	u32 size;
	u32 num_refs;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
		size = sizeof(struct btrfs_shared_data_ref);
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
		size = sizeof(struct btrfs_extent_data_ref);
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, size);
	if (ret && ret != -EEXIST)
		goto fail;

	leaf = path->nodes[0];
	if (parent) {
		struct btrfs_shared_data_ref *ref;
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_shared_data_ref);
		if (ret == 0) {
			btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_shared_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
		}
	} else {
		struct btrfs_extent_data_ref *ref;
		while (ret == -EEXIST) {
			ref = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_data_ref);
			if (match_extent_data_ref(leaf, ref, root_objectid,
						  owner, offset))
				break;
			btrfs_release_path(path);
			key.offset++;
			ret = btrfs_insert_empty_item(trans, root, path, &key,
						      size);
			if (ret && ret != -EEXIST)
				goto fail;

			leaf = path->nodes[0];
		}
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);
		if (ret == 0) {
			btrfs_set_extent_data_ref_root(leaf, ref,
						       root_objectid);
			btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
			btrfs_set_extent_data_ref_offset(leaf, ref, offset);
			btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_extent_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
		}
	}
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;
fail:
	btrfs_release_path(path);
	return ret;
}
static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_path *path,
					   int refs_to_drop, int *last_ref)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref1 = NULL;
	struct btrfs_shared_data_ref *ref2 = NULL;
	struct extent_buffer *leaf;
	u32 num_refs = 0;
	int ret = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
	} else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) {
		btrfs_print_v0_err(trans->fs_info);
		btrfs_abort_transaction(trans, -EINVAL);
		return -EINVAL;
	} else {
		BUG();
	}

	BUG_ON(num_refs < refs_to_drop);
	num_refs -= refs_to_drop;

	if (num_refs == 0) {
		ret = btrfs_del_item(trans, trans->fs_info->extent_root, path);
		*last_ref = 1;
	} else {
		if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
		else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
			btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
		btrfs_mark_buffer_dirty(leaf);
	}
	return ret;
}
static noinline u32 extent_data_ref_count(struct btrfs_path *path,
					  struct btrfs_extent_inline_ref *iref)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref1;
	struct btrfs_shared_data_ref *ref2;
	u32 num_refs = 0;
	int type;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	BUG_ON(key.type == BTRFS_EXTENT_REF_V0_KEY);
	if (iref) {
		/*
		 * If type is invalid, we should have bailed out earlier than
		 * this call.
		 */
		type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_DATA);
		ASSERT(type != BTRFS_REF_TYPE_INVALID);
		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
			num_refs = btrfs_extent_data_ref_count(leaf, ref1);
		} else {
			ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
			num_refs = btrfs_shared_data_ref_count(leaf, ref2);
		}
	} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
	} else {
		WARN_ON(1);
	}
	return num_refs;
}
static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_root *root = trans->fs_info->extent_root;
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
	return ret;
}
static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_insert_empty_item(trans, trans->fs_info->extent_root,
				      path, &key, 0);
	btrfs_release_path(path);
	return ret;
}
static inline int extent_ref_type(u64 parent, u64 owner)
{
	int type;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		if (parent > 0)
			type = BTRFS_SHARED_BLOCK_REF_KEY;
		else
			type = BTRFS_TREE_BLOCK_REF_KEY;
	} else {
		if (parent > 0)
			type = BTRFS_SHARED_DATA_REF_KEY;
		else
			type = BTRFS_EXTENT_DATA_REF_KEY;
	}
	return type;
}
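
/*
 * Summary of the mapping above (owner < BTRFS_FIRST_FREE_OBJECTID means a
 * tree block, since owner then holds the level):
 *
 *   tree block, parent set   -> BTRFS_SHARED_BLOCK_REF_KEY (full back ref)
 *   tree block, no parent    -> BTRFS_TREE_BLOCK_REF_KEY   (implicit)
 *   data extent, parent set  -> BTRFS_SHARED_DATA_REF_KEY  (full back ref)
 *   data extent, no parent   -> BTRFS_EXTENT_DATA_REF_KEY  (implicit)
 */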
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)
{
	for (; level < BTRFS_MAX_LEVEL; level++) {
		if (!path->nodes[level])
			break;
		if (path->slots[level] + 1 >=
		    btrfs_header_nritems(path->nodes[level]))
			continue;
		if (level == 0)
			btrfs_item_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		else
			btrfs_node_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		return 0;
	}
	return 1;
}
/*
 * look for an inline back ref. if the back ref is found, *ref_ret is set
 * to the address of the inline back ref, and 0 is returned.
 *
 * if the back ref isn't found, *ref_ret is set to the address where it
 * should be inserted, and -ENOENT is returned.
 *
 * if insert is true and there are too many inline back refs, the path
 * points to the extent item, and -EAGAIN is returned.
 *
 * NOTE: inline back refs are ordered in the same way that back ref
 *	 items in the tree are ordered.
 */
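
/*
 * Rough layout of an extent item with inline refs, for reference (non-skinny
 * metadata shown):
 *
 *   [btrfs_extent_item][btrfs_tree_block_info (tree blocks only)]
 *   [inline ref][inline ref]...
 *
 * Each inline ref starts with a type byte and a 64-bit offset; for
 * BTRFS_EXTENT_DATA_REF_KEY the offset field is overlaid by a
 * btrfs_extent_data_ref body, and for BTRFS_SHARED_DATA_REF_KEY a
 * btrfs_shared_data_ref count follows the ref.
 */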
static noinline_for_stack
int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int insert)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	u64 flags;
	u64 item_size;
	unsigned long ptr;
	unsigned long end;
	int extra_size;
	int type;
	int want;
	int ret;
	int err = 0;
	bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
	int needed;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;

	want = extent_ref_type(parent, owner);
	if (insert) {
		extra_size = btrfs_extent_inline_ref_size(want);
		path->keep_locks = 1;
	} else
		extra_size = -1;

	/*
	 * Owner is our level, so we can just add one to get the level for the
	 * block we are interested in.
	 */
	if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
		key.type = BTRFS_METADATA_ITEM_KEY;
		key.offset = owner;
	}

again:
	ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}

	/*
	 * We may be a newly converted file system which still has the old fat
	 * extent entries for metadata, so try and see if we have one of those.
	 */
	if (ret > 0 && skinny_metadata) {
		skinny_metadata = false;
		if (path->slots[0]) {
			path->slots[0]--;
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
			if (key.objectid == bytenr &&
			    key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == num_bytes)
				ret = 0;
		}
		if (ret) {
			key.objectid = bytenr;
			key.type = BTRFS_EXTENT_ITEM_KEY;
			key.offset = num_bytes;
			btrfs_release_path(path);
			goto again;
		}
	}

	if (ret && !insert) {
		err = -ENOENT;
		goto out;
	} else if (WARN_ON(ret)) {
		err = -EIO;
		goto out;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	if (unlikely(item_size < sizeof(*ei))) {
		err = -EINVAL;
		btrfs_print_v0_err(fs_info);
		btrfs_abort_transaction(trans, err);
		goto out;
	}

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	}

	if (owner >= BTRFS_FIRST_FREE_OBJECTID)
		needed = BTRFS_REF_TYPE_DATA;
	else
		needed = BTRFS_REF_TYPE_BLOCK;

	err = -ENOENT;
	while (1) {
		if (ptr >= end) {
			WARN_ON(ptr > end);
			break;
		}
		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_get_extent_inline_ref_type(leaf, iref, needed);
		if (type == BTRFS_REF_TYPE_INVALID) {
			err = -EUCLEAN;
			goto out;
		}

		if (want < type)
			break;
		if (want > type) {
			ptr += btrfs_extent_inline_ref_size(type);
			continue;
		}

		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			struct btrfs_extent_data_ref *dref;
			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			if (match_extent_data_ref(leaf, dref, root_objectid,
						  owner, offset)) {
				err = 0;
				break;
			}
			if (hash_extent_data_ref_item(leaf, dref) <
			    hash_extent_data_ref(root_objectid, owner, offset))
				break;
		} else {
			u64 ref_offset;
			ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
			if (parent > 0) {
				if (parent == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < parent)
					break;
			} else {
				if (root_objectid == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < root_objectid)
					break;
			}
		}
		ptr += btrfs_extent_inline_ref_size(type);
	}
	if (err == -ENOENT && insert) {
		if (item_size + extra_size >=
		    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
			err = -EAGAIN;
			goto out;
		}
		/*
		 * To add a new inline back ref, we have to make sure
		 * there is no corresponding back ref item.
		 * For simplicity, we just do not add a new inline back
		 * ref if there is any kind of item for this block.
		 */
		if (find_next_key(path, 0, &key) == 0 &&
		    key.objectid == bytenr &&
		    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
			err = -EAGAIN;
			goto out;
		}
	}
	*ref_ret = (struct btrfs_extent_inline_ref *)ptr;
out:
	if (insert) {
		path->keep_locks = 0;
		btrfs_unlock_up_safe(path, 1);
	}
	return err;
}
/*
 * helper to add a new inline back ref
 */
static noinline_for_stack
void setup_inline_extent_backref(struct btrfs_fs_info *fs_info,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	unsigned long ptr;
	unsigned long end;
	unsigned long item_offset;
	u64 refs;
	int size;
	int type;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	item_offset = (unsigned long)iref - (unsigned long)ei;

	type = extent_ref_type(parent, owner);
	size = btrfs_extent_inline_ref_size(type);

	btrfs_extend_item(path, size);

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	refs += refs_to_add;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	ptr = (unsigned long)ei + item_offset;
	end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
	if (ptr < end - size)
		memmove_extent_buffer(leaf, ptr + size, ptr,
				      end - size - ptr);

	iref = (struct btrfs_extent_inline_ref *)ptr;
	btrfs_set_extent_inline_ref_type(leaf, iref, type);
	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		struct btrfs_extent_data_ref *dref;
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
		btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
		btrfs_set_extent_data_ref_offset(leaf, dref, offset);
		btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		struct btrfs_shared_data_ref *sref;
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else {
		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
	}
	btrfs_mark_buffer_dirty(leaf);
}
static int lookup_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner, u64 offset)
{
	int ret;

	ret = lookup_inline_extent_backref(trans, path, ref_ret, bytenr,
					   num_bytes, parent, root_objectid,
					   owner, offset, 0);
	if (ret != -ENOENT)
		return ret;

	btrfs_release_path(path);
	*ref_ret = NULL;

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = lookup_tree_block_ref(trans, path, bytenr, parent,
					    root_objectid);
	} else {
		ret = lookup_extent_data_ref(trans, path, bytenr, parent,
					     root_objectid, owner, offset);
	}
	return ret;
}
/*
 * helper to update/remove an inline back ref
 */
static noinline_for_stack
void update_inline_extent_backref(struct btrfs_path *path,
				  struct btrfs_extent_inline_ref *iref,
				  int refs_to_mod,
				  struct btrfs_delayed_extent_op *extent_op,
				  int *last_ref)
{
	struct extent_buffer *leaf = path->nodes[0];
	struct btrfs_extent_item *ei;
	struct btrfs_extent_data_ref *dref = NULL;
	struct btrfs_shared_data_ref *sref = NULL;
	unsigned long ptr;
	unsigned long end;
	u32 item_size;
	int size;
	int type;
	u64 refs;

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
	refs += refs_to_mod;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	/*
	 * If type is invalid, we should have bailed out after
	 * lookup_inline_extent_backref().
	 */
	type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_ANY);
	ASSERT(type != BTRFS_REF_TYPE_INVALID);

	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		refs = btrfs_extent_data_ref_count(leaf, dref);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		refs = btrfs_shared_data_ref_count(leaf, sref);
	} else {
		refs = 1;
		BUG_ON(refs_to_mod != -1);
	}

	BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
	refs += refs_to_mod;

	if (refs > 0) {
		if (type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, dref, refs);
		else
			btrfs_set_shared_data_ref_count(leaf, sref, refs);
	} else {
		*last_ref = 1;
		size = btrfs_extent_inline_ref_size(type);
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		ptr = (unsigned long)iref;
		end = (unsigned long)ei + item_size;
		if (ptr + size < end)
			memmove_extent_buffer(leaf, ptr, ptr + size,
					      end - ptr - size);
		item_size -= size;
		btrfs_truncate_item(path, item_size, 1);
	}
	btrfs_mark_buffer_dirty(leaf);
}
static noinline_for_stack
int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_path *path,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner,
				 u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_extent_inline_ref *iref;
	int ret;

	ret = lookup_inline_extent_backref(trans, path, &iref, bytenr,
					   num_bytes, parent, root_objectid,
					   owner, offset, 1);
	if (ret == 0) {
		BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
		update_inline_extent_backref(path, iref, refs_to_add,
					     extent_op, NULL);
	} else if (ret == -ENOENT) {
		setup_inline_extent_backref(trans->fs_info, path, iref, parent,
					    root_objectid, owner, offset,
					    refs_to_add, extent_op);
		ret = 0;
	}
	return ret;
}
static int remove_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 int refs_to_drop, int is_data, int *last_ref)
{
	int ret = 0;

	BUG_ON(!is_data && refs_to_drop != 1);
	if (iref) {
		update_inline_extent_backref(path, iref, -refs_to_drop, NULL,
					     last_ref);
	} else if (is_data) {
		ret = remove_extent_data_ref(trans, path, refs_to_drop,
					     last_ref);
	} else {
		*last_ref = 1;
		ret = btrfs_del_item(trans, trans->fs_info->extent_root, path);
	}
	return ret;
}
static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
			       u64 *discarded_bytes)
{
	int j, ret = 0;
	u64 bytes_left, end;
	u64 aligned_start = ALIGN(start, 1 << 9);

	if (WARN_ON(start != aligned_start)) {
		len -= aligned_start - start;
		len = round_down(len, 1 << 9);
		start = aligned_start;
	}

	*discarded_bytes = 0;

	if (!len)
		return 0;

	end = start + len;
	bytes_left = len;

	/* Skip any superblocks on this device. */
	for (j = 0; j < BTRFS_SUPER_MIRROR_MAX; j++) {
		u64 sb_start = btrfs_sb_offset(j);
		u64 sb_end = sb_start + BTRFS_SUPER_INFO_SIZE;
		u64 size = sb_start - start;

		if (!in_range(sb_start, start, bytes_left) &&
		    !in_range(sb_end, start, bytes_left) &&
		    !in_range(start, sb_start, BTRFS_SUPER_INFO_SIZE))
			continue;

		/*
		 * Superblock spans beginning of range.  Adjust start and
		 * try again.
		 */
		if (sb_start <= start) {
			start += sb_end - start;
			if (start > end) {
				bytes_left = 0;
				break;
			}
			bytes_left = end - start;
			continue;
		}

		if (size) {
			ret = blkdev_issue_discard(bdev, start >> 9, size >> 9,
						   GFP_NOFS, 0);
			if (!ret)
				*discarded_bytes += size;
			else if (ret != -EOPNOTSUPP)
				return ret;
		}

		start = sb_end;
		if (start > end) {
			bytes_left = 0;
			break;
		}
		bytes_left = end - start;
	}

	if (bytes_left) {
		ret = blkdev_issue_discard(bdev, start >> 9, bytes_left >> 9,
					   GFP_NOFS, 0);
		if (!ret)
			*discarded_bytes += bytes_left;
	}
	return ret;
}
int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 num_bytes, u64 *actual_bytes)
{
	int ret = 0;
	u64 discarded_bytes = 0;
	u64 end = bytenr + num_bytes;
	u64 cur = bytenr;
	struct btrfs_bio *bbio = NULL;

	/*
	 * Avoid races with device replace and make sure our bbio has devices
	 * associated to its stripes that don't go away while we are discarding.
	 */
	btrfs_bio_counter_inc_blocked(fs_info);
	while (cur < end) {
		struct btrfs_bio_stripe *stripe;
		int i;

		num_bytes = end - cur;
		/* Tell the block device(s) that the sectors can be discarded */
		ret = btrfs_map_block(fs_info, BTRFS_MAP_DISCARD, cur,
				      &num_bytes, &bbio, 0);
		/*
		 * Error can be -ENOMEM, -ENOENT (no such chunk mapping) or
		 * -EOPNOTSUPP. For any such error, @num_bytes is not updated,
		 * thus we can't continue anyway.
		 */
		if (ret < 0)
			goto out;

		stripe = bbio->stripes;
		for (i = 0; i < bbio->num_stripes; i++, stripe++) {
			u64 bytes;
			struct request_queue *req_q;

			if (!stripe->dev->bdev) {
				ASSERT(btrfs_test_opt(fs_info, DEGRADED));
				continue;
			}
			req_q = bdev_get_queue(stripe->dev->bdev);
			if (!blk_queue_discard(req_q))
				continue;

			ret = btrfs_issue_discard(stripe->dev->bdev,
						  stripe->physical,
						  stripe->length,
						  &bytes);
			if (!ret) {
				discarded_bytes += bytes;
			} else if (ret != -EOPNOTSUPP) {
				/*
				 * Logic errors or -ENOMEM, or -EIO, but
				 * unlikely to happen.
				 *
				 * And since there are two loops, explicitly
				 * go to out to avoid confusion.
				 */
				btrfs_put_bbio(bbio);
				goto out;
			}

			/*
			 * Just in case we get back EOPNOTSUPP for some reason,
			 * just ignore the return value so we don't screw up
			 * people calling discard_extent.
			 */
			ret = 0;
		}
		btrfs_put_bbio(bbio);
		cur += num_bytes;
	}
out:
	btrfs_bio_counter_dec(fs_info);

	if (actual_bytes)
		*actual_bytes = discarded_bytes;

	if (ret == -EOPNOTSUPP)
		ret = 0;
	return ret;
}
/* Can return -ENOMEM */
int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
			 struct btrfs_ref *generic_ref)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int old_ref_mod, new_ref_mod;
	int ret;

	ASSERT(generic_ref->type != BTRFS_REF_NOT_SET &&
	       generic_ref->action);
	BUG_ON(generic_ref->type == BTRFS_REF_METADATA &&
	       generic_ref->tree_ref.root == BTRFS_TREE_LOG_OBJECTID);

	if (generic_ref->type == BTRFS_REF_METADATA)
		ret = btrfs_add_delayed_tree_ref(trans, generic_ref,
				NULL, &old_ref_mod, &new_ref_mod);
	else
		ret = btrfs_add_delayed_data_ref(trans, generic_ref, 0,
						 &old_ref_mod, &new_ref_mod);

	btrfs_ref_tree_mod(fs_info, generic_ref);

	if (ret == 0 && old_ref_mod < 0 && new_ref_mod >= 0)
		sub_pinned_bytes(fs_info, generic_ref);

	return ret;
}
/*
 * __btrfs_inc_extent_ref - insert backreference for a given extent
 *
 * @trans:	    Handle of transaction
 *
 * @node:	    The delayed ref node used to get the bytenr/length for
 *		    extent whose references are incremented.
 *
 * @parent:	    If this is a shared extent (BTRFS_SHARED_DATA_REF_KEY/
 *		    BTRFS_SHARED_BLOCK_REF_KEY) then it holds the logical
 *		    bytenr of the parent block. Since new extents are always
 *		    created with indirect references, this will only be the case
 *		    when relocating a shared extent. In that case, root_objectid
 *		    will be BTRFS_TREE_RELOC_OBJECTID. Otherwise, parent must
 *		    be 0
 *
 * @root_objectid:  The id of the root where this modification has originated,
 *		    this can be either one of the well-known metadata trees or
 *		    the subvolume id which references this extent.
 *
 * @owner:	    For data extents it is the inode number of the owning file.
 *		    For metadata extents this parameter holds the level in the
 *		    tree of the extent.
 *
 * @offset:	    For metadata extents the offset is ignored and is currently
 *		    always passed as 0. For data extents it is the file offset
 *		    this extent belongs to.
 *
 * @refs_to_add:    Number of references to add
 *
 * @extent_op:	    Pointer to a structure, holding information necessary when
 *		    updating a tree block's flags
 *
 */
static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_delayed_ref_node *node,
				  u64 parent, u64 root_objectid,
				  u64 owner, u64 offset, int refs_to_add,
				  struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *item;
	struct btrfs_key key;
	u64 bytenr = node->bytenr;
	u64 num_bytes = node->num_bytes;
	u64 refs;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;
	/* this will setup the path even if it fails to insert the back ref */
	ret = insert_inline_extent_backref(trans, path, bytenr, num_bytes,
					   parent, root_objectid, owner,
					   offset, refs_to_add, extent_op);
	if ((ret < 0 && ret != -EAGAIN) || !ret)
		goto out;

	/*
	 * Ok we had -EAGAIN which means we didn't have space to insert an
	 * inline extent ref, so just update the reference count and add a
	 * normal backref.
	 */
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, item);
	btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, item);

	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	path->leave_spinning = 1;
	/* now insert the actual backref */
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		BUG_ON(refs_to_add != 1);
		ret = insert_tree_block_ref(trans, path, bytenr, parent,
					    root_objectid);
	} else {
		ret = insert_extent_data_ref(trans, path, bytenr, parent,
					     root_objectid, owner, offset,
					     refs_to_add);
	}
	if (ret)
		btrfs_abort_transaction(trans, ret);
out:
	btrfs_free_path(path);
	return ret;
}
static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				int insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_key ins;
	u64 parent = 0;
	u64 ref_root = 0;
	u64 flags = 0;

	ins.objectid = node->bytenr;
	ins.offset = node->num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	ref = btrfs_delayed_node_to_data_ref(node);
	trace_run_delayed_data_ref(trans->fs_info, node, ref, node->action);

	if (node->type == BTRFS_SHARED_DATA_REF_KEY)
		parent = ref->parent;
	ref_root = ref->root;

	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		if (extent_op)
			flags |= extent_op->flags_to_set;
		ret = alloc_reserved_file_extent(trans, parent, ref_root,
						 flags, ref->objectid,
						 ref->offset, &ins,
						 node->ref_mod);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, node, parent, ref_root,
					     ref->objectid, ref->offset,
					     node->ref_mod, extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, node, parent,
					  ref_root, ref->objectid,
					  ref->offset, node->ref_mod,
					  extent_op);
	} else {
		BUG();
	}
	return ret;
}
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei)
{
	u64 flags = btrfs_extent_flags(leaf, ei);
	if (extent_op->update_flags) {
		flags |= extent_op->flags_to_set;
		btrfs_set_extent_flags(leaf, ei, flags);
	}

	if (extent_op->update_key) {
		struct btrfs_tree_block_info *bi;
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
	}
}
static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
				 struct btrfs_delayed_ref_head *head,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	u32 item_size;
	int ret;
	int err = 0;
	int metadata = !extent_op->is_data;

	if (TRANS_ABORTED(trans))
		return 0;

	if (metadata && !btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		metadata = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = head->bytenr;

	if (metadata) {
		key.type = BTRFS_METADATA_ITEM_KEY;
		key.offset = extent_op->level;
	} else {
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = head->num_bytes;
	}

again:
	path->leave_spinning = 1;
	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	if (ret > 0) {
		if (metadata) {
			if (path->slots[0] > 0) {
				path->slots[0]--;
				btrfs_item_key_to_cpu(path->nodes[0], &key,
						      path->slots[0]);
				if (key.objectid == head->bytenr &&
				    key.type == BTRFS_EXTENT_ITEM_KEY &&
				    key.offset == head->num_bytes)
					ret = 0;
			}
			if (ret > 0) {
				btrfs_release_path(path);
				metadata = 0;

				key.objectid = head->bytenr;
				key.offset = head->num_bytes;
				key.type = BTRFS_EXTENT_ITEM_KEY;
				goto again;
			}
		} else {
			err = -EIO;
			goto out;
		}
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);

	if (unlikely(item_size < sizeof(*ei))) {
		err = -EINVAL;
		btrfs_print_v0_err(fs_info);
		btrfs_abort_transaction(trans, err);
		goto out;
	}

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	__run_delayed_extent_op(extent_op, leaf, ei);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return err;
}
static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				int insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_tree_ref *ref;
	u64 parent = 0;
	u64 ref_root = 0;

	ref = btrfs_delayed_node_to_tree_ref(node);
	trace_run_delayed_tree_ref(trans->fs_info, node, ref, node->action);

	if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
		parent = ref->parent;
	ref_root = ref->root;

	if (node->ref_mod != 1) {
		btrfs_err(trans->fs_info,
	"btree block(%llu) has %d references rather than 1: action %d ref_root %llu parent %llu",
			  node->bytenr, node->ref_mod, node->action, ref_root,
			  parent);
		return -EIO;
	}
	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		BUG_ON(!extent_op || !extent_op->update_flags);
		ret = alloc_reserved_tree_block(trans, node, extent_op);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, node, parent, ref_root,
					     ref->level, 0, 1, extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, node, parent, ref_root,
					  ref->level, 0, 1, extent_op);
	} else {
		BUG();
	}
	return ret;
}
/* helper function to actually process a single delayed ref entry */
static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_delayed_ref_node *node,
			       struct btrfs_delayed_extent_op *extent_op,
			       int insert_reserved)
{
	int ret = 0;

	if (TRANS_ABORTED(trans)) {
		if (insert_reserved)
			btrfs_pin_extent(trans, node->bytenr, node->num_bytes, 1);
		return 0;
	}

	if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    node->type == BTRFS_SHARED_BLOCK_REF_KEY)
		ret = run_delayed_tree_ref(trans, node, extent_op,
					   insert_reserved);
	else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
		 node->type == BTRFS_SHARED_DATA_REF_KEY)
		ret = run_delayed_data_ref(trans, node, extent_op,
					   insert_reserved);
	else
		BUG();
	if (ret && insert_reserved)
		btrfs_pin_extent(trans, node->bytenr, node->num_bytes, 1);
	return ret;
}
static inline struct btrfs_delayed_ref_node *
select_delayed_ref(struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_node *ref;

	if (RB_EMPTY_ROOT(&head->ref_tree.rb_root))
		return NULL;

	/*
	 * Select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
	 * This is to prevent a ref count from going down to zero, which deletes
	 * the extent item from the extent tree, when there still are references
	 * to add, which would fail because they would not find the extent item.
	 */
	if (!list_empty(&head->ref_add_list))
		return list_first_entry(&head->ref_add_list,
					struct btrfs_delayed_ref_node, add_list);

	ref = rb_entry(rb_first_cached(&head->ref_tree),
		       struct btrfs_delayed_ref_node, ref_node);
	ASSERT(list_empty(&ref->add_list));
	return ref;
}
static void unselect_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
				      struct btrfs_delayed_ref_head *head)
{
	spin_lock(&delayed_refs->lock);
	head->processing = 0;
	delayed_refs->num_heads_ready++;
	spin_unlock(&delayed_refs->lock);
	btrfs_delayed_ref_unlock(head);
}
static struct btrfs_delayed_extent_op *cleanup_extent_op(
				struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_extent_op *extent_op = head->extent_op;

	if (!extent_op)
		return NULL;

	if (head->must_insert_reserved) {
		head->extent_op = NULL;
		btrfs_free_delayed_extent_op(extent_op);
		return NULL;
	}
	return extent_op;
}
static int run_and_cleanup_extent_op(struct btrfs_trans_handle *trans,
				     struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_extent_op *extent_op;
	int ret;

	extent_op = cleanup_extent_op(head);
	if (!extent_op)
		return 0;
	head->extent_op = NULL;
	spin_unlock(&head->lock);
	ret = run_delayed_extent_op(trans, head, extent_op);
	btrfs_free_delayed_extent_op(extent_op);
	return ret ? ret : 1;
}
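
/*
 * Note: the positive return of 1 above is not an error. It tells
 * cleanup_ref_head() below that an extent op was run and the head lock was
 * dropped, so the head has to be revisited.
 */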
void btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info,
				  struct btrfs_delayed_ref_root *delayed_refs,
				  struct btrfs_delayed_ref_head *head)
{
	int nr_items = 1;	/* Dropping this ref head update. */

	if (head->total_ref_mod < 0) {
		struct btrfs_space_info *space_info;
		u64 flags;

		if (head->is_data)
			flags = BTRFS_BLOCK_GROUP_DATA;
		else if (head->is_system)
			flags = BTRFS_BLOCK_GROUP_SYSTEM;
		else
			flags = BTRFS_BLOCK_GROUP_METADATA;
		space_info = btrfs_find_space_info(fs_info, flags);
		ASSERT(space_info);
		percpu_counter_add_batch(&space_info->total_bytes_pinned,
					 -head->num_bytes,
					 BTRFS_TOTAL_BYTES_PINNED_BATCH);

		/*
		 * We had csum deletions accounted for in our delayed refs rsv,
		 * we need to drop the csum leaves for this update from our
		 * delayed_refs_rsv.
		 */
		if (head->is_data) {
			spin_lock(&delayed_refs->lock);
			delayed_refs->pending_csums -= head->num_bytes;
			spin_unlock(&delayed_refs->lock);
			nr_items += btrfs_csum_bytes_to_leaves(fs_info,
							       head->num_bytes);
		}
	}

	btrfs_delayed_refs_rsv_release(fs_info, nr_items);
}
static int cleanup_ref_head(struct btrfs_trans_handle *trans,
			    struct btrfs_delayed_ref_head *head)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_ref_root *delayed_refs;
	int ret;

	delayed_refs = &trans->transaction->delayed_refs;

	ret = run_and_cleanup_extent_op(trans, head);
	if (ret < 0) {
		unselect_delayed_ref_head(delayed_refs, head);
		btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
		return ret;
	} else if (ret) {
		return ret;
	}

	/*
	 * Need to drop our head ref lock and re-acquire the delayed ref lock
	 * and then re-check to make sure nobody got added.
	 */
	spin_unlock(&head->lock);
	spin_lock(&delayed_refs->lock);
	spin_lock(&head->lock);
	if (!RB_EMPTY_ROOT(&head->ref_tree.rb_root) || head->extent_op) {
		spin_unlock(&head->lock);
		spin_unlock(&delayed_refs->lock);
		return 1;
	}
	btrfs_delete_ref_head(delayed_refs, head);
	spin_unlock(&head->lock);
	spin_unlock(&delayed_refs->lock);

	if (head->must_insert_reserved) {
		btrfs_pin_extent(trans, head->bytenr, head->num_bytes, 1);
		if (head->is_data) {
			ret = btrfs_del_csums(trans, fs_info->csum_root,
					      head->bytenr, head->num_bytes);
		}
	}

	btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);

	trace_run_delayed_ref_head(fs_info, head, 0);
	btrfs_delayed_ref_unlock(head);
	btrfs_put_delayed_ref_head(head);
	return ret;
}
static struct btrfs_delayed_ref_head *btrfs_obtain_ref_head(
					struct btrfs_trans_handle *trans)
{
	struct btrfs_delayed_ref_root *delayed_refs =
		&trans->transaction->delayed_refs;
	struct btrfs_delayed_ref_head *head = NULL;
	int ret;

	spin_lock(&delayed_refs->lock);
	head = btrfs_select_ref_head(delayed_refs);
	if (!head) {
		spin_unlock(&delayed_refs->lock);
		return head;
	}

	/*
	 * Grab the lock that says we are going to process all the refs for
	 * this head
	 */
	ret = btrfs_delayed_ref_lock(delayed_refs, head);
	spin_unlock(&delayed_refs->lock);

	/*
	 * We may have dropped the spin lock to get the head mutex lock, and
	 * that might have given someone else time to free the head. If that's
	 * true, it has been removed from our list and we can move on.
	 */
	if (ret == -EAGAIN)
		head = ERR_PTR(-EAGAIN);

	return head;
}
static int btrfs_run_delayed_refs_for_head(struct btrfs_trans_handle *trans,
				    struct btrfs_delayed_ref_head *locked_ref,
				    unsigned long *run_refs)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_extent_op *extent_op;
	struct btrfs_delayed_ref_node *ref;
	int must_insert_reserved = 0;
	int ret;

	delayed_refs = &trans->transaction->delayed_refs;

	lockdep_assert_held(&locked_ref->mutex);
	lockdep_assert_held(&locked_ref->lock);

	while ((ref = select_delayed_ref(locked_ref))) {
		if (ref->seq &&
		    btrfs_check_delayed_seq(fs_info, ref->seq)) {
			spin_unlock(&locked_ref->lock);
			unselect_delayed_ref_head(delayed_refs, locked_ref);
			return -EAGAIN;
		}

		(*run_refs)++;
		ref->in_tree = 0;
		rb_erase_cached(&ref->ref_node, &locked_ref->ref_tree);
		RB_CLEAR_NODE(&ref->ref_node);
		if (!list_empty(&ref->add_list))
			list_del(&ref->add_list);
		/*
		 * When we play the delayed ref, also correct the ref_mod on
		 * head
		 */
		switch (ref->action) {
		case BTRFS_ADD_DELAYED_REF:
		case BTRFS_ADD_DELAYED_EXTENT:
			locked_ref->ref_mod -= ref->ref_mod;
			break;
		case BTRFS_DROP_DELAYED_REF:
			locked_ref->ref_mod += ref->ref_mod;
			break;
		default:
			WARN_ON(1);
		}
		atomic_dec(&delayed_refs->num_entries);

		/*
		 * Record the must_insert_reserved flag before we drop the
		 * spin lock.
		 */
		must_insert_reserved = locked_ref->must_insert_reserved;
		locked_ref->must_insert_reserved = 0;

		extent_op = locked_ref->extent_op;
		locked_ref->extent_op = NULL;
		spin_unlock(&locked_ref->lock);

		ret = run_one_delayed_ref(trans, ref, extent_op,
					  must_insert_reserved);

		btrfs_free_delayed_extent_op(extent_op);
		if (ret) {
			unselect_delayed_ref_head(delayed_refs, locked_ref);
			btrfs_put_delayed_ref(ref);
			btrfs_debug(fs_info, "run_one_delayed_ref returned %d",
				    ret);
			return ret;
		}

		btrfs_put_delayed_ref(ref);
		cond_resched();

		spin_lock(&locked_ref->lock);
		btrfs_merge_delayed_refs(trans, delayed_refs, locked_ref);
	}

	return 0;
}
/*
 * Returns 0 on success or if called with an already aborted transaction.
 * Returns -ENOMEM or -EIO on failure and will abort the transaction.
 */
static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
					     unsigned long nr)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_head *locked_ref = NULL;
	ktime_t start = ktime_get();
	int ret;
	unsigned long count = 0;
	unsigned long actual_count = 0;

	delayed_refs = &trans->transaction->delayed_refs;
	do {
		if (!locked_ref) {
			locked_ref = btrfs_obtain_ref_head(trans);
			if (IS_ERR_OR_NULL(locked_ref)) {
				if (PTR_ERR(locked_ref) == -EAGAIN) {
					continue;
				} else {
					break;
				}
			}
			count++;
		}
		/*
		 * We need to try and merge add/drops of the same ref since we
		 * can run into issues with relocate dropping the implicit ref
		 * and then it being added back again before the drop can
		 * finish. If we merged anything we need to re-loop so we can
		 * get a good ref.
		 * Or we can get node references of the same type that weren't
		 * merged when created due to bumps in the tree mod seq, and
		 * we need to merge them to prevent adding an inline extent
		 * backref before dropping it (triggering a BUG_ON at
		 * insert_inline_extent_backref()).
		 */
		spin_lock(&locked_ref->lock);
		btrfs_merge_delayed_refs(trans, delayed_refs, locked_ref);

		ret = btrfs_run_delayed_refs_for_head(trans, locked_ref,
						      &actual_count);
		if (ret < 0 && ret != -EAGAIN) {
			/*
			 * Error, btrfs_run_delayed_refs_for_head already
			 * unlocked everything so just bail out
			 */
			return ret;
		} else if (!ret) {
			/*
			 * Success, perform the usual cleanup of a processed
			 * head
			 */
			ret = cleanup_ref_head(trans, locked_ref);
			if (ret > 0) {
				/* We dropped our lock, we need to loop. */
				ret = 0;
				continue;
			} else if (ret) {
				return ret;
			}
		}

		/*
		 * Either success case or btrfs_run_delayed_refs_for_head
		 * returned -EAGAIN, meaning we need to select another head
		 */

		locked_ref = NULL;
		cond_resched();
	} while ((nr != -1 && count < nr) || locked_ref);

	/*
	 * We don't want to include ref heads since we can have empty ref heads
	 * and those will drastically skew our runtime down since we just do
	 * accounting, no actual extent tree updates.
	 */
	if (actual_count > 0) {
		u64 runtime = ktime_to_ns(ktime_sub(ktime_get(), start));
		u64 avg;

		/*
		 * We weigh the current average higher than our current runtime
		 * to avoid large swings in the average.
		 */
		spin_lock(&delayed_refs->lock);
		avg = fs_info->avg_delayed_ref_runtime * 3 + runtime;
		fs_info->avg_delayed_ref_runtime = avg >> 2;	/* div by 4 */
		spin_unlock(&delayed_refs->lock);
	}
	return 0;
}
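
/*
 * The average computed above is an exponential moving average that weighs
 * the old value at 3/4: avg' = (3 * avg + runtime) / 4.
 */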
#ifdef SCRAMBLE_DELAYED_REFS
/*
 * Normally delayed refs get processed in ascending bytenr order. This
 * correlates in most cases to the order added. To expose dependencies on this
 * order, we start to process the tree in the middle instead of the beginning
 */
static u64 find_middle(struct rb_root *root)
{
	struct rb_node *n = root->rb_node;
	struct btrfs_delayed_ref_node *entry;
	int alt = 1;
	u64 middle = 0;
	u64 first = 0, last = 0;

	n = rb_first(root);
	if (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
		first = entry->bytenr;
	}
	n = rb_last(root);
	if (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
		last = entry->bytenr;
	}
	n = root->rb_node;

	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
		WARN_ON(!entry->in_tree);

		middle = entry->bytenr;

		if (alt)
			n = n->rb_left;
		else
			n = n->rb_right;

		alt = 1 - alt;
	}
	return middle;
}
#endif
static inline u64 heads_to_leaves(struct btrfs_fs_info *fs_info, u64 heads)
{
	u64 num_bytes;

	num_bytes = heads * (sizeof(struct btrfs_extent_item) +
			     sizeof(struct btrfs_extent_inline_ref));
	if (!btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		num_bytes += heads * sizeof(struct btrfs_tree_block_info);

	/*
	 * We don't ever fill up leaves all the way so multiply by 2 just to be
	 * closer to what we're really going to want to use.
	 */
	return div_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(fs_info));
}
/*
 * Takes the number of bytes to be checksummed and figures out how many leaves
 * it would require to store the csums for that many bytes.
 */
u64 btrfs_csum_bytes_to_leaves(struct btrfs_fs_info *fs_info, u64 csum_bytes)
{
	u64 csum_size;
	u64 num_csums_per_leaf;
	u64 num_csums;

	csum_size = BTRFS_MAX_ITEM_SIZE(fs_info);
	num_csums_per_leaf = div64_u64(csum_size,
			(u64)btrfs_super_csum_size(fs_info->super_copy));
	num_csums = div64_u64(csum_bytes, fs_info->sectorsize);
	num_csums += num_csums_per_leaf - 1;
	num_csums = div64_u64(num_csums, num_csums_per_leaf);
	return num_csums;
}
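
/*
 * Worked example (illustrative numbers, assuming 4K sectors and 4-byte
 * crc32c csums): a max-sized leaf item holds roughly 16k/4 ~= 4000 csums,
 * covering about 16MiB of data, so csum_bytes of 1MiB rounds up to a
 * single leaf.
 */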
/*
 * this starts processing the delayed reference count updates and
 * extent insertions we have queued up so far. count can be
 * 0, which means to process everything in the tree at the start
 * of the run (but not newly added entries), or it can be some target
 * number you'd like to process.
 *
 * Returns 0 on success or if called with an aborted transaction
 * Returns <0 on error and aborts the transaction
 */
int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
			   unsigned long count)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct rb_node *node;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_head *head;
	int ret;
	int run_all = count == (unsigned long)-1;

	/* We'll clean this up in btrfs_cleanup_transaction */
	if (TRANS_ABORTED(trans))
		return 0;

	if (test_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags))
		return 0;

	delayed_refs = &trans->transaction->delayed_refs;
	if (count == 0)
		count = atomic_read(&delayed_refs->num_entries) * 2;

again:
#ifdef SCRAMBLE_DELAYED_REFS
	delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
#endif
	ret = __btrfs_run_delayed_refs(trans, count);
	if (ret < 0) {
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	if (run_all) {
		btrfs_create_pending_block_groups(trans);

		spin_lock(&delayed_refs->lock);
		node = rb_first_cached(&delayed_refs->href_root);
		if (!node) {
			spin_unlock(&delayed_refs->lock);
			goto out;
		}
		head = rb_entry(node, struct btrfs_delayed_ref_head,
				href_node);
		refcount_inc(&head->refs);
		spin_unlock(&delayed_refs->lock);

		/* Mutex was contended, block until it's released and retry. */
		mutex_lock(&head->mutex);
		mutex_unlock(&head->mutex);

		btrfs_put_delayed_ref_head(head);
		cond_resched();
		goto again;
	}
out:
	return 0;
}
int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
				struct extent_buffer *eb, u64 flags,
				int level, int is_data)
{
	struct btrfs_delayed_extent_op *extent_op;
	int ret;

	extent_op = btrfs_alloc_delayed_extent_op();
	if (!extent_op)
		return -ENOMEM;

	extent_op->flags_to_set = flags;
	extent_op->update_flags = true;
	extent_op->update_key = false;
	extent_op->is_data = is_data ? true : false;
	extent_op->level = level;

	ret = btrfs_add_delayed_extent_op(trans, eb->start, eb->len, extent_op);
	if (ret)
		btrfs_free_delayed_extent_op(extent_op);
	return ret;
}

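/*
 * Call sketch (an assumption about a typical caller, not taken from this
 * file): the COW machinery uses this helper to flag a shared tree block
 * with full backrefs, queueing the flag update as a delayed extent op:
 *
 *	ret = btrfs_set_disk_extent_flags(trans, buf,
 *					  BTRFS_BLOCK_FLAG_FULL_BACKREF,
 *					  btrfs_header_level(buf), 0);
 */
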
static noinline int check_delayed_ref(struct btrfs_root *root,
				      struct btrfs_path *path,
				      u64 objectid, u64 offset, u64 bytenr)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_data_ref *data_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_transaction *cur_trans;
	struct rb_node *node;
	int ret = 0;

	spin_lock(&root->fs_info->trans_lock);
	cur_trans = root->fs_info->running_transaction;
	if (cur_trans)
		refcount_inc(&cur_trans->use_count);
	spin_unlock(&root->fs_info->trans_lock);
	if (!cur_trans)
		return 0;

	delayed_refs = &cur_trans->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
	if (!head) {
		spin_unlock(&delayed_refs->lock);
		btrfs_put_transaction(cur_trans);
		return 0;
	}

	if (!mutex_trylock(&head->mutex)) {
		refcount_inc(&head->refs);
		spin_unlock(&delayed_refs->lock);

		btrfs_release_path(path);

		/*
		 * Mutex was contended, block until it's released and let
		 * caller try again
		 */
		mutex_lock(&head->mutex);
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref_head(head);
		btrfs_put_transaction(cur_trans);
		return -EAGAIN;
	}
	spin_unlock(&delayed_refs->lock);

	spin_lock(&head->lock);
	/*
	 * XXX: We should replace this with a proper search function in the
	 * future.
	 */
	for (node = rb_first_cached(&head->ref_tree); node;
	     node = rb_next(node)) {
		ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
		/* If it's a shared ref we know a cross reference exists */
		if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
			ret = 1;
			break;
		}

		data_ref = btrfs_delayed_node_to_data_ref(ref);

		/*
		 * If our ref doesn't match the one we're currently looking at
		 * then we have a cross reference.
		 */
		if (data_ref->root != root->root_key.objectid ||
		    data_ref->objectid != objectid ||
		    data_ref->offset != offset) {
			ret = 1;
			break;
		}
	}
	spin_unlock(&head->lock);
	mutex_unlock(&head->mutex);
	btrfs_put_transaction(cur_trans);
	return ret;
}

static noinline int check_committed_ref(struct btrfs_root *root,
					struct btrfs_path *path,
					u64 objectid, u64 offset, u64 bytenr)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *extent_root = fs_info->extent_root;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref;
	struct btrfs_extent_inline_ref *iref;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;
	u32 item_size;
	int type;
	int ret;

	key.objectid = bytenr;
	key.offset = (u64)-1;
	key.type = BTRFS_EXTENT_ITEM_KEY;

	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0); /* Corruption */

	ret = -ENOENT;
	if (path->slots[0] == 0)
		goto out;

	path->slots[0]--;
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
		goto out;

	ret = 1;
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);

	/* If extent item has more than 1 inline ref then it's shared */
	if (item_size != sizeof(*ei) +
	    btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
		goto out;

	/* If extent created before last snapshot => it's definitely shared */
	if (btrfs_extent_generation(leaf, ei) <=
	    btrfs_root_last_snapshot(&root->root_item))
		goto out;

	iref = (struct btrfs_extent_inline_ref *)(ei + 1);

	/* If this extent has SHARED_DATA_REF then it's shared */
	type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_DATA);
	if (type != BTRFS_EXTENT_DATA_REF_KEY)
		goto out;

	ref = (struct btrfs_extent_data_ref *)(&iref->offset);
	if (btrfs_extent_refs(leaf, ei) !=
	    btrfs_extent_data_ref_count(leaf, ref) ||
	    btrfs_extent_data_ref_root(leaf, ref) !=
	    root->root_key.objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		goto out;

	ret = 0;
out:
	return ret;
}

int btrfs_cross_ref_exist(struct btrfs_root *root, u64 objectid, u64 offset,
			  u64 bytenr)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	do {
		ret = check_committed_ref(root, path, objectid,
					  offset, bytenr);
		if (ret && ret != -ENOENT)
			goto out;

		ret = check_delayed_ref(root, path, objectid, offset, bytenr);
	} while (ret == -EAGAIN);

out:
	btrfs_free_path(path);
	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
		WARN_ON(ret > 0);
	return ret;
}

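/*
 * Interpretation sketch (hedged): a return of 0 means no other root, inode
 * or offset references bytenr, so a NOCOW writer may safely overwrite it in
 * place; ret > 0 means a cross reference was found (committed or delayed)
 * and the caller must fall back to COW, e.g.:
 *
 *	if (btrfs_cross_ref_exist(root, ino, file_offset, disk_bytenr))
 *		... cow the range instead of writing in place ...
 */
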
static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct extent_buffer *buf,
			   int full_backref, int inc)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 bytenr;
	u64 num_bytes;
	u64 parent;
	u64 ref_root;
	u32 nritems;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	struct btrfs_ref generic_ref = { 0 };
	bool for_reloc = btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC);
	int i;
	int action;
	int level;
	int ret = 0;

	if (btrfs_is_testing(fs_info))
		return 0;

	ref_root = btrfs_header_owner(buf);
	nritems = btrfs_header_nritems(buf);
	level = btrfs_header_level(buf);

	if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state) && level == 0)
		return 0;

	if (full_backref)
		parent = buf->start;
	else
		parent = 0;
	if (inc)
		action = BTRFS_ADD_DELAYED_REF;
	else
		action = BTRFS_DROP_DELAYED_REF;

	for (i = 0; i < nritems; i++) {
		if (level == 0) {
			btrfs_item_key_to_cpu(buf, &key, i);
			if (key.type != BTRFS_EXTENT_DATA_KEY)
				continue;
			fi = btrfs_item_ptr(buf, i,
					    struct btrfs_file_extent_item);
			if (btrfs_file_extent_type(buf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE)
				continue;
			bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
			if (bytenr == 0)
				continue;

			num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
			key.offset -= btrfs_file_extent_offset(buf, fi);
			btrfs_init_generic_ref(&generic_ref, action, bytenr,
					       num_bytes, parent);
			generic_ref.real_root = root->root_key.objectid;
			btrfs_init_data_ref(&generic_ref, ref_root, key.objectid,
					    key.offset);
			generic_ref.skip_qgroup = for_reloc;
			if (inc)
				ret = btrfs_inc_extent_ref(trans, &generic_ref);
			else
				ret = btrfs_free_extent(trans, &generic_ref);
			if (ret)
				goto fail;
		} else {
			bytenr = btrfs_node_blockptr(buf, i);
			num_bytes = fs_info->nodesize;
			btrfs_init_generic_ref(&generic_ref, action, bytenr,
					       num_bytes, parent);
			generic_ref.real_root = root->root_key.objectid;
			btrfs_init_tree_ref(&generic_ref, level - 1, ref_root);
			generic_ref.skip_qgroup = for_reloc;
			if (inc)
				ret = btrfs_inc_extent_ref(trans, &generic_ref);
			else
				ret = btrfs_free_extent(trans, &generic_ref);
			if (ret)
				goto fail;
		}
	}
	return 0;
fail:
	return ret;
}

int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		  struct extent_buffer *buf, int full_backref)
{
	return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
}

int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		  struct extent_buffer *buf, int full_backref)
{
	return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
}

int btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_block_group *block_group;
	int readonly = 0;

	block_group = btrfs_lookup_block_group(fs_info, bytenr);
	if (!block_group || block_group->ro)
		readonly = 1;
	if (block_group)
		btrfs_put_block_group(block_group);
	return readonly;
}

static u64 get_alloc_profile_by_root(struct btrfs_root *root, int data)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 flags;
	u64 ret;

	if (data)
		flags = BTRFS_BLOCK_GROUP_DATA;
	else if (root == fs_info->chunk_root)
		flags = BTRFS_BLOCK_GROUP_SYSTEM;
	else
		flags = BTRFS_BLOCK_GROUP_METADATA;

	ret = btrfs_get_alloc_profile(fs_info, flags);
	return ret;
}

static u64 first_logical_byte(struct btrfs_fs_info *fs_info, u64 search_start)
{
	struct btrfs_block_group *cache;
	u64 bytenr;

	spin_lock(&fs_info->block_group_cache_lock);
	bytenr = fs_info->first_logical_byte;
	spin_unlock(&fs_info->block_group_cache_lock);

	if (bytenr < (u64)-1)
		return bytenr;

	cache = btrfs_lookup_first_block_group(fs_info, search_start);
	if (!cache)
		return 0;

	bytenr = cache->start;
	btrfs_put_block_group(cache);

	return bytenr;
}

static int pin_down_extent(struct btrfs_trans_handle *trans,
			   struct btrfs_block_group *cache,
			   u64 bytenr, u64 num_bytes, int reserved)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;

	spin_lock(&cache->space_info->lock);
	spin_lock(&cache->lock);
	cache->pinned += num_bytes;
	btrfs_space_info_update_bytes_pinned(fs_info, cache->space_info,
					     num_bytes);
	if (reserved) {
		cache->reserved -= num_bytes;
		cache->space_info->bytes_reserved -= num_bytes;
	}
	spin_unlock(&cache->lock);
	spin_unlock(&cache->space_info->lock);

	percpu_counter_add_batch(&cache->space_info->total_bytes_pinned,
		    num_bytes, BTRFS_TOTAL_BYTES_PINNED_BATCH);
	set_extent_dirty(&trans->transaction->pinned_extents, bytenr,
			 bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
	return 0;
}

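/*
 * Accounting sketch (hedged): pinning N bytes that were reserved moves them
 * from the "reserved" bucket to the "pinned" bucket of both the block group
 * and its space_info, e.g. with reserved == 1:
 *
 *	cache->pinned              += N;
 *	cache->reserved            -= N;
 *	space_info->bytes_reserved -= N;
 *
 * The bytes only return to the free space pool once the transaction commits
 * and unpin_extent_range() walks the pinned_extents tree.
 */
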
int btrfs_pin_extent(struct btrfs_trans_handle *trans,
		     u64 bytenr, u64 num_bytes, int reserved)
{
	struct btrfs_block_group *cache;

	cache = btrfs_lookup_block_group(trans->fs_info, bytenr);
	BUG_ON(!cache); /* Logic error */

	pin_down_extent(trans, cache, bytenr, num_bytes, reserved);

	btrfs_put_block_group(cache);
	return 0;
}

/*
 * this function must be called within transaction
 */
int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans,
				    u64 bytenr, u64 num_bytes)
{
	struct btrfs_block_group *cache;
	int ret;

	btrfs_add_excluded_extent(trans->fs_info, bytenr, num_bytes);

	cache = btrfs_lookup_block_group(trans->fs_info, bytenr);
	if (!cache)
		return -EINVAL;

	/*
	 * pull in the free space cache (if any) so that our pin
	 * removes the free space from the cache.  We have load_only set
	 * to one because the slow code to read in the free extents does check
	 * the pinned extents.
	 */
	btrfs_cache_block_group(cache, 1);

	pin_down_extent(trans, cache, bytenr, num_bytes, 0);

	/* remove us from the free space cache (if we're there at all) */
	ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
	btrfs_put_block_group(cache);
	return ret;
}

static int __exclude_logged_extent(struct btrfs_fs_info *fs_info,
				   u64 start, u64 num_bytes)
{
	int ret;
	struct btrfs_block_group *block_group;
	struct btrfs_caching_control *caching_ctl;

	block_group = btrfs_lookup_block_group(fs_info, start);
	if (!block_group)
		return -EINVAL;

	btrfs_cache_block_group(block_group, 0);
	caching_ctl = btrfs_get_caching_control(block_group);

	if (!caching_ctl) {
		/* Logic error */
		BUG_ON(!btrfs_block_group_done(block_group));
		ret = btrfs_remove_free_space(block_group, start, num_bytes);
	} else {
		mutex_lock(&caching_ctl->mutex);

		if (start >= caching_ctl->progress) {
			ret = btrfs_add_excluded_extent(fs_info, start,
							num_bytes);
		} else if (start + num_bytes <= caching_ctl->progress) {
			ret = btrfs_remove_free_space(block_group,
						      start, num_bytes);
		} else {
			num_bytes = caching_ctl->progress - start;
			ret = btrfs_remove_free_space(block_group,
						      start, num_bytes);
			if (ret)
				goto out_lock;

			num_bytes = (start + num_bytes) -
				caching_ctl->progress;
			start = caching_ctl->progress;
			ret = btrfs_add_excluded_extent(fs_info, start,
							num_bytes);
		}
out_lock:
		mutex_unlock(&caching_ctl->mutex);
		btrfs_put_caching_control(caching_ctl);
	}
	btrfs_put_block_group(block_group);
	return ret;
}

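/*
 * The three cases above, on a number line (a sketch with made-up numbers,
 * caching progress == 1000): an extent at [1200, 1300) lies wholly ahead of
 * the caching cursor and is only marked excluded; [800, 900) lies wholly
 * behind it and is only removed from the free space cache; [900, 1100)
 * straddles it, so the portion behind the cursor is removed from free space
 * and the remainder is handed to btrfs_add_excluded_extent() for the cacher
 * to skip.
 */
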
int btrfs_exclude_logged_extents(struct extent_buffer *eb)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	struct btrfs_file_extent_item *item;
	struct btrfs_key key;
	int found_type;
	int i;
	int ret = 0;

	if (!btrfs_fs_incompat(fs_info, MIXED_GROUPS))
		return 0;

	for (i = 0; i < btrfs_header_nritems(eb); i++) {
		btrfs_item_key_to_cpu(eb, &key, i);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
		found_type = btrfs_file_extent_type(eb, item);
		if (found_type == BTRFS_FILE_EXTENT_INLINE)
			continue;
		if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
			continue;
		key.objectid = btrfs_file_extent_disk_bytenr(eb, item);
		key.offset = btrfs_file_extent_disk_num_bytes(eb, item);
		ret = __exclude_logged_extent(fs_info, key.objectid, key.offset);
		if (ret)
			break;
	}

	return ret;
}

void btrfs_inc_block_group_reservations(struct btrfs_block_group *bg)
{
	atomic_inc(&bg->reservations);
}

void btrfs_prepare_extent_commit(struct btrfs_fs_info *fs_info)
{
	struct btrfs_caching_control *next;
	struct btrfs_caching_control *caching_ctl;
	struct btrfs_block_group *cache;

	down_write(&fs_info->commit_root_sem);

	list_for_each_entry_safe(caching_ctl, next,
				 &fs_info->caching_block_groups, list) {
		cache = caching_ctl->block_group;
		if (btrfs_block_group_done(cache)) {
			cache->last_byte_to_unpin = (u64)-1;
			list_del_init(&caching_ctl->list);
			btrfs_put_caching_control(caching_ctl);
		} else {
			cache->last_byte_to_unpin = caching_ctl->progress;
		}
	}

	up_write(&fs_info->commit_root_sem);

	btrfs_update_global_block_rsv(fs_info);
}

/*
 * Returns the free cluster for the given space info and sets empty_cluster to
 * what it should be based on the mount options.
 */
static struct btrfs_free_cluster *
fetch_cluster_info(struct btrfs_fs_info *fs_info,
		   struct btrfs_space_info *space_info, u64 *empty_cluster)
{
	struct btrfs_free_cluster *ret = NULL;

	*empty_cluster = 0;
	if (btrfs_mixed_space_info(space_info))
		return ret;

	if (space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
		ret = &fs_info->meta_alloc_cluster;
		if (btrfs_test_opt(fs_info, SSD))
			*empty_cluster = SZ_2M;
		else
			*empty_cluster = SZ_64K;
	} else if ((space_info->flags & BTRFS_BLOCK_GROUP_DATA) &&
		   btrfs_test_opt(fs_info, SSD_SPREAD)) {
		*empty_cluster = SZ_2M;
		ret = &fs_info->data_alloc_cluster;
	}

	return ret;
}

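/*
 * Resulting empty_cluster values (a summary of the branches above, assuming
 * the usual SZ_* constants): metadata gets 2M on SSD and 64K otherwise;
 * data gets 2M only when ssd_spread is enabled; mixed block groups never
 * use a cluster, so *empty_cluster stays 0 and NULL is returned.
 */
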
static int unpin_extent_range(struct btrfs_fs_info *fs_info,
			      u64 start, u64 end,
			      const bool return_free_space)
{
	struct btrfs_block_group *cache = NULL;
	struct btrfs_space_info *space_info;
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	struct btrfs_free_cluster *cluster = NULL;
	u64 len;
	u64 total_unpinned = 0;
	u64 empty_cluster = 0;
	bool readonly;

	while (start <= end) {
		readonly = false;
		if (!cache ||
		    start >= cache->start + cache->length) {
			if (cache)
				btrfs_put_block_group(cache);
			total_unpinned = 0;
			cache = btrfs_lookup_block_group(fs_info, start);
			BUG_ON(!cache); /* Logic error */

			cluster = fetch_cluster_info(fs_info,
						     cache->space_info,
						     &empty_cluster);
			empty_cluster <<= 1;
		}

		len = cache->start + cache->length - start;
		len = min(len, end + 1 - start);

		if (start < cache->last_byte_to_unpin) {
			len = min(len, cache->last_byte_to_unpin - start);
			if (return_free_space)
				btrfs_add_free_space(cache, start, len);
		}

		start += len;
		total_unpinned += len;
		space_info = cache->space_info;

		/*
		 * If this space cluster has been marked as fragmented and we've
		 * unpinned enough in this block group to potentially allow a
		 * cluster to be created inside of it go ahead and clear the
		 * fragmented check.
		 */
		if (cluster && cluster->fragmented &&
		    total_unpinned > empty_cluster) {
			spin_lock(&cluster->lock);
			cluster->fragmented = 0;
			spin_unlock(&cluster->lock);
		}

		spin_lock(&space_info->lock);
		spin_lock(&cache->lock);
		cache->pinned -= len;
		btrfs_space_info_update_bytes_pinned(fs_info, space_info, -len);
		space_info->max_extent_size = 0;
		percpu_counter_add_batch(&space_info->total_bytes_pinned,
			    -len, BTRFS_TOTAL_BYTES_PINNED_BATCH);
		if (cache->ro) {
			space_info->bytes_readonly += len;
			readonly = true;
		}
		spin_unlock(&cache->lock);
		if (!readonly && return_free_space &&
		    global_rsv->space_info == space_info) {
			u64 to_add = len;

			spin_lock(&global_rsv->lock);
			if (!global_rsv->full) {
				to_add = min(len, global_rsv->size -
					     global_rsv->reserved);
				global_rsv->reserved += to_add;
				btrfs_space_info_update_bytes_may_use(fs_info,
						space_info, to_add);
				if (global_rsv->reserved >= global_rsv->size)
					global_rsv->full = 1;
				len -= to_add;
			}
			spin_unlock(&global_rsv->lock);
			/* Add to any tickets we may have */
			if (len)
				btrfs_try_granting_tickets(fs_info,
							   space_info);
		}
		spin_unlock(&space_info->lock);
	}

	if (cache)
		btrfs_put_block_group(cache);
	return 0;
}

int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_block_group *block_group, *tmp;
	struct list_head *deleted_bgs;
	struct extent_io_tree *unpin;
	u64 start;
	u64 end;
	int ret;

	unpin = &trans->transaction->pinned_extents;

	while (!TRANS_ABORTED(trans)) {
		struct extent_state *cached_state = NULL;

		mutex_lock(&fs_info->unused_bg_unpin_mutex);
		ret = find_first_extent_bit(unpin, 0, &start, &end,
					    EXTENT_DIRTY, &cached_state);
		if (ret) {
			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
			break;
		}
		if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
			clear_extent_bits(&fs_info->excluded_extents, start,
					  end, EXTENT_UPTODATE);

		if (btrfs_test_opt(fs_info, DISCARD_SYNC))
			ret = btrfs_discard_extent(fs_info, start,
						   end + 1 - start, NULL);

		clear_extent_dirty(unpin, start, end, &cached_state);
		unpin_extent_range(fs_info, start, end, true);
		mutex_unlock(&fs_info->unused_bg_unpin_mutex);
		free_extent_state(cached_state);
		cond_resched();
	}

	if (btrfs_test_opt(fs_info, DISCARD_ASYNC)) {
		btrfs_discard_calc_delay(&fs_info->discard_ctl);
		btrfs_discard_schedule_work(&fs_info->discard_ctl, true);
	}

	/*
	 * Transaction is finished.  We don't need the lock anymore.  We
	 * do need to clean up the block groups in case of a transaction
	 * abort.
	 */
	deleted_bgs = &trans->transaction->deleted_bgs;
	list_for_each_entry_safe(block_group, tmp, deleted_bgs, bg_list) {
		u64 trimmed = 0;

		ret = -EROFS;
		if (!TRANS_ABORTED(trans))
			ret = btrfs_discard_extent(fs_info,
						   block_group->start,
						   block_group->length,
						   &trimmed);

		list_del_init(&block_group->bg_list);
		btrfs_put_block_group_trimming(block_group);
		btrfs_put_block_group(block_group);

		if (ret) {
			const char *errstr = btrfs_decode_error(ret);
			btrfs_warn(fs_info,
			   "discard failed while removing blockgroup: errno=%d %s",
				   ret, errstr);
		}
	}

	return 0;
}

static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
			       struct btrfs_delayed_ref_node *node, u64 parent,
			       u64 root_objectid, u64 owner_objectid,
			       u64 owner_offset, int refs_to_drop,
			       struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_fs_info *info = trans->fs_info;
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_root *extent_root = info->extent_root;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	int ret;
	int is_data;
	int extent_slot = 0;
	int found_extent = 0;
	int num_to_del = 1;
	u32 item_size;
	u64 refs;
	u64 bytenr = node->bytenr;
	u64 num_bytes = node->num_bytes;
	int last_ref = 0;
	bool skinny_metadata = btrfs_fs_incompat(info, SKINNY_METADATA);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;

	is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
	BUG_ON(!is_data && refs_to_drop != 1);

	if (is_data)
		skinny_metadata = false;

	ret = lookup_extent_backref(trans, path, &iref, bytenr, num_bytes,
				    parent, root_objectid, owner_objectid,
				    owner_offset);
	if (ret == 0) {
		extent_slot = path->slots[0];
		while (extent_slot >= 0) {
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      extent_slot);
			if (key.objectid != bytenr)
				break;
			if (key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == num_bytes) {
				found_extent = 1;
				break;
			}
			if (key.type == BTRFS_METADATA_ITEM_KEY &&
			    key.offset == owner_objectid) {
				found_extent = 1;
				break;
			}
			if (path->slots[0] - extent_slot > 5)
				break;
			extent_slot--;
		}

		if (!found_extent) {
			BUG_ON(iref);
			ret = remove_extent_backref(trans, path, NULL,
						    refs_to_drop,
						    is_data, &last_ref);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				goto out;
			}
			btrfs_release_path(path);
			path->leave_spinning = 1;

			key.objectid = bytenr;
			key.type = BTRFS_EXTENT_ITEM_KEY;
			key.offset = num_bytes;

			if (!is_data && skinny_metadata) {
				key.type = BTRFS_METADATA_ITEM_KEY;
				key.offset = owner_objectid;
			}

			ret = btrfs_search_slot(trans, extent_root,
						&key, path, -1, 1);
			if (ret > 0 && skinny_metadata && path->slots[0]) {
				/*
				 * Couldn't find our skinny metadata item,
				 * see if we have ye olde extent item.
				 */
				path->slots[0]--;
				btrfs_item_key_to_cpu(path->nodes[0], &key,
						      path->slots[0]);
				if (key.objectid == bytenr &&
				    key.type == BTRFS_EXTENT_ITEM_KEY &&
				    key.offset == num_bytes)
					ret = 0;
			}

			if (ret > 0 && skinny_metadata) {
				skinny_metadata = false;
				key.objectid = bytenr;
				key.type = BTRFS_EXTENT_ITEM_KEY;
				key.offset = num_bytes;
				btrfs_release_path(path);
				ret = btrfs_search_slot(trans, extent_root,
							&key, path, -1, 1);
			}

			if (ret) {
				btrfs_err(info,
					  "umm, got %d back from search, was looking for %llu",
					  ret, bytenr);
				if (ret > 0)
					btrfs_print_leaf(path->nodes[0]);
			}
			if (ret < 0) {
				btrfs_abort_transaction(trans, ret);
				goto out;
			}
			extent_slot = path->slots[0];
		}
	} else if (WARN_ON(ret == -ENOENT)) {
		btrfs_print_leaf(path->nodes[0]);
		btrfs_err(info,
			"unable to find ref byte nr %llu parent %llu root %llu owner %llu offset %llu",
			bytenr, parent, root_objectid, owner_objectid,
			owner_offset);
		btrfs_abort_transaction(trans, ret);
		goto out;
	} else {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, extent_slot);
	if (unlikely(item_size < sizeof(*ei))) {
		ret = -EINVAL;
		btrfs_print_v0_err(info);
		btrfs_abort_transaction(trans, ret);
		goto out;
	}
	ei = btrfs_item_ptr(leaf, extent_slot,
			    struct btrfs_extent_item);
	if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
	    key.type == BTRFS_EXTENT_ITEM_KEY) {
		struct btrfs_tree_block_info *bi;
		BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
	}

	refs = btrfs_extent_refs(leaf, ei);
	if (refs < refs_to_drop) {
		btrfs_err(info,
			  "trying to drop %d refs but we only have %Lu for bytenr %Lu",
			  refs_to_drop, refs, bytenr);
		ret = -EINVAL;
		btrfs_abort_transaction(trans, ret);
		goto out;
	}
	refs -= refs_to_drop;

	if (refs > 0) {
		if (extent_op)
			__run_delayed_extent_op(extent_op, leaf, ei);
		/*
		 * In the case of inline back ref, reference count will
		 * be updated by remove_extent_backref
		 */
		if (iref) {
			BUG_ON(!found_extent);
		} else {
			btrfs_set_extent_refs(leaf, ei, refs);
			btrfs_mark_buffer_dirty(leaf);
		}
		if (found_extent) {
			ret = remove_extent_backref(trans, path, iref,
						    refs_to_drop, is_data,
						    &last_ref);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				goto out;
			}
		}
	} else {
		if (found_extent) {
			BUG_ON(is_data && refs_to_drop !=
			       extent_data_ref_count(path, iref));
			if (iref) {
				BUG_ON(path->slots[0] != extent_slot);
			} else {
				BUG_ON(path->slots[0] != extent_slot + 1);
				path->slots[0] = extent_slot;
				num_to_del = 2;
			}
		}

		last_ref = 1;
		ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
				      num_to_del);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
		btrfs_release_path(path);

		if (is_data) {
			ret = btrfs_del_csums(trans, info->csum_root, bytenr,
					      num_bytes);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				goto out;
			}
		}

		ret = add_to_free_space_tree(trans, bytenr, num_bytes);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		ret = btrfs_update_block_group(trans, bytenr, num_bytes, 0);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}
	btrfs_release_path(path);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * when we free an block, it is possible (and likely) that we free the last
 * delayed ref for that extent as well.  This searches the delayed ref tree for
 * a given extent, and if there are no other delayed refs to be processed, it
 * removes it from the tree.
 */
static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
				      u64 bytenr)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	int ret = 0;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
	if (!head)
		goto out_delayed_unlock;

	spin_lock(&head->lock);
	if (!RB_EMPTY_ROOT(&head->ref_tree.rb_root))
		goto out;

	if (cleanup_extent_op(head) != NULL)
		goto out;

	/*
	 * waiting for the lock here would deadlock.  If someone else has it
	 * locked they are already in the process of dropping it anyway
	 */
	if (!mutex_trylock(&head->mutex))
		goto out;

	btrfs_delete_ref_head(delayed_refs, head);
	head->processing = 0;

	spin_unlock(&head->lock);
	spin_unlock(&delayed_refs->lock);

	BUG_ON(head->extent_op);
	if (head->must_insert_reserved)
		ret = 1;

	btrfs_cleanup_ref_head_accounting(trans->fs_info, delayed_refs, head);
	mutex_unlock(&head->mutex);
	btrfs_put_delayed_ref_head(head);
	return ret;
out:
	spin_unlock(&head->lock);

out_delayed_unlock:
	spin_unlock(&delayed_refs->lock);
	return 0;
}

void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct extent_buffer *buf,
			   u64 parent, int last_ref)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_ref generic_ref = { 0 };
	int pin = 1;
	int ret;

	btrfs_init_generic_ref(&generic_ref, BTRFS_DROP_DELAYED_REF,
			       buf->start, buf->len, parent);
	btrfs_init_tree_ref(&generic_ref, btrfs_header_level(buf),
			    root->root_key.objectid);

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
		int old_ref_mod, new_ref_mod;

		btrfs_ref_tree_mod(fs_info, &generic_ref);
		ret = btrfs_add_delayed_tree_ref(trans, &generic_ref, NULL,
						 &old_ref_mod, &new_ref_mod);
		BUG_ON(ret); /* -ENOMEM */
		pin = old_ref_mod >= 0 && new_ref_mod < 0;
	}

	if (last_ref && btrfs_header_generation(buf) == trans->transid) {
		struct btrfs_block_group *cache;

		if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
			ret = check_ref_cleanup(trans, buf->start);
			if (!ret)
				goto out;
		}

		pin = 0;
		cache = btrfs_lookup_block_group(fs_info, buf->start);

		if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
			pin_down_extent(trans, cache, buf->start, buf->len, 1);
			btrfs_put_block_group(cache);
			goto out;
		}

		WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));

		btrfs_add_free_space(cache, buf->start, buf->len);
		btrfs_free_reserved_bytes(cache, buf->len, 0);
		btrfs_put_block_group(cache);
		trace_btrfs_reserved_extent_free(fs_info, buf->start, buf->len);
	}
out:
	if (pin)
		add_pinned_bytes(fs_info, &generic_ref);

	if (last_ref) {
		/*
		 * Deleting the buffer, clear the corrupt flag since it doesn't
		 * matter anymore.
		 */
		clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
	}
}

/* Can return -ENOMEM */
int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_ref *ref)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int old_ref_mod, new_ref_mod;
	int ret;

	if (btrfs_is_testing(fs_info))
		return 0;

	/*
	 * tree log blocks never actually go into the extent allocation
	 * tree, just update pinning info and exit early.
	 */
	if ((ref->type == BTRFS_REF_METADATA &&
	     ref->tree_ref.root == BTRFS_TREE_LOG_OBJECTID) ||
	    (ref->type == BTRFS_REF_DATA &&
	     ref->data_ref.ref_root == BTRFS_TREE_LOG_OBJECTID)) {
		/* unlocks the pinned mutex */
		btrfs_pin_extent(trans, ref->bytenr, ref->len, 1);
		old_ref_mod = new_ref_mod = 0;
		ret = 0;
	} else if (ref->type == BTRFS_REF_METADATA) {
		ret = btrfs_add_delayed_tree_ref(trans, ref, NULL,
						 &old_ref_mod, &new_ref_mod);
	} else {
		ret = btrfs_add_delayed_data_ref(trans, ref, 0,
						 &old_ref_mod, &new_ref_mod);
	}

	if (!((ref->type == BTRFS_REF_METADATA &&
	       ref->tree_ref.root == BTRFS_TREE_LOG_OBJECTID) ||
	      (ref->type == BTRFS_REF_DATA &&
	       ref->data_ref.ref_root == BTRFS_TREE_LOG_OBJECTID)))
		btrfs_ref_tree_mod(fs_info, ref);

	if (ret == 0 && old_ref_mod >= 0 && new_ref_mod < 0)
		add_pinned_bytes(fs_info, ref);

	return ret;
}

enum btrfs_loop_type {
	LOOP_CACHING_NOWAIT,
	LOOP_CACHING_WAIT,
	LOOP_ALLOC_CHUNK,
	LOOP_NO_EMPTY_SIZE,
};

static inline void
btrfs_lock_block_group(struct btrfs_block_group *cache,
		       int delalloc)
{
	if (delalloc)
		down_read(&cache->data_rwsem);
}

static inline void btrfs_grab_block_group(struct btrfs_block_group *cache,
					  int delalloc)
{
	btrfs_get_block_group(cache);
	if (delalloc)
		down_read(&cache->data_rwsem);
}

static struct btrfs_block_group *btrfs_lock_cluster(
		   struct btrfs_block_group *block_group,
		   struct btrfs_free_cluster *cluster,
		   int delalloc)
{
	struct btrfs_block_group *used_bg = NULL;

	spin_lock(&cluster->refill_lock);
	while (1) {
		used_bg = cluster->block_group;
		if (!used_bg)
			return NULL;

		if (used_bg == block_group)
			return used_bg;

		btrfs_get_block_group(used_bg);

		if (!delalloc)
			return used_bg;

		if (down_read_trylock(&used_bg->data_rwsem))
			return used_bg;

		spin_unlock(&cluster->refill_lock);

		/* We should only have one-level nested. */
		down_read_nested(&used_bg->data_rwsem, SINGLE_DEPTH_NESTING);

		spin_lock(&cluster->refill_lock);
		if (used_bg == cluster->block_group)
			return used_bg;

		up_read(&used_bg->data_rwsem);
		btrfs_put_block_group(used_bg);
	}
}

static inline void
btrfs_release_block_group(struct btrfs_block_group *cache,
			  int delalloc)
{
	if (delalloc)
		up_read(&cache->data_rwsem);
	btrfs_put_block_group(cache);
}

enum btrfs_extent_allocation_policy {
	BTRFS_EXTENT_ALLOC_CLUSTERED,
};

/*
 * Structure used internally for find_free_extent() function.  Wraps needed
 * parameters.
 */
struct find_free_extent_ctl {
	/* Basic allocation info */
	u64 num_bytes;
	u64 empty_size;
	u64 flags;
	int delalloc;

	/* Where to start the search inside the bg */
	u64 search_start;

	/* For clustered allocation */
	u64 empty_cluster;
	struct btrfs_free_cluster *last_ptr;
	bool use_cluster;

	bool have_caching_bg;
	bool orig_have_caching_bg;

	/* RAID index, converted from flags */
	int index;

	/*
	 * Current loop number, check find_free_extent_update_loop() for details
	 */
	int loop;

	/*
	 * Whether we're refilling a cluster, if true we need to re-search
	 * current block group but don't try to refill the cluster again.
	 */
	bool retry_clustered;

	/*
	 * Whether we're updating free space cache, if true we need to re-search
	 * current block group but don't try updating free space cache again.
	 */
	bool retry_unclustered;

	/* If current block group is cached */
	int cached;

	/* Max contiguous hole found */
	u64 max_extent_size;

	/* Total free space from free space cache, not always contiguous */
	u64 total_free_space;

	/* Found result */
	u64 found_offset;

	/* Hint where to start looking for an empty space */
	u64 hint_byte;

	/* Allocation policy */
	enum btrfs_extent_allocation_policy policy;
};

/*
 * Helper function for find_free_extent().
 *
 * Return -ENOENT to inform caller that we need fallback to unclustered mode.
 * Return -EAGAIN to inform caller that we need to re-search this block group
 * Return >0 to inform caller that we find nothing
 * Return 0 means we have found a location and set ffe_ctl->found_offset.
 */
static int find_free_extent_clustered(struct btrfs_block_group *bg,
				      struct find_free_extent_ctl *ffe_ctl,
				      struct btrfs_block_group **cluster_bg_ret)
{
	struct btrfs_block_group *cluster_bg;
	struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr;
	u64 aligned_cluster;
	u64 offset;
	int ret;

	cluster_bg = btrfs_lock_cluster(bg, last_ptr, ffe_ctl->delalloc);
	if (!cluster_bg)
		goto refill_cluster;
	if (cluster_bg != bg && (cluster_bg->ro ||
	    !block_group_bits(cluster_bg, ffe_ctl->flags)))
		goto release_cluster;

	offset = btrfs_alloc_from_cluster(cluster_bg, last_ptr,
			ffe_ctl->num_bytes, cluster_bg->start,
			&ffe_ctl->max_extent_size);
	if (offset) {
		/* We have a block, we're done */
		spin_unlock(&last_ptr->refill_lock);
		trace_btrfs_reserve_extent_cluster(cluster_bg,
				ffe_ctl->search_start, ffe_ctl->num_bytes);
		*cluster_bg_ret = cluster_bg;
		ffe_ctl->found_offset = offset;
		return 0;
	}
	WARN_ON(last_ptr->block_group != cluster_bg);

release_cluster:
	/*
	 * If we are on LOOP_NO_EMPTY_SIZE, we can't set up a new clusters, so
	 * lets just skip it and let the allocator find whatever block it can
	 * find. If we reach this point, we will have tried the cluster
	 * allocator plenty of times and not have found anything, so we are
	 * likely way too fragmented for the clustering stuff to find anything.
	 *
	 * However, if the cluster is taken from the current block group,
	 * release the cluster first, so that we stand a better chance of
	 * succeeding in the unclustered allocation.
	 */
	if (ffe_ctl->loop >= LOOP_NO_EMPTY_SIZE && cluster_bg != bg) {
		spin_unlock(&last_ptr->refill_lock);
		btrfs_release_block_group(cluster_bg, ffe_ctl->delalloc);
		return -ENOENT;
	}

	/* This cluster didn't work out, free it and start over */
	btrfs_return_cluster_to_free_space(NULL, last_ptr);

	if (cluster_bg != bg)
		btrfs_release_block_group(cluster_bg, ffe_ctl->delalloc);

refill_cluster:
	if (ffe_ctl->loop >= LOOP_NO_EMPTY_SIZE) {
		spin_unlock(&last_ptr->refill_lock);
		return -ENOENT;
	}

	aligned_cluster = max_t(u64,
			ffe_ctl->empty_cluster + ffe_ctl->empty_size,
			bg->full_stripe_len);
	ret = btrfs_find_space_cluster(bg, last_ptr, ffe_ctl->search_start,
			ffe_ctl->num_bytes, aligned_cluster);
	if (ret == 0) {
		/* Now pull our allocation out of this cluster */
		offset = btrfs_alloc_from_cluster(bg, last_ptr,
				ffe_ctl->num_bytes, ffe_ctl->search_start,
				&ffe_ctl->max_extent_size);
		if (offset) {
			/* We found one, proceed */
			spin_unlock(&last_ptr->refill_lock);
			trace_btrfs_reserve_extent_cluster(bg,
					ffe_ctl->search_start,
					ffe_ctl->num_bytes);
			ffe_ctl->found_offset = offset;
			return 0;
		}
	} else if (!ffe_ctl->cached && ffe_ctl->loop > LOOP_CACHING_NOWAIT &&
		   !ffe_ctl->retry_clustered) {
		spin_unlock(&last_ptr->refill_lock);

		ffe_ctl->retry_clustered = true;
		btrfs_wait_block_group_cache_progress(bg, ffe_ctl->num_bytes +
				ffe_ctl->empty_cluster + ffe_ctl->empty_size);
		return -EAGAIN;
	}
	/*
	 * At this point we either didn't find a cluster or we weren't able to
	 * allocate a block from our cluster.  Free the cluster we've been
	 * trying to use, and go to the next block group.
	 */
	btrfs_return_cluster_to_free_space(NULL, last_ptr);
	spin_unlock(&last_ptr->refill_lock);
	return 1;
}

/*
 * Return >0 to inform caller that we find nothing
 * Return 0 when we found an free extent and set ffe_ctrl->found_offset
 * Return -EAGAIN to inform caller that we need to re-search this block group
 */
static int find_free_extent_unclustered(struct btrfs_block_group *bg,
					struct find_free_extent_ctl *ffe_ctl)
{
	struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr;
	u64 offset;

	/*
	 * We are doing an unclustered allocation, set the fragmented flag so
	 * we don't bother trying to setup a cluster again until we get more
	 * space.
	 */
	if (unlikely(last_ptr)) {
		spin_lock(&last_ptr->lock);
		last_ptr->fragmented = 1;
		spin_unlock(&last_ptr->lock);
	}
	if (ffe_ctl->cached) {
		struct btrfs_free_space_ctl *free_space_ctl;

		free_space_ctl = bg->free_space_ctl;
		spin_lock(&free_space_ctl->tree_lock);
		if (free_space_ctl->free_space <
		    ffe_ctl->num_bytes + ffe_ctl->empty_cluster +
		    ffe_ctl->empty_size) {
			ffe_ctl->total_free_space = max_t(u64,
					ffe_ctl->total_free_space,
					free_space_ctl->free_space);
			spin_unlock(&free_space_ctl->tree_lock);
			return 1;
		}
		spin_unlock(&free_space_ctl->tree_lock);
	}

	offset = btrfs_find_space_for_alloc(bg, ffe_ctl->search_start,
			ffe_ctl->num_bytes, ffe_ctl->empty_size,
			&ffe_ctl->max_extent_size);

	/*
	 * If we didn't find a chunk, and we haven't failed on this block group
	 * before, and this block group is in the middle of caching and we are
	 * ok with waiting, then go ahead and wait for progress to be made, and
	 * set @retry_unclustered to true.
	 *
	 * If @retry_unclustered is true then we've already waited on this
	 * block group once and should move on to the next block group.
	 */
	if (!offset && !ffe_ctl->retry_unclustered && !ffe_ctl->cached &&
	    ffe_ctl->loop > LOOP_CACHING_NOWAIT) {
		btrfs_wait_block_group_cache_progress(bg, ffe_ctl->num_bytes +
						      ffe_ctl->empty_size);
		ffe_ctl->retry_unclustered = true;
		return -EAGAIN;
	} else if (!offset) {
		return 1;
	}
	ffe_ctl->found_offset = offset;
	return 0;
}

static int do_allocation_clustered(struct btrfs_block_group *block_group,
				   struct find_free_extent_ctl *ffe_ctl,
				   struct btrfs_block_group **bg_ret)
{
	int ret;

	/* We want to try and use the cluster allocator, so lets look there */
	if (ffe_ctl->last_ptr && ffe_ctl->use_cluster) {
		ret = find_free_extent_clustered(block_group, ffe_ctl, bg_ret);
		if (ret >= 0 || ret == -EAGAIN)
			return ret;
		/* ret == -ENOENT case falls through */
	}

	return find_free_extent_unclustered(block_group, ffe_ctl);
}

static int do_allocation(struct btrfs_block_group *block_group,
			 struct find_free_extent_ctl *ffe_ctl,
			 struct btrfs_block_group **bg_ret)
{
	switch (ffe_ctl->policy) {
	case BTRFS_EXTENT_ALLOC_CLUSTERED:
		return do_allocation_clustered(block_group, ffe_ctl, bg_ret);
	default:
		BUG();
	}
}

static void release_block_group(struct btrfs_block_group *block_group,
				struct find_free_extent_ctl *ffe_ctl,
				int delalloc)
{
	switch (ffe_ctl->policy) {
	case BTRFS_EXTENT_ALLOC_CLUSTERED:
		ffe_ctl->retry_clustered = false;
		ffe_ctl->retry_unclustered = false;
		break;
	default:
		BUG();
	}

	BUG_ON(btrfs_bg_flags_to_raid_index(block_group->flags) !=
	       ffe_ctl->index);
	btrfs_release_block_group(block_group, delalloc);
}

static void found_extent_clustered(struct find_free_extent_ctl *ffe_ctl,
				   struct btrfs_key *ins)
{
	struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr;

	if (!ffe_ctl->use_cluster && last_ptr) {
		spin_lock(&last_ptr->lock);
		last_ptr->window_start = ins->objectid;
		spin_unlock(&last_ptr->lock);
	}
}

static void found_extent(struct find_free_extent_ctl *ffe_ctl,
			 struct btrfs_key *ins)
{
	switch (ffe_ctl->policy) {
	case BTRFS_EXTENT_ALLOC_CLUSTERED:
		found_extent_clustered(ffe_ctl, ins);
		break;
	default:
		BUG();
	}
}

static int chunk_allocation_failed(struct find_free_extent_ctl *ffe_ctl)
{
	switch (ffe_ctl->policy) {
	case BTRFS_EXTENT_ALLOC_CLUSTERED:
		/*
		 * If we can't allocate a new chunk we've already looped through
		 * at least once, move on to the NO_EMPTY_SIZE case.
		 */
		ffe_ctl->loop = LOOP_NO_EMPTY_SIZE;
		return 0;
	default:
		BUG();
	}
}

/*
 * Return >0 means caller needs to re-search for free extent
 * Return 0 means we have the needed free extent.
 * Return <0 means we failed to locate any free extent.
 */
static int find_free_extent_update_loop(struct btrfs_fs_info *fs_info,
					struct btrfs_key *ins,
					struct find_free_extent_ctl *ffe_ctl,
					bool full_search)
{
	struct btrfs_root *root = fs_info->extent_root;
	int ret;

	if ((ffe_ctl->loop == LOOP_CACHING_NOWAIT) &&
	    ffe_ctl->have_caching_bg && !ffe_ctl->orig_have_caching_bg)
		ffe_ctl->orig_have_caching_bg = true;

	if (!ins->objectid && ffe_ctl->loop >= LOOP_CACHING_WAIT &&
	    ffe_ctl->have_caching_bg)
		return 1;

	if (!ins->objectid && ++(ffe_ctl->index) < BTRFS_NR_RAID_TYPES)
		return 1;

	if (ins->objectid) {
		found_extent(ffe_ctl, ins);
		return 0;
	}

	/*
	 * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
	 *			caching kthreads as we move along
	 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
	 * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
	 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
	 *		       again
	 */
	if (ffe_ctl->loop < LOOP_NO_EMPTY_SIZE) {
		ffe_ctl->index = 0;
		if (ffe_ctl->loop == LOOP_CACHING_NOWAIT) {
			/*
			 * We want to skip the LOOP_CACHING_WAIT step if we
			 * don't have any uncached bgs and we've already done a
			 * full search through.
			 */
			if (ffe_ctl->orig_have_caching_bg || !full_search)
				ffe_ctl->loop = LOOP_CACHING_WAIT;
			else
				ffe_ctl->loop = LOOP_ALLOC_CHUNK;
		} else {
			ffe_ctl->loop++;
		}

		if (ffe_ctl->loop == LOOP_ALLOC_CHUNK) {
			struct btrfs_trans_handle *trans;
			int exist = 0;

			trans = current->journal_info;
			if (trans)
				exist = 1;
			else
				trans = btrfs_join_transaction(root);

			if (IS_ERR(trans)) {
				ret = PTR_ERR(trans);
				return ret;
			}

			ret = btrfs_chunk_alloc(trans, ffe_ctl->flags,
						CHUNK_ALLOC_FORCE);

			/* Do not bail out on ENOSPC since we can do more. */
			if (ret == -ENOSPC)
				ret = chunk_allocation_failed(ffe_ctl);
			else if (ret < 0)
				btrfs_abort_transaction(trans, ret);
			else
				ret = 0;
			if (!exist)
				btrfs_end_transaction(trans);
			if (ret)
				return ret;
		}

		if (ffe_ctl->loop == LOOP_NO_EMPTY_SIZE) {
			if (ffe_ctl->policy != BTRFS_EXTENT_ALLOC_CLUSTERED)
				return -ENOSPC;

			/*
			 * Don't loop again if we already have no empty_size and
			 * no empty_cluster.
			 */
			if (ffe_ctl->empty_size == 0 &&
			    ffe_ctl->empty_cluster == 0)
				return -ENOSPC;
			ffe_ctl->empty_size = 0;
			ffe_ctl->empty_cluster = 0;
		}
		return 1;
	}
	return -ENOSPC;
}

static int prepare_allocation_clustered(struct btrfs_fs_info *fs_info,
					struct find_free_extent_ctl *ffe_ctl,
					struct btrfs_space_info *space_info,
					struct btrfs_key *ins)
{
	/*
	 * If our free space is heavily fragmented we may not be able to make
	 * big contiguous allocations, so instead of doing the expensive search
	 * for free space, simply return ENOSPC with our max_extent_size so we
	 * can go ahead and search for a more manageable chunk.
	 *
	 * If our max_extent_size is large enough for our allocation simply
	 * disable clustering since we will likely not be able to find enough
	 * space to create a cluster and induce latency trying.
	 */
	if (space_info->max_extent_size) {
		spin_lock(&space_info->lock);
		if (space_info->max_extent_size &&
		    ffe_ctl->num_bytes > space_info->max_extent_size) {
			ins->offset = space_info->max_extent_size;
			spin_unlock(&space_info->lock);
			return -ENOSPC;
		} else if (space_info->max_extent_size) {
			ffe_ctl->use_cluster = false;
		}
		spin_unlock(&space_info->lock);
	}

	ffe_ctl->last_ptr = fetch_cluster_info(fs_info, space_info,
					       &ffe_ctl->empty_cluster);
	if (ffe_ctl->last_ptr) {
		struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr;

		spin_lock(&last_ptr->lock);
		if (last_ptr->block_group)
			ffe_ctl->hint_byte = last_ptr->window_start;
		if (last_ptr->fragmented) {
			/*
			 * We still set window_start so we can keep track of the
			 * last place we found an allocation to try and save
			 * some time.
			 */
			ffe_ctl->hint_byte = last_ptr->window_start;
			ffe_ctl->use_cluster = false;
		}
		spin_unlock(&last_ptr->lock);
	}

	return 0;
}

static int prepare_allocation(struct btrfs_fs_info *fs_info,
			      struct find_free_extent_ctl *ffe_ctl,
			      struct btrfs_space_info *space_info,
			      struct btrfs_key *ins)
{
	switch (ffe_ctl->policy) {
	case BTRFS_EXTENT_ALLOC_CLUSTERED:
		return prepare_allocation_clustered(fs_info, ffe_ctl,
						    space_info, ins);
	default:
		BUG();
	}
}

/*
 * walks the btree of allocated extents and find a hole of a given size.
 * The key ins is changed to record the hole:
 * ins->objectid == start position
 * ins->flags = BTRFS_EXTENT_ITEM_KEY
 * ins->offset == the size of the hole.
 * Any available blocks before search_start are skipped.
 *
 * If there is no suitable free space, we will record the max size of
 * the free space extent currently.
 *
 * The overall logic and call chain:
 *
 * find_free_extent()
 * |- Iterate through all block groups
 * |  |- Get a valid block group
 * |  |- Try to do clustered allocation in that block group
 * |  |- Try to do unclustered allocation in that block group
 * |  |- Check if the result is valid
 * |  |  |- If valid, then exit
 * |  |- Jump to next block group
 * |
 * |- Push harder to find free extents
 *    |- If not found, re-iterate all block groups
 */
static noinline int find_free_extent(struct btrfs_fs_info *fs_info,
				u64 ram_bytes, u64 num_bytes, u64 empty_size,
				u64 hint_byte_orig, struct btrfs_key *ins,
				u64 flags, int delalloc)
{
	int ret = 0;
	int cache_block_group_error = 0;
	struct btrfs_block_group *block_group = NULL;
	struct find_free_extent_ctl ffe_ctl = {0};
	struct btrfs_space_info *space_info;
	bool full_search = false;

	WARN_ON(num_bytes < fs_info->sectorsize);

	ffe_ctl.num_bytes = num_bytes;
	ffe_ctl.empty_size = empty_size;
	ffe_ctl.flags = flags;
	ffe_ctl.search_start = 0;
	ffe_ctl.delalloc = delalloc;
	ffe_ctl.index = btrfs_bg_flags_to_raid_index(flags);
	ffe_ctl.have_caching_bg = false;
	ffe_ctl.orig_have_caching_bg = false;
	ffe_ctl.found_offset = 0;
	ffe_ctl.hint_byte = hint_byte_orig;
	ffe_ctl.policy = BTRFS_EXTENT_ALLOC_CLUSTERED;

	/* For clustered allocation */
	ffe_ctl.retry_clustered = false;
	ffe_ctl.retry_unclustered = false;
	ffe_ctl.last_ptr = NULL;
	ffe_ctl.use_cluster = true;

	ins->type = BTRFS_EXTENT_ITEM_KEY;
	ins->objectid = 0;
	ins->offset = 0;

	trace_find_free_extent(fs_info, num_bytes, empty_size, flags);

	space_info = btrfs_find_space_info(fs_info, flags);
	if (!space_info) {
		btrfs_err(fs_info, "No space info for %llu", flags);
		return -ENOSPC;
	}

	ret = prepare_allocation(fs_info, &ffe_ctl, space_info, ins);
	if (ret < 0)
		return ret;

	ffe_ctl.search_start = max(ffe_ctl.search_start,
				   first_logical_byte(fs_info, 0));
	ffe_ctl.search_start = max(ffe_ctl.search_start, ffe_ctl.hint_byte);
	if (ffe_ctl.search_start == ffe_ctl.hint_byte) {
		block_group = btrfs_lookup_block_group(fs_info,
						       ffe_ctl.search_start);
		/*
		 * we don't want to use the block group if it doesn't match our
		 * allocation bits, or if its not cached.
		 *
		 * However if we are re-searching with an ideal block group
		 * picked out then we don't care that the block group is cached.
		 */
		if (block_group && block_group_bits(block_group, flags) &&
		    block_group->cached != BTRFS_CACHE_NO) {
			down_read(&space_info->groups_sem);
			if (list_empty(&block_group->list) ||
			    block_group->ro) {
				/*
				 * someone is removing this block group,
				 * we can't jump into the have_block_group
				 * target because our list pointers are not
				 * valid
				 */
				btrfs_put_block_group(block_group);
				up_read(&space_info->groups_sem);
			} else {
				ffe_ctl.index = btrfs_bg_flags_to_raid_index(
						block_group->flags);
				btrfs_lock_block_group(block_group, delalloc);
				goto have_block_group;
			}
		} else if (block_group) {
			btrfs_put_block_group(block_group);
		}
	}
search:
	ffe_ctl.have_caching_bg = false;
	if (ffe_ctl.index == btrfs_bg_flags_to_raid_index(flags) ||
	    ffe_ctl.index == 0)
		full_search = true;
	down_read(&space_info->groups_sem);
	list_for_each_entry(block_group,
			    &space_info->block_groups[ffe_ctl.index], list) {
		struct btrfs_block_group *bg_ret;

		/* If the block group is read-only, we can skip it entirely. */
		if (unlikely(block_group->ro))
			continue;

		btrfs_grab_block_group(block_group, delalloc);
		ffe_ctl.search_start = block_group->start;

		/*
		 * this can happen if we end up cycling through all the
		 * raid types, but we want to make sure we only allocate
		 * for the proper type.
		 */
		if (!block_group_bits(block_group, flags)) {
			u64 extra = BTRFS_BLOCK_GROUP_DUP |
				BTRFS_BLOCK_GROUP_RAID1_MASK |
				BTRFS_BLOCK_GROUP_RAID56_MASK |
				BTRFS_BLOCK_GROUP_RAID10;

			/*
			 * if they asked for extra copies and this block group
			 * doesn't provide them, bail.  This does allow us to
			 * fill raid0 from raid1.
			 */
			if ((flags & extra) && !(block_group->flags & extra))
				goto loop;

			/*
			 * This block group has different flags than we want.
			 * It's possible that we have MIXED_GROUP flag but no
			 * block group is mixed.  Just skip such block group.
			 */
			btrfs_release_block_group(block_group, delalloc);
			continue;
		}

have_block_group:
		ffe_ctl.cached = btrfs_block_group_done(block_group);
		if (unlikely(!ffe_ctl.cached)) {
			ffe_ctl.have_caching_bg = true;
			ret = btrfs_cache_block_group(block_group, 0);

			/*
			 * If we get ENOMEM here or something else we want to
			 * try other block groups, because it may not be fatal.
			 * However if we can't find anything else we need to
			 * save our return here so that we return the actual
			 * error that caused problems, not ENOSPC.
			 */
			if (ret < 0) {
				if (!cache_block_group_error)
					cache_block_group_error = ret;
				ret = 0;
				goto loop;
			}
			ret = 0;
		}

		if (unlikely(block_group->cached == BTRFS_CACHE_ERROR))
			goto loop;

		bg_ret = NULL;
		ret = do_allocation(block_group, &ffe_ctl, &bg_ret);
		if (ret == 0) {
			if (bg_ret && bg_ret != block_group) {
				btrfs_release_block_group(block_group, delalloc);
				block_group = bg_ret;
			}
		} else if (ret == -EAGAIN) {
			goto have_block_group;
		} else if (ret > 0) {
			goto loop;
		}

		/* Checks */
		ffe_ctl.search_start = round_up(ffe_ctl.found_offset,
						fs_info->stripesize);

		/* move on to the next group */
		if (ffe_ctl.search_start + num_bytes >
		    block_group->start + block_group->length) {
			btrfs_add_free_space(block_group, ffe_ctl.found_offset,
					     num_bytes);
			goto loop;
		}

		if (ffe_ctl.found_offset < ffe_ctl.search_start)
			btrfs_add_free_space(block_group, ffe_ctl.found_offset,
				ffe_ctl.search_start - ffe_ctl.found_offset);

		ret = btrfs_add_reserved_bytes(block_group, ram_bytes,
				num_bytes, delalloc);
		if (ret == -EAGAIN) {
			btrfs_add_free_space(block_group, ffe_ctl.found_offset,
					     num_bytes);
			goto loop;
		}
		btrfs_inc_block_group_reservations(block_group);

		/* we are all good, lets return */
		ins->objectid = ffe_ctl.search_start;
		ins->offset = num_bytes;

		trace_btrfs_reserve_extent(block_group, ffe_ctl.search_start,
					   num_bytes);
		btrfs_release_block_group(block_group, delalloc);
		break;
loop:
		release_block_group(block_group, &ffe_ctl, delalloc);
		cond_resched();
	}
	up_read(&space_info->groups_sem);

	ret = find_free_extent_update_loop(fs_info, ins, &ffe_ctl, full_search);
	if (ret > 0)
		goto search;

	if (ret == -ENOSPC && !cache_block_group_error) {
		/*
		 * Use ffe_ctl->total_free_space as fallback if we can't find
		 * any contiguous hole.
		 */
		if (!ffe_ctl.max_extent_size)
			ffe_ctl.max_extent_size = ffe_ctl.total_free_space;
		spin_lock(&space_info->lock);
		space_info->max_extent_size = ffe_ctl.max_extent_size;
		spin_unlock(&space_info->lock);
		ins->offset = ffe_ctl.max_extent_size;
	} else if (ret == -ENOSPC) {
		ret = cache_block_group_error;
	}
	return ret;
}

/*
 * btrfs_reserve_extent - entry point to the extent allocator. Tries to find a
 *			  hole that is at least as big as @num_bytes.
 *
 * @root           -	The root that will contain this extent
 *
 * @ram_bytes      -	The amount of space in ram that @num_bytes take. This
 *			is used for accounting purposes. This value differs
 *			from @num_bytes only in the case of compressed extents.
 *
 * @num_bytes      -	Number of bytes to allocate on-disk.
 *
 * @min_alloc_size -	Indicates the minimum amount of space that the
 *			allocator should try to satisfy. In some cases
 *			@num_bytes may be larger than what is required and if
 *			the filesystem is fragmented then allocation fails.
 *			However, the presence of @min_alloc_size gives a
 *			chance to try and satisfy the smaller allocation.
 *
 * @empty_size     -	A hint that you plan on doing more COW. This is the
 *			size in bytes the allocator should try to find free
 *			next to the block it returns.  This is just a hint and
 *			may be ignored by the allocator.
 *
 * @hint_byte      -	Hint to the allocator to start searching above the byte
 *			address passed. It might be ignored.
 *
 * @ins            -	This key is modified to record the found hole. It will
 *			have the following values:
 *			ins->objectid == start position
 *			ins->flags = BTRFS_EXTENT_ITEM_KEY
 *			ins->offset == the size of the hole.
 *
 * @is_data        -	Boolean flag indicating whether an extent is
 *			allocated for data (true) or metadata (false)
 *
 * @delalloc       -	Boolean flag indicating whether this allocation is for
 *			delalloc or not. If 'true' data_rwsem of block groups
 *			is going to be acquired.
 *
 *
 * Returns 0 when an allocation succeeded or < 0 when an error occurred. In
 * case -ENOSPC is returned then @ins->offset will contain the size of the
 * largest available hole the allocator managed to find.
 */
int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes,
			 u64 num_bytes, u64 min_alloc_size,
			 u64 empty_size, u64 hint_byte,
			 struct btrfs_key *ins, int is_data, int delalloc)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	bool final_tried = num_bytes == min_alloc_size;
	u64 flags;
	int ret;

	flags = get_alloc_profile_by_root(root, is_data);
again:
	WARN_ON(num_bytes < fs_info->sectorsize);
	ret = find_free_extent(fs_info, ram_bytes, num_bytes, empty_size,
			       hint_byte, ins, flags, delalloc);
	if (!ret && !is_data) {
		btrfs_dec_block_group_reservations(fs_info, ins->objectid);
	} else if (ret == -ENOSPC) {
		if (!final_tried && ins->offset) {
			num_bytes = min(num_bytes >> 1, ins->offset);
			num_bytes = round_down(num_bytes,
					       fs_info->sectorsize);
			num_bytes = max(num_bytes, min_alloc_size);
			ram_bytes = num_bytes;
			if (num_bytes == min_alloc_size)
				final_tried = true;
			goto again;
		} else if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
			struct btrfs_space_info *sinfo;

			sinfo = btrfs_find_space_info(fs_info, flags);
			btrfs_err(fs_info,
				  "allocation failed flags %llu, wanted %llu",
				  flags, num_bytes);
			if (sinfo)
				btrfs_dump_space_info(fs_info, sinfo,
						      num_bytes, 1);
		}
	}

	return ret;
}

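/*
 * Retry sketch for the -ENOSPC path above (hedged, made-up numbers): a
 * request for num_bytes = 1MiB with min_alloc_size = 64K that fails with
 * ins->offset = 200K (the largest hole found) retries with
 * min(1MiB >> 1, 200K) = 200K, rounded down to the sectorsize and clamped
 * up to at least 64K. The halving repeats until an allocation succeeds or
 * num_bytes reaches min_alloc_size, at which point final_tried stops the
 * loop.
 */
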
int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info,
			       u64 start, u64 len, int delalloc)
{
	struct btrfs_block_group *cache;

	cache = btrfs_lookup_block_group(fs_info, start);
	if (!cache) {
		btrfs_err(fs_info, "Unable to find block group for %llu",
			  start);
		return -ENOSPC;
	}

	btrfs_add_free_space(cache, start, len);
	btrfs_free_reserved_bytes(cache, len, delalloc);
	trace_btrfs_reserved_extent_free(fs_info, start, len);

	btrfs_put_block_group(cache);
	return 0;
}

int btrfs_pin_reserved_extent(struct btrfs_trans_handle *trans, u64 start,
			      u64 len)
{
	struct btrfs_block_group *cache;
	int ret = 0;

	cache = btrfs_lookup_block_group(trans->fs_info, start);
	if (!cache) {
		btrfs_err(trans->fs_info, "unable to find block group for %llu",
			  start);
		return -ENOSPC;
	}

	ret = pin_down_extent(trans, cache, start, len, 1);
	btrfs_put_block_group(cache);
	return ret;
}

static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				      u64 parent, u64 root_objectid,
				      u64 flags, u64 owner, u64 offset,
				      struct btrfs_key *ins, int ref_mod)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int ret;
	struct btrfs_extent_item *extent_item;
	struct btrfs_extent_inline_ref *iref;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int type;
	u32 size;

	if (parent > 0)
		type = BTRFS_SHARED_DATA_REF_KEY;
	else
		type = BTRFS_EXTENT_DATA_REF_KEY;

	size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
				      ins, size);
	if (ret) {
		btrfs_free_path(path);
		return ret;
	}

	leaf = path->nodes[0];
	extent_item = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, extent_item, ref_mod);
	btrfs_set_extent_generation(leaf, extent_item, trans->transid);
	btrfs_set_extent_flags(leaf, extent_item,
			       flags | BTRFS_EXTENT_FLAG_DATA);

	iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
	btrfs_set_extent_inline_ref_type(leaf, iref, type);
	if (parent > 0) {
		struct btrfs_shared_data_ref *ref;
		ref = (struct btrfs_shared_data_ref *)(iref + 1);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
		btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
	} else {
		struct btrfs_extent_data_ref *ref;
		ref = (struct btrfs_extent_data_ref *)(&iref->offset);
		btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
		btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
		btrfs_set_extent_data_ref_offset(leaf, ref, offset);
		btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
	}

	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_free_path(path);

	ret = remove_from_free_space_tree(trans, ins->objectid, ins->offset);
	if (ret)
		return ret;

	ret = btrfs_update_block_group(trans, ins->objectid, ins->offset, 1);
	if (ret) { /* -ENOENT, logic error */
		btrfs_err(fs_info, "update block group failed for %llu %llu",
			ins->objectid, ins->offset);
		BUG();
	}
	trace_btrfs_reserved_extent_alloc(fs_info, ins->objectid, ins->offset);
	return ret;
}

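/*
 * Resulting item layout (a sketch of the two branches above): the new
 * extent item is followed by exactly one inline backref, either
 *
 *	[btrfs_extent_item][SHARED_DATA_REF: offset = parent][ref count]
 *
 * when parent > 0 (the extent is shared via a snapshotted subtree), or
 *
 *	[btrfs_extent_item][EXTENT_DATA_REF: root/objectid/offset/count]
 *
 * for a plain per-root data reference.
 */
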
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_delayed_ref_node *node,
				     struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int ret;
	struct btrfs_extent_item *extent_item;
	struct btrfs_key extent_key;
	struct btrfs_tree_block_info *block_info;
	struct btrfs_extent_inline_ref *iref;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_delayed_tree_ref *ref;
	u32 size = sizeof(*extent_item) + sizeof(*iref);
	u64 num_bytes;
	u64 flags = extent_op->flags_to_set;
	bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);

	ref = btrfs_delayed_node_to_tree_ref(node);

	extent_key.objectid = node->bytenr;
	if (skinny_metadata) {
		extent_key.offset = ref->level;
		extent_key.type = BTRFS_METADATA_ITEM_KEY;
		num_bytes = fs_info->nodesize;
	} else {
		extent_key.offset = node->num_bytes;
		extent_key.type = BTRFS_EXTENT_ITEM_KEY;
		size += sizeof(*block_info);
		num_bytes = node->num_bytes;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
				      &extent_key, size);
	if (ret) {
		btrfs_free_path(path);
		return ret;
	}

	leaf = path->nodes[0];
	extent_item = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, extent_item, 1);
	btrfs_set_extent_generation(leaf, extent_item, trans->transid);
	btrfs_set_extent_flags(leaf, extent_item,
			       flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);

	if (skinny_metadata) {
		iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
	} else {
		block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
		btrfs_set_tree_block_key(leaf, block_info, &extent_op->key);
		btrfs_set_tree_block_level(leaf, block_info, ref->level);
		iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
	}

	if (node->type == BTRFS_SHARED_BLOCK_REF_KEY) {
		BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
		btrfs_set_extent_inline_ref_type(leaf, iref,
						 BTRFS_SHARED_BLOCK_REF_KEY);
		btrfs_set_extent_inline_ref_offset(leaf, iref, ref->parent);
	} else {
		btrfs_set_extent_inline_ref_type(leaf, iref,
						 BTRFS_TREE_BLOCK_REF_KEY);
		btrfs_set_extent_inline_ref_offset(leaf, iref, ref->root);
	}

	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);

	ret = remove_from_free_space_tree(trans, extent_key.objectid,
					  num_bytes);
	if (ret)
		return ret;

	ret = btrfs_update_block_group(trans, extent_key.objectid,
				       fs_info->nodesize, 1);
	if (ret) { /* -ENOENT, logic error */
		btrfs_err(fs_info, "update block group failed for %llu %llu",
			  extent_key.objectid, extent_key.offset);
		BUG();
	}

	trace_btrfs_reserved_extent_alloc(fs_info, extent_key.objectid,
					  fs_info->nodesize);
	return ret;
}
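
/*
 * With the SKINNY_METADATA incompat flag the key itself already encodes
 * the level and the extent size is implicitly fs_info->nodesize, so no
 * btrfs_tree_block_info needs to be stored; without it the item carries
 * the block info (first key + level) ahead of the inline ref.
 */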
int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root, u64 owner,
				     u64 offset, u64 ram_bytes,
				     struct btrfs_key *ins)
{
	struct btrfs_ref generic_ref = { 0 };
	int ret;

	BUG_ON(root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);

	btrfs_init_generic_ref(&generic_ref, BTRFS_ADD_DELAYED_EXTENT,
			       ins->objectid, ins->offset, 0);
	btrfs_init_data_ref(&generic_ref, root->root_key.objectid, owner, offset);
	btrfs_ref_tree_mod(root->fs_info, &generic_ref);
	ret = btrfs_add_delayed_data_ref(trans, &generic_ref,
					 ram_bytes, NULL, NULL);
	return ret;
}
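
/*
 * Note: the reference is only queued as a delayed ref here; the extent
 * item insertion itself happens later when the delayed refs are run,
 * which is why this helper never touches the extent tree directly.
 */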
/*
 * this is used by the tree logging recovery code.  It records that
 * an extent has been allocated and makes sure to clear the free
 * space cache bits as well
 */
int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
				   u64 root_objectid, u64 owner, u64 offset,
				   struct btrfs_key *ins)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int ret;
	struct btrfs_block_group *block_group;
	struct btrfs_space_info *space_info;

	/*
	 * Mixed block groups will exclude before processing the log so we only
	 * need to do the exclude dance if this fs isn't mixed.
	 */
	if (!btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
		ret = __exclude_logged_extent(fs_info, ins->objectid,
					      ins->offset);
		if (ret)
			return ret;
	}

	block_group = btrfs_lookup_block_group(fs_info, ins->objectid);
	if (!block_group)
		return -EINVAL;

	space_info = block_group->space_info;
	spin_lock(&space_info->lock);
	spin_lock(&block_group->lock);
	space_info->bytes_reserved += ins->offset;
	block_group->reserved += ins->offset;
	spin_unlock(&block_group->lock);
	spin_unlock(&space_info->lock);

	ret = alloc_reserved_file_extent(trans, 0, root_objectid, 0, owner,
					 offset, ins, 1);
	if (ret)
		btrfs_pin_extent(trans, ins->objectid, ins->offset, 1);
	btrfs_put_block_group(block_group);
	return ret;
}
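
/*
 * Note: if alloc_reserved_file_extent() fails, the extent is pinned
 * instead of leaked; the reservation accounted to the block group above
 * is then returned to the free space counters when pinned extents are
 * unpinned at transaction commit.
 */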
static struct extent_buffer *
btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      u64 bytenr, int level, u64 owner)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *buf;

	buf = btrfs_find_create_tree_block(fs_info, bytenr);
	if (IS_ERR(buf))
		return buf;

	/*
	 * Extra safety check in case the extent tree is corrupted and the
	 * extent allocator chooses to use a tree block which is already
	 * used and locked.
	 */
	if (buf->lock_owner == current->pid) {
		btrfs_err_rl(fs_info,
"tree block %llu owner %llu already locked by pid=%d, extent tree corruption detected",
			buf->start, btrfs_header_owner(buf), current->pid);
		free_extent_buffer(buf);
		return ERR_PTR(-EUCLEAN);
	}

	btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
	btrfs_tree_lock(buf);
	btrfs_clean_tree_block(buf);
	clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);

	btrfs_set_lock_blocking_write(buf);
	set_extent_buffer_uptodate(buf);

	memzero_extent_buffer(buf, 0, sizeof(struct btrfs_header));
	btrfs_set_header_level(buf, level);
	btrfs_set_header_bytenr(buf, buf->start);
	btrfs_set_header_generation(buf, trans->transid);
	btrfs_set_header_backref_rev(buf, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(buf, owner);
	write_extent_buffer_fsid(buf, fs_info->fs_devices->metadata_uuid);
	write_extent_buffer_chunk_tree_uuid(buf, fs_info->chunk_tree_uuid);
	if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
		buf->log_index = root->log_transid % 2;
		/*
		 * we allow two log transactions at a time, use different
		 * EXTENT bit to differentiate dirty pages.
		 */
		if (buf->log_index == 0)
			set_extent_dirty(&root->dirty_log_pages, buf->start,
					 buf->start + buf->len - 1, GFP_NOFS);
		else
			set_extent_new(&root->dirty_log_pages, buf->start,
				       buf->start + buf->len - 1);
	} else {
		buf->log_index = -1;
		set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
				 buf->start + buf->len - 1, GFP_NOFS);
	}
	trans->dirty = true;
	/* this returns a buffer locked for blocking */
	return buf;
}
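
/*
 * Note: the header is zeroed and rebuilt from scratch above because the
 * tree block may be reused from a previously freed extent and can still
 * carry stale content from its old owner.
 */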
/*
 * finds a free extent and does all the dirty work required for allocation
 * returns the tree buffer or an ERR_PTR on error.
 */
struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     u64 parent, u64 root_objectid,
					     const struct btrfs_disk_key *key,
					     int level, u64 hint,
					     u64 empty_size)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key ins;
	struct btrfs_block_rsv *block_rsv;
	struct extent_buffer *buf;
	struct btrfs_delayed_extent_op *extent_op;
	struct btrfs_ref generic_ref = { 0 };
	u64 flags = 0;
	int ret;
	u32 blocksize = fs_info->nodesize;
	bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	if (btrfs_is_testing(fs_info)) {
		buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr,
					    level, root_objectid);
		if (!IS_ERR(buf))
			root->alloc_bytenr += blocksize;
		return buf;
	}
#endif

	block_rsv = btrfs_use_block_rsv(trans, root, blocksize);
	if (IS_ERR(block_rsv))
		return ERR_CAST(block_rsv);

	ret = btrfs_reserve_extent(root, blocksize, blocksize, blocksize,
				   empty_size, hint, &ins, 0, 0);
	if (ret)
		goto out_unuse;

	buf = btrfs_init_new_buffer(trans, root, ins.objectid, level,
				    root_objectid);
	if (IS_ERR(buf)) {
		ret = PTR_ERR(buf);
		goto out_free_reserved;
	}

	if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
		if (parent == 0)
			parent = ins.objectid;
		flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
	} else
		BUG_ON(parent > 0);

	if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
		extent_op = btrfs_alloc_delayed_extent_op();
		if (!extent_op) {
			ret = -ENOMEM;
			goto out_free_buf;
		}
		if (key)
			memcpy(&extent_op->key, key, sizeof(extent_op->key));
		else
			memset(&extent_op->key, 0, sizeof(extent_op->key));
		extent_op->flags_to_set = flags;
		extent_op->update_key = skinny_metadata ? false : true;
		extent_op->update_flags = true;
		extent_op->is_data = false;
		extent_op->level = level;

		btrfs_init_generic_ref(&generic_ref, BTRFS_ADD_DELAYED_EXTENT,
				       ins.objectid, ins.offset, parent);
		generic_ref.real_root = root->root_key.objectid;
		btrfs_init_tree_ref(&generic_ref, level, root_objectid);
		btrfs_ref_tree_mod(fs_info, &generic_ref);
		ret = btrfs_add_delayed_tree_ref(trans, &generic_ref,
						 extent_op, NULL, NULL);
		if (ret)
			goto out_free_delayed;
	}
	return buf;

out_free_delayed:
	btrfs_free_delayed_extent_op(extent_op);
out_free_buf:
	free_extent_buffer(buf);
out_free_reserved:
	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 0);
out_unuse:
	btrfs_unuse_block_rsv(fs_info, block_rsv, blocksize);
	return ERR_PTR(ret);
}
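
/*
 * The error labels above unwind in reverse allocation order: the delayed
 * extent op, the extent buffer, the reserved extent and finally the
 * block reservation, so a failure at any stage leaves no dangling state.
 */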
struct walk_control {
	u64 refs[BTRFS_MAX_LEVEL];
	u64 flags[BTRFS_MAX_LEVEL];
	struct btrfs_key update_progress;
	struct btrfs_key drop_progress;
	int drop_level;
	int stage;
	int level;
	int shared_level;
	int update_ref;
	int keep_locks;
	int reada_slot;
	int reada_count;
	int restarted;
};

#define DROP_REFERENCE	1
#define UPDATE_BACKREF	2
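
/*
 * The walk is a two-stage state machine: DROP_REFERENCE descends the tree
 * dropping the refs this root holds, and UPDATE_BACKREF is entered when a
 * shared subtree is hit with update_ref set, converting implicit backrefs
 * before the walk switches back and resumes dropping.
 */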
static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct walk_control *wc,
				     struct btrfs_path *path)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 bytenr;
	u64 generation;
	u64 refs;
	u64 flags;
	u32 nritems;
	struct btrfs_key key;
	struct extent_buffer *eb;
	int ret;
	int slot;
	int nread = 0;

	if (path->slots[wc->level] < wc->reada_slot) {
		wc->reada_count = wc->reada_count * 2 / 3;
		wc->reada_count = max(wc->reada_count, 2);
	} else {
		wc->reada_count = wc->reada_count * 3 / 2;
		wc->reada_count = min_t(int, wc->reada_count,
					BTRFS_NODEPTRS_PER_BLOCK(fs_info));
	}

	eb = path->nodes[wc->level];
	nritems = btrfs_header_nritems(eb);

	for (slot = path->slots[wc->level]; slot < nritems; slot++) {
		if (nread >= wc->reada_count)
			break;

		cond_resched();
		bytenr = btrfs_node_blockptr(eb, slot);
		generation = btrfs_node_ptr_generation(eb, slot);

		if (slot == path->slots[wc->level])
			goto reada;

		if (wc->stage == UPDATE_BACKREF &&
		    generation <= root->root_key.offset)
			continue;

		/* We don't lock the tree block, it's OK to be racy here */
		ret = btrfs_lookup_extent_info(trans, fs_info, bytenr,
					       wc->level - 1, 1, &refs,
					       &flags);
		/* We don't care about errors in readahead. */
		if (ret < 0)
			continue;
		BUG_ON(refs == 0);

		if (wc->stage == DROP_REFERENCE) {
			if (refs == 1)
				goto reada;

			if (wc->level == 1 &&
			    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
				continue;
			if (!wc->update_ref ||
			    generation <= root->root_key.offset)
				continue;
			btrfs_node_key_to_cpu(eb, &key, slot);
			ret = btrfs_comp_cpu_keys(&key,
						  &wc->update_progress);
			if (ret < 0)
				continue;
		} else {
			if (wc->level == 1 &&
			    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
				continue;
		}
reada:
		readahead_tree_block(fs_info, bytenr);
		nread++;
	}
	wc->reada_slot = slot;
}
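
/*
 * reada_count adapts to how far the caller advanced since the last batch:
 * it shrinks (x2/3, floor of 2) when we re-enter before the previous
 * batch was consumed and grows (x3/2, capped at one node's worth of
 * pointers) when it was.
 */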
/*
 * helper to process tree block while walking down the tree.
 *
 * when wc->stage == UPDATE_BACKREF, this function updates
 * back refs for pointers in the block.
 *
 * NOTE: return value 1 means we should stop walking down.
 */
static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct walk_control *wc, int lookup_info)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int level = wc->level;
	struct extent_buffer *eb = path->nodes[level];
	u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
	int ret;

	if (wc->stage == UPDATE_BACKREF &&
	    btrfs_header_owner(eb) != root->root_key.objectid)
		return 1;

	/*
	 * when reference count of tree block is 1, it won't increase
	 * again. once full backref flag is set, we never clear it.
	 */
	if (lookup_info &&
	    ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
	     (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
		BUG_ON(!path->locks[level]);
		ret = btrfs_lookup_extent_info(trans, fs_info,
					       eb->start, level, 1,
					       &wc->refs[level],
					       &wc->flags[level]);
		BUG_ON(ret == -ENOMEM);
		if (ret)
			return ret;
		BUG_ON(wc->refs[level] == 0);
	}

	if (wc->stage == DROP_REFERENCE) {
		if (wc->refs[level] > 1)
			return 1;

		if (path->locks[level] && !wc->keep_locks) {
			btrfs_tree_unlock_rw(eb, path->locks[level]);
			path->locks[level] = 0;
		}
		return 0;
	}

	/* wc->stage == UPDATE_BACKREF */
	if (!(wc->flags[level] & flag)) {
		BUG_ON(!path->locks[level]);
		ret = btrfs_inc_ref(trans, root, eb, 1);
		BUG_ON(ret); /* -ENOMEM */
		ret = btrfs_dec_ref(trans, root, eb, 0);
		BUG_ON(ret); /* -ENOMEM */
		ret = btrfs_set_disk_extent_flags(trans, eb, flag,
						  btrfs_header_level(eb), 0);
		BUG_ON(ret); /* -ENOMEM */
		wc->flags[level] |= flag;
	}

	/*
	 * the block is shared by multiple trees, so it's not good to
	 * keep the tree lock
	 */
	if (path->locks[level] && level > 0) {
		btrfs_tree_unlock_rw(eb, path->locks[level]);
		path->locks[level] = 0;
	}
	return 0;
}
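
/*
 * Note: in the UPDATE_BACKREF branch the inc_ref/dec_ref pair converts
 * the refs held on this block's children: inc_ref with full_backref set
 * adds parent-keyed (shared) refs, dec_ref drops the old root-keyed
 * ones, and FULL_BACKREF is then set on the extent item itself.
 */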
/*
 * This is used to verify a ref exists for this root to deal with a bug where we
 * would have a drop_progress key that hadn't been updated properly.
 */
static int check_ref_exists(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root, u64 bytenr, u64 parent,
			    int level)
{
	struct btrfs_path *path;
	struct btrfs_extent_inline_ref *iref;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = lookup_extent_backref(trans, path, &iref, bytenr,
				    root->fs_info->nodesize, parent,
				    root->root_key.objectid, level, 0);
	btrfs_free_path(path);
	if (ret == -ENOENT)
		return 0;
	if (ret < 0)
		return ret;
	return 1;
}
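
/*
 * Return convention: 1 if the backref was found, 0 if it is missing
 * (-ENOENT from the lookup), and a negative errno on a real error.
 */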
/*
 * helper to process tree block pointer.
 *
 * when wc->stage == DROP_REFERENCE, this function checks
 * reference count of the block pointed to. if the block
 * is shared and we need to update back refs for the subtree
 * rooted at the block, this function changes wc->stage to
 * UPDATE_BACKREF. if the block is shared and there is no
 * need to update back refs, this function drops the reference
 * to the block.
 *
 * NOTE: return value 1 means we should stop walking down.
 */
static noinline int do_walk_down(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct walk_control *wc, int *lookup_info)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 bytenr;
	u64 generation;
	u64 parent;
	struct btrfs_key key;
	struct btrfs_key first_key;
	struct btrfs_ref ref = { 0 };
	struct extent_buffer *next;
	int level = wc->level;
	int reada = 0;
	int ret = 0;
	bool need_account = false;

	generation = btrfs_node_ptr_generation(path->nodes[level],
					       path->slots[level]);
	/*
	 * if the lower level block was created before the snapshot
	 * was created, we know there is no need to update back refs
	 * for the subtree
	 */
	if (wc->stage == UPDATE_BACKREF &&
	    generation <= root->root_key.offset) {
		*lookup_info = 1;
		return 1;
	}

	bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
	btrfs_node_key_to_cpu(path->nodes[level], &first_key,
			      path->slots[level]);

	next = find_extent_buffer(fs_info, bytenr);
	if (!next) {
		next = btrfs_find_create_tree_block(fs_info, bytenr);
		if (IS_ERR(next))
			return PTR_ERR(next);

		btrfs_set_buffer_lockdep_class(root->root_key.objectid, next,
					       level - 1);
		reada = 1;
	}
	btrfs_tree_lock(next);
	btrfs_set_lock_blocking_write(next);

	ret = btrfs_lookup_extent_info(trans, fs_info, bytenr, level - 1, 1,
				       &wc->refs[level - 1],
				       &wc->flags[level - 1]);
	if (ret < 0)
		goto out_unlock;

	if (unlikely(wc->refs[level - 1] == 0)) {
		btrfs_err(fs_info, "Missing references.");
		ret = -EIO;
		goto out_unlock;
	}
	*lookup_info = 0;

	if (wc->stage == DROP_REFERENCE) {
		if (wc->refs[level - 1] > 1) {
			need_account = true;
			if (level == 1 &&
			    (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
				goto skip;

			if (!wc->update_ref ||
			    generation <= root->root_key.offset)
				goto skip;

			btrfs_node_key_to_cpu(path->nodes[level], &key,
					      path->slots[level]);
			ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
			if (ret < 0)
				goto skip;

			wc->stage = UPDATE_BACKREF;
			wc->shared_level = level - 1;
		}
	} else {
		if (level == 1 &&
		    (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
			goto skip;
	}

	if (!btrfs_buffer_uptodate(next, generation, 0)) {
		btrfs_tree_unlock(next);
		free_extent_buffer(next);
		next = NULL;
		*lookup_info = 1;
	}

	if (!next) {
		if (reada && level == 1)
			reada_walk_down(trans, root, wc, path);
		next = read_tree_block(fs_info, bytenr, generation, level - 1,
				       &first_key);
		if (IS_ERR(next)) {
			return PTR_ERR(next);
		} else if (!extent_buffer_uptodate(next)) {
			free_extent_buffer(next);
			return -EIO;
		}
		btrfs_tree_lock(next);
		btrfs_set_lock_blocking_write(next);
	}

	level--;
	ASSERT(level == btrfs_header_level(next));
	if (level != btrfs_header_level(next)) {
		btrfs_err(root->fs_info, "mismatched level");
		ret = -EIO;
		goto out_unlock;
	}
	path->nodes[level] = next;
	path->slots[level] = 0;
	path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
	wc->level = level;
	if (wc->level == 1)
		wc->reada_slot = 0;
	return 0;
skip:
	wc->refs[level - 1] = 0;
	wc->flags[level - 1] = 0;
	if (wc->stage == DROP_REFERENCE) {
		if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			parent = path->nodes[level]->start;
		} else {
			ASSERT(root->root_key.objectid ==
			       btrfs_header_owner(path->nodes[level]));
			if (root->root_key.objectid !=
			    btrfs_header_owner(path->nodes[level])) {
				btrfs_err(root->fs_info,
					  "mismatched block owner");
				ret = -EIO;
				goto out_unlock;
			}
			parent = 0;
		}

		/*
		 * If we had a drop_progress we need to verify the refs are set
		 * as expected. If we find our ref then we know that from here
		 * on out everything should be correct, and we can clear the
		 * failed bit.
		 */
		if (wc->restarted) {
			ret = check_ref_exists(trans, root, bytenr, parent,
					       level - 1);
			if (ret < 0)
				goto out_unlock;
			if (ret == 0)
				goto no_delete;
			ret = 0;
			wc->restarted = 0;
		}

		/*
		 * Reloc tree doesn't contribute to qgroup numbers, and we have
		 * already accounted them at merge time (replace_path),
		 * thus we could skip expensive subtree trace here.
		 */
		if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
		    need_account) {
			ret = btrfs_qgroup_trace_subtree(trans, next,
							 generation, level - 1);
			if (ret) {
				btrfs_err_rl(fs_info,
"Error %d accounting shared subtree. Quota is out of sync, rescan required.",
					     ret);
			}
		}

		/*
		 * We need to update the next key in our walk control so we can
		 * update the drop_progress key accordingly.  We don't care if
		 * find_next_key doesn't find a key because that means we're at
		 * the end and are going to clean up now.
		 */
		wc->drop_level = level;
		find_next_key(path, level, &wc->drop_progress);

		btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr,
				       fs_info->nodesize, parent);
		btrfs_init_tree_ref(&ref, level - 1, root->root_key.objectid);
		ret = btrfs_free_extent(trans, &ref);
		if (ret)
			goto out_unlock;
	}
no_delete:
	*lookup_info = 1;
	ret = 1;

out_unlock:
	btrfs_tree_unlock(next);
	free_extent_buffer(next);

	return ret;
}
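
/*
 * Note: the skip: path is where this root's reference on a shared child
 * is dropped without descending into it; drop_progress is recorded first
 * so that a crash or restart resumes after the subtree whose ref was
 * just queued for deletion.
 */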
/*
 * helper to process tree block while walking up the tree.
 *
 * when wc->stage == DROP_REFERENCE, this function drops
 * reference count on the block.
 *
 * when wc->stage == UPDATE_BACKREF, this function changes
 * wc->stage back to DROP_REFERENCE if we changed wc->stage
 * to UPDATE_BACKREF previously while processing the block.
 *
 * NOTE: return value 1 means we should stop walking up.
 */
static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct walk_control *wc)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;
	int level = wc->level;
	struct extent_buffer *eb = path->nodes[level];
	u64 parent = 0;

	if (wc->stage == UPDATE_BACKREF) {
		BUG_ON(wc->shared_level < level);
		if (level < wc->shared_level)
			goto out;

		ret = find_next_key(path, level + 1, &wc->update_progress);
		if (ret > 0)
			wc->update_ref = 0;

		wc->stage = DROP_REFERENCE;
		wc->shared_level = -1;
		path->slots[level] = 0;

		/*
		 * check reference count again if the block isn't locked.
		 * we should start walking down the tree again if reference
		 * count is one.
		 */
		if (!path->locks[level]) {
			BUG_ON(level == 0);
			btrfs_tree_lock(eb);
			btrfs_set_lock_blocking_write(eb);
			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;

			ret = btrfs_lookup_extent_info(trans, fs_info,
						       eb->start, level, 1,
						       &wc->refs[level],
						       &wc->flags[level]);
			if (ret < 0) {
				btrfs_tree_unlock_rw(eb, path->locks[level]);
				path->locks[level] = 0;
				return ret;
			}
			BUG_ON(wc->refs[level] == 0);
			if (wc->refs[level] == 1) {
				btrfs_tree_unlock_rw(eb, path->locks[level]);
				path->locks[level] = 0;
				return 1;
			}
		}
	}

	/* wc->stage == DROP_REFERENCE */
	BUG_ON(wc->refs[level] > 1 && !path->locks[level]);

	if (wc->refs[level] == 1) {
		if (level == 0) {
			if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
				ret = btrfs_dec_ref(trans, root, eb, 1);
			else
				ret = btrfs_dec_ref(trans, root, eb, 0);
			BUG_ON(ret); /* -ENOMEM */
			if (is_fstree(root->root_key.objectid)) {
				ret = btrfs_qgroup_trace_leaf_items(trans, eb);
				if (ret) {
					btrfs_err_rl(fs_info,
"error %d accounting leaf items, quota is out of sync, rescan required",
						     ret);
				}
			}
		}
		/* make block locked assertion in btrfs_clean_tree_block happy */
		if (!path->locks[level] &&
		    btrfs_header_generation(eb) == trans->transid) {
			btrfs_tree_lock(eb);
			btrfs_set_lock_blocking_write(eb);
			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
		}
		btrfs_clean_tree_block(eb);
	}

	if (eb == root->node) {
		if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
			parent = eb->start;
		else if (root->root_key.objectid != btrfs_header_owner(eb))
			goto owner_mismatch;
	} else {
		if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
			parent = path->nodes[level + 1]->start;
		else if (root->root_key.objectid !=
			 btrfs_header_owner(path->nodes[level + 1]))
			goto owner_mismatch;
	}

	btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
out:
	wc->refs[level] = 0;
	wc->flags[level] = 0;
	return 0;

owner_mismatch:
	btrfs_err_rl(fs_info, "unexpected tree owner, have %llu expect %llu",
		     btrfs_header_owner(eb), root->root_key.objectid);
	return -EUCLEAN;
}
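
/*
 * Note: the leaf-level dec_ref drops the refs this block holds on the
 * data extents it points to (the last argument selects parent-keyed vs
 * root-keyed backrefs), after which the tree block itself is freed via
 * btrfs_free_tree_block().
 */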
static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct walk_control *wc)
{
	int level = wc->level;
	int lookup_info = 1;
	int ret;

	while (level >= 0) {
		ret = walk_down_proc(trans, root, path, wc, lookup_info);
		if (ret > 0)
			break;

		if (level == 0)
			break;

		if (path->slots[level] >=
		    btrfs_header_nritems(path->nodes[level]))
			break;

		ret = do_walk_down(trans, root, path, wc, &lookup_info);
		if (ret > 0) {
			path->slots[level]++;
			continue;
		} else if (ret < 0)
			return ret;
		level = wc->level;
	}
	return 0;
}
static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct walk_control *wc, int max_level)
{
	int level = wc->level;
	int ret;

	path->slots[level] = btrfs_header_nritems(path->nodes[level]);
	while (level < max_level && path->nodes[level]) {
		wc->level = level;
		if (path->slots[level] + 1 <
		    btrfs_header_nritems(path->nodes[level])) {
			path->slots[level]++;
			return 0;
		} else {
			ret = walk_up_proc(trans, root, path, wc);
			if (ret > 0)
				return 0;
			if (ret < 0)
				return ret;

			if (path->locks[level]) {
				btrfs_tree_unlock_rw(path->nodes[level],
						     path->locks[level]);
				path->locks[level] = 0;
			}
			free_extent_buffer(path->nodes[level]);
			path->nodes[level] = NULL;
			level++;
		}
	}
	return 1;
}
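
/*
 * Return convention: 0 means the caller should walk back down from the
 * slot we just advanced to, 1 means we climbed past max_level and the
 * walk of this (sub)tree is finished.
 */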
/*
 * drop a subvolume tree.
 *
 * this function traverses the tree freeing any blocks that are only
 * referenced by the tree.
 *
 * when a shared tree block is found, this function decreases its
 * reference count by one. if update_ref is true, this function
 * also makes sure backrefs for the shared block and all lower level
 * blocks are properly updated.
 *
 * If called with for_reloc == 0, may exit early with -EAGAIN
 */
int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_path *path;
	struct btrfs_trans_handle *trans;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root_item *root_item = &root->root_item;
	struct walk_control *wc;
	struct btrfs_key key;
	int err = 0;
	int ret;
	int level;
	bool root_dropped = false;

	btrfs_debug(fs_info, "Drop subvolume %llu", root->root_key.objectid);

	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out;
	}

	wc = kzalloc(sizeof(*wc), GFP_NOFS);
	if (!wc) {
		btrfs_free_path(path);
		err = -ENOMEM;
		goto out;
	}

	trans = btrfs_start_transaction(tree_root, 0);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out_free;
	}

	err = btrfs_run_delayed_items(trans);
	if (err)
		goto out_end_trans;

	/*
	 * This will help us catch people modifying the fs tree while we're
	 * dropping it.  It is unsafe to mess with the fs tree while it's being
	 * dropped as we unlock the root node and parent nodes as we walk down
	 * the tree, assuming nothing will change.  If something does change
	 * then we'll have stale information and drop references to blocks we've
	 * already dropped.
	 */
	set_bit(BTRFS_ROOT_DELETING, &root->state);
	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
		level = btrfs_header_level(root->node);
		path->nodes[level] = btrfs_lock_root_node(root);
		btrfs_set_lock_blocking_write(path->nodes[level]);
		path->slots[level] = 0;
		path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
		memset(&wc->update_progress, 0,
		       sizeof(wc->update_progress));
	} else {
		btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
		memcpy(&wc->update_progress, &key,
		       sizeof(wc->update_progress));

		level = root_item->drop_level;
		BUG_ON(level == 0);
		path->lowest_level = level;
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		path->lowest_level = 0;
		if (ret < 0) {
			err = ret;
			goto out_end_trans;
		}
		WARN_ON(ret > 0);

		/*
		 * unlock our path, this is safe because only this
		 * function is allowed to delete this snapshot
		 */
		btrfs_unlock_up_safe(path, 0);

		level = btrfs_header_level(root->node);
		while (1) {
			btrfs_tree_lock(path->nodes[level]);
			btrfs_set_lock_blocking_write(path->nodes[level]);
			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;

			ret = btrfs_lookup_extent_info(trans, fs_info,
						path->nodes[level]->start,
						level, 1, &wc->refs[level],
						&wc->flags[level]);
			if (ret < 0) {
				err = ret;
				goto out_end_trans;
			}
			BUG_ON(wc->refs[level] == 0);

			if (level == root_item->drop_level)
				break;

			btrfs_tree_unlock(path->nodes[level]);
			path->locks[level] = 0;
			WARN_ON(wc->refs[level] != 1);
			level--;
		}
	}

	wc->restarted = test_bit(BTRFS_ROOT_DEAD_TREE, &root->state);
	wc->level = level;
	wc->shared_level = -1;
	wc->stage = DROP_REFERENCE;
	wc->update_ref = update_ref;
	wc->keep_locks = 0;
	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(fs_info);

	while (1) {

		ret = walk_down_tree(trans, root, path, wc);
		if (ret < 0) {
			err = ret;
			break;
		}

		ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
		if (ret < 0) {
			err = ret;
			break;
		}

		if (ret > 0) {
			BUG_ON(wc->stage != DROP_REFERENCE);
			break;
		}

		if (wc->stage == DROP_REFERENCE) {
			wc->drop_level = wc->level;
			btrfs_node_key_to_cpu(path->nodes[wc->drop_level],
					      &wc->drop_progress,
					      path->slots[wc->drop_level]);
		}
		btrfs_cpu_key_to_disk(&root_item->drop_progress,
				      &wc->drop_progress);
		root_item->drop_level = wc->drop_level;

		BUG_ON(wc->level == 0);
		if (btrfs_should_end_transaction(trans) ||
		    (!for_reloc && btrfs_need_cleaner_sleep(fs_info))) {
			ret = btrfs_update_root(trans, tree_root,
						&root->root_key,
						root_item);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				err = ret;
				goto out_end_trans;
			}

			btrfs_end_transaction_throttle(trans);
			if (!for_reloc && btrfs_need_cleaner_sleep(fs_info)) {
				btrfs_debug(fs_info,
					    "drop snapshot early exit");
				err = -EAGAIN;
				goto out_free;
			}

			trans = btrfs_start_transaction(tree_root, 0);
			if (IS_ERR(trans)) {
				err = PTR_ERR(trans);
				goto out_free;
			}
		}
	}
	btrfs_release_path(path);
	if (err)
		goto out_end_trans;

	ret = btrfs_del_root(trans, &root->root_key);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		err = ret;
		goto out_end_trans;
	}

	if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
		ret = btrfs_find_root(tree_root, &root->root_key, path,
				      NULL, NULL);
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			err = ret;
			goto out_end_trans;
		} else if (ret > 0) {
			/* if we fail to delete the orphan item this time
			 * around, it'll get picked up the next time.
			 *
			 * The most common failure here is just -ENOENT.
			 */
			btrfs_del_orphan_item(trans, tree_root,
					      root->root_key.objectid);
		}
	}

	if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state))
		btrfs_add_dropped_root(trans, root);
	else
		btrfs_put_root(root);
	root_dropped = true;
out_end_trans:
	btrfs_end_transaction_throttle(trans);
out_free:
	kfree(wc);
	btrfs_free_path(path);
out:
	/*
	 * So if we need to stop dropping the snapshot for whatever reason we
	 * need to make sure to add it back to the dead root list so that we
	 * keep trying to do the work later.  This also cleans up roots if we
	 * don't have it in the radix (like when we recover after a power fail
	 * or unmount) so we don't leak memory.
	 */
	if (!for_reloc && !root_dropped)
		btrfs_add_dead_root(root);
	if (err && err != -EAGAIN)
		btrfs_handle_fs_error(fs_info, err, NULL);
	return err;
}
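
/*
 * Note: drop_progress/drop_level persisted in the root item are what
 * make this restartable; the transaction is ended and re-opened
 * periodically so a huge snapshot can be dropped without pinning a
 * single transaction open for the whole walk.
 */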
/*
 * drop subtree rooted at tree block 'node'.
 *
 * NOTE: this function will unlock and release tree block 'node'
 * only used by relocation code
 */
int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
			struct btrfs_root *root,
			struct extent_buffer *node,
			struct extent_buffer *parent)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_path *path;
	struct walk_control *wc;
	int level;
	int parent_level;
	int ret = 0;
	int wret;

	BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	wc = kzalloc(sizeof(*wc), GFP_NOFS);
	if (!wc) {
		btrfs_free_path(path);
		return -ENOMEM;
	}

	btrfs_assert_tree_locked(parent);
	parent_level = btrfs_header_level(parent);
	atomic_inc(&parent->refs);
	path->nodes[parent_level] = parent;
	path->slots[parent_level] = btrfs_header_nritems(parent);

	btrfs_assert_tree_locked(node);
	level = btrfs_header_level(node);
	path->nodes[level] = node;
	path->slots[level] = 0;
	path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;

	wc->refs[parent_level] = 1;
	wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
	wc->level = level;
	wc->shared_level = -1;
	wc->stage = DROP_REFERENCE;
	wc->update_ref = 0;
	wc->keep_locks = 1;
	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(fs_info);

	while (1) {
		wret = walk_down_tree(trans, root, path, wc);
		if (wret < 0) {
			ret = wret;
			break;
		}

		wret = walk_up_tree(trans, root, path, wc, parent_level);
		if (wret < 0)
			ret = wret;
		if (wret != 0)
			break;
	}

	kfree(wc);
	btrfs_free_path(path);
	return ret;
}
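
/*
 * Unlike btrfs_drop_snapshot(), the parent level is pre-seeded with
 * refs == 1 and FULL_BACKREF and keep_locks is set, so the walk stays
 * below parent_level and never unlocks blocks the relocation caller
 * still holds.
 */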
/*
 * helper to account the unused space of all the readonly block groups in the
 * space_info. takes mirrors into account.
 */
u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
{
	struct btrfs_block_group *block_group;
	u64 free_bytes = 0;
	int factor;

	/* It's df, we don't care if it's racy */
	if (list_empty(&sinfo->ro_bgs))
		return 0;

	spin_lock(&sinfo->lock);
	list_for_each_entry(block_group, &sinfo->ro_bgs, ro_list) {
		spin_lock(&block_group->lock);

		if (!block_group->ro) {
			spin_unlock(&block_group->lock);
			continue;
		}

		factor = btrfs_bg_type_to_factor(block_group->flags);
		free_bytes += (block_group->length -
			       block_group->used) * factor;

		spin_unlock(&block_group->lock);
	}
	spin_unlock(&sinfo->lock);

	return free_bytes;
}
int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info,
				   u64 start, u64 end)
{
	return unpin_extent_range(fs_info, start, end, false);
}
/*
 * It used to be that old block groups would be left around forever.
 * Iterating over them would be enough to trim unused space.  Since we
 * now automatically remove them, we also need to iterate over unallocated
 * space.
 *
 * We don't want a transaction for this since the discard may take a
 * substantial amount of time.  We don't require that a transaction be
 * running, but we do need to take a running transaction into account
 * to ensure that we're not discarding chunks that were released or
 * allocated in the current transaction.
 *
 * Holding the chunks lock will prevent other threads from allocating
 * or releasing chunks, but it won't prevent a running transaction
 * from committing and releasing the memory that the pending chunks
 * list head uses.  For that, we need to take a reference to the
 * transaction and hold the commit root sem.  We only need to hold
 * it while performing the free space search since we have already
 * held back allocations.
 */
static int btrfs_trim_free_extents(struct btrfs_device *device, u64 *trimmed)
{
	u64 start = SZ_1M, len = 0, end = 0;
	int ret;

	*trimmed = 0;

	/* Discard not supported = nothing to do. */
	if (!blk_queue_discard(bdev_get_queue(device->bdev)))
		return 0;

	/* Not writable = nothing to do. */
	if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
		return 0;

	/* No free space = nothing to do. */
	if (device->total_bytes <= device->bytes_used)
		return 0;

	ret = 0;

	while (1) {
		struct btrfs_fs_info *fs_info = device->fs_info;
		u64 bytes;

		ret = mutex_lock_interruptible(&fs_info->chunk_mutex);
		if (ret)
			break;

		find_first_clear_extent_bit(&device->alloc_state, start,
					    &start, &end,
					    CHUNK_TRIMMED | CHUNK_ALLOCATED);

		/* Ensure we skip the reserved area in the first 1M */
		start = max_t(u64, start, SZ_1M);

		/*
		 * If find_first_clear_extent_bit finds a range that spans the
		 * end of the device it will set end to -1, in this case it's up
		 * to the caller to trim the value to the size of the device.
		 */
		end = min(end, device->total_bytes - 1);

		len = end - start + 1;

		/* We didn't find any extents */
		if (!len) {
			mutex_unlock(&fs_info->chunk_mutex);
			ret = 0;
			break;
		}

		ret = btrfs_issue_discard(device->bdev, start, len,
					  &bytes);
		if (!ret)
			set_extent_bits(&device->alloc_state, start,
					start + bytes - 1,
					CHUNK_TRIMMED);
		mutex_unlock(&fs_info->chunk_mutex);

		if (ret)
			break;

		start += len;
		*trimmed += bytes;

		if (fatal_signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		cond_resched();
	}

	return ret;
}
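
/*
 * Note: ranges already trimmed or currently allocated are skipped via
 * the CHUNK_TRIMMED | CHUNK_ALLOCATED bits in device->alloc_state, and a
 * successful discard marks the range CHUNK_TRIMMED so later fstrim runs
 * do not re-issue it.
 */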
/*
 * Trim the whole filesystem by:
 * 1) trimming the free space in each block group
 * 2) trimming the unallocated space on each device
 *
 * This will also continue trimming even if a block group or device encounters
 * an error.  The return value will be the last error, or 0 if nothing bad
 * happens.
 */
int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
{
	struct btrfs_block_group *cache = NULL;
	struct btrfs_device *device;
	struct list_head *devices;
	u64 group_trimmed;
	u64 range_end = U64_MAX;
	u64 start;
	u64 end;
	u64 trimmed = 0;
	u64 bg_failed = 0;
	u64 dev_failed = 0;
	int bg_ret = 0;
	int dev_ret = 0;
	int ret = 0;

	/*
	 * Check range overflow if range->len is set.
	 * The default range->len is U64_MAX.
	 */
	if (range->len != U64_MAX &&
	    check_add_overflow(range->start, range->len, &range_end))
		return -EINVAL;

	cache = btrfs_lookup_first_block_group(fs_info, range->start);
	for (; cache; cache = btrfs_next_block_group(cache)) {
		if (cache->start >= range_end) {
			btrfs_put_block_group(cache);
			break;
		}

		start = max(range->start, cache->start);
		end = min(range_end, cache->start + cache->length);

		if (end - start >= range->minlen) {
			if (!btrfs_block_group_done(cache)) {
				ret = btrfs_cache_block_group(cache, 0);
				if (ret) {
					bg_failed++;
					bg_ret = ret;
					continue;
				}
				ret = btrfs_wait_block_group_cache_done(cache);
				if (ret) {
					bg_failed++;
					bg_ret = ret;
					continue;
				}
			}
			ret = btrfs_trim_block_group(cache,
						     &group_trimmed,
						     start,
						     end,
						     range->minlen);

			trimmed += group_trimmed;
			if (ret) {
				bg_failed++;
				bg_ret = ret;
				continue;
			}
		}
	}

	if (bg_failed)
		btrfs_warn(fs_info,
			"failed to trim %llu block group(s), last error %d",
			bg_failed, bg_ret);
	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	devices = &fs_info->fs_devices->devices;
	list_for_each_entry(device, devices, dev_list) {
		ret = btrfs_trim_free_extents(device, &group_trimmed);
		if (ret) {
			dev_failed++;
			dev_ret = ret;
			break;
		}

		trimmed += group_trimmed;
	}
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	if (dev_failed)
		btrfs_warn(fs_info,
			"failed to trim %llu device(s), last error %d",
			dev_failed, dev_ret);
	range->len = trimmed;
	if (bg_ret)
		return bg_ret;
	return dev_ret;
}
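
/*
 * Block group and device failures are counted separately; only the last
 * error of each phase is reported, with the block group error taking
 * precedence in the return value.
 */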