// SPDX-License-Identifier: GPL-2.0

#include "misc.h"
#include "ctree.h"
#include "block-group.h"
#include "space-info.h"
#include "disk-io.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "volumes.h"
#include "transaction.h"
#include "ref-verify.h"
#include "sysfs.h"
#include "tree-log.h"
#include "delalloc-space.h"
#include "discard.h"
#include "raid56.h"
/*
 * Return target flags in extended format or 0 if restripe for this chunk_type
 * is not in progress
 *
 * Should be called with balance_lock held
 */
static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	u64 target = 0;

	if (!bctl)
		return 0;

	if (flags & BTRFS_BLOCK_GROUP_DATA &&
	    bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
	} else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
		   bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
	} else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
		   bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
	}

	return target;
}
/*
 * @flags: available profiles in extended format (see ctree.h)
 *
 * Return reduced profile in chunk format. If profile changing is in progress
 * (either running or paused) picks the target profile (if it's already
 * available), otherwise falls back to plain reducing.
 */
static u64 btrfs_reduce_alloc_profile(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 num_devices = fs_info->fs_devices->rw_devices;
	u64 target;
	u64 raid_type;
	u64 allowed = 0;

	/*
	 * See if restripe for this chunk_type is in progress, if so try to
	 * reduce to the target profile
	 */
	spin_lock(&fs_info->balance_lock);
	target = get_restripe_target(fs_info, flags);
	if (target) {
		/* Pick target profile only if it's already available */
		if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
			spin_unlock(&fs_info->balance_lock);
			return extended_to_chunk(target);
		}
	}
	spin_unlock(&fs_info->balance_lock);

	/* First, mask out the RAID levels which aren't possible */
	for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
		if (num_devices >= btrfs_raid_array[raid_type].devs_min)
			allowed |= btrfs_raid_array[raid_type].bg_flag;
	}
	allowed &= flags;

	if (allowed & BTRFS_BLOCK_GROUP_RAID6)
		allowed = BTRFS_BLOCK_GROUP_RAID6;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID5)
		allowed = BTRFS_BLOCK_GROUP_RAID5;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID10)
		allowed = BTRFS_BLOCK_GROUP_RAID10;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID1)
		allowed = BTRFS_BLOCK_GROUP_RAID1;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID0)
		allowed = BTRFS_BLOCK_GROUP_RAID0;

	flags &= ~BTRFS_BLOCK_GROUP_PROFILE_MASK;

	return extended_to_chunk(flags | allowed);
}
u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags)
{
	unsigned seq;
	u64 flags;

	do {
		flags = orig_flags;
		seq = read_seqbegin(&fs_info->profiles_lock);

		if (flags & BTRFS_BLOCK_GROUP_DATA)
			flags |= fs_info->avail_data_alloc_bits;
		else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
			flags |= fs_info->avail_system_alloc_bits;
		else if (flags & BTRFS_BLOCK_GROUP_METADATA)
			flags |= fs_info->avail_metadata_alloc_bits;
	} while (read_seqretry(&fs_info->profiles_lock, seq));

	return btrfs_reduce_alloc_profile(fs_info, flags);
}
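
/*
 * Reference counting helpers for block groups. Every successful lookup
 * (btrfs_lookup_block_group() and friends) takes a reference that must be
 * dropped with btrfs_put_block_group(); the last put frees the structure.
 */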
void btrfs_get_block_group(struct btrfs_block_group *cache)
{
	atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group *cache)
{
	if (atomic_dec_and_test(&cache->count)) {
		WARN_ON(cache->pinned > 0);
		WARN_ON(cache->reserved > 0);

		/*
		 * A block_group shouldn't be on the discard_list anymore.
		 * Remove the block_group from the discard_list to prevent us
		 * from causing a panic due to NULL pointer dereference.
		 */
		if (WARN_ON(!list_empty(&cache->discard_list)))
			btrfs_discard_cancel_work(&cache->fs_info->discard_ctl,
						  cache);

		/*
		 * If not empty, someone is still holding mutex of
		 * full_stripe_lock, which can only be released by caller.
		 * And it will definitely cause use-after-free when caller
		 * tries to release full stripe lock.
		 *
		 * No better way to resolve, but only to warn.
		 */
		WARN_ON(!RB_EMPTY_ROOT(&cache->full_stripe_locks_root.root));
		kfree(cache->free_space_ctl);
		kfree(cache);
	}
}
/*
 * This adds the block group to the fs_info rb tree for the block group cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
				       struct btrfs_block_group *block_group)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct btrfs_block_group *cache;

	ASSERT(block_group->length != 0);

	spin_lock(&info->block_group_cache_lock);
	p = &info->block_group_cache_tree.rb_node;

	while (*p) {
		parent = *p;
		cache = rb_entry(parent, struct btrfs_block_group, cache_node);
		if (block_group->start < cache->start) {
			p = &(*p)->rb_left;
		} else if (block_group->start > cache->start) {
			p = &(*p)->rb_right;
		} else {
			spin_unlock(&info->block_group_cache_lock);
			return -EEXIST;
		}
	}

	rb_link_node(&block_group->cache_node, parent, p);
	rb_insert_color(&block_group->cache_node,
			&info->block_group_cache_tree);

	if (info->first_logical_byte > block_group->start)
		info->first_logical_byte = block_group->start;

	spin_unlock(&info->block_group_cache_lock);

	return 0;
}
/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group *block_group_cache_tree_search(
		struct btrfs_fs_info *info, u64 bytenr, int contains)
{
	struct btrfs_block_group *cache, *ret = NULL;
	struct rb_node *n;
	u64 end, start;

	spin_lock(&info->block_group_cache_lock);
	n = info->block_group_cache_tree.rb_node;

	while (n) {
		cache = rb_entry(n, struct btrfs_block_group, cache_node);
		end = cache->start + cache->length - 1;
		start = cache->start;

		if (bytenr < start) {
			if (!contains && (!ret || start < ret->start))
				ret = cache;
			n = n->rb_left;
		} else if (bytenr > start) {
			if (contains && bytenr <= end) {
				ret = cache;
				break;
			}
			n = n->rb_right;
		} else {
			ret = cache;
			break;
		}
	}
	if (ret) {
		btrfs_get_block_group(ret);
		if (bytenr == 0 && info->first_logical_byte > ret->start)
			info->first_logical_byte = ret->start;
	}
	spin_unlock(&info->block_group_cache_lock);

	return ret;
}
/*
 * Return the block group that starts at or after bytenr
 */
struct btrfs_block_group *btrfs_lookup_first_block_group(
		struct btrfs_fs_info *info, u64 bytenr)
{
	return block_group_cache_tree_search(info, bytenr, 0);
}

/*
 * Return the block group that contains the given bytenr
 */
struct btrfs_block_group *btrfs_lookup_block_group(
		struct btrfs_fs_info *info, u64 bytenr)
{
	return block_group_cache_tree_search(info, bytenr, 1);
}
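
/*
 * Typical lookup/put pairing (illustrative sketch only, not part of the
 * original file):
 *
 *	struct btrfs_block_group *bg;
 *
 *	bg = btrfs_lookup_block_group(fs_info, bytenr);
 *	if (bg) {
 *		... use bg ...
 *		btrfs_put_block_group(bg);
 *	}
 */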
struct btrfs_block_group *btrfs_next_block_group(
		struct btrfs_block_group *cache)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct rb_node *node;

	spin_lock(&fs_info->block_group_cache_lock);

	/* If our block group was removed, we need a full search. */
	if (RB_EMPTY_NODE(&cache->cache_node)) {
		const u64 next_bytenr = cache->start + cache->length;

		spin_unlock(&fs_info->block_group_cache_lock);
		btrfs_put_block_group(cache);
		cache = btrfs_lookup_first_block_group(fs_info, next_bytenr);
		return cache;
	}
	node = rb_next(&cache->cache_node);
	btrfs_put_block_group(cache);
	if (node) {
		cache = rb_entry(node, struct btrfs_block_group, cache_node);
		btrfs_get_block_group(cache);
	} else {
		cache = NULL;
	}
	spin_unlock(&fs_info->block_group_cache_lock);
	return cache;
}
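
/*
 * The nocow_writers counter below tracks in-flight NOCOW writes against a
 * block group. btrfs_inc_nocow_writers() and btrfs_dec_nocow_writers() must
 * be paired, and btrfs_wait_nocow_writers() blocks until the count drops to
 * zero.
 */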
bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_block_group *bg;
	bool ret = true;

	bg = btrfs_lookup_block_group(fs_info, bytenr);
	if (!bg)
		return false;

	spin_lock(&bg->lock);
	if (bg->ro)
		ret = false;
	else
		atomic_inc(&bg->nocow_writers);
	spin_unlock(&bg->lock);

	/* No put on block group, done by btrfs_dec_nocow_writers */
	if (!ret)
		btrfs_put_block_group(bg);

	return ret;
}
void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_block_group *bg;

	bg = btrfs_lookup_block_group(fs_info, bytenr);
	ASSERT(bg);
	if (atomic_dec_and_test(&bg->nocow_writers))
		wake_up_var(&bg->nocow_writers);
	/*
	 * Once for our lookup and once for the lookup done by a previous call
	 * to btrfs_inc_nocow_writers()
	 */
	btrfs_put_block_group(bg);
	btrfs_put_block_group(bg);
}

void btrfs_wait_nocow_writers(struct btrfs_block_group *bg)
{
	wait_var_event(&bg->nocow_writers, !atomic_read(&bg->nocow_writers));
}
void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
					const u64 start)
{
	struct btrfs_block_group *bg;

	bg = btrfs_lookup_block_group(fs_info, start);
	ASSERT(bg);
	if (atomic_dec_and_test(&bg->reservations))
		wake_up_var(&bg->reservations);
	btrfs_put_block_group(bg);
}

void btrfs_wait_block_group_reservations(struct btrfs_block_group *bg)
{
	struct btrfs_space_info *space_info = bg->space_info;

	ASSERT(bg->ro);

	if (!(bg->flags & BTRFS_BLOCK_GROUP_DATA))
		return;

	/*
	 * Our block group is read only but before we set it to read only,
	 * some task might have had allocated an extent from it already, but it
	 * has not yet created a respective ordered extent (and added it to a
	 * root's list of ordered extents).
	 * Therefore wait for any task currently allocating extents, since the
	 * block group's reservations counter is incremented while a read lock
	 * on the groups' semaphore is held and decremented after releasing
	 * the read access on that semaphore and creating the ordered extent.
	 */
	down_write(&space_info->groups_sem);
	up_write(&space_info->groups_sem);

	wait_var_event(&bg->reservations, !atomic_read(&bg->reservations));
}
struct btrfs_caching_control *btrfs_get_caching_control(
		struct btrfs_block_group *cache)
{
	struct btrfs_caching_control *ctl;

	spin_lock(&cache->lock);
	if (!cache->caching_ctl) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	ctl = cache->caching_ctl;
	refcount_inc(&ctl->count);
	spin_unlock(&cache->lock);
	return ctl;
}

void btrfs_put_caching_control(struct btrfs_caching_control *ctl)
{
	if (refcount_dec_and_test(&ctl->count))
		kfree(ctl);
}
/*
 * When we wait for progress in the block group caching, it's because our
 * allocation attempt failed at least once. So, we must sleep and let some
 * progress happen before we try again.
 *
 * This function will sleep at least once waiting for new free space to show
 * up, and then it will check the block group free space numbers for our min
 * num_bytes. Another option is to have it go ahead and look in the rbtree for
 * a free extent of a given size, but this is a good start.
 *
 * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
 * any of the information in this block group.
 */
void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache,
					   u64 num_bytes)
{
	struct btrfs_caching_control *caching_ctl;

	caching_ctl = btrfs_get_caching_control(cache);
	if (!caching_ctl)
		return;

	wait_event(caching_ctl->wait, btrfs_block_group_done(cache) ||
		   (cache->free_space_ctl->free_space >= num_bytes));

	btrfs_put_caching_control(caching_ctl);
}

int btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache)
{
	struct btrfs_caching_control *caching_ctl;
	int ret = 0;

	caching_ctl = btrfs_get_caching_control(cache);
	if (!caching_ctl)
		return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;

	wait_event(caching_ctl->wait, btrfs_block_group_done(cache));
	if (cache->cached == BTRFS_CACHE_ERROR)
		ret = -EIO;
	btrfs_put_caching_control(caching_ctl);
	return ret;
}
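
/*
 * Debug-only helper: when free space fragmentation testing is enabled this
 * removes every other chunk of a block group's free space so allocator
 * behaviour under fragmentation can be exercised.
 */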
#ifdef CONFIG_BTRFS_DEBUG
static void fragment_free_space(struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	u64 start = block_group->start;
	u64 len = block_group->length;
	u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ?
		fs_info->nodesize : fs_info->sectorsize;
	u64 step = chunk << 1;

	while (len > chunk) {
		btrfs_remove_free_space(block_group, start, chunk);
		start += step;
		if (len < step)
			len = 0;
		else
			len -= step;
	}
}
#endif
/*
 * This is only called by btrfs_cache_block_group, since we could have freed
 * extents we need to check the pinned_extents for any extents that can't be
 * used yet since their free space will be released as soon as the transaction
 * commits.
 */
u64 add_new_free_space(struct btrfs_block_group *block_group, u64 start, u64 end)
{
	struct btrfs_fs_info *info = block_group->fs_info;
	u64 extent_start, extent_end, size, total_added = 0;
	int ret;

	while (start < end) {
		ret = find_first_extent_bit(&info->excluded_extents, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY | EXTENT_UPTODATE,
					    NULL);
		if (ret)
			break;

		if (extent_start <= start) {
			start = extent_end + 1;
		} else if (extent_start > start && extent_start < end) {
			size = extent_start - start;
			total_added += size;
			ret = btrfs_add_free_space_async_trimmed(block_group,
								 start, size);
			BUG_ON(ret); /* -ENOMEM or logic error */
			start = extent_end + 1;
		} else {
			break;
		}
	}

	if (start < end) {
		size = end - start;
		total_added += size;
		ret = btrfs_add_free_space_async_trimmed(block_group, start,
							 size);
		BUG_ON(ret); /* -ENOMEM or logic error */
	}

	return total_added;
}
static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
{
	struct btrfs_block_group *block_group = caching_ctl->block_group;
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_root *extent_root = fs_info->extent_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 total_found = 0;
	u64 last = 0;
	u32 nritems;
	int ret;
	bool wakeup = true;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	last = max_t(u64, block_group->start, BTRFS_SUPER_INFO_OFFSET);

#ifdef CONFIG_BTRFS_DEBUG
	/*
	 * If we're fragmenting we don't want to make anybody think we can
	 * allocate from this block group until we've had a chance to fragment
	 * the free space.
	 */
	if (btrfs_should_fragment_free_space(block_group))
		wakeup = false;
#endif
	/*
	 * We don't want to deadlock with somebody trying to allocate a new
	 * extent for the extent root while also trying to search the extent
	 * root to add free space. So we skip locking and search the commit
	 * root, since it's read-only
	 */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = READA_FORWARD;

	key.objectid = last;
	key.offset = 0;
	key.type = BTRFS_EXTENT_ITEM_KEY;

next:
	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);

	while (1) {
		if (btrfs_fs_closing(fs_info) > 1) {
			last = (u64)-1;
			break;
		}

		if (path->slots[0] < nritems) {
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		} else {
			ret = btrfs_find_next_key(extent_root, path, &key, 0, 0);
			if (ret)
				break;

			if (need_resched() ||
			    rwsem_is_contended(&fs_info->commit_root_sem)) {
				if (wakeup)
					caching_ctl->progress = last;
				btrfs_release_path(path);
				up_read(&fs_info->commit_root_sem);
				mutex_unlock(&caching_ctl->mutex);
				cond_resched();
				mutex_lock(&caching_ctl->mutex);
				down_read(&fs_info->commit_root_sem);
				goto next;
			}

			ret = btrfs_next_leaf(extent_root, path);
			if (ret < 0)
				goto out;
			if (ret)
				break;
			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			continue;
		}

		if (key.objectid < last) {
			key.objectid = last;
			key.offset = 0;
			key.type = BTRFS_EXTENT_ITEM_KEY;

			if (wakeup)
				caching_ctl->progress = last;
			btrfs_release_path(path);
			goto next;
		}

		if (key.objectid < block_group->start) {
			path->slots[0]++;
			continue;
		}

		if (key.objectid >= block_group->start + block_group->length)
			break;

		if (key.type == BTRFS_EXTENT_ITEM_KEY ||
		    key.type == BTRFS_METADATA_ITEM_KEY) {
			total_found += add_new_free_space(block_group, last,
							  key.objectid);
			if (key.type == BTRFS_METADATA_ITEM_KEY)
				last = key.objectid +
					fs_info->nodesize;
			else
				last = key.objectid + key.offset;

			if (total_found > CACHING_CTL_WAKE_UP) {
				total_found = 0;
				if (wakeup)
					wake_up(&caching_ctl->wait);
			}
		}
		path->slots[0]++;
	}
	ret = 0;

	total_found += add_new_free_space(block_group, last,
					  block_group->start + block_group->length);
	caching_ctl->progress = (u64)-1;

out:
	btrfs_free_path(path);
	return ret;
}
static noinline void caching_thread(struct btrfs_work *work)
{
	struct btrfs_block_group *block_group;
	struct btrfs_fs_info *fs_info;
	struct btrfs_caching_control *caching_ctl;
	int ret;

	caching_ctl = container_of(work, struct btrfs_caching_control, work);
	block_group = caching_ctl->block_group;
	fs_info = block_group->fs_info;

	mutex_lock(&caching_ctl->mutex);
	down_read(&fs_info->commit_root_sem);

	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
		ret = load_free_space_tree(caching_ctl);
	else
		ret = load_extent_tree_free(caching_ctl);

	spin_lock(&block_group->lock);
	block_group->caching_ctl = NULL;
	block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED;
	spin_unlock(&block_group->lock);

#ifdef CONFIG_BTRFS_DEBUG
	if (btrfs_should_fragment_free_space(block_group)) {
		u64 bytes_used;

		spin_lock(&block_group->space_info->lock);
		spin_lock(&block_group->lock);
		bytes_used = block_group->length - block_group->used;
		block_group->space_info->bytes_used += bytes_used >> 1;
		spin_unlock(&block_group->lock);
		spin_unlock(&block_group->space_info->lock);
		fragment_free_space(block_group);
	}
#endif

	caching_ctl->progress = (u64)-1;

	up_read(&fs_info->commit_root_sem);
	btrfs_free_excluded_extents(block_group);
	mutex_unlock(&caching_ctl->mutex);

	wake_up(&caching_ctl->wait);

	btrfs_put_caching_control(caching_ctl);
	btrfs_put_block_group(block_group);
}
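
/*
 * Start loading the free space for a block group. With load_cache_only set,
 * only the on-disk space cache is attempted and no caching thread is queued;
 * otherwise caching continues asynchronously via caching_thread().
 */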
int btrfs_cache_block_group(struct btrfs_block_group *cache, int load_cache_only)
{
	DEFINE_WAIT(wait);
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_caching_control *caching_ctl;
	int ret = 0;

	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
	if (!caching_ctl)
		return -ENOMEM;

	INIT_LIST_HEAD(&caching_ctl->list);
	mutex_init(&caching_ctl->mutex);
	init_waitqueue_head(&caching_ctl->wait);
	caching_ctl->block_group = cache;
	caching_ctl->progress = cache->start;
	refcount_set(&caching_ctl->count, 1);
	btrfs_init_work(&caching_ctl->work, caching_thread, NULL, NULL);

	spin_lock(&cache->lock);
	/*
	 * This should be a rare occasion, but this could happen I think in the
	 * case where one thread starts to load the space cache info, and then
	 * some other thread starts a transaction commit which tries to do an
	 * allocation while the other thread is still loading the space cache
	 * info. The previous loop should have kept us from choosing this block
	 * group, but if we've moved to the state where we will wait on caching
	 * block groups we need to first check if we're doing a fast load here,
	 * so we can wait for it to finish, otherwise we could end up allocating
	 * from a block group whose cache gets evicted for one reason or
	 * another.
	 */
	while (cache->cached == BTRFS_CACHE_FAST) {
		struct btrfs_caching_control *ctl;

		ctl = cache->caching_ctl;
		refcount_inc(&ctl->count);
		prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock(&cache->lock);

		schedule();

		finish_wait(&ctl->wait, &wait);
		btrfs_put_caching_control(ctl);
		spin_lock(&cache->lock);
	}

	if (cache->cached != BTRFS_CACHE_NO) {
		spin_unlock(&cache->lock);
		kfree(caching_ctl);
		return 0;
	}
	WARN_ON(cache->caching_ctl);
	cache->caching_ctl = caching_ctl;
	cache->cached = BTRFS_CACHE_FAST;
	spin_unlock(&cache->lock);

	if (btrfs_test_opt(fs_info, SPACE_CACHE)) {
		mutex_lock(&caching_ctl->mutex);
		ret = load_free_space_cache(cache);

		spin_lock(&cache->lock);
		if (ret == 1) {
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_FINISHED;
			cache->last_byte_to_unpin = (u64)-1;
			caching_ctl->progress = (u64)-1;
		} else {
			if (load_cache_only) {
				cache->caching_ctl = NULL;
				cache->cached = BTRFS_CACHE_NO;
			} else {
				cache->cached = BTRFS_CACHE_STARTED;
				cache->has_caching_ctl = 1;
			}
		}
		spin_unlock(&cache->lock);
#ifdef CONFIG_BTRFS_DEBUG
		if (ret == 1 &&
		    btrfs_should_fragment_free_space(cache)) {
			u64 bytes_used;

			spin_lock(&cache->space_info->lock);
			spin_lock(&cache->lock);
			bytes_used = cache->length - cache->used;
			cache->space_info->bytes_used += bytes_used >> 1;
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);
			fragment_free_space(cache);
		}
#endif
		mutex_unlock(&caching_ctl->mutex);

		wake_up(&caching_ctl->wait);
		if (ret == 1) {
			btrfs_put_caching_control(caching_ctl);
			btrfs_free_excluded_extents(cache);
			return 0;
		}
	} else {
		/*
		 * We're either using the free space tree or no caching at all.
		 * Set cached to the appropriate value and wakeup any waiters.
		 */
		spin_lock(&cache->lock);
		if (load_cache_only) {
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_NO;
		} else {
			cache->cached = BTRFS_CACHE_STARTED;
			cache->has_caching_ctl = 1;
		}
		spin_unlock(&cache->lock);
		wake_up(&caching_ctl->wait);
	}

	if (load_cache_only) {
		btrfs_put_caching_control(caching_ctl);
		return 0;
	}

	down_write(&fs_info->commit_root_sem);
	refcount_inc(&caching_ctl->count);
	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
	up_write(&fs_info->commit_root_sem);

	btrfs_get_block_group(cache);

	btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);

	return ret;
}
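
/*
 * The avail_*_alloc_bits fields track which RAID profiles currently have
 * block groups of each type. They are cleared here when the last block group
 * of a profile goes away and set again in set_avail_alloc_bits().
 */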
static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 extra_flags = chunk_to_extended(flags) &
				BTRFS_EXTENDED_PROFILE_MASK;

	write_seqlock(&fs_info->profiles_lock);
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		fs_info->avail_data_alloc_bits &= ~extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_METADATA)
		fs_info->avail_metadata_alloc_bits &= ~extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		fs_info->avail_system_alloc_bits &= ~extra_flags;
	write_sequnlock(&fs_info->profiles_lock);
}
/*
 * Clear incompat bits for the following feature(s):
 *
 * - RAID56 - in case there's neither RAID5 nor RAID6 profile block group
 *            in the whole filesystem
 *
 * - RAID1C34 - same as above for RAID1C3 and RAID1C4 block groups
 */
static void clear_incompat_bg_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	bool found_raid56 = false;
	bool found_raid1c34 = false;

	if ((flags & BTRFS_BLOCK_GROUP_RAID56_MASK) ||
	    (flags & BTRFS_BLOCK_GROUP_RAID1C3) ||
	    (flags & BTRFS_BLOCK_GROUP_RAID1C4)) {
		struct list_head *head = &fs_info->space_info;
		struct btrfs_space_info *sinfo;

		list_for_each_entry_rcu(sinfo, head, list) {
			down_read(&sinfo->groups_sem);
			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID5]))
				found_raid56 = true;
			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID6]))
				found_raid56 = true;
			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C3]))
				found_raid1c34 = true;
			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C4]))
				found_raid1c34 = true;
			up_read(&sinfo->groups_sem);
		}
		if (!found_raid56)
			btrfs_clear_fs_incompat(fs_info, RAID56);
		if (!found_raid1c34)
			btrfs_clear_fs_incompat(fs_info, RAID1C34);
	}
}
static int remove_block_group_item(struct btrfs_trans_handle *trans,
				   struct btrfs_path *path,
				   struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root;
	struct btrfs_key key;
	int ret;

	root = fs_info->extent_root;
	key.objectid = block_group->start;
	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	key.offset = block_group->length;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
	if (ret < 0)
		return ret;

	ret = btrfs_del_item(trans, root, path);
	return ret;
}
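
/*
 * Remove a block group and all of its bookkeeping: free space (cache and
 * tree), the rbtree entry, the space_info accounting and the block group
 * item itself. The caller must hold a transaction and the block group is
 * expected to already be read-only.
 */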
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
			     u64 group_start, struct extent_map *em)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_path *path;
	struct btrfs_block_group *block_group;
	struct btrfs_free_cluster *cluster;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_key key;
	struct inode *inode;
	struct kobject *kobj = NULL;
	int ret;
	int index;
	int factor;
	struct btrfs_caching_control *caching_ctl = NULL;
	bool remove_em;
	bool remove_rsv = false;

	block_group = btrfs_lookup_block_group(fs_info, group_start);
	BUG_ON(!block_group);
	BUG_ON(!block_group->ro);

	trace_btrfs_remove_block_group(block_group);
	/*
	 * Free the reserved super bytes from this block group before
	 * removing it.
	 */
	btrfs_free_excluded_extents(block_group);
	btrfs_free_ref_tree_range(fs_info, block_group->start,
				  block_group->length);

	index = btrfs_bg_flags_to_raid_index(block_group->flags);
	factor = btrfs_bg_type_to_factor(block_group->flags);

	/* make sure this block group isn't part of an allocation cluster */
	cluster = &fs_info->data_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	/*
	 * make sure this block group isn't part of a metadata
	 * allocation cluster
	 */
	cluster = &fs_info->meta_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * get the inode first so any iput calls done for the io_list
	 * aren't the final iput (no unlinks allowed now)
	 */
	inode = lookup_free_space_inode(block_group, path);

	mutex_lock(&trans->transaction->cache_write_mutex);
	/*
	 * Make sure our free space cache IO is done before removing the
	 * free space inode
	 */
	spin_lock(&trans->transaction->dirty_bgs_lock);
	if (!list_empty(&block_group->io_list)) {
		list_del_init(&block_group->io_list);

		WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode);

		spin_unlock(&trans->transaction->dirty_bgs_lock);
		btrfs_wait_cache_io(trans, block_group, path);
		btrfs_put_block_group(block_group);
		spin_lock(&trans->transaction->dirty_bgs_lock);
	}

	if (!list_empty(&block_group->dirty_list)) {
		list_del_init(&block_group->dirty_list);
		remove_rsv = true;
		btrfs_put_block_group(block_group);
	}
	spin_unlock(&trans->transaction->dirty_bgs_lock);
	mutex_unlock(&trans->transaction->cache_write_mutex);

	if (!IS_ERR(inode)) {
		ret = btrfs_orphan_add(trans, BTRFS_I(inode));
		if (ret) {
			btrfs_add_delayed_iput(inode);
			goto out;
		}
		clear_nlink(inode);
		/* One for the block groups ref */
		spin_lock(&block_group->lock);
		if (block_group->iref) {
			block_group->iref = 0;
			block_group->inode = NULL;
			spin_unlock(&block_group->lock);
			iput(inode);
		} else {
			spin_unlock(&block_group->lock);
		}
		/* One for our lookup ref */
		btrfs_add_delayed_iput(inode);
	}

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.type = 0;
	key.offset = block_group->start;

	ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0)
		btrfs_release_path(path);
	if (ret == 0) {
		ret = btrfs_del_item(trans, tree_root, path);
		if (ret)
			goto out;
		btrfs_release_path(path);
	}

	spin_lock(&fs_info->block_group_cache_lock);
	rb_erase(&block_group->cache_node,
		 &fs_info->block_group_cache_tree);
	RB_CLEAR_NODE(&block_group->cache_node);

	/* Once for the block groups rbtree */
	btrfs_put_block_group(block_group);

	if (fs_info->first_logical_byte == block_group->start)
		fs_info->first_logical_byte = (u64)-1;
	spin_unlock(&fs_info->block_group_cache_lock);

	down_write(&block_group->space_info->groups_sem);
	/*
	 * we must use list_del_init so people can check to see if they
	 * are still on the list after taking the semaphore
	 */
	list_del_init(&block_group->list);
	if (list_empty(&block_group->space_info->block_groups[index])) {
		kobj = block_group->space_info->block_group_kobjs[index];
		block_group->space_info->block_group_kobjs[index] = NULL;
		clear_avail_alloc_bits(fs_info, block_group->flags);
	}
	up_write(&block_group->space_info->groups_sem);
	clear_incompat_bg_bits(fs_info, block_group->flags);
	if (kobj) {
		kobject_del(kobj);
		kobject_put(kobj);
	}

	if (block_group->has_caching_ctl)
		caching_ctl = btrfs_get_caching_control(block_group);
	if (block_group->cached == BTRFS_CACHE_STARTED)
		btrfs_wait_block_group_cache_done(block_group);
	if (block_group->has_caching_ctl) {
		down_write(&fs_info->commit_root_sem);
		if (!caching_ctl) {
			struct btrfs_caching_control *ctl;

			list_for_each_entry(ctl,
				    &fs_info->caching_block_groups, list)
				if (ctl->block_group == block_group) {
					caching_ctl = ctl;
					refcount_inc(&caching_ctl->count);
					break;
				}
		}
		if (caching_ctl)
			list_del_init(&caching_ctl->list);
		up_write(&fs_info->commit_root_sem);
		if (caching_ctl) {
			/* Once for the caching bgs list and once for us. */
			btrfs_put_caching_control(caching_ctl);
			btrfs_put_caching_control(caching_ctl);
		}
	}

	spin_lock(&trans->transaction->dirty_bgs_lock);
	WARN_ON(!list_empty(&block_group->dirty_list));
	WARN_ON(!list_empty(&block_group->io_list));
	spin_unlock(&trans->transaction->dirty_bgs_lock);

	btrfs_remove_free_space_cache(block_group);

	spin_lock(&block_group->space_info->lock);
	list_del_init(&block_group->ro_list);

	if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
		WARN_ON(block_group->space_info->total_bytes
			< block_group->length);
		WARN_ON(block_group->space_info->bytes_readonly
			< block_group->length);
		WARN_ON(block_group->space_info->disk_total
			< block_group->length * factor);
	}
	block_group->space_info->total_bytes -= block_group->length;
	block_group->space_info->bytes_readonly -= block_group->length;
	block_group->space_info->disk_total -= block_group->length * factor;

	spin_unlock(&block_group->space_info->lock);

	/*
	 * Remove the free space for the block group from the free space tree
	 * and the block group's item from the extent tree before marking the
	 * block group as removed. This is to prevent races with tasks that
	 * freeze and unfreeze a block group, this task and another task
	 * allocating a new block group - the unfreeze task ends up removing
	 * the block group's extent map before the task calling this function
	 * deletes the block group item from the extent tree, allowing for
	 * another task to attempt to create another block group with the same
	 * item key (and failing with -EEXIST and a transaction abort).
	 */
	ret = remove_block_group_free_space(trans, block_group);
	if (ret)
		goto out;

	ret = remove_block_group_item(trans, path, block_group);
	if (ret < 0)
		goto out;

	mutex_lock(&fs_info->chunk_mutex);
	spin_lock(&block_group->lock);
	block_group->removed = 1;
	/*
	 * At this point trimming or scrub can't start on this block group,
	 * because we removed the block group from the rbtree
	 * fs_info->block_group_cache_tree so no one can find it anymore and
	 * even if someone already got this block group before we removed it
	 * from the rbtree, they have already incremented block_group->frozen -
	 * if they didn't, for the trimming case they won't find any free space
	 * entries because we already removed them all when we called
	 * btrfs_remove_free_space_cache().
	 *
	 * And we must not remove the extent map from the fs_info->mapping_tree
	 * to prevent the same logical address range and physical device space
	 * ranges from being reused for a new block group. This is needed to
	 * avoid races with trimming and scrub.
	 *
	 * An fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
	 * completely transactionless, so while it is trimming a range the
	 * currently running transaction might finish and a new one start,
	 * allowing for new block groups to be created that can reuse the same
	 * physical device locations unless we take this special care.
	 *
	 * There may also be an implicit trim operation if the file system
	 * is mounted with -odiscard. The same protections must remain
	 * in place until the extents have been discarded completely when
	 * the transaction commit has completed.
	 */
	remove_em = (atomic_read(&block_group->frozen) == 0);
	spin_unlock(&block_group->lock);

	mutex_unlock(&fs_info->chunk_mutex);

	if (remove_em) {
		struct extent_map_tree *em_tree;

		em_tree = &fs_info->mapping_tree;
		write_lock(&em_tree->lock);
		remove_extent_mapping(em_tree, em);
		write_unlock(&em_tree->lock);
		/* once for the tree */
		free_extent_map(em);
	}

out:
	/* Once for the lookup reference */
	btrfs_put_block_group(block_group);
	if (remove_rsv)
		btrfs_delayed_refs_rsv_release(fs_info, 1);
	btrfs_free_path(path);
	return ret;
}
struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
		struct btrfs_fs_info *fs_info, const u64 chunk_offset)
{
	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	struct map_lookup *map;
	unsigned int num_items;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	read_unlock(&em_tree->lock);
	ASSERT(em && em->start == chunk_offset);

	/*
	 * We need to reserve 3 + N units from the metadata space info in order
	 * to remove a block group (done at btrfs_remove_chunk() and at
	 * btrfs_remove_block_group()), which are used for:
	 *
	 * 1 unit for adding the free space inode's orphan (located in the tree
	 * of tree roots).
	 * 1 unit for deleting the block group item (located in the extent
	 * tree).
	 * 1 unit for deleting the free space item (located in tree of tree
	 * roots).
	 * N units for deleting N device extent items corresponding to each
	 * stripe (located in the device tree).
	 *
	 * In order to remove a block group we also need to reserve units in the
	 * system space info in order to update the chunk tree (update one or
	 * more device items and remove one chunk item), but this is done at
	 * btrfs_remove_chunk() through a call to check_system_chunk().
	 */
	map = em->map_lookup;
	num_items = 3 + map->num_stripes;
	free_extent_map(em);

	return btrfs_start_transaction_fallback_global_rsv(fs_info->extent_root,
							   num_items);
}
/*
 * Mark block group @cache read-only, so later write won't happen to block
 * group @cache.
 *
 * If @force is not set, this function will only mark the block group readonly
 * if we have enough free space (1M) in other metadata/system block groups.
 * If @force is set, this function will mark the block group readonly
 * without checking free space.
 *
 * NOTE: This function doesn't care if other block groups can contain all the
 * data in this block group. That check should be done by relocation routine,
 * not this function.
 */
static int inc_block_group_ro(struct btrfs_block_group *cache, int force)
{
	struct btrfs_space_info *sinfo = cache->space_info;
	u64 num_bytes;
	int ret = -ENOSPC;

	spin_lock(&sinfo->lock);
	spin_lock(&cache->lock);

	if (cache->ro) {
		cache->ro++;
		ret = 0;
		goto out;
	}

	num_bytes = cache->length - cache->reserved - cache->pinned -
		    cache->bytes_super - cache->used;

	/*
	 * Data never overcommits, even in mixed mode, so do just the straight
	 * check of left over space in how much we have allocated.
	 */
	if (force) {
		ret = 0;
	} else if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA) {
		u64 sinfo_used = btrfs_space_info_used(sinfo, true);

		/*
		 * Here we make sure if we mark this bg RO, we still have enough
		 * free space as buffer.
		 */
		if (sinfo_used + num_bytes <= sinfo->total_bytes)
			ret = 0;
	} else {
		/*
		 * We overcommit metadata, so we need to do the
		 * btrfs_can_overcommit check here, and we need to pass in
		 * BTRFS_RESERVE_NO_FLUSH to give ourselves the most amount of
		 * leeway to allow us to mark this block group as read only.
		 */
		if (btrfs_can_overcommit(cache->fs_info, sinfo, num_bytes,
					 BTRFS_RESERVE_NO_FLUSH))
			ret = 0;
	}

	if (!ret) {
		sinfo->bytes_readonly += num_bytes;
		cache->ro++;
		list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
	}
out:
	spin_unlock(&cache->lock);
	spin_unlock(&sinfo->lock);
	if (ret == -ENOSPC && btrfs_test_opt(cache->fs_info, ENOSPC_DEBUG)) {
		btrfs_info(cache->fs_info,
			"unable to make block group %llu ro", cache->start);
		btrfs_dump_space_info(cache->fs_info, cache->space_info, 0, 0);
	}
	return ret;
}
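
/*
 * Clear any pinned extent ranges a soon-to-be-deleted block group may still
 * have in the current and the previous transaction. Returns false if
 * clearing failed, in which case the caller backs out of the deletion.
 */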
static bool clean_pinned_extents(struct btrfs_trans_handle *trans,
				 struct btrfs_block_group *bg)
{
	struct btrfs_fs_info *fs_info = bg->fs_info;
	struct btrfs_transaction *prev_trans = NULL;
	const u64 start = bg->start;
	const u64 end = start + bg->length - 1;
	int ret;

	spin_lock(&fs_info->trans_lock);
	if (trans->transaction->list.prev != &fs_info->trans_list) {
		prev_trans = list_last_entry(&trans->transaction->list,
					     struct btrfs_transaction, list);
		refcount_inc(&prev_trans->use_count);
	}
	spin_unlock(&fs_info->trans_lock);

	/*
	 * Hold the unused_bg_unpin_mutex lock to avoid racing with
	 * btrfs_finish_extent_commit(). If we are at transaction N, another
	 * task might be running finish_extent_commit() for the previous
	 * transaction N - 1, and have seen a range belonging to the block
	 * group in pinned_extents before we were able to clear the whole block
	 * group range from pinned_extents. This means that task can lookup for
	 * the block group after we unpinned it from pinned_extents and removed
	 * it, leading to a BUG_ON() at unpin_extent_range().
	 */
	mutex_lock(&fs_info->unused_bg_unpin_mutex);
	if (prev_trans) {
		ret = clear_extent_bits(&prev_trans->pinned_extents, start, end,
					EXTENT_DIRTY);
		if (ret)
			goto out;
	}

	ret = clear_extent_bits(&trans->transaction->pinned_extents, start, end,
				EXTENT_DIRTY);
out:
	mutex_unlock(&fs_info->unused_bg_unpin_mutex);
	if (prev_trans)
		btrfs_put_transaction(prev_trans);

	return ret == 0;
}
/*
 * Process the unused_bgs list and remove any that don't have any allocated
 * space inside of them.
 */
void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_group *block_group;
	struct btrfs_space_info *space_info;
	struct btrfs_trans_handle *trans;
	const bool async_trim_enabled = btrfs_test_opt(fs_info, DISCARD_ASYNC);
	int ret = 0;

	if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
		return;

	spin_lock(&fs_info->unused_bgs_lock);
	while (!list_empty(&fs_info->unused_bgs)) {
		int trimming;

		block_group = list_first_entry(&fs_info->unused_bgs,
					       struct btrfs_block_group,
					       bg_list);
		list_del_init(&block_group->bg_list);

		space_info = block_group->space_info;

		if (ret || btrfs_mixed_space_info(space_info)) {
			btrfs_put_block_group(block_group);
			continue;
		}
		spin_unlock(&fs_info->unused_bgs_lock);

		btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group);

		mutex_lock(&fs_info->delete_unused_bgs_mutex);

		/* Don't want to race with allocators so take the groups_sem */
		down_write(&space_info->groups_sem);

		/*
		 * Async discard moves the final block group discard to be prior
		 * to the unused_bgs code path. Therefore, if it's not fully
		 * trimmed, punt it back to the async discard lists.
		 */
		if (btrfs_test_opt(fs_info, DISCARD_ASYNC) &&
		    !btrfs_is_free_space_trimmed(block_group)) {
			trace_btrfs_skip_unused_block_group(block_group);
			up_write(&space_info->groups_sem);
			/* Requeue if we failed because of async discard */
			btrfs_discard_queue_work(&fs_info->discard_ctl,
						 block_group);
			goto next;
		}

		spin_lock(&block_group->lock);
		if (block_group->reserved || block_group->pinned ||
		    block_group->used || block_group->ro ||
		    list_is_singular(&block_group->list)) {
			/*
			 * We want to bail if we made new allocations or have
			 * outstanding allocations in this block group. We do
			 * the ro check in case balance is currently acting on
			 * this block group.
			 */
			trace_btrfs_skip_unused_block_group(block_group);
			spin_unlock(&block_group->lock);
			up_write(&space_info->groups_sem);
			goto next;
		}
		spin_unlock(&block_group->lock);

		/* We don't want to force the issue, only flip if it's ok. */
		ret = inc_block_group_ro(block_group, 0);
		up_write(&space_info->groups_sem);
		if (ret < 0) {
			ret = 0;
			goto next;
		}

		/*
		 * Want to do this before we do anything else so we can recover
		 * properly if we fail to join the transaction.
		 */
		trans = btrfs_start_trans_remove_block_group(fs_info,
						     block_group->start);
		if (IS_ERR(trans)) {
			btrfs_dec_block_group_ro(block_group);
			ret = PTR_ERR(trans);
			goto next;
		}

		/*
		 * We could have pending pinned extents for this block group,
		 * just delete them, we don't care about them anymore.
		 */
		if (!clean_pinned_extents(trans, block_group)) {
			btrfs_dec_block_group_ro(block_group);
			goto end_trans;
		}

		/*
		 * At this point, the block_group is read only and should fail
		 * new allocations. However, btrfs_finish_extent_commit() can
		 * cause this block_group to be placed back on the discard
		 * lists because now the block_group isn't fully discarded.
		 * Bail here and try again later after discarding everything.
		 */
		spin_lock(&fs_info->discard_ctl.lock);
		if (!list_empty(&block_group->discard_list)) {
			spin_unlock(&fs_info->discard_ctl.lock);
			btrfs_dec_block_group_ro(block_group);
			btrfs_discard_queue_work(&fs_info->discard_ctl,
						 block_group);
			goto end_trans;
		}
		spin_unlock(&fs_info->discard_ctl.lock);

		/* Reset pinned so btrfs_put_block_group doesn't complain */
		spin_lock(&space_info->lock);
		spin_lock(&block_group->lock);

		btrfs_space_info_update_bytes_pinned(fs_info, space_info,
						     -block_group->pinned);
		space_info->bytes_readonly += block_group->pinned;
		percpu_counter_add_batch(&space_info->total_bytes_pinned,
					 -block_group->pinned,
					 BTRFS_TOTAL_BYTES_PINNED_BATCH);
		block_group->pinned = 0;

		spin_unlock(&block_group->lock);
		spin_unlock(&space_info->lock);

		/*
		 * The normal path here is an unused block group is passed here,
		 * then trimming is handled in the transaction commit path.
		 * Async discard interposes before this to do the trimming
		 * before coming down the unused block group path as trimming
		 * will no longer be done later in the transaction commit path.
		 */
		if (!async_trim_enabled && btrfs_test_opt(fs_info, DISCARD_ASYNC))
			goto flip_async;

		/* DISCARD can flip during remount */
		trimming = btrfs_test_opt(fs_info, DISCARD_SYNC);

		/* Implicit trim during transaction commit. */
		if (trimming)
			btrfs_freeze_block_group(block_group);

		/*
		 * Btrfs_remove_chunk will abort the transaction if things go
		 * horribly wrong.
		 */
		ret = btrfs_remove_chunk(trans, block_group->start);

		if (ret) {
			if (trimming)
				btrfs_unfreeze_block_group(block_group);
			goto end_trans;
		}

		/*
		 * If we're not mounted with -odiscard, we can just forget
		 * about this block group. Otherwise we'll need to wait
		 * until transaction commit to do the actual discard.
		 */
		if (trimming) {
			spin_lock(&fs_info->unused_bgs_lock);
			/*
			 * A concurrent scrub might have added us to the list
			 * fs_info->unused_bgs, so use a list_move operation
			 * to add the block group to the deleted_bgs list.
			 */
			list_move(&block_group->bg_list,
				  &trans->transaction->deleted_bgs);
			spin_unlock(&fs_info->unused_bgs_lock);
			btrfs_get_block_group(block_group);
		}
end_trans:
		btrfs_end_transaction(trans);
next:
		mutex_unlock(&fs_info->delete_unused_bgs_mutex);
		btrfs_put_block_group(block_group);
		spin_lock(&fs_info->unused_bgs_lock);
	}
	spin_unlock(&fs_info->unused_bgs_lock);
	return;

flip_async:
	btrfs_end_transaction(trans);
	mutex_unlock(&fs_info->delete_unused_bgs_mutex);
	btrfs_put_block_group(block_group);
	btrfs_discard_punt_unused_bgs_list(fs_info);
}
void btrfs_mark_bg_unused(struct btrfs_block_group *bg)
{
	struct btrfs_fs_info *fs_info = bg->fs_info;

	spin_lock(&fs_info->unused_bgs_lock);
	if (list_empty(&bg->bg_list)) {
		btrfs_get_block_group(bg);
		trace_btrfs_add_unused_block_group(bg);
		list_add_tail(&bg->bg_list, &fs_info->unused_bgs);
	}
	spin_unlock(&fs_info->unused_bgs_lock);
}
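
/*
 * Find the next block group item at or after *key and sanity check it
 * against the chunk mapping (start, length and type must match the
 * corresponding extent map). On success the path points at the item.
 */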
static int find_first_block_group(struct btrfs_fs_info *fs_info,
				  struct btrfs_path *path,
				  struct btrfs_key *key)
{
	struct btrfs_root *root = fs_info->extent_root;
	int ret;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	struct btrfs_block_group_item bg;
	u64 flags;
	int slot;

	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret < 0)
		return ret;

	while (1) {
		slot = path->slots[0];
		leaf = path->nodes[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.objectid >= key->objectid &&
		    found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
			struct extent_map_tree *em_tree;
			struct extent_map *em;

			em_tree = &root->fs_info->mapping_tree;
			read_lock(&em_tree->lock);
			em = lookup_extent_mapping(em_tree, found_key.objectid,
						   found_key.offset);
			read_unlock(&em_tree->lock);
			if (!em) {
				btrfs_err(fs_info,
			"logical %llu len %llu found bg but no related chunk",
					  found_key.objectid, found_key.offset);
				ret = -ENOENT;
			} else if (em->start != found_key.objectid ||
				   em->len != found_key.offset) {
				btrfs_err(fs_info,
		"block group %llu len %llu mismatch with chunk %llu len %llu",
					  found_key.objectid, found_key.offset,
					  em->start, em->len);
				ret = -EUCLEAN;
			} else {
				read_extent_buffer(leaf, &bg,
					btrfs_item_ptr_offset(leaf, slot),
					sizeof(bg));
				flags = btrfs_stack_block_group_flags(&bg) &
					BTRFS_BLOCK_GROUP_TYPE_MASK;

				if (flags != (em->map_lookup->type &
					      BTRFS_BLOCK_GROUP_TYPE_MASK)) {
					btrfs_err(fs_info,
"block group %llu len %llu type flags 0x%llx mismatch with chunk type flags 0x%llx",
						found_key.objectid,
						found_key.offset, flags,
						(BTRFS_BLOCK_GROUP_TYPE_MASK &
						 em->map_lookup->type));
					ret = -EUCLEAN;
				} else {
					ret = 0;
				}
			}
			free_extent_map(em);
			goto out;
		}
		path->slots[0]++;
	}
out:
	return ret;
}
static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 extra_flags = chunk_to_extended(flags) &
				BTRFS_EXTENDED_PROFILE_MASK;

	write_seqlock(&fs_info->profiles_lock);
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		fs_info->avail_data_alloc_bits |= extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_METADATA)
		fs_info->avail_metadata_alloc_bits |= extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		fs_info->avail_system_alloc_bits |= extra_flags;
	write_sequnlock(&fs_info->profiles_lock);
}
/*
 * btrfs_rmap_block - Map a physical disk address to a list of logical addresses
 * @chunk_start:   logical address of block group
 * @physical:	   physical address to map to logical addresses
 * @logical:	   return array of logical addresses which map to @physical
 * @naddrs:	   length of @logical
 * @stripe_len:    size of IO stripe for the given block group
 *
 * Maps a particular @physical disk address to a list of @logical addresses.
 * Used primarily to exclude those portions of a block group that contain super
 * block copies.
 */
int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
		     u64 physical, u64 **logical, int *naddrs, int *stripe_len)
{
	struct extent_map *em;
	struct map_lookup *map;
	u64 *buf;
	u64 bytenr;
	u64 data_stripe_length;
	u64 io_stripe_size;
	int i, nr = 0;
	int ret = 0;

	em = btrfs_get_chunk_map(fs_info, chunk_start, 1);
	if (IS_ERR(em))
		return -EIO;

	map = em->map_lookup;
	data_stripe_length = em->len;
	io_stripe_size = map->stripe_len;

	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		data_stripe_length = div_u64(data_stripe_length,
					     map->num_stripes / map->sub_stripes);
	else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
		data_stripe_length = div_u64(data_stripe_length, map->num_stripes);
	else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		data_stripe_length = div_u64(data_stripe_length,
					     nr_data_stripes(map));
		io_stripe_size = map->stripe_len * nr_data_stripes(map);
	}

	buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS);
	if (!buf) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < map->num_stripes; i++) {
		bool already_inserted = false;
		u64 stripe_nr;
		int j;

		if (!in_range(physical, map->stripes[i].physical,
			      data_stripe_length))
			continue;

		stripe_nr = physical - map->stripes[i].physical;
		stripe_nr = div64_u64(stripe_nr, map->stripe_len);

		if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
			stripe_nr = stripe_nr * map->num_stripes + i;
			stripe_nr = div_u64(stripe_nr, map->sub_stripes);
		} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
			stripe_nr = stripe_nr * map->num_stripes + i;
		}
		/*
		 * The remaining case would be for RAID56, multiply by
		 * nr_data_stripes(). Alternatively, just use rmap_len below
		 * instead of map->stripe_len
		 */

		bytenr = chunk_start + stripe_nr * io_stripe_size;

		/* Ensure we don't add duplicate addresses */
		for (j = 0; j < nr; j++) {
			if (buf[j] == bytenr) {
				already_inserted = true;
				break;
			}
		}

		if (!already_inserted)
			buf[nr++] = bytenr;
	}

	*logical = buf;
	*naddrs = nr;
	*stripe_len = io_stripe_size;
out:
	free_extent_map(em);
	return ret;
}
static int exclude_super_stripes(struct btrfs_block_group *cache)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	u64 bytenr;
	u64 *logical;
	int stripe_len;
	int i, nr, ret;

	if (cache->start < BTRFS_SUPER_INFO_OFFSET) {
		stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->start;
		cache->bytes_super += stripe_len;
		ret = btrfs_add_excluded_extent(fs_info, cache->start,
						stripe_len);
		if (ret)
			return ret;
	}

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		ret = btrfs_rmap_block(fs_info, cache->start,
				       bytenr, &logical, &nr, &stripe_len);
		if (ret)
			return ret;

		while (nr--) {
			u64 len;
			u64 start;

			if (logical[nr] > cache->start + cache->length)
				continue;

			if (logical[nr] + stripe_len <= cache->start)
				continue;

			start = logical[nr];
			if (start < cache->start) {
				start = cache->start;
				len = (logical[nr] + stripe_len) - start;
			} else {
				len = min_t(u64, stripe_len,
					    cache->start + cache->length - start);
			}

			cache->bytes_super += len;
			ret = btrfs_add_excluded_extent(fs_info, start, len);
			if (ret) {
				kfree(logical);
				return ret;
			}
		}

		kfree(logical);
	}
	return 0;
}
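
/*
 * Add the block group to its space_info's per-profile list; the first block
 * group of a profile also gets the corresponding sysfs directory created.
 */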
static void link_block_group(struct btrfs_block_group *cache)
{
	struct btrfs_space_info *space_info = cache->space_info;
	int index = btrfs_bg_flags_to_raid_index(cache->flags);
	bool first = false;

	down_write(&space_info->groups_sem);
	if (list_empty(&space_info->block_groups[index]))
		first = true;
	list_add_tail(&cache->list, &space_info->block_groups[index]);
	up_write(&space_info->groups_sem);

	if (first)
		btrfs_sysfs_add_block_group_type(cache);
}
static struct btrfs_block_group *btrfs_create_block_group_cache(
		struct btrfs_fs_info *fs_info, u64 start)
{
	struct btrfs_block_group *cache;

	cache = kzalloc(sizeof(*cache), GFP_NOFS);
	if (!cache)
		return NULL;

	cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
					GFP_NOFS);
	if (!cache->free_space_ctl) {
		kfree(cache);
		return NULL;
	}

	cache->start = start;

	cache->fs_info = fs_info;
	cache->full_stripe_len = btrfs_full_stripe_len(fs_info, start);
	set_free_space_tree_thresholds(cache);

	cache->discard_index = BTRFS_DISCARD_INDEX_UNUSED;

	atomic_set(&cache->count, 1);
	spin_lock_init(&cache->lock);
	init_rwsem(&cache->data_rwsem);
	INIT_LIST_HEAD(&cache->list);
	INIT_LIST_HEAD(&cache->cluster_list);
	INIT_LIST_HEAD(&cache->bg_list);
	INIT_LIST_HEAD(&cache->ro_list);
	INIT_LIST_HEAD(&cache->discard_list);
	INIT_LIST_HEAD(&cache->dirty_list);
	INIT_LIST_HEAD(&cache->io_list);
	btrfs_init_free_space_ctl(cache);
	atomic_set(&cache->frozen, 0);
	mutex_init(&cache->free_space_lock);
	btrfs_init_full_stripe_locks_tree(&cache->full_stripe_locks_root);

	return cache;
}
/*
 * Iterate all chunks and verify that each of them has the corresponding block
 * group
 */
static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info)
{
	struct extent_map_tree *map_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	struct btrfs_block_group *bg;
	u64 start = 0;
	int ret = 0;

	while (1) {
		read_lock(&map_tree->lock);
		/*
		 * lookup_extent_mapping will return the first extent map
		 * intersecting the range, so setting @len to 1 is enough to
		 * get the first chunk.
		 */
		em = lookup_extent_mapping(map_tree, start, 1);
		read_unlock(&map_tree->lock);
		if (!em)
			break;

		bg = btrfs_lookup_block_group(fs_info, em->start);
		if (!bg) {
			btrfs_err(fs_info,
	"chunk start=%llu len=%llu doesn't have corresponding block group",
				  em->start, em->len);
			ret = -EUCLEAN;
			free_extent_map(em);
			break;
		}
		if (bg->start != em->start || bg->length != em->len ||
		    (bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK) !=
		    (em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
			btrfs_err(fs_info,
"chunk start=%llu len=%llu flags=0x%llx doesn't match block group start=%llu len=%llu flags=0x%llx",
				  em->start, em->len,
				  em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK,
				  bg->start, bg->length,
				  bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK);
			ret = -EUCLEAN;
			free_extent_map(em);
			btrfs_put_block_group(bg);
			break;
		}
		start = em->start + em->len;
		free_extent_map(em);
		btrfs_put_block_group(bg);
	}
	return ret;
}
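
/*
 * Fill the in-memory block group from an on-disk block group item: the
 * length comes from the key offset, used bytes and flags from the item
 * itself.
 */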
static int read_block_group_item(struct btrfs_block_group *cache,
				 struct btrfs_path *path,
				 const struct btrfs_key *key)
{
	struct extent_buffer *leaf = path->nodes[0];
	struct btrfs_block_group_item bgi;
	int slot = path->slots[0];

	cache->length = key->offset;

	read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot),
			   sizeof(bgi));
	cache->used = btrfs_stack_block_group_used(&bgi);
	cache->flags = btrfs_stack_block_group_flags(&bgi);

	return 0;
}
static int read_one_block_group(struct btrfs_fs_info *info,
				struct btrfs_path *path,
				const struct btrfs_key *key,
				int need_clear)
{
	struct btrfs_block_group *cache;
	struct btrfs_space_info *space_info;
	const bool mixed = btrfs_fs_incompat(info, MIXED_GROUPS);
	int ret;

	ASSERT(key->type == BTRFS_BLOCK_GROUP_ITEM_KEY);

	cache = btrfs_create_block_group_cache(info, key->objectid);
	if (!cache)
		return -ENOMEM;

	ret = read_block_group_item(cache, path, key);
	if (ret < 0)
		goto error;

	if (need_clear) {
		/*
		 * When we mount with old space cache, we need to
		 * set BTRFS_DC_CLEAR and set dirty flag.
		 *
		 * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
		 *    truncate the old free space cache inode and
		 *    setup a new one.
		 * b) Setting 'dirty flag' makes sure that we flush
		 *    the new space cache info onto disk.
		 */
		if (btrfs_test_opt(info, SPACE_CACHE))
			cache->disk_cache_state = BTRFS_DC_CLEAR;
	}
	if (!mixed && ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) &&
	    (cache->flags & BTRFS_BLOCK_GROUP_DATA))) {
		btrfs_err(info,
"bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups",
			  cache->start);
		ret = -EINVAL;
		goto error;
	}

	/*
	 * We need to exclude the super stripes now so that the space info has
	 * super bytes accounted for, otherwise we'll think we have more space
	 * than we actually do.
	 */
	ret = exclude_super_stripes(cache);
	if (ret) {
		/* We may have excluded something, so call this just in case. */
		btrfs_free_excluded_extents(cache);
		goto error;
	}

	/*
	 * Check for two cases, either we are full, and therefore don't need
	 * to bother with the caching work since we won't find any space, or we
	 * are empty, and we can just add all the space in and be done with it.
	 * This saves us _a_lot_ of time, particularly in the full case.
	 */
	if (cache->length == cache->used) {
		cache->last_byte_to_unpin = (u64)-1;
		cache->cached = BTRFS_CACHE_FINISHED;
		btrfs_free_excluded_extents(cache);
	} else if (cache->used == 0) {
		cache->last_byte_to_unpin = (u64)-1;
		cache->cached = BTRFS_CACHE_FINISHED;
		add_new_free_space(cache, cache->start,
				   cache->start + cache->length);
		btrfs_free_excluded_extents(cache);
	}

	ret = btrfs_add_block_group_cache(info, cache);
	if (ret) {
		btrfs_remove_free_space_cache(cache);
		goto error;
	}
	trace_btrfs_add_block_group(info, cache, 0);
	btrfs_update_space_info(info, cache->flags, cache->length,
				cache->used, cache->bytes_super, &space_info);

	cache->space_info = space_info;

	link_block_group(cache);

	set_avail_alloc_bits(info, cache->flags);
	if (btrfs_chunk_readonly(info, cache->start)) {
		inc_block_group_ro(cache, 1);
	} else if (cache->used == 0) {
		ASSERT(list_empty(&cache->bg_list));
		if (btrfs_test_opt(info, DISCARD_ASYNC))
			btrfs_discard_queue_work(&info->discard_ctl, cache);
		else
			btrfs_mark_bg_unused(cache);
	}
	return 0;
error:
	btrfs_put_block_group(cache);
	return ret;
}
int btrfs_read_block_groups(struct btrfs_fs_info *info)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_block_group *cache;
	struct btrfs_space_info *space_info;
	struct btrfs_key key;
	int need_clear = 0;
	u64 cache_gen;

	key.objectid = 0;
	key.offset = 0;
	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	cache_gen = btrfs_super_cache_generation(info->super_copy);
	if (btrfs_test_opt(info, SPACE_CACHE) &&
	    btrfs_super_generation(info->super_copy) != cache_gen)
		need_clear = 1;
	if (btrfs_test_opt(info, CLEAR_CACHE))
		need_clear = 1;

	while (1) {
		ret = find_first_block_group(info, path, &key);
		if (ret > 0)
			break;
		if (ret != 0)
			goto error;

		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		ret = read_one_block_group(info, path, &key, need_clear);
		if (ret < 0)
			goto error;
		key.objectid += key.offset;
		key.offset = 0;
		btrfs_release_path(path);
	}

	list_for_each_entry_rcu(space_info, &info->space_info, list) {
		if (!(btrfs_get_alloc_profile(info, space_info->flags) &
		      (BTRFS_BLOCK_GROUP_RAID10 |
		       BTRFS_BLOCK_GROUP_RAID1_MASK |
		       BTRFS_BLOCK_GROUP_RAID56_MASK |
		       BTRFS_BLOCK_GROUP_DUP)))
			continue;
		/*
		 * Avoid allocating from un-mirrored block group if there are
		 * mirrored block groups.
		 */
		list_for_each_entry(cache,
				&space_info->block_groups[BTRFS_RAID_RAID0],
				list)
			inc_block_group_ro(cache, 1);
		list_for_each_entry(cache,
				&space_info->block_groups[BTRFS_RAID_SINGLE],
				list)
			inc_block_group_ro(cache, 1);
	}

	btrfs_init_global_block_rsv(info);
	ret = check_chunk_block_group_mappings(info);
error:
	btrfs_free_path(path);
	return ret;
}
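
/*
 * Write a new block group item into the extent tree, keyed by the block
 * group's start offset and length.
 */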
static int insert_block_group_item(struct btrfs_trans_handle *trans,
				   struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_block_group_item bgi;
	struct btrfs_root *root;
	struct btrfs_key key;

	spin_lock(&block_group->lock);
	btrfs_set_stack_block_group_used(&bgi, block_group->used);
	btrfs_set_stack_block_group_chunk_objectid(&bgi,
				BTRFS_FIRST_CHUNK_TREE_OBJECTID);
	btrfs_set_stack_block_group_flags(&bgi, block_group->flags);
	key.objectid = block_group->start;
	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	key.offset = block_group->length;
	spin_unlock(&block_group->lock);

	root = fs_info->extent_root;
	return btrfs_insert_item(trans, root, &key, &bgi, sizeof(bgi));
}
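
/*
 * Note: the chunk_objectid of every block group item is set to
 * BTRFS_FIRST_CHUNK_TREE_OBJECTID above; the field is effectively
 * historical and is not varied per block group by current kernels.
 */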
void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_block_group *block_group;
	int ret = 0;

	if (!trans->can_flush_pending_bgs)
		return;

	while (!list_empty(&trans->new_bgs)) {
		block_group = list_first_entry(&trans->new_bgs,
					       struct btrfs_block_group,
					       bg_list);
		if (ret)
			goto next;

		ret = insert_block_group_item(trans, block_group);
		if (ret)
			btrfs_abort_transaction(trans, ret);
		ret = btrfs_finish_chunk_alloc(trans, block_group->start,
					       block_group->length);
		if (ret)
			btrfs_abort_transaction(trans, ret);
		add_block_group_free_space(trans, block_group);
		/* Already aborted the transaction if it failed. */
next:
		btrfs_delayed_refs_rsv_release(fs_info, 1);
		list_del_init(&block_group->bg_list);
	}
	btrfs_trans_release_chunk_metadata(trans);
}
int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
			   u64 type, u64 chunk_offset, u64 size)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_block_group *cache;
	int ret;

	btrfs_set_log_full_commit(trans);

	cache = btrfs_create_block_group_cache(fs_info, chunk_offset);
	if (!cache)
		return -ENOMEM;

	cache->length = size;
	cache->used = bytes_used;
	cache->flags = type;
	cache->last_byte_to_unpin = (u64)-1;
	cache->cached = BTRFS_CACHE_FINISHED;
	cache->needs_free_space = 1;
	ret = exclude_super_stripes(cache);
	if (ret) {
		/* We may have excluded something, so call this just in case */
		btrfs_free_excluded_extents(cache);
		btrfs_put_block_group(cache);
		return ret;
	}

	add_new_free_space(cache, chunk_offset, chunk_offset + size);

	btrfs_free_excluded_extents(cache);

#ifdef CONFIG_BTRFS_DEBUG
	if (btrfs_should_fragment_free_space(cache)) {
		u64 new_bytes_used = size - bytes_used;

		bytes_used += new_bytes_used >> 1;
		fragment_free_space(cache);
	}
#endif
	/*
	 * Ensure the corresponding space_info object is created and
	 * assigned to our block group. We want our bg to be added to the rbtree
	 * with its ->space_info set.
	 */
	cache->space_info = btrfs_find_space_info(fs_info, cache->flags);
	ASSERT(cache->space_info);

	ret = btrfs_add_block_group_cache(fs_info, cache);
	if (ret) {
		btrfs_remove_free_space_cache(cache);
		btrfs_put_block_group(cache);
		return ret;
	}

	/*
	 * Now that our block group has its ->space_info set and is inserted in
	 * the rbtree, update the space info's counters.
	 */
	trace_btrfs_add_block_group(fs_info, cache, 1);
	btrfs_update_space_info(fs_info, cache->flags, size, bytes_used,
				cache->bytes_super, &cache->space_info);
	btrfs_update_global_block_rsv(fs_info);

	link_block_group(cache);

	list_add_tail(&cache->bg_list, &trans->new_bgs);
	trans->delayed_ref_updates++;
	btrfs_update_delayed_refs_rsv(trans);

	set_avail_alloc_bits(fs_info, type);
	return 0;
}
static u64 update_block_group_flags(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 num_devices;
	u64 stripped;

	/*
	 * if restripe for this chunk_type is on pick target profile and
	 * return, otherwise do the usual balance
	 */
	stripped = get_restripe_target(fs_info, flags);
	if (stripped)
		return extended_to_chunk(stripped);

	num_devices = fs_info->fs_devices->rw_devices;

	stripped = BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID56_MASK |
		BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10;

	if (num_devices == 1) {
		stripped |= BTRFS_BLOCK_GROUP_DUP;
		stripped = flags & ~stripped;

		/* turn raid0 into single device chunks */
		if (flags & BTRFS_BLOCK_GROUP_RAID0)
			return stripped;

		/* turn mirroring into duplication */
		if (flags & (BTRFS_BLOCK_GROUP_RAID1_MASK |
			     BTRFS_BLOCK_GROUP_RAID10))
			return stripped | BTRFS_BLOCK_GROUP_DUP;
	} else {
		/* they already had raid on here, just return */
		if (flags & stripped)
			return flags;

		stripped |= BTRFS_BLOCK_GROUP_DUP;
		stripped = flags & ~stripped;

		/* switch duplicated blocks with raid1 */
		if (flags & BTRFS_BLOCK_GROUP_DUP)
			return stripped | BTRFS_BLOCK_GROUP_RAID1;

		/* this is drive concat, leave it alone */
	}

	return flags;
}
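
/*
 * Rough summary of the conversions above (illustrative): on a single
 * device, RAID0 degrades to single and RAID1*/RAID10 degrade to DUP; with
 * multiple devices, DUP is promoted to RAID1 while profiles that already
 * stripe or mirror are left untouched.
 */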
/*
 * Mark one block group RO, can be called several times for the same block
 * group.
 *
 * @cache:		the destination block group
 * @do_chunk_alloc:	whether need to do chunk pre-allocation, this is to
 *			ensure we still have some free space after marking this
 *			block group RO.
 */
int btrfs_inc_block_group_ro(struct btrfs_block_group *cache,
			     bool do_chunk_alloc)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_trans_handle *trans;
	u64 alloc_flags;
	int ret;

again:
	trans = btrfs_join_transaction(fs_info->extent_root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	/*
	 * we're not allowed to set block groups readonly after the dirty
	 * block groups cache has started writing. If it already started,
	 * back off and let this transaction commit
	 */
	mutex_lock(&fs_info->ro_block_group_mutex);
	if (test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &trans->transaction->flags)) {
		u64 transid = trans->transid;

		mutex_unlock(&fs_info->ro_block_group_mutex);
		btrfs_end_transaction(trans);

		ret = btrfs_wait_for_commit(fs_info, transid);
		if (ret)
			return ret;
		goto again;
	}

	if (do_chunk_alloc) {
		/*
		 * If we are changing raid levels, try to allocate a
		 * corresponding block group with the new raid level.
		 */
		alloc_flags = update_block_group_flags(fs_info, cache->flags);
		if (alloc_flags != cache->flags) {
			ret = btrfs_chunk_alloc(trans, alloc_flags,
						CHUNK_ALLOC_FORCE);
			/*
			 * ENOSPC is allowed here, we may have enough space
			 * already allocated at the new raid level to carry on
			 */
			if (ret == -ENOSPC)
				ret = 0;
			if (ret < 0)
				goto out;
		}
	}

	ret = inc_block_group_ro(cache, 0);
	if (!do_chunk_alloc)
		goto unlock_out;
	if (!ret)
		goto out;
	alloc_flags = btrfs_get_alloc_profile(fs_info, cache->space_info->flags);
	ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
	if (ret < 0)
		goto out;
	ret = inc_block_group_ro(cache, 0);
out:
	if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
		alloc_flags = update_block_group_flags(fs_info, cache->flags);
		mutex_lock(&fs_info->chunk_mutex);
		check_system_chunk(trans, alloc_flags);
		mutex_unlock(&fs_info->chunk_mutex);
	}
unlock_out:
	mutex_unlock(&fs_info->ro_block_group_mutex);

	btrfs_end_transaction(trans);
	return ret;
}
void btrfs_dec_block_group_ro(struct btrfs_block_group *cache)
{
	struct btrfs_space_info *sinfo = cache->space_info;
	u64 num_bytes;

	BUG_ON(!cache->ro);

	spin_lock(&sinfo->lock);
	spin_lock(&cache->lock);
	if (!--cache->ro) {
		num_bytes = cache->length - cache->reserved -
			    cache->pinned - cache->bytes_super - cache->used;
		sinfo->bytes_readonly -= num_bytes;
		list_del_init(&cache->ro_list);
	}
	spin_unlock(&cache->lock);
	spin_unlock(&sinfo->lock);
}
static int update_block_group_item(struct btrfs_trans_handle *trans,
				   struct btrfs_path *path,
				   struct btrfs_block_group *cache)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int ret;
	struct btrfs_root *root = fs_info->extent_root;
	unsigned long bi;
	struct extent_buffer *leaf;
	struct btrfs_block_group_item bgi;
	struct btrfs_key key;

	key.objectid = cache->start;
	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	key.offset = cache->length;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		goto fail;
	}

	leaf = path->nodes[0];
	bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
	btrfs_set_stack_block_group_used(&bgi, cache->used);
	btrfs_set_stack_block_group_chunk_objectid(&bgi,
			BTRFS_FIRST_CHUNK_TREE_OBJECTID);
	btrfs_set_stack_block_group_flags(&bgi, cache->flags);
	write_extent_buffer(leaf, &bgi, bi, sizeof(bgi));
	btrfs_mark_buffer_dirty(leaf);
fail:
	btrfs_release_path(path);
	return ret;
}
static int cache_save_setup(struct btrfs_block_group *block_group,
			    struct btrfs_trans_handle *trans,
			    struct btrfs_path *path)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_root *root = fs_info->tree_root;
	struct inode *inode = NULL;
	struct extent_changeset *data_reserved = NULL;
	u64 alloc_hint = 0;
	int dcs = BTRFS_DC_ERROR;
	u64 num_pages = 0;
	int retries = 0;
	int ret = 0;

	/*
	 * If this block group is smaller than 100 megs don't bother caching
	 * the block group.
	 */
	if (block_group->length < (100 * SZ_1M)) {
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_WRITTEN;
		spin_unlock(&block_group->lock);
		return 0;
	}

	if (TRANS_ABORTED(trans))
		return 0;
again:
	inode = lookup_free_space_inode(block_group, path);
	if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
		ret = PTR_ERR(inode);
		btrfs_release_path(path);
		goto out;
	}

	if (IS_ERR(inode)) {
		BUG_ON(retries);
		retries++;

		if (block_group->ro)
			goto out_free;

		ret = create_free_space_inode(trans, block_group, path);
		if (ret)
			goto out_free;
		goto again;
	}

	/*
	 * We want to set the generation to 0, that way if anything goes wrong
	 * from here on out we know not to trust this cache when we load up
	 * next time.
	 */
	BTRFS_I(inode)->generation = 0;
	ret = btrfs_update_inode(trans, root, inode);
	if (ret) {
		/*
		 * So theoretically we could recover from this, simply set the
		 * super cache generation to 0 so we know to invalidate the
		 * cache, but then we'd have to keep track of the block groups
		 * that fail this way so we know we _have_ to reset this cache
		 * before the next commit or risk reading stale cache. So to
		 * limit our exposure to horrible edge cases lets just abort
		 * the transaction, this only happens in really bad situations
		 * anyway.
		 */
		btrfs_abort_transaction(trans, ret);
		goto out_put;
	}

	/* We've already setup this transaction, go ahead and exit */
	if (block_group->cache_generation == trans->transid &&
	    i_size_read(inode)) {
		dcs = BTRFS_DC_SETUP;
		goto out_put;
	}

	if (i_size_read(inode) > 0) {
		ret = btrfs_check_trunc_cache_free_space(fs_info,
					&fs_info->global_block_rsv);
		if (ret)
			goto out_put;

		ret = btrfs_truncate_free_space_cache(trans, NULL, inode);
		if (ret)
			goto out_put;
	}

	spin_lock(&block_group->lock);
	if (block_group->cached != BTRFS_CACHE_FINISHED ||
	    !btrfs_test_opt(fs_info, SPACE_CACHE)) {
		/*
		 * don't bother trying to write stuff out _if_
		 * a) we're not cached,
		 * b) we're with nospace_cache mount option,
		 * c) we're with v2 space_cache (FREE_SPACE_TREE).
		 */
		dcs = BTRFS_DC_WRITTEN;
		spin_unlock(&block_group->lock);
		goto out_put;
	}
	spin_unlock(&block_group->lock);

	/*
	 * We hit an ENOSPC when setting up the cache in this transaction, just
	 * skip doing the setup, we've already cleared the cache so we're safe.
	 */
	if (test_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags)) {
		ret = -ENOSPC;
		goto out_put;
	}

	/*
	 * Try to preallocate enough space based on how big the block group is.
	 * Keep in mind this has to include any pinned space which could end up
	 * taking up quite a bit since it's not folded into the other space
	 * cache.
	 */
	num_pages = div_u64(block_group->length, SZ_256M);
	if (!num_pages)
		num_pages = 1;

	num_pages *= 16;
	num_pages *= PAGE_SIZE;

	ret = btrfs_check_data_free_space(inode, &data_reserved, 0, num_pages);
	if (ret)
		goto out_put;

	ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
					      num_pages, num_pages,
					      &alloc_hint);
	/*
	 * Our cache requires contiguous chunks so that we don't modify a bunch
	 * of metadata or split extents when writing the cache out, which means
	 * we can enospc if we are heavily fragmented in addition to just normal
	 * out of space conditions. So if we hit this just skip setting up any
	 * other block groups for this transaction, maybe we'll unpin enough
	 * space the next time around.
	 */
	if (!ret)
		dcs = BTRFS_DC_SETUP;
	else if (ret == -ENOSPC)
		set_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags);

out_put:
	iput(inode);
out_free:
	btrfs_release_path(path);
out:
	spin_lock(&block_group->lock);
	if (!ret && dcs == BTRFS_DC_SETUP)
		block_group->cache_generation = trans->transid;
	block_group->disk_cache_state = dcs;
	spin_unlock(&block_group->lock);

	extent_changeset_free(data_reserved);
	return ret;
}
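
/*
 * Sizing example for the preallocation above (illustrative, assuming 4KiB
 * pages and the 16-pages-per-256MiB factor used here): a 1GiB block group
 * gives num_pages = div_u64(1G, 256M) = 4, then 4 * 16 = 64 pages, i.e.
 * 256KiB preallocated for the free space cache file.
 */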
int btrfs_setup_space_cache(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_block_group *cache, *tmp;
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_path *path;

	if (list_empty(&cur_trans->dirty_bgs) ||
	    !btrfs_test_opt(fs_info, SPACE_CACHE))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* Could add new block groups, use _safe just in case */
	list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs,
				 dirty_list) {
		if (cache->disk_cache_state == BTRFS_DC_CLEAR)
			cache_save_setup(cache, trans, path);
	}

	btrfs_free_path(path);
	return 0;
}
/*
 * Transaction commit does final block group cache writeback during a critical
 * section where nothing is allowed to change the FS. This is required in
 * order for the cache to actually match the block group, but can introduce a
 * lot of latency into the commit.
 *
 * So, btrfs_start_dirty_block_groups is here to kick off block group cache IO.
 * There's a chance we'll have to redo some of it if the block group changes
 * again during the commit, but it greatly reduces the commit latency by
 * getting rid of the easy block groups while we're still allowing others to
 * join the commit.
 */
int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_block_group *cache;
	struct btrfs_transaction *cur_trans = trans->transaction;
	int ret = 0;
	int should_put;
	struct btrfs_path *path = NULL;
	LIST_HEAD(dirty);
	struct list_head *io = &cur_trans->io_bgs;
	int num_started = 0;
	int loops = 0;

	spin_lock(&cur_trans->dirty_bgs_lock);
	if (list_empty(&cur_trans->dirty_bgs)) {
		spin_unlock(&cur_trans->dirty_bgs_lock);
		return 0;
	}
	list_splice_init(&cur_trans->dirty_bgs, &dirty);
	spin_unlock(&cur_trans->dirty_bgs_lock);

again:
	/* Make sure all the block groups on our dirty list actually exist */
	btrfs_create_pending_block_groups(trans);

	if (!path) {
		path = btrfs_alloc_path();
		if (!path)
			return -ENOMEM;
	}

	/*
	 * cache_write_mutex is here only to save us from balance or automatic
	 * removal of empty block groups deleting this block group while we are
	 * writing out the cache
	 */
	mutex_lock(&trans->transaction->cache_write_mutex);
	while (!list_empty(&dirty)) {
		bool drop_reserve = true;

		cache = list_first_entry(&dirty, struct btrfs_block_group,
					 dirty_list);
		/*
		 * This can happen if something re-dirties a block group that
		 * is already under IO. Just wait for it to finish and then do
		 * it all again
		 */
		if (!list_empty(&cache->io_list)) {
			list_del_init(&cache->io_list);
			btrfs_wait_cache_io(trans, cache, path);
			btrfs_put_block_group(cache);
		}

		/*
		 * btrfs_wait_cache_io uses the cache->dirty_list to decide if
		 * it should update the cache_state. Don't delete until after
		 * we wait.
		 *
		 * Since we're not running in the commit critical section
		 * we need the dirty_bgs_lock to protect from update_block_group
		 */
		spin_lock(&cur_trans->dirty_bgs_lock);
		list_del_init(&cache->dirty_list);
		spin_unlock(&cur_trans->dirty_bgs_lock);

		should_put = 1;

		cache_save_setup(cache, trans, path);

		if (cache->disk_cache_state == BTRFS_DC_SETUP) {
			cache->io_ctl.inode = NULL;
			ret = btrfs_write_out_cache(trans, cache, path);
			if (ret == 0 && cache->io_ctl.inode) {
				num_started++;
				should_put = 0;

				/*
				 * The cache_write_mutex is protecting the
				 * io_list, also refer to the definition of
				 * btrfs_transaction::io_bgs for more details
				 */
				list_add_tail(&cache->io_list, io);
			} else {
				/*
				 * If we failed to write the cache, the
				 * generation will be bad and life goes on
				 */
				ret = 0;
			}
		}
		if (!ret) {
			ret = update_block_group_item(trans, path, cache);
			/*
			 * Our block group might still be attached to the list
			 * of new block groups in the transaction handle of some
			 * other task (struct btrfs_trans_handle->new_bgs). This
			 * means its block group item isn't yet in the extent
			 * tree. If this happens ignore the error, as we will
			 * try again later in the critical section of the
			 * transaction commit.
			 */
			if (ret == -ENOENT) {
				ret = 0;
				spin_lock(&cur_trans->dirty_bgs_lock);
				if (list_empty(&cache->dirty_list)) {
					list_add_tail(&cache->dirty_list,
						      &cur_trans->dirty_bgs);
					btrfs_get_block_group(cache);
					drop_reserve = false;
				}
				spin_unlock(&cur_trans->dirty_bgs_lock);
			} else if (ret) {
				btrfs_abort_transaction(trans, ret);
			}
		}

		/* If it's not on the io list, we need to put the block group */
		if (should_put)
			btrfs_put_block_group(cache);
		if (drop_reserve)
			btrfs_delayed_refs_rsv_release(fs_info, 1);

		if (ret)
			break;

		/*
		 * Avoid blocking other tasks for too long. It might even save
		 * us from writing caches for block groups that are going to be
		 * removed.
		 */
		mutex_unlock(&trans->transaction->cache_write_mutex);
		mutex_lock(&trans->transaction->cache_write_mutex);
	}
	mutex_unlock(&trans->transaction->cache_write_mutex);

	/*
	 * Go through delayed refs for all the stuff we've just kicked off
	 * and then loop back (just once)
	 */
	ret = btrfs_run_delayed_refs(trans, 0);
	if (!ret && loops == 0) {
		loops++;
		spin_lock(&cur_trans->dirty_bgs_lock);
		list_splice_init(&cur_trans->dirty_bgs, &dirty);
		/*
		 * dirty_bgs_lock protects us from concurrent block group
		 * deletes too (not just cache_write_mutex).
		 */
		if (!list_empty(&dirty)) {
			spin_unlock(&cur_trans->dirty_bgs_lock);
			goto again;
		}
		spin_unlock(&cur_trans->dirty_bgs_lock);
	} else if (ret < 0) {
		btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
	}

	btrfs_free_path(path);
	return ret;
}
int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_block_group *cache;
	struct btrfs_transaction *cur_trans = trans->transaction;
	int ret = 0;
	int should_put;
	struct btrfs_path *path;
	struct list_head *io = &cur_trans->io_bgs;
	int num_started = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * Even though we are in the critical section of the transaction commit,
	 * we can still have concurrent tasks adding elements to this
	 * transaction's list of dirty block groups. These tasks correspond to
	 * endio free space workers started when writeback finishes for a
	 * space cache, which run inode.c:btrfs_finish_ordered_io(), and can
	 * allocate new block groups as a result of COWing nodes of the root
	 * tree when updating the free space inode. The writeback for the space
	 * caches is triggered by an earlier call to
	 * btrfs_start_dirty_block_groups() and iterations of the following
	 * loop.
	 * Also we want to do the cache_save_setup first and then run the
	 * delayed refs to make sure we have the best chance at doing this all
	 * in one shot.
	 */
	spin_lock(&cur_trans->dirty_bgs_lock);
	while (!list_empty(&cur_trans->dirty_bgs)) {
		cache = list_first_entry(&cur_trans->dirty_bgs,
					 struct btrfs_block_group,
					 dirty_list);

		/*
		 * This can happen if cache_save_setup re-dirties a block group
		 * that is already under IO. Just wait for it to finish and
		 * then do it all again
		 */
		if (!list_empty(&cache->io_list)) {
			spin_unlock(&cur_trans->dirty_bgs_lock);
			list_del_init(&cache->io_list);
			btrfs_wait_cache_io(trans, cache, path);
			btrfs_put_block_group(cache);
			spin_lock(&cur_trans->dirty_bgs_lock);
		}

		/*
		 * Don't remove from the dirty list until after we've waited on
		 * any pending IO
		 */
		list_del_init(&cache->dirty_list);
		spin_unlock(&cur_trans->dirty_bgs_lock);
		should_put = 1;

		cache_save_setup(cache, trans, path);

		if (!ret)
			ret = btrfs_run_delayed_refs(trans,
						     (unsigned long) -1);

		if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) {
			cache->io_ctl.inode = NULL;
			ret = btrfs_write_out_cache(trans, cache, path);
			if (ret == 0 && cache->io_ctl.inode) {
				num_started++;
				should_put = 0;
				list_add_tail(&cache->io_list, io);
			} else {
				/*
				 * If we failed to write the cache, the
				 * generation will be bad and life goes on
				 */
				ret = 0;
			}
		}
		if (!ret) {
			ret = update_block_group_item(trans, path, cache);
			/*
			 * One of the free space endio workers might have
			 * created a new block group while updating a free space
			 * cache's inode (at inode.c:btrfs_finish_ordered_io())
			 * and hasn't released its transaction handle yet, in
			 * which case the new block group is still attached to
			 * its transaction handle and its creation has not
			 * finished yet (no block group item in the extent tree
			 * yet, etc). If this is the case, wait for all free
			 * space endio workers to finish and retry. This is a
			 * very rare case so no need for a more efficient and
			 * special case here.
			 */
			if (ret == -ENOENT) {
				wait_event(cur_trans->writer_wait,
				   atomic_read(&cur_trans->num_writers) == 1);
				ret = update_block_group_item(trans, path,
							      cache);
			}
			if (ret)
				btrfs_abort_transaction(trans, ret);
		}

		/* If it's not on the io list, we need to put the block group */
		if (should_put)
			btrfs_put_block_group(cache);
		btrfs_delayed_refs_rsv_release(fs_info, 1);
		spin_lock(&cur_trans->dirty_bgs_lock);
	}
	spin_unlock(&cur_trans->dirty_bgs_lock);

	/*
	 * Refer to the definition of io_bgs member for details why it's safe
	 * to use it without any locking
	 */
	while (!list_empty(io)) {
		cache = list_first_entry(io, struct btrfs_block_group,
					 io_list);
		list_del_init(&cache->io_list);
		btrfs_wait_cache_io(trans, cache, path);
		btrfs_put_block_group(cache);
	}

	btrfs_free_path(path);
	return ret;
}
int btrfs_update_block_group(struct btrfs_trans_handle *trans,
			     u64 bytenr, u64 num_bytes, int alloc)
{
	struct btrfs_fs_info *info = trans->fs_info;
	struct btrfs_block_group *cache = NULL;
	u64 total = num_bytes;
	u64 old_val;
	u64 byte_in_group;
	int factor;
	int ret = 0;

	/* Block accounting for super block */
	spin_lock(&info->delalloc_root_lock);
	old_val = btrfs_super_bytes_used(info->super_copy);
	if (alloc)
		old_val += num_bytes;
	else
		old_val -= num_bytes;
	btrfs_set_super_bytes_used(info->super_copy, old_val);
	spin_unlock(&info->delalloc_root_lock);

	while (total) {
		cache = btrfs_lookup_block_group(info, bytenr);
		if (!cache) {
			ret = -ENOENT;
			break;
		}
		factor = btrfs_bg_type_to_factor(cache->flags);

		/*
		 * If this block group has free space cache written out, we
		 * need to make sure to load it if we are removing space. This
		 * is because we need the unpinning stage to actually add the
		 * space back to the block group, otherwise we will leak space.
		 */
		if (!alloc && !btrfs_block_group_done(cache))
			btrfs_cache_block_group(cache, 1);

		byte_in_group = bytenr - cache->start;
		WARN_ON(byte_in_group > cache->length);

		spin_lock(&cache->space_info->lock);
		spin_lock(&cache->lock);

		if (btrfs_test_opt(info, SPACE_CACHE) &&
		    cache->disk_cache_state < BTRFS_DC_CLEAR)
			cache->disk_cache_state = BTRFS_DC_CLEAR;

		old_val = cache->used;
		num_bytes = min(total, cache->length - byte_in_group);
		if (alloc) {
			old_val += num_bytes;
			cache->used = old_val;
			cache->reserved -= num_bytes;
			cache->space_info->bytes_reserved -= num_bytes;
			cache->space_info->bytes_used += num_bytes;
			cache->space_info->disk_used += num_bytes * factor;
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);
		} else {
			old_val -= num_bytes;
			cache->used = old_val;
			cache->pinned += num_bytes;
			btrfs_space_info_update_bytes_pinned(info,
					cache->space_info, num_bytes);
			cache->space_info->bytes_used -= num_bytes;
			cache->space_info->disk_used -= num_bytes * factor;
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);

			percpu_counter_add_batch(
					&cache->space_info->total_bytes_pinned,
					num_bytes,
					BTRFS_TOTAL_BYTES_PINNED_BATCH);
			set_extent_dirty(&trans->transaction->pinned_extents,
					 bytenr, bytenr + num_bytes - 1,
					 GFP_NOFS | __GFP_NOFAIL);
		}

		spin_lock(&trans->transaction->dirty_bgs_lock);
		if (list_empty(&cache->dirty_list)) {
			list_add_tail(&cache->dirty_list,
				      &trans->transaction->dirty_bgs);
			trans->delayed_ref_updates++;
			btrfs_get_block_group(cache);
		}
		spin_unlock(&trans->transaction->dirty_bgs_lock);

		/*
		 * No longer have used bytes in this block group, queue it for
		 * deletion. We do this after adding the block group to the
		 * dirty list to avoid races between cleaner kthread and space
		 * cache writeout.
		 */
		if (!alloc && old_val == 0) {
			if (!btrfs_test_opt(info, DISCARD_ASYNC))
				btrfs_mark_bg_unused(cache);
		}

		btrfs_put_block_group(cache);
		total -= num_bytes;
		bytenr += num_bytes;
	}

	/* Modified block groups are accounted for in the delayed_refs_rsv. */
	btrfs_update_delayed_refs_rsv(trans);
	return ret;
}
/**
 * btrfs_add_reserved_bytes - update the block_group and space info counters
 * @cache:	The cache we are manipulating
 * @ram_bytes:	The number of bytes of file content, and will be same to
 *		@num_bytes except for the compress path.
 * @num_bytes:	The number of bytes in question
 * @delalloc:	The blocks are allocated for the delalloc write
 *
 * This is called by the allocator when it reserves space. If this is a
 * reservation and the block group has become read only we cannot make the
 * reservation and return -EAGAIN, otherwise this function always succeeds.
 */
int btrfs_add_reserved_bytes(struct btrfs_block_group *cache,
			     u64 ram_bytes, u64 num_bytes, int delalloc)
{
	struct btrfs_space_info *space_info = cache->space_info;
	int ret = 0;

	spin_lock(&space_info->lock);
	spin_lock(&cache->lock);
	if (cache->ro) {
		ret = -EAGAIN;
	} else {
		cache->reserved += num_bytes;
		space_info->bytes_reserved += num_bytes;
		trace_btrfs_space_reservation(cache->fs_info, "space_info",
					      space_info->flags, num_bytes, 1);
		btrfs_space_info_update_bytes_may_use(cache->fs_info,
						      space_info, -ram_bytes);
		if (delalloc)
			cache->delalloc_bytes += num_bytes;
	}
	spin_unlock(&cache->lock);
	spin_unlock(&space_info->lock);
	return ret;
}
/**
 * btrfs_free_reserved_bytes - update the block_group and space info counters
 * @cache:	The cache we are manipulating
 * @num_bytes:	The number of bytes in question
 * @delalloc:	The blocks are allocated for the delalloc write
 *
 * This is called by somebody who is freeing space that was never actually used
 * on disk. For example if you reserve some space for a new leaf in transaction
 * A and before transaction A commits you free that leaf, you call this with
 * reserve set to 0 in order to clear the reservation.
 */
void btrfs_free_reserved_bytes(struct btrfs_block_group *cache,
			       u64 num_bytes, int delalloc)
{
	struct btrfs_space_info *space_info = cache->space_info;

	spin_lock(&space_info->lock);
	spin_lock(&cache->lock);
	if (cache->ro)
		space_info->bytes_readonly += num_bytes;
	cache->reserved -= num_bytes;
	space_info->bytes_reserved -= num_bytes;
	space_info->max_extent_size = 0;

	if (delalloc)
		cache->delalloc_bytes -= num_bytes;
	spin_unlock(&cache->lock);
	spin_unlock(&space_info->lock);
}
static void force_metadata_allocation(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
			found->force_alloc = CHUNK_ALLOC_FORCE;
	}
	rcu_read_unlock();
}
static int should_alloc_chunk(struct btrfs_fs_info *fs_info,
			      struct btrfs_space_info *sinfo, int force)
{
	u64 bytes_used = btrfs_space_info_used(sinfo, false);
	u64 thresh;

	if (force == CHUNK_ALLOC_FORCE)
		return 1;

	/*
	 * in limited mode, we want to have some free space up to
	 * about 1% of the FS size.
	 */
	if (force == CHUNK_ALLOC_LIMITED) {
		thresh = btrfs_super_total_bytes(fs_info->super_copy);
		thresh = max_t(u64, SZ_64M, div_factor_fine(thresh, 1));

		if (sinfo->total_bytes - bytes_used < thresh)
			return 1;
	}

	if (bytes_used + SZ_2M < div_factor(sinfo->total_bytes, 8))
		return 0;
	return 1;
}
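
/*
 * Threshold example (illustrative): div_factor_fine(x, 1) is x/100 and
 * div_factor(x, 8) is 80% of x, so CHUNK_ALLOC_LIMITED asks for roughly 1%
 * of the filesystem size (at least 64M) free in this space_info, while the
 * default rule only allocates once usage crosses ~80% of total_bytes.
 */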
int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type)
{
	u64 alloc_flags = btrfs_get_alloc_profile(trans->fs_info, type);

	return btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
}
/*
 * If force is CHUNK_ALLOC_FORCE:
 *    - return 1 if it successfully allocates a chunk,
 *    - return errors including -ENOSPC otherwise.
 * If force is NOT CHUNK_ALLOC_FORCE:
 *    - return 0 if it doesn't need to allocate a new chunk,
 *    - return 1 if it successfully allocates a chunk,
 *    - return errors including -ENOSPC otherwise.
 */
int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
		      enum btrfs_chunk_alloc_enum force)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_space_info *space_info;
	bool wait_for_alloc = false;
	bool should_alloc = false;
	int ret = 0;

	/* Don't re-enter if we're already allocating a chunk */
	if (trans->allocating_chunk)
		return -ENOSPC;

	space_info = btrfs_find_space_info(fs_info, flags);
	ASSERT(space_info);

	do {
		spin_lock(&space_info->lock);
		if (force < space_info->force_alloc)
			force = space_info->force_alloc;
		should_alloc = should_alloc_chunk(fs_info, space_info, force);
		if (space_info->full) {
			/* No more free physical space */
			if (should_alloc)
				ret = -ENOSPC;
			else
				ret = 0;
			spin_unlock(&space_info->lock);
			return ret;
		} else if (!should_alloc) {
			spin_unlock(&space_info->lock);
			return 0;
		} else if (space_info->chunk_alloc) {
			/*
			 * Someone is already allocating, so we need to block
			 * until this someone is finished and then loop to
			 * recheck if we should continue with our allocation
			 * attempt.
			 */
			wait_for_alloc = true;
			spin_unlock(&space_info->lock);
			mutex_lock(&fs_info->chunk_mutex);
			mutex_unlock(&fs_info->chunk_mutex);
		} else {
			/* Proceed with allocation */
			space_info->chunk_alloc = 1;
			wait_for_alloc = false;
			spin_unlock(&space_info->lock);
		}

		cond_resched();
	} while (wait_for_alloc);

	mutex_lock(&fs_info->chunk_mutex);
	trans->allocating_chunk = true;

	/*
	 * If we have mixed data/metadata chunks we want to make sure we keep
	 * allocating mixed chunks instead of individual chunks.
	 */
	if (btrfs_mixed_space_info(space_info))
		flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);

	/*
	 * if we're doing a data chunk, go ahead and make sure that
	 * we keep a reasonable number of metadata chunks allocated in the
	 * FS as well.
	 */
	if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
		fs_info->data_chunk_allocations++;
		if (!(fs_info->data_chunk_allocations %
		      fs_info->metadata_ratio))
			force_metadata_allocation(fs_info);
	}

	/*
	 * Check if we have enough space in SYSTEM chunk because we may need
	 * to update devices.
	 */
	check_system_chunk(trans, flags);

	ret = btrfs_alloc_chunk(trans, flags);
	trans->allocating_chunk = false;

	spin_lock(&space_info->lock);
	if (ret < 0) {
		if (ret == -ENOSPC)
			space_info->full = 1;
		else
			goto out;
	} else {
		ret = 1;
		space_info->max_extent_size = 0;
	}

	space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
out:
	space_info->chunk_alloc = 0;
	spin_unlock(&space_info->lock);
	mutex_unlock(&fs_info->chunk_mutex);
	/*
	 * When we allocate a new chunk we reserve space in the chunk block
	 * reserve to make sure we can COW nodes/leafs in the chunk tree or
	 * add new nodes/leafs to it if we end up needing to do it when
	 * inserting the chunk item and updating device items as part of the
	 * second phase of chunk allocation, performed by
	 * btrfs_finish_chunk_alloc(). So make sure we don't accumulate a
	 * large number of new block groups to create in our transaction
	 * handle's new_bgs list to avoid exhausting the chunk block reserve
	 * in extreme cases - like having a single transaction create many new
	 * block groups when starting to write out the free space caches of all
	 * the block groups that were made dirty during the lifetime of the
	 * transaction.
	 */
	if (trans->chunk_bytes_reserved >= (u64)SZ_2M)
		btrfs_create_pending_block_groups(trans);

	return ret;
}
static u64 get_profile_num_devs(struct btrfs_fs_info *fs_info, u64 type)
{
	u64 num_dev;

	num_dev = btrfs_raid_array[btrfs_bg_flags_to_raid_index(type)].devs_max;
	if (!num_dev)
		num_dev = fs_info->fs_devices->rw_devices;

	return num_dev;
}
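
/*
 * Note: devs_max == 0 in btrfs_raid_array means the profile has no fixed
 * upper bound on device count (e.g. single, RAID0), in which case every
 * writable device may need its device item updated, hence the fallback to
 * rw_devices above.
 */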
/*
 * Reserve space in the system space for allocating or removing a chunk
 */
void check_system_chunk(struct btrfs_trans_handle *trans, u64 type)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_space_info *info;
	u64 left;
	u64 thresh;
	int ret = 0;
	u64 num_devs;

	/*
	 * Needed because we can end up allocating a system chunk and for an
	 * atomic and race free space reservation in the chunk block reserve.
	 */
	lockdep_assert_held(&fs_info->chunk_mutex);

	info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
	spin_lock(&info->lock);
	left = info->total_bytes - btrfs_space_info_used(info, true);
	spin_unlock(&info->lock);

	num_devs = get_profile_num_devs(fs_info, type);

	/* num_devs device items to update and 1 chunk item to add or remove */
	thresh = btrfs_calc_metadata_size(fs_info, num_devs) +
		btrfs_calc_insert_metadata_size(fs_info, 1);

	if (left < thresh && btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
		btrfs_info(fs_info, "left=%llu, need=%llu, flags=%llu",
			   left, thresh, type);
		btrfs_dump_space_info(fs_info, info, 0, 0);
	}

	if (left < thresh) {
		u64 flags = btrfs_system_alloc_profile(fs_info);

		/*
		 * Ignore failure to create system chunk. We might end up not
		 * needing it, as we might not need to COW all nodes/leafs from
		 * the paths we visit in the chunk tree (they were already COWed
		 * or created in the current transaction for example).
		 */
		ret = btrfs_alloc_chunk(trans, flags);
	}

	if (!ret) {
		ret = btrfs_block_rsv_add(fs_info->chunk_root,
					  &fs_info->chunk_block_rsv,
					  thresh, BTRFS_RESERVE_NO_FLUSH);
		if (!ret)
			trans->chunk_bytes_reserved += thresh;
	}
}
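
/*
 * The reservation above is intentionally generous: it covers updating
 * num_devs device items plus inserting (or deleting) one chunk item, sized
 * with worst-case tree depth in mind, since running out of SYSTEM space
 * while COWing the chunk tree is not recoverable.
 */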
void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
{
	struct btrfs_block_group *block_group;
	u64 last = 0;

	while (1) {
		struct inode *inode;

		block_group = btrfs_lookup_first_block_group(info, last);
		while (block_group) {
			btrfs_wait_block_group_cache_done(block_group);
			spin_lock(&block_group->lock);
			if (block_group->iref)
				break;
			spin_unlock(&block_group->lock);
			block_group = btrfs_next_block_group(block_group);
		}
		if (!block_group) {
			if (last == 0)
				break;
			last = 0;
			continue;
		}

		inode = block_group->inode;
		block_group->iref = 0;
		block_group->inode = NULL;
		spin_unlock(&block_group->lock);
		ASSERT(block_group->io_ctl.inode == NULL);
		iput(inode);
		last = block_group->start + block_group->length;
		btrfs_put_block_group(block_group);
	}
}
/*
 * Must be called only after stopping all workers, since we could have block
 * group caching kthreads running, and therefore they could race with us if we
 * freed the block groups before stopping them.
 */
int btrfs_free_block_groups(struct btrfs_fs_info *info)
{
	struct btrfs_block_group *block_group;
	struct btrfs_space_info *space_info;
	struct btrfs_caching_control *caching_ctl;
	struct rb_node *n;

	down_write(&info->commit_root_sem);
	while (!list_empty(&info->caching_block_groups)) {
		caching_ctl = list_entry(info->caching_block_groups.next,
					 struct btrfs_caching_control, list);
		list_del(&caching_ctl->list);
		btrfs_put_caching_control(caching_ctl);
	}
	up_write(&info->commit_root_sem);

	spin_lock(&info->unused_bgs_lock);
	while (!list_empty(&info->unused_bgs)) {
		block_group = list_first_entry(&info->unused_bgs,
					       struct btrfs_block_group,
					       bg_list);
		list_del_init(&block_group->bg_list);
		btrfs_put_block_group(block_group);
	}
	spin_unlock(&info->unused_bgs_lock);

	spin_lock(&info->block_group_cache_lock);
	while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
		block_group = rb_entry(n, struct btrfs_block_group,
				       cache_node);
		rb_erase(&block_group->cache_node,
			 &info->block_group_cache_tree);
		RB_CLEAR_NODE(&block_group->cache_node);
		spin_unlock(&info->block_group_cache_lock);

		down_write(&block_group->space_info->groups_sem);
		list_del(&block_group->list);
		up_write(&block_group->space_info->groups_sem);

		/*
		 * We haven't cached this block group, which means we could
		 * possibly have excluded extents on this block group.
		 */
		if (block_group->cached == BTRFS_CACHE_NO ||
		    block_group->cached == BTRFS_CACHE_ERROR)
			btrfs_free_excluded_extents(block_group);

		btrfs_remove_free_space_cache(block_group);
		ASSERT(block_group->cached != BTRFS_CACHE_STARTED);
		ASSERT(list_empty(&block_group->dirty_list));
		ASSERT(list_empty(&block_group->io_list));
		ASSERT(list_empty(&block_group->bg_list));
		ASSERT(atomic_read(&block_group->count) == 1);
		btrfs_put_block_group(block_group);

		spin_lock(&info->block_group_cache_lock);
	}
	spin_unlock(&info->block_group_cache_lock);

	/*
	 * Now that all the block groups are freed, go through and free all the
	 * space_info structs. This is only called during the final stages of
	 * unmount, and so we know nobody is using them. We call
	 * synchronize_rcu() once before we start, just to be on the safe side.
	 */
	synchronize_rcu();

	btrfs_release_global_block_rsv(info);

	while (!list_empty(&info->space_info)) {
		space_info = list_entry(info->space_info.next,
					struct btrfs_space_info,
					list);

		/*
		 * Do not hide this behind enospc_debug, this is actually
		 * important and indicates a real bug if this happens.
		 */
		if (WARN_ON(space_info->bytes_pinned > 0 ||
			    space_info->bytes_reserved > 0 ||
			    space_info->bytes_may_use > 0))
			btrfs_dump_space_info(info, space_info, 0, 0);
		WARN_ON(space_info->reclaim_size > 0);
		list_del(&space_info->list);
		btrfs_sysfs_remove_space_info(space_info);
	}
	return 0;
}
void btrfs_freeze_block_group(struct btrfs_block_group *cache)
{
	atomic_inc(&cache->frozen);
}
void btrfs_unfreeze_block_group(struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	bool cleanup;

	spin_lock(&block_group->lock);
	cleanup = (atomic_dec_and_test(&block_group->frozen) &&
		   block_group->removed);
	spin_unlock(&block_group->lock);

	if (cleanup) {
		mutex_lock(&fs_info->chunk_mutex);
		em_tree = &fs_info->mapping_tree;
		write_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, block_group->start,
					   1);
		BUG_ON(!em); /* logic error, can't happen */
		remove_extent_mapping(em_tree, em);
		write_unlock(&em_tree->lock);
		mutex_unlock(&fs_info->chunk_mutex);

		/* once for us and once for the tree */
		free_extent_map(em);
		free_extent_map(em);

		/*
		 * We may have left one free space entry and other possible
		 * tasks trimming this block group have left 1 entry each one.
		 * Free them if any.
		 */
		__btrfs_remove_free_space_cache(block_group->free_space_ctl);