// SPDX-License-Identifier: GPL-2.0

/*
 * Note: a few headers that this file depends on (misc.h, ctree.h, disk-io.h,
 * volumes.h, sysfs.h, tree-log.h, discard.h) are assumed here; only the
 * includes below were present in the recovered text.
 */
#include "misc.h"
#include "ctree.h"
#include "block-group.h"
#include "space-info.h"
#include "disk-io.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "volumes.h"
#include "transaction.h"
#include "ref-verify.h"
#include "sysfs.h"
#include "tree-log.h"
#include "delalloc-space.h"
#include "discard.h"
/*
 * Return target flags in extended format or 0 if restripe for this chunk_type
 * is not in progress.
 *
 * Should be called with balance_lock held
 */
static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	u64 target = 0;

	if (!bctl)
		return 0;

	if (flags & BTRFS_BLOCK_GROUP_DATA &&
	    bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
	} else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
		   bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
	} else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
		   bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
	}

	return target;
}
/*
 * @flags: available profiles in extended format (see ctree.h)
 *
 * Return reduced profile in chunk format.  If profile changing is in progress
 * (either running or paused) picks the target profile (if it's already
 * available), otherwise falls back to plain reducing.
 */
static u64 btrfs_reduce_alloc_profile(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 num_devices = fs_info->fs_devices->rw_devices;
	u64 target;
	u64 raid_type;
	u64 allowed = 0;

	/*
	 * See if restripe for this chunk_type is in progress, if so try to
	 * reduce to the target profile
	 */
	spin_lock(&fs_info->balance_lock);
	target = get_restripe_target(fs_info, flags);
	if (target) {
		/* Pick target profile only if it's already available */
		if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
			spin_unlock(&fs_info->balance_lock);
			return extended_to_chunk(target);
		}
	}
	spin_unlock(&fs_info->balance_lock);

	/* First, mask out the RAID levels which aren't possible */
	for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
		if (num_devices >= btrfs_raid_array[raid_type].devs_min)
			allowed |= btrfs_raid_array[raid_type].bg_flag;
	}
	allowed &= flags;

	if (allowed & BTRFS_BLOCK_GROUP_RAID6)
		allowed = BTRFS_BLOCK_GROUP_RAID6;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID5)
		allowed = BTRFS_BLOCK_GROUP_RAID5;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID10)
		allowed = BTRFS_BLOCK_GROUP_RAID10;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID1)
		allowed = BTRFS_BLOCK_GROUP_RAID1;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID0)
		allowed = BTRFS_BLOCK_GROUP_RAID0;

	flags &= ~BTRFS_BLOCK_GROUP_PROFILE_MASK;

	return extended_to_chunk(flags | allowed);
}
u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags)
{
	unsigned seq;
	u64 flags;

	do {
		flags = orig_flags;
		seq = read_seqbegin(&fs_info->profiles_lock);

		if (flags & BTRFS_BLOCK_GROUP_DATA)
			flags |= fs_info->avail_data_alloc_bits;
		else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
			flags |= fs_info->avail_system_alloc_bits;
		else if (flags & BTRFS_BLOCK_GROUP_METADATA)
			flags |= fs_info->avail_metadata_alloc_bits;
	} while (read_seqretry(&fs_info->profiles_lock, seq));

	return btrfs_reduce_alloc_profile(fs_info, flags);
}
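
/*
 * Illustrative sketch, not part of the original file: how a caller typically
 * uses the helper above. btrfs_get_alloc_profile() widens the block group type
 * with the avail_*_alloc_bits and then reduces the result back to a single
 * chunk-format profile. The wrapper name below is hypothetical and the block
 * is deliberately compiled out.
 */
#if 0
static u64 example_data_alloc_profile(struct btrfs_fs_info *fs_info)
{
	/* Profile that a new data chunk would currently be allocated with */
	return btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_DATA);
}
#endif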
void btrfs_get_block_group(struct btrfs_block_group *cache)
{
	atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group *cache)
{
	if (atomic_dec_and_test(&cache->count)) {
		WARN_ON(cache->pinned > 0);
		WARN_ON(cache->reserved > 0);

		/*
		 * A block_group shouldn't be on the discard_list anymore.
		 * Remove the block_group from the discard_list to prevent us
		 * from causing a panic due to NULL pointer dereference.
		 */
		if (WARN_ON(!list_empty(&cache->discard_list)))
			btrfs_discard_cancel_work(&cache->fs_info->discard_ctl,
						  cache);

		/*
		 * If not empty, someone is still holding mutex of
		 * full_stripe_lock, which can only be released by caller.
		 * And it will definitely cause use-after-free when caller
		 * tries to release full stripe lock.
		 *
		 * No better way to resolve, but only to warn.
		 */
		WARN_ON(!RB_EMPTY_ROOT(&cache->full_stripe_locks_root.root));
		kfree(cache->free_space_ctl);
		kfree(cache);
	}
}
/*
 * This adds the block group to the fs_info rb tree for the block group cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
				       struct btrfs_block_group *block_group)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct btrfs_block_group *cache;

	spin_lock(&info->block_group_cache_lock);
	p = &info->block_group_cache_tree.rb_node;

	while (*p) {
		parent = *p;
		cache = rb_entry(parent, struct btrfs_block_group, cache_node);
		if (block_group->start < cache->start) {
			p = &(*p)->rb_left;
		} else if (block_group->start > cache->start) {
			p = &(*p)->rb_right;
		} else {
			spin_unlock(&info->block_group_cache_lock);
			return -EEXIST;
		}
	}

	rb_link_node(&block_group->cache_node, parent, p);
	rb_insert_color(&block_group->cache_node,
			&info->block_group_cache_tree);

	if (info->first_logical_byte > block_group->start)
		info->first_logical_byte = block_group->start;

	spin_unlock(&info->block_group_cache_lock);

	return 0;
}
/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group *block_group_cache_tree_search(
		struct btrfs_fs_info *info, u64 bytenr, int contains)
{
	struct btrfs_block_group *cache, *ret = NULL;
	struct rb_node *n;
	u64 end, start;

	spin_lock(&info->block_group_cache_lock);
	n = info->block_group_cache_tree.rb_node;

	while (n) {
		cache = rb_entry(n, struct btrfs_block_group, cache_node);
		end = cache->start + cache->length - 1;
		start = cache->start;

		if (bytenr < start) {
			if (!contains && (!ret || start < ret->start))
				ret = cache;
			n = n->rb_left;
		} else if (bytenr > start) {
			if (contains && bytenr <= end) {
				ret = cache;
				break;
			}
			n = n->rb_right;
		} else {
			ret = cache;
			break;
		}
	}
	if (ret) {
		btrfs_get_block_group(ret);
		if (bytenr == 0 && info->first_logical_byte > ret->start)
			info->first_logical_byte = ret->start;
	}
	spin_unlock(&info->block_group_cache_lock);

	return ret;
}
/*
 * Return the block group that starts at or after bytenr
 */
struct btrfs_block_group *btrfs_lookup_first_block_group(
		struct btrfs_fs_info *info, u64 bytenr)
{
	return block_group_cache_tree_search(info, bytenr, 0);
}

/*
 * Return the block group that contains the given bytenr
 */
struct btrfs_block_group *btrfs_lookup_block_group(
		struct btrfs_fs_info *info, u64 bytenr)
{
	return block_group_cache_tree_search(info, bytenr, 1);
}
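
/*
 * Illustrative sketch, not part of the original file: both lookup helpers
 * above return a block group with an elevated reference count, so every
 * successful lookup must be paired with btrfs_put_block_group(). The helper
 * name below is hypothetical and the block is deliberately compiled out.
 */
#if 0
static u64 example_block_group_length(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_block_group *bg;
	u64 len = 0;

	bg = btrfs_lookup_block_group(fs_info, bytenr);
	if (bg) {
		len = bg->length;
		/* Drop the reference taken by the lookup */
		btrfs_put_block_group(bg);
	}
	return len;
}
#endif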
struct btrfs_block_group *btrfs_next_block_group(
		struct btrfs_block_group *cache)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct rb_node *node;

	spin_lock(&fs_info->block_group_cache_lock);

	/* If our block group was removed, we need a full search. */
	if (RB_EMPTY_NODE(&cache->cache_node)) {
		const u64 next_bytenr = cache->start + cache->length;

		spin_unlock(&fs_info->block_group_cache_lock);
		btrfs_put_block_group(cache);
		cache = btrfs_lookup_first_block_group(fs_info, next_bytenr);
		return cache;
	}
	node = rb_next(&cache->cache_node);
	btrfs_put_block_group(cache);
	if (node) {
		cache = rb_entry(node, struct btrfs_block_group, cache_node);
		btrfs_get_block_group(cache);
	} else {
		cache = NULL;
	}
	spin_unlock(&fs_info->block_group_cache_lock);
	return cache;
}
bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_block_group *bg;
	bool ret = true;

	bg = btrfs_lookup_block_group(fs_info, bytenr);
	if (!bg)
		return false;

	spin_lock(&bg->lock);
	if (bg->ro)
		ret = false;
	else
		atomic_inc(&bg->nocow_writers);
	spin_unlock(&bg->lock);

	/* No put on block group, done by btrfs_dec_nocow_writers */
	if (!ret)
		btrfs_put_block_group(bg);

	return ret;
}

void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_block_group *bg;

	bg = btrfs_lookup_block_group(fs_info, bytenr);
	ASSERT(bg);
	if (atomic_dec_and_test(&bg->nocow_writers))
		wake_up_var(&bg->nocow_writers);
	/*
	 * Once for our lookup and once for the lookup done by a previous call
	 * to btrfs_inc_nocow_writers()
	 */
	btrfs_put_block_group(bg);
	btrfs_put_block_group(bg);
}
void btrfs_wait_nocow_writers(struct btrfs_block_group *bg)
{
	wait_var_event(&bg->nocow_writers, !atomic_read(&bg->nocow_writers));
}
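
/*
 * Illustrative sketch, not part of the original file: the nocow writer counter
 * is used as inc -> do the NOCOW write -> dec, while another task may wait for
 * the counter to drain before relying on the block group being read-only. The
 * function below is hypothetical, only shows the pairing, and is compiled out.
 */
#if 0
static void example_nocow_write(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	if (!btrfs_inc_nocow_writers(fs_info, bytenr))
		return;	/* block group is read-only, fall back to COW */

	/* ... perform the NOCOW write against this block group ... */

	btrfs_dec_nocow_writers(fs_info, bytenr);
}
#endif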
void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
					const u64 start)
{
	struct btrfs_block_group *bg;

	bg = btrfs_lookup_block_group(fs_info, start);
	ASSERT(bg);
	if (atomic_dec_and_test(&bg->reservations))
		wake_up_var(&bg->reservations);
	btrfs_put_block_group(bg);
}

void btrfs_wait_block_group_reservations(struct btrfs_block_group *bg)
{
	struct btrfs_space_info *space_info = bg->space_info;

	ASSERT(bg->ro);

	if (!(bg->flags & BTRFS_BLOCK_GROUP_DATA))
		return;

	/*
	 * Our block group is read only but before we set it to read only,
	 * some task might have had allocated an extent from it already, but it
	 * has not yet created a respective ordered extent (and added it to a
	 * root's list of ordered extents).
	 * Therefore wait for any task currently allocating extents, since the
	 * block group's reservations counter is incremented while a read lock
	 * on the groups' semaphore is held and decremented after releasing
	 * the read access on that semaphore and creating the ordered extent.
	 */
	down_write(&space_info->groups_sem);
	up_write(&space_info->groups_sem);

	wait_var_event(&bg->reservations, !atomic_read(&bg->reservations));
}
struct btrfs_caching_control *btrfs_get_caching_control(
		struct btrfs_block_group *cache)
{
	struct btrfs_caching_control *ctl;

	spin_lock(&cache->lock);
	if (!cache->caching_ctl) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	ctl = cache->caching_ctl;
	refcount_inc(&ctl->count);
	spin_unlock(&cache->lock);
	return ctl;
}

void btrfs_put_caching_control(struct btrfs_caching_control *ctl)
{
	if (refcount_dec_and_test(&ctl->count))
		kfree(ctl);
}
/*
 * When we wait for progress in the block group caching, it's because our
 * allocation attempt failed at least once.  So, we must sleep and let some
 * progress happen before we try again.
 *
 * This function will sleep at least once waiting for new free space to show
 * up, and then it will check the block group free space numbers for our min
 * num_bytes.  Another option is to have it go ahead and look in the rbtree for
 * a free extent of a given size, but this is a good start.
 *
 * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
 * any of the information in this block group.
 */
void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache,
					   u64 num_bytes)
{
	struct btrfs_caching_control *caching_ctl;

	caching_ctl = btrfs_get_caching_control(cache);
	if (!caching_ctl)
		return;

	wait_event(caching_ctl->wait, btrfs_block_group_done(cache) ||
		   (cache->free_space_ctl->free_space >= num_bytes));

	btrfs_put_caching_control(caching_ctl);
}
int btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache)
{
	struct btrfs_caching_control *caching_ctl;
	int ret = 0;

	caching_ctl = btrfs_get_caching_control(cache);
	if (!caching_ctl)
		return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;

	wait_event(caching_ctl->wait, btrfs_block_group_done(cache));
	if (cache->cached == BTRFS_CACHE_ERROR)
		ret = -EIO;
	btrfs_put_caching_control(caching_ctl);
	return ret;
}
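
/*
 * Illustrative sketch, not part of the original file: a caller that needs the
 * free space for a block group fully loaded kicks off caching and then waits
 * for it to finish, checking for a caching error. The helper name below is
 * hypothetical and the block is deliberately compiled out.
 */
#if 0
static int example_cache_and_wait(struct btrfs_block_group *cache)
{
	int ret;

	/* Start full caching (not a load_cache_only fast load) */
	ret = btrfs_cache_block_group(cache, 0);
	if (ret)
		return ret;
	/* Returns -EIO if caching ended in BTRFS_CACHE_ERROR */
	return btrfs_wait_block_group_cache_done(cache);
}
#endif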
#ifdef CONFIG_BTRFS_DEBUG
static void fragment_free_space(struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	u64 start = block_group->start;
	u64 len = block_group->length;
	u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ?
		fs_info->nodesize : fs_info->sectorsize;
	u64 step = chunk << 1;

	while (len > chunk) {
		btrfs_remove_free_space(block_group, start, chunk);
		start += step;
		if (len < step)
			len = 0;
		else
			len -= step;
	}
}
#endif
/*
 * This is only called by btrfs_cache_block_group, since we could have freed
 * extents we need to check the pinned_extents for any extents that can't be
 * used yet since their free space will be released as soon as the transaction
 * commits.
 */
u64 add_new_free_space(struct btrfs_block_group *block_group, u64 start, u64 end)
{
	struct btrfs_fs_info *info = block_group->fs_info;
	u64 extent_start, extent_end, size, total_added = 0;
	int ret;

	while (start < end) {
		ret = find_first_extent_bit(info->pinned_extents, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY | EXTENT_UPTODATE,
					    NULL);
		if (ret)
			break;

		if (extent_start <= start) {
			start = extent_end + 1;
		} else if (extent_start > start && extent_start < end) {
			size = extent_start - start;
			total_added += size;
			ret = btrfs_add_free_space_async_trimmed(block_group,
								 start, size);
			BUG_ON(ret); /* -ENOMEM or logic error */
			start = extent_end + 1;
		} else {
			break;
		}
	}

	if (start < end) {
		size = end - start;
		total_added += size;
		ret = btrfs_add_free_space_async_trimmed(block_group, start,
							 size);
		BUG_ON(ret); /* -ENOMEM or logic error */
	}

	return total_added;
}
static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
{
	struct btrfs_block_group *block_group = caching_ctl->block_group;
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_root *extent_root = fs_info->extent_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 total_found = 0;
	u64 last = 0;
	u32 nritems;
	int ret;
	bool wakeup = true;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	last = max_t(u64, block_group->start, BTRFS_SUPER_INFO_OFFSET);

#ifdef CONFIG_BTRFS_DEBUG
	/*
	 * If we're fragmenting we don't want to make anybody think we can
	 * allocate from this block group until we've had a chance to fragment
	 * the free space.
	 */
	if (btrfs_should_fragment_free_space(block_group))
		wakeup = false;
#endif
	/*
	 * We don't want to deadlock with somebody trying to allocate a new
	 * extent for the extent root while also trying to search the extent
	 * root to add free space.  So we skip locking and search the commit
	 * root, since it's read-only
	 */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = READA_FORWARD;

	key.objectid = last;
	key.offset = 0;
	key.type = BTRFS_EXTENT_ITEM_KEY;

next:
	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);

	while (1) {
		if (btrfs_fs_closing(fs_info) > 1) {
			last = (u64)-1;
			break;
		}

		if (path->slots[0] < nritems) {
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		} else {
			ret = btrfs_find_next_key(extent_root, path, &key, 0, 0);
			if (ret)
				break;

			if (need_resched() ||
			    rwsem_is_contended(&fs_info->commit_root_sem)) {
				if (wakeup)
					caching_ctl->progress = last;
				btrfs_release_path(path);
				up_read(&fs_info->commit_root_sem);
				mutex_unlock(&caching_ctl->mutex);
				cond_resched();
				mutex_lock(&caching_ctl->mutex);
				down_read(&fs_info->commit_root_sem);
				goto next;
			}

			ret = btrfs_next_leaf(extent_root, path);
			if (ret < 0)
				goto out;
			if (ret)
				break;
			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			continue;
		}

		if (key.objectid < last) {
			key.objectid = last;
			key.offset = 0;
			key.type = BTRFS_EXTENT_ITEM_KEY;

			if (wakeup)
				caching_ctl->progress = last;
			btrfs_release_path(path);
			goto next;
		}

		if (key.objectid < block_group->start) {
			path->slots[0]++;
			continue;
		}

		if (key.objectid >= block_group->start + block_group->length)
			break;

		if (key.type == BTRFS_EXTENT_ITEM_KEY ||
		    key.type == BTRFS_METADATA_ITEM_KEY) {
			total_found += add_new_free_space(block_group, last,
							  key.objectid);
			if (key.type == BTRFS_METADATA_ITEM_KEY)
				last = key.objectid +
					fs_info->nodesize;
			else
				last = key.objectid + key.offset;

			if (total_found > CACHING_CTL_WAKE_UP) {
				total_found = 0;
				if (wakeup)
					wake_up(&caching_ctl->wait);
			}
		}
		path->slots[0]++;
	}
	ret = 0;

	total_found += add_new_free_space(block_group, last,
				block_group->start + block_group->length);
	caching_ctl->progress = (u64)-1;

out:
	btrfs_free_path(path);
	return ret;
}
static noinline void caching_thread(struct btrfs_work *work)
{
	struct btrfs_block_group *block_group;
	struct btrfs_fs_info *fs_info;
	struct btrfs_caching_control *caching_ctl;
	int ret;

	caching_ctl = container_of(work, struct btrfs_caching_control, work);
	block_group = caching_ctl->block_group;
	fs_info = block_group->fs_info;

	mutex_lock(&caching_ctl->mutex);
	down_read(&fs_info->commit_root_sem);

	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
		ret = load_free_space_tree(caching_ctl);
	else
		ret = load_extent_tree_free(caching_ctl);

	spin_lock(&block_group->lock);
	block_group->caching_ctl = NULL;
	block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED;
	spin_unlock(&block_group->lock);

#ifdef CONFIG_BTRFS_DEBUG
	if (btrfs_should_fragment_free_space(block_group)) {
		u64 bytes_used;

		spin_lock(&block_group->space_info->lock);
		spin_lock(&block_group->lock);
		bytes_used = block_group->length - block_group->used;
		block_group->space_info->bytes_used += bytes_used >> 1;
		spin_unlock(&block_group->lock);
		spin_unlock(&block_group->space_info->lock);
		fragment_free_space(block_group);
	}
#endif

	caching_ctl->progress = (u64)-1;

	up_read(&fs_info->commit_root_sem);
	btrfs_free_excluded_extents(block_group);
	mutex_unlock(&caching_ctl->mutex);

	wake_up(&caching_ctl->wait);

	btrfs_put_caching_control(caching_ctl);
	btrfs_put_block_group(block_group);
}
int btrfs_cache_block_group(struct btrfs_block_group *cache, int load_cache_only)
{
	DEFINE_WAIT(wait);
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_caching_control *caching_ctl;
	int ret = 0;

	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
	if (!caching_ctl)
		return -ENOMEM;

	INIT_LIST_HEAD(&caching_ctl->list);
	mutex_init(&caching_ctl->mutex);
	init_waitqueue_head(&caching_ctl->wait);
	caching_ctl->block_group = cache;
	caching_ctl->progress = cache->start;
	refcount_set(&caching_ctl->count, 1);
	btrfs_init_work(&caching_ctl->work, caching_thread, NULL, NULL);

	spin_lock(&cache->lock);
	/*
	 * This should be a rare occasion, but this could happen I think in the
	 * case where one thread starts to load the space cache info, and then
	 * some other thread starts a transaction commit which tries to do an
	 * allocation while the other thread is still loading the space cache
	 * info.  The previous loop should have kept us from choosing this block
	 * group, but if we've moved to the state where we will wait on caching
	 * block groups we need to first check if we're doing a fast load here,
	 * so we can wait for it to finish, otherwise we could end up allocating
	 * from a block group whose cache gets evicted for one reason or
	 * another.
	 */
	while (cache->cached == BTRFS_CACHE_FAST) {
		struct btrfs_caching_control *ctl;

		ctl = cache->caching_ctl;
		refcount_inc(&ctl->count);
		prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock(&cache->lock);

		schedule();

		finish_wait(&ctl->wait, &wait);
		btrfs_put_caching_control(ctl);
		spin_lock(&cache->lock);
	}

	if (cache->cached != BTRFS_CACHE_NO) {
		spin_unlock(&cache->lock);
		kfree(caching_ctl);
		return 0;
	}
	WARN_ON(cache->caching_ctl);
	cache->caching_ctl = caching_ctl;
	cache->cached = BTRFS_CACHE_FAST;
	spin_unlock(&cache->lock);

	if (btrfs_test_opt(fs_info, SPACE_CACHE)) {
		mutex_lock(&caching_ctl->mutex);
		ret = load_free_space_cache(cache);

		spin_lock(&cache->lock);
		if (ret == 1) {
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_FINISHED;
			cache->last_byte_to_unpin = (u64)-1;
			caching_ctl->progress = (u64)-1;
		} else {
			if (load_cache_only) {
				cache->caching_ctl = NULL;
				cache->cached = BTRFS_CACHE_NO;
			} else {
				cache->cached = BTRFS_CACHE_STARTED;
				cache->has_caching_ctl = 1;
			}
		}
		spin_unlock(&cache->lock);
#ifdef CONFIG_BTRFS_DEBUG
		if (ret == 1 &&
		    btrfs_should_fragment_free_space(cache)) {
			u64 bytes_used;

			spin_lock(&cache->space_info->lock);
			spin_lock(&cache->lock);
			bytes_used = cache->length - cache->used;
			cache->space_info->bytes_used += bytes_used >> 1;
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);
			fragment_free_space(cache);
		}
#endif
		mutex_unlock(&caching_ctl->mutex);

		wake_up(&caching_ctl->wait);
		if (ret == 1) {
			btrfs_put_caching_control(caching_ctl);
			btrfs_free_excluded_extents(cache);
			return 0;
		}
	} else {
		/*
		 * We're either using the free space tree or no caching at all.
		 * Set cached to the appropriate value and wakeup any waiters.
		 */
		spin_lock(&cache->lock);
		if (load_cache_only) {
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_NO;
		} else {
			cache->cached = BTRFS_CACHE_STARTED;
			cache->has_caching_ctl = 1;
		}
		spin_unlock(&cache->lock);
		wake_up(&caching_ctl->wait);
	}

	if (load_cache_only) {
		btrfs_put_caching_control(caching_ctl);
		return 0;
	}

	down_write(&fs_info->commit_root_sem);
	refcount_inc(&caching_ctl->count);
	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
	up_write(&fs_info->commit_root_sem);

	btrfs_get_block_group(cache);

	btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);

	return ret;
}
static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 extra_flags = chunk_to_extended(flags) &
				BTRFS_EXTENDED_PROFILE_MASK;

	write_seqlock(&fs_info->profiles_lock);
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		fs_info->avail_data_alloc_bits &= ~extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_METADATA)
		fs_info->avail_metadata_alloc_bits &= ~extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		fs_info->avail_system_alloc_bits &= ~extra_flags;
	write_sequnlock(&fs_info->profiles_lock);
}
/*
 * Clear incompat bits for the following feature(s):
 *
 * - RAID56 - in case there's neither RAID5 nor RAID6 profile block group
 *            in the whole filesystem
 *
 * - RAID1C34 - same as above for RAID1C3 and RAID1C4 block groups
 */
static void clear_incompat_bg_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	bool found_raid56 = false;
	bool found_raid1c34 = false;

	if ((flags & BTRFS_BLOCK_GROUP_RAID56_MASK) ||
	    (flags & BTRFS_BLOCK_GROUP_RAID1C3) ||
	    (flags & BTRFS_BLOCK_GROUP_RAID1C4)) {
		struct list_head *head = &fs_info->space_info;
		struct btrfs_space_info *sinfo;

		list_for_each_entry_rcu(sinfo, head, list) {
			down_read(&sinfo->groups_sem);
			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID5]))
				found_raid56 = true;
			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID6]))
				found_raid56 = true;
			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C3]))
				found_raid1c34 = true;
			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C4]))
				found_raid1c34 = true;
			up_read(&sinfo->groups_sem);
		}
		if (!found_raid56)
			btrfs_clear_fs_incompat(fs_info, RAID56);
		if (!found_raid1c34)
			btrfs_clear_fs_incompat(fs_info, RAID1C34);
	}
}
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
			     u64 group_start, struct extent_map *em)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_path *path;
	struct btrfs_block_group *block_group;
	struct btrfs_free_cluster *cluster;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_key key;
	struct inode *inode;
	struct kobject *kobj = NULL;
	int ret;
	int index;
	int factor;
	struct btrfs_caching_control *caching_ctl = NULL;
	bool remove_em;
	bool remove_rsv = false;

	block_group = btrfs_lookup_block_group(fs_info, group_start);
	BUG_ON(!block_group);
	BUG_ON(!block_group->ro);

	trace_btrfs_remove_block_group(block_group);
	/*
	 * Free the reserved super bytes from this block group before
	 * removing it.
	 */
	btrfs_free_excluded_extents(block_group);
	btrfs_free_ref_tree_range(fs_info, block_group->start,
				  block_group->length);

	index = btrfs_bg_flags_to_raid_index(block_group->flags);
	factor = btrfs_bg_type_to_factor(block_group->flags);

	/* make sure this block group isn't part of an allocation cluster */
	cluster = &fs_info->data_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	/*
	 * make sure this block group isn't part of a metadata
	 * allocation cluster
	 */
	cluster = &fs_info->meta_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * get the inode first so any iput calls done for the io_list
	 * aren't the final iput (no unlinks allowed now)
	 */
	inode = lookup_free_space_inode(block_group, path);

	mutex_lock(&trans->transaction->cache_write_mutex);
	/*
	 * Make sure our free space cache IO is done before removing the
	 * free space inode
	 */
	spin_lock(&trans->transaction->dirty_bgs_lock);
	if (!list_empty(&block_group->io_list)) {
		list_del_init(&block_group->io_list);

		WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode);

		spin_unlock(&trans->transaction->dirty_bgs_lock);
		btrfs_wait_cache_io(trans, block_group, path);
		btrfs_put_block_group(block_group);
		spin_lock(&trans->transaction->dirty_bgs_lock);
	}

	if (!list_empty(&block_group->dirty_list)) {
		list_del_init(&block_group->dirty_list);
		remove_rsv = true;
		btrfs_put_block_group(block_group);
	}
	spin_unlock(&trans->transaction->dirty_bgs_lock);
	mutex_unlock(&trans->transaction->cache_write_mutex);

	if (!IS_ERR(inode)) {
		ret = btrfs_orphan_add(trans, BTRFS_I(inode));
		if (ret) {
			btrfs_add_delayed_iput(inode);
			goto out;
		}
		clear_nlink(inode);
		/* One for the block groups ref */
		spin_lock(&block_group->lock);
		if (block_group->iref) {
			block_group->iref = 0;
			block_group->inode = NULL;
			spin_unlock(&block_group->lock);
			iput(inode);
		} else {
			spin_unlock(&block_group->lock);
		}
		/* One for our lookup ref */
		btrfs_add_delayed_iput(inode);
	}

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.type = 0;
	key.offset = block_group->start;

	ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0)
		btrfs_release_path(path);
	if (ret == 0) {
		ret = btrfs_del_item(trans, tree_root, path);
		if (ret)
			goto out;
		btrfs_release_path(path);
	}

	spin_lock(&fs_info->block_group_cache_lock);
	rb_erase(&block_group->cache_node,
		 &fs_info->block_group_cache_tree);
	RB_CLEAR_NODE(&block_group->cache_node);

	if (fs_info->first_logical_byte == block_group->start)
		fs_info->first_logical_byte = (u64)-1;
	spin_unlock(&fs_info->block_group_cache_lock);

	down_write(&block_group->space_info->groups_sem);
	/*
	 * we must use list_del_init so people can check to see if they
	 * are still on the list after taking the semaphore
	 */
	list_del_init(&block_group->list);
	if (list_empty(&block_group->space_info->block_groups[index])) {
		kobj = block_group->space_info->block_group_kobjs[index];
		block_group->space_info->block_group_kobjs[index] = NULL;
		clear_avail_alloc_bits(fs_info, block_group->flags);
	}
	up_write(&block_group->space_info->groups_sem);
	clear_incompat_bg_bits(fs_info, block_group->flags);
	if (kobj) {
		kobject_del(kobj);
		kobject_put(kobj);
	}

	if (block_group->has_caching_ctl)
		caching_ctl = btrfs_get_caching_control(block_group);
	if (block_group->cached == BTRFS_CACHE_STARTED)
		btrfs_wait_block_group_cache_done(block_group);
	if (block_group->has_caching_ctl) {
		down_write(&fs_info->commit_root_sem);
		if (!caching_ctl) {
			struct btrfs_caching_control *ctl;

			list_for_each_entry(ctl,
				    &fs_info->caching_block_groups, list)
				if (ctl->block_group == block_group) {
					caching_ctl = ctl;
					refcount_inc(&caching_ctl->count);
					break;
				}
		}
		if (caching_ctl)
			list_del_init(&caching_ctl->list);
		up_write(&fs_info->commit_root_sem);
		if (caching_ctl) {
			/* Once for the caching bgs list and once for us. */
			btrfs_put_caching_control(caching_ctl);
			btrfs_put_caching_control(caching_ctl);
		}
	}

	spin_lock(&trans->transaction->dirty_bgs_lock);
	WARN_ON(!list_empty(&block_group->dirty_list));
	WARN_ON(!list_empty(&block_group->io_list));
	spin_unlock(&trans->transaction->dirty_bgs_lock);

	btrfs_remove_free_space_cache(block_group);

	spin_lock(&block_group->space_info->lock);
	list_del_init(&block_group->ro_list);

	if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
		WARN_ON(block_group->space_info->total_bytes
			< block_group->length);
		WARN_ON(block_group->space_info->bytes_readonly
			< block_group->length);
		WARN_ON(block_group->space_info->disk_total
			< block_group->length * factor);
	}
	block_group->space_info->total_bytes -= block_group->length;
	block_group->space_info->bytes_readonly -= block_group->length;
	block_group->space_info->disk_total -= block_group->length * factor;

	spin_unlock(&block_group->space_info->lock);

	key.objectid = block_group->start;
	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	key.offset = block_group->length;

	mutex_lock(&fs_info->chunk_mutex);
	spin_lock(&block_group->lock);
	block_group->removed = 1;
	/*
	 * At this point trimming can't start on this block group, because we
	 * removed the block group from the tree fs_info->block_group_cache_tree
	 * so no one can find it anymore and even if someone already got this
	 * block group before we removed it from the rbtree, they have already
	 * incremented block_group->trimming - if they didn't, they won't find
	 * any free space entries because we already removed them all when we
	 * called btrfs_remove_free_space_cache().
	 *
	 * And we must not remove the extent map from the fs_info->mapping_tree
	 * to prevent the same logical address range and physical device space
	 * ranges from being reused for a new block group. This is because our
	 * fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
	 * completely transactionless, so while it is trimming a range the
	 * currently running transaction might finish and a new one start,
	 * allowing for new block groups to be created that can reuse the same
	 * physical device locations unless we take this special care.
	 *
	 * There may also be an implicit trim operation if the file system
	 * is mounted with -odiscard. The same protections must remain
	 * in place until the extents have been discarded completely when
	 * the transaction commit has completed.
	 */
	remove_em = (atomic_read(&block_group->trimming) == 0);
	spin_unlock(&block_group->lock);

	mutex_unlock(&fs_info->chunk_mutex);

	ret = remove_block_group_free_space(trans, block_group);
	if (ret)
		goto out;

	btrfs_put_block_group(block_group);
	btrfs_put_block_group(block_group);

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -EIO;
	if (ret < 0)
		goto out;

	ret = btrfs_del_item(trans, root, path);
	if (ret)
		goto out;

	if (remove_em) {
		struct extent_map_tree *em_tree;

		em_tree = &fs_info->mapping_tree;
		write_lock(&em_tree->lock);
		remove_extent_mapping(em_tree, em);
		write_unlock(&em_tree->lock);
		/* once for the tree */
		free_extent_map(em);
	}

out:
	if (remove_rsv)
		btrfs_delayed_refs_rsv_release(fs_info, 1);
	btrfs_free_path(path);
	return ret;
}
struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
		struct btrfs_fs_info *fs_info, const u64 chunk_offset)
{
	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	struct map_lookup *map;
	unsigned int num_items;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	read_unlock(&em_tree->lock);
	ASSERT(em && em->start == chunk_offset);

	/*
	 * We need to reserve 3 + N units from the metadata space info in order
	 * to remove a block group (done at btrfs_remove_chunk() and at
	 * btrfs_remove_block_group()), which are used for:
	 *
	 * 1 unit for adding the free space inode's orphan (located in the tree
	 * of tree roots).
	 * 1 unit for deleting the block group item (located in the extent
	 * tree).
	 * 1 unit for deleting the free space item (located in tree of tree
	 * roots).
	 * N units for deleting N device extent items corresponding to each
	 * stripe (located in the device tree).
	 *
	 * In order to remove a block group we also need to reserve units in the
	 * system space info in order to update the chunk tree (update one or
	 * more device items and remove one chunk item), but this is done at
	 * btrfs_remove_chunk() through a call to check_system_chunk().
	 */
	map = em->map_lookup;
	num_items = 3 + map->num_stripes;
	free_extent_map(em);

	return btrfs_start_transaction_fallback_global_rsv(fs_info->extent_root,
							   num_items, 1);
}
/*
 * Mark block group @cache read-only, so later write won't happen to block
 * group @cache.
 *
 * If @force is not set, this function will only mark the block group readonly
 * if we have enough free space (1M) in other metadata/system block groups.
 * If @force is set, this function will mark the block group readonly
 * without checking free space.
 *
 * NOTE: This function doesn't care if other block groups can contain all the
 * data in this block group. That check should be done by relocation routine,
 * not this function.
 */
static int inc_block_group_ro(struct btrfs_block_group *cache, int force)
{
	struct btrfs_space_info *sinfo = cache->space_info;
	u64 num_bytes;
	int ret = -ENOSPC;

	spin_lock(&sinfo->lock);
	spin_lock(&cache->lock);

	if (cache->ro) {
		cache->ro++;
		ret = 0;
		goto out;
	}

	num_bytes = cache->length - cache->reserved - cache->pinned -
		    cache->bytes_super - cache->used;

	/*
	 * Data never overcommits, even in mixed mode, so do just the straight
	 * check of left over space in how much we have allocated.
	 */
	if (force) {
		ret = 0;
	} else if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA) {
		u64 sinfo_used = btrfs_space_info_used(sinfo, true);

		/*
		 * Here we make sure if we mark this bg RO, we still have enough
		 * free space as buffer.
		 */
		if (sinfo_used + num_bytes <= sinfo->total_bytes)
			ret = 0;
	} else {
		/*
		 * We overcommit metadata, so we need to do the
		 * btrfs_can_overcommit check here, and we need to pass in
		 * BTRFS_RESERVE_NO_FLUSH to give ourselves the most amount of
		 * leeway to allow us to mark this block group as read only.
		 */
		if (btrfs_can_overcommit(cache->fs_info, sinfo, num_bytes,
					 BTRFS_RESERVE_NO_FLUSH))
			ret = 0;
	}

	if (!ret) {
		sinfo->bytes_readonly += num_bytes;
		cache->ro++;
		list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
	}
out:
	spin_unlock(&cache->lock);
	spin_unlock(&sinfo->lock);
	if (ret == -ENOSPC && btrfs_test_opt(cache->fs_info, ENOSPC_DEBUG)) {
		btrfs_info(cache->fs_info,
			"unable to make block group %llu ro", cache->start);
		btrfs_dump_space_info(cache->fs_info, cache->space_info, 0, 0);
	}
	return ret;
}
/*
 * Process the unused_bgs list and remove any that don't have any allocated
 * space inside of them.
 */
void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_group *block_group;
	struct btrfs_space_info *space_info;
	struct btrfs_trans_handle *trans;
	const bool async_trim_enabled = btrfs_test_opt(fs_info, DISCARD_ASYNC);
	int ret = 0;

	if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
		return;

	spin_lock(&fs_info->unused_bgs_lock);
	while (!list_empty(&fs_info->unused_bgs)) {
		u64 start, end;
		int trimming;

		block_group = list_first_entry(&fs_info->unused_bgs,
					       struct btrfs_block_group,
					       bg_list);
		list_del_init(&block_group->bg_list);

		space_info = block_group->space_info;

		if (ret || btrfs_mixed_space_info(space_info)) {
			btrfs_put_block_group(block_group);
			continue;
		}
		spin_unlock(&fs_info->unused_bgs_lock);

		btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group);

		mutex_lock(&fs_info->delete_unused_bgs_mutex);

		/* Don't want to race with allocators so take the groups_sem */
		down_write(&space_info->groups_sem);

		/*
		 * Async discard moves the final block group discard to be prior
		 * to the unused_bgs code path. Therefore, if it's not fully
		 * trimmed, punt it back to the async discard lists.
		 */
		if (btrfs_test_opt(fs_info, DISCARD_ASYNC) &&
		    !btrfs_is_free_space_trimmed(block_group)) {
			trace_btrfs_skip_unused_block_group(block_group);
			up_write(&space_info->groups_sem);
			/* Requeue if we failed because of async discard */
			btrfs_discard_queue_work(&fs_info->discard_ctl,
						 block_group);
			goto next;
		}

		spin_lock(&block_group->lock);
		if (block_group->reserved || block_group->pinned ||
		    block_group->used || block_group->ro ||
		    list_is_singular(&block_group->list)) {
			/*
			 * We want to bail if we made new allocations or have
			 * outstanding allocations in this block group.  We do
			 * the ro check in case balance is currently acting on
			 * this block group.
			 */
			trace_btrfs_skip_unused_block_group(block_group);
			spin_unlock(&block_group->lock);
			up_write(&space_info->groups_sem);
			goto next;
		}
		spin_unlock(&block_group->lock);

		/* We don't want to force the issue, only flip if it's ok. */
		ret = inc_block_group_ro(block_group, 0);
		up_write(&space_info->groups_sem);
		if (ret < 0) {
			ret = 0;
			goto next;
		}

		/*
		 * Want to do this before we do anything else so we can recover
		 * properly if we fail to join the transaction.
		 */
		trans = btrfs_start_trans_remove_block_group(fs_info,
						     block_group->start);
		if (IS_ERR(trans)) {
			btrfs_dec_block_group_ro(block_group);
			ret = PTR_ERR(trans);
			goto next;
		}

		/*
		 * We could have pending pinned extents for this block group,
		 * just delete them, we don't care about them anymore.
		 */
		start = block_group->start;
		end = start + block_group->length - 1;
		/*
		 * Hold the unused_bg_unpin_mutex lock to avoid racing with
		 * btrfs_finish_extent_commit(). If we are at transaction N,
		 * another task might be running finish_extent_commit() for the
		 * previous transaction N - 1, and have seen a range belonging
		 * to the block group in freed_extents[] before we were able to
		 * clear the whole block group range from freed_extents[]. This
		 * means that task can lookup for the block group after we
		 * unpinned it from freed_extents[] and removed it, leading to
		 * a BUG_ON() at btrfs_unpin_extent_range().
		 */
		mutex_lock(&fs_info->unused_bg_unpin_mutex);
		ret = clear_extent_bits(&fs_info->freed_extents[0], start, end,
					EXTENT_DIRTY);
		if (ret) {
			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
			btrfs_dec_block_group_ro(block_group);
			goto end_trans;
		}
		ret = clear_extent_bits(&fs_info->freed_extents[1], start, end,
					EXTENT_DIRTY);
		if (ret) {
			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
			btrfs_dec_block_group_ro(block_group);
			goto end_trans;
		}
		mutex_unlock(&fs_info->unused_bg_unpin_mutex);

		/*
		 * At this point, the block_group is read only and should fail
		 * new allocations.  However, btrfs_finish_extent_commit() can
		 * cause this block_group to be placed back on the discard
		 * lists because now the block_group isn't fully discarded.
		 * Bail here and try again later after discarding everything.
		 */
		spin_lock(&fs_info->discard_ctl.lock);
		if (!list_empty(&block_group->discard_list)) {
			spin_unlock(&fs_info->discard_ctl.lock);
			btrfs_dec_block_group_ro(block_group);
			btrfs_discard_queue_work(&fs_info->discard_ctl,
						 block_group);
			goto end_trans;
		}
		spin_unlock(&fs_info->discard_ctl.lock);

		/* Reset pinned so btrfs_put_block_group doesn't complain */
		spin_lock(&space_info->lock);
		spin_lock(&block_group->lock);

		btrfs_space_info_update_bytes_pinned(fs_info, space_info,
						     -block_group->pinned);
		space_info->bytes_readonly += block_group->pinned;
		percpu_counter_add_batch(&space_info->total_bytes_pinned,
					 -block_group->pinned,
					 BTRFS_TOTAL_BYTES_PINNED_BATCH);
		block_group->pinned = 0;

		spin_unlock(&block_group->lock);
		spin_unlock(&space_info->lock);

		/*
		 * The normal path here is an unused block group is passed here,
		 * then trimming is handled in the transaction commit path.
		 * Async discard interposes before this to do the trimming
		 * before coming down the unused block group path as trimming
		 * will no longer be done later in the transaction commit path.
		 */
		if (!async_trim_enabled && btrfs_test_opt(fs_info, DISCARD_ASYNC))
			goto flip_async;

		/* DISCARD can flip during remount */
		trimming = btrfs_test_opt(fs_info, DISCARD_SYNC);

		/* Implicit trim during transaction commit. */
		if (trimming)
			btrfs_get_block_group_trimming(block_group);

		/*
		 * Btrfs_remove_chunk will abort the transaction if things go
		 * horribly wrong.
		 */
		ret = btrfs_remove_chunk(trans, block_group->start);

		if (ret) {
			if (trimming)
				btrfs_put_block_group_trimming(block_group);
			goto end_trans;
		}

		/*
		 * If we're not mounted with -odiscard, we can just forget
		 * about this block group. Otherwise we'll need to wait
		 * until transaction commit to do the actual discard.
		 */
		if (trimming) {
			spin_lock(&fs_info->unused_bgs_lock);
			/*
			 * A concurrent scrub might have added us to the list
			 * fs_info->unused_bgs, so use a list_move operation
			 * to add the block group to the deleted_bgs list.
			 */
			list_move(&block_group->bg_list,
				  &trans->transaction->deleted_bgs);
			spin_unlock(&fs_info->unused_bgs_lock);
			btrfs_get_block_group(block_group);
		}
end_trans:
		btrfs_end_transaction(trans);
next:
		mutex_unlock(&fs_info->delete_unused_bgs_mutex);
		btrfs_put_block_group(block_group);
		spin_lock(&fs_info->unused_bgs_lock);
	}
	spin_unlock(&fs_info->unused_bgs_lock);
	return;

flip_async:
	btrfs_end_transaction(trans);
	mutex_unlock(&fs_info->delete_unused_bgs_mutex);
	btrfs_put_block_group(block_group);
	btrfs_discard_punt_unused_bgs_list(fs_info);
}
void btrfs_mark_bg_unused(struct btrfs_block_group *bg)
{
	struct btrfs_fs_info *fs_info = bg->fs_info;

	spin_lock(&fs_info->unused_bgs_lock);
	if (list_empty(&bg->bg_list)) {
		btrfs_get_block_group(bg);
		trace_btrfs_add_unused_block_group(bg);
		list_add_tail(&bg->bg_list, &fs_info->unused_bgs);
	}
	spin_unlock(&fs_info->unused_bgs_lock);
}
static int find_first_block_group(struct btrfs_fs_info *fs_info,
				  struct btrfs_path *path,
				  struct btrfs_key *key)
{
	struct btrfs_root *root = fs_info->extent_root;
	int ret;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	struct btrfs_block_group_item bg;
	u64 flags;
	int slot;

	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		slot = path->slots[0];
		leaf = path->nodes[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.objectid >= key->objectid &&
		    found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
			struct extent_map_tree *em_tree;
			struct extent_map *em;

			em_tree = &root->fs_info->mapping_tree;
			read_lock(&em_tree->lock);
			em = lookup_extent_mapping(em_tree, found_key.objectid,
						   found_key.offset);
			read_unlock(&em_tree->lock);
			if (!em) {
				btrfs_err(fs_info,
			"logical %llu len %llu found bg but no related chunk",
					  found_key.objectid, found_key.offset);
				ret = -ENOENT;
			} else if (em->start != found_key.objectid ||
				   em->len != found_key.offset) {
				btrfs_err(fs_info,
		"block group %llu len %llu mismatch with chunk %llu len %llu",
					  found_key.objectid, found_key.offset,
					  em->start, em->len);
				ret = -EUCLEAN;
			} else {
				read_extent_buffer(leaf, &bg,
					btrfs_item_ptr_offset(leaf, slot),
					sizeof(bg));
				flags = btrfs_stack_block_group_flags(&bg) &
					BTRFS_BLOCK_GROUP_TYPE_MASK;

				if (flags != (em->map_lookup->type &
					      BTRFS_BLOCK_GROUP_TYPE_MASK)) {
					btrfs_err(fs_info,
"block group %llu len %llu type flags 0x%llx mismatch with chunk type flags 0x%llx",
						found_key.objectid,
						found_key.offset, flags,
						(BTRFS_BLOCK_GROUP_TYPE_MASK &
						 em->map_lookup->type));
					ret = -EUCLEAN;
				} else {
					ret = 0;
				}
			}
			free_extent_map(em);
			goto out;
		}
		path->slots[0]++;
	}
out:
	return ret;
}
static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 extra_flags = chunk_to_extended(flags) &
				BTRFS_EXTENDED_PROFILE_MASK;

	write_seqlock(&fs_info->profiles_lock);
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		fs_info->avail_data_alloc_bits |= extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_METADATA)
		fs_info->avail_metadata_alloc_bits |= extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		fs_info->avail_system_alloc_bits |= extra_flags;
	write_sequnlock(&fs_info->profiles_lock);
}
/*
 * btrfs_rmap_block - Map a physical disk address to a list of logical addresses
 * @chunk_start:   logical address of block group
 * @physical:	   physical address to map to logical addresses
 * @logical:	   return array of logical addresses which map to @physical
 * @naddrs:	   length of @logical
 * @stripe_len:    size of IO stripe for the given block group
 *
 * Maps a particular @physical disk address to a list of @logical addresses.
 * Used primarily to exclude those portions of a block group that contain super
 * block copies.
 */
int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
		     u64 physical, u64 **logical, int *naddrs, int *stripe_len)
{
	struct extent_map *em;
	struct map_lookup *map;
	u64 *buf;
	u64 bytenr;
	u64 data_stripe_length;
	u64 io_stripe_size;
	int i, nr = 0;
	int ret = 0;

	em = btrfs_get_chunk_map(fs_info, chunk_start, 1);
	if (IS_ERR(em))
		return -EIO;

	map = em->map_lookup;
	data_stripe_length = em->len;
	io_stripe_size = map->stripe_len;

	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		data_stripe_length = div_u64(data_stripe_length,
					map->num_stripes / map->sub_stripes);
	else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
		data_stripe_length = div_u64(data_stripe_length, map->num_stripes);
	else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		data_stripe_length = div_u64(data_stripe_length,
					     nr_data_stripes(map));
		io_stripe_size = map->stripe_len * nr_data_stripes(map);
	}

	buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS);
	if (!buf) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < map->num_stripes; i++) {
		bool already_inserted = false;
		u64 stripe_nr;
		int j;

		if (!in_range(physical, map->stripes[i].physical,
			      data_stripe_length))
			continue;

		stripe_nr = physical - map->stripes[i].physical;
		stripe_nr = div64_u64(stripe_nr, map->stripe_len);

		if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
			stripe_nr = stripe_nr * map->num_stripes + i;
			stripe_nr = div_u64(stripe_nr, map->sub_stripes);
		} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
			stripe_nr = stripe_nr * map->num_stripes + i;
		}
		/*
		 * The remaining case would be for RAID56, multiply by
		 * nr_data_stripes().  Alternatively, just use rmap_len below
		 * instead of map->stripe_len
		 */

		bytenr = chunk_start + stripe_nr * io_stripe_size;

		/* Ensure we don't add duplicate addresses */
		for (j = 0; j < nr; j++) {
			if (buf[j] == bytenr) {
				already_inserted = true;
				break;
			}
		}

		if (!already_inserted)
			buf[nr++] = bytenr;
	}

	*logical = buf;
	*naddrs = nr;
	*stripe_len = io_stripe_size;
out:
	free_extent_map(em);
	return ret;
}
static int exclude_super_stripes(struct btrfs_block_group *cache)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	u64 bytenr;
	u64 *logical;
	int stripe_len;
	int i, nr, ret;

	if (cache->start < BTRFS_SUPER_INFO_OFFSET) {
		stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->start;
		cache->bytes_super += stripe_len;
		ret = btrfs_add_excluded_extent(fs_info, cache->start,
						stripe_len);
		if (ret)
			return ret;
	}

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		ret = btrfs_rmap_block(fs_info, cache->start,
				       bytenr, &logical, &nr, &stripe_len);
		if (ret)
			return ret;

		while (nr--) {
			u64 start, len;

			if (logical[nr] > cache->start + cache->length)
				continue;

			if (logical[nr] + stripe_len <= cache->start)
				continue;

			start = logical[nr];
			if (start < cache->start) {
				start = cache->start;
				len = (logical[nr] + stripe_len) - start;
			} else {
				len = min_t(u64, stripe_len,
					    cache->start + cache->length - start);
			}

			cache->bytes_super += len;
			ret = btrfs_add_excluded_extent(fs_info, start, len);
			if (ret) {
				kfree(logical);
				return ret;
			}
		}

		kfree(logical);
	}
	return 0;
}
static void link_block_group(struct btrfs_block_group *cache)
{
	struct btrfs_space_info *space_info = cache->space_info;
	int index = btrfs_bg_flags_to_raid_index(cache->flags);
	bool first = false;

	down_write(&space_info->groups_sem);
	if (list_empty(&space_info->block_groups[index]))
		first = true;
	list_add_tail(&cache->list, &space_info->block_groups[index]);
	up_write(&space_info->groups_sem);

	if (first)
		btrfs_sysfs_add_block_group_type(cache);
}
static struct btrfs_block_group *btrfs_create_block_group_cache(
		struct btrfs_fs_info *fs_info, u64 start, u64 size)
{
	struct btrfs_block_group *cache;

	cache = kzalloc(sizeof(*cache), GFP_NOFS);
	if (!cache)
		return NULL;

	cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
					GFP_NOFS);
	if (!cache->free_space_ctl) {
		kfree(cache);
		return NULL;
	}

	cache->start = start;
	cache->length = size;

	cache->fs_info = fs_info;
	cache->full_stripe_len = btrfs_full_stripe_len(fs_info, start);
	set_free_space_tree_thresholds(cache);

	cache->discard_index = BTRFS_DISCARD_INDEX_UNUSED;

	atomic_set(&cache->count, 1);
	spin_lock_init(&cache->lock);
	init_rwsem(&cache->data_rwsem);
	INIT_LIST_HEAD(&cache->list);
	INIT_LIST_HEAD(&cache->cluster_list);
	INIT_LIST_HEAD(&cache->bg_list);
	INIT_LIST_HEAD(&cache->ro_list);
	INIT_LIST_HEAD(&cache->discard_list);
	INIT_LIST_HEAD(&cache->dirty_list);
	INIT_LIST_HEAD(&cache->io_list);
	btrfs_init_free_space_ctl(cache);
	atomic_set(&cache->trimming, 0);
	mutex_init(&cache->free_space_lock);
	btrfs_init_full_stripe_locks_tree(&cache->full_stripe_locks_root);

	return cache;
}
/*
 * Iterate all chunks and verify that each of them has the corresponding block
 * group
 */
static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info)
{
	struct extent_map_tree *map_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	struct btrfs_block_group *bg;
	u64 start = 0;
	int ret = 0;

	while (1) {
		read_lock(&map_tree->lock);
		/*
		 * lookup_extent_mapping will return the first extent map
		 * intersecting the range, so setting @len to 1 is enough to
		 * get the first chunk.
		 */
		em = lookup_extent_mapping(map_tree, start, 1);
		read_unlock(&map_tree->lock);
		if (!em)
			break;

		bg = btrfs_lookup_block_group(fs_info, em->start);
		if (!bg) {
			btrfs_err(fs_info,
	"chunk start=%llu len=%llu doesn't have corresponding block group",
				  em->start, em->len);
			ret = -EUCLEAN;
			free_extent_map(em);
			break;
		}
		if (bg->start != em->start || bg->length != em->len ||
		    (bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK) !=
		    (em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
			btrfs_err(fs_info,
"chunk start=%llu len=%llu flags=0x%llx doesn't match block group start=%llu len=%llu flags=0x%llx",
				em->start, em->len,
				em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK,
				bg->start, bg->length,
				bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK);
			ret = -EUCLEAN;
			free_extent_map(em);
			btrfs_put_block_group(bg);
			break;
		}
		start = em->start + em->len;
		free_extent_map(em);
		btrfs_put_block_group(bg);
	}
	return ret;
}
static int read_one_block_group(struct btrfs_fs_info *info,
				struct btrfs_path *path,
				const struct btrfs_key *key,
				int need_clear)
{
	struct extent_buffer *leaf = path->nodes[0];
	struct btrfs_block_group *cache;
	struct btrfs_space_info *space_info;
	struct btrfs_block_group_item bgi;
	const bool mixed = btrfs_fs_incompat(info, MIXED_GROUPS);
	int slot = path->slots[0];
	int ret;

	ASSERT(key->type == BTRFS_BLOCK_GROUP_ITEM_KEY);

	cache = btrfs_create_block_group_cache(info, key->objectid, key->offset);
	if (!cache)
		return -ENOMEM;

	if (need_clear) {
		/*
		 * When we mount with old space cache, we need to
		 * set BTRFS_DC_CLEAR and set dirty flag.
		 *
		 * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
		 *    truncate the old free space cache inode and
		 *    setup a new one.
		 * b) Setting 'dirty flag' makes sure that we flush
		 *    the new space cache info onto disk.
		 */
		if (btrfs_test_opt(info, SPACE_CACHE))
			cache->disk_cache_state = BTRFS_DC_CLEAR;
	}
	read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot),
			   sizeof(bgi));
	cache->used = btrfs_stack_block_group_used(&bgi);
	cache->flags = btrfs_stack_block_group_flags(&bgi);
	if (!mixed && ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) &&
	    (cache->flags & BTRFS_BLOCK_GROUP_DATA))) {
			btrfs_err(info,
"bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups",
				  cache->start);
			ret = -EINVAL;
			goto error;
	}

	/*
	 * We need to exclude the super stripes now so that the space info has
	 * super bytes accounted for, otherwise we'll think we have more space
	 * than we actually do.
	 */
	ret = exclude_super_stripes(cache);
	if (ret) {
		/* We may have excluded something, so call this just in case. */
		btrfs_free_excluded_extents(cache);
		goto error;
	}

	/*
	 * Check for two cases, either we are full, and therefore don't need
	 * to bother with the caching work since we won't find any space, or we
	 * are empty, and we can just add all the space in and be done with it.
	 * This saves us _a_lot_ of time, particularly in the full case.
	 */
	if (key->offset == cache->used) {
		cache->last_byte_to_unpin = (u64)-1;
		cache->cached = BTRFS_CACHE_FINISHED;
		btrfs_free_excluded_extents(cache);
	} else if (cache->used == 0) {
		cache->last_byte_to_unpin = (u64)-1;
		cache->cached = BTRFS_CACHE_FINISHED;
		add_new_free_space(cache, key->objectid,
				   key->objectid + key->offset);
		btrfs_free_excluded_extents(cache);
	}

	ret = btrfs_add_block_group_cache(info, cache);
	if (ret) {
		btrfs_remove_free_space_cache(cache);
		goto error;
	}
	trace_btrfs_add_block_group(info, cache, 0);
	btrfs_update_space_info(info, cache->flags, key->offset,
				cache->used, cache->bytes_super, &space_info);

	cache->space_info = space_info;

	link_block_group(cache);

	set_avail_alloc_bits(info, cache->flags);
	if (btrfs_chunk_readonly(info, cache->start)) {
		inc_block_group_ro(cache, 1);
	} else if (cache->used == 0) {
		ASSERT(list_empty(&cache->bg_list));
		if (btrfs_test_opt(info, DISCARD_ASYNC))
			btrfs_discard_queue_work(&info->discard_ctl, cache);
		else
			btrfs_mark_bg_unused(cache);
	}
	return 0;
error:
	btrfs_put_block_group(cache);
	return ret;
}
int btrfs_read_block_groups(struct btrfs_fs_info *info)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_block_group *cache;
	struct btrfs_space_info *space_info;
	struct btrfs_key key;
	int need_clear = 0;
	u64 cache_gen;

	key.objectid = 0;
	key.offset = 0;
	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = READA_FORWARD;

	cache_gen = btrfs_super_cache_generation(info->super_copy);
	if (btrfs_test_opt(info, SPACE_CACHE) &&
	    btrfs_super_generation(info->super_copy) != cache_gen)
		need_clear = 1;
	if (btrfs_test_opt(info, CLEAR_CACHE))
		need_clear = 1;

	while (1) {
		ret = find_first_block_group(info, path, &key);
		if (ret > 0)
			break;
		if (ret != 0)
			goto error;

		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		ret = read_one_block_group(info, path, &key, need_clear);
		if (ret < 0)
			goto error;
		key.objectid += key.offset;
		key.offset = 0;
		btrfs_release_path(path);
	}

	list_for_each_entry_rcu(space_info, &info->space_info, list) {
		if (!(btrfs_get_alloc_profile(info, space_info->flags) &
		      (BTRFS_BLOCK_GROUP_RAID10 |
		       BTRFS_BLOCK_GROUP_RAID1_MASK |
		       BTRFS_BLOCK_GROUP_RAID56_MASK |
		       BTRFS_BLOCK_GROUP_DUP)))
			continue;
		/*
		 * Avoid allocating from un-mirrored block group if there are
		 * mirrored block groups.
		 */
		list_for_each_entry(cache,
				&space_info->block_groups[BTRFS_RAID_RAID0],
				list)
			inc_block_group_ro(cache, 1);
		list_for_each_entry(cache,
				&space_info->block_groups[BTRFS_RAID_SINGLE],
				list)
			inc_block_group_ro(cache, 1);
	}

	btrfs_init_global_block_rsv(info);
	ret = check_chunk_block_group_mappings(info);
error:
	btrfs_free_path(path);
	return ret;
}
void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_block_group *block_group;
	struct btrfs_root *extent_root = fs_info->extent_root;
	struct btrfs_block_group_item item;
	struct btrfs_key key;
	int ret = 0;

	if (!trans->can_flush_pending_bgs)
		return;

	while (!list_empty(&trans->new_bgs)) {
		block_group = list_first_entry(&trans->new_bgs,
					       struct btrfs_block_group,
					       bg_list);
		if (ret)
			goto next;

		spin_lock(&block_group->lock);
		btrfs_set_stack_block_group_used(&item, block_group->used);
		btrfs_set_stack_block_group_chunk_objectid(&item,
				BTRFS_FIRST_CHUNK_TREE_OBJECTID);
		btrfs_set_stack_block_group_flags(&item, block_group->flags);
		key.objectid = block_group->start;
		key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
		key.offset = block_group->length;
		spin_unlock(&block_group->lock);

		ret = btrfs_insert_item(trans, extent_root, &key, &item,
					sizeof(item));
		if (ret)
			btrfs_abort_transaction(trans, ret);
		ret = btrfs_finish_chunk_alloc(trans, key.objectid, key.offset);
		if (ret)
			btrfs_abort_transaction(trans, ret);
		add_block_group_free_space(trans, block_group);
		/* Already aborted the transaction if it failed. */
next:
		btrfs_delayed_refs_rsv_release(fs_info, 1);
		list_del_init(&block_group->bg_list);
	}
	btrfs_trans_release_chunk_metadata(trans);
}
int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
			   u64 type, u64 chunk_offset, u64 size)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_block_group *cache;
	int ret;

	btrfs_set_log_full_commit(trans);

	cache = btrfs_create_block_group_cache(fs_info, chunk_offset, size);
	if (!cache)
		return -ENOMEM;

	cache->used = bytes_used;
	cache->flags = type;
	cache->last_byte_to_unpin = (u64)-1;
	cache->cached = BTRFS_CACHE_FINISHED;
	cache->needs_free_space = 1;
	ret = exclude_super_stripes(cache);
	if (ret) {
		/* We may have excluded something, so call this just in case */
		btrfs_free_excluded_extents(cache);
		btrfs_put_block_group(cache);
		return ret;
	}

	add_new_free_space(cache, chunk_offset, chunk_offset + size);

	btrfs_free_excluded_extents(cache);

#ifdef CONFIG_BTRFS_DEBUG
	if (btrfs_should_fragment_free_space(cache)) {
		u64 new_bytes_used = size - bytes_used;

		bytes_used += new_bytes_used >> 1;
		fragment_free_space(cache);
	}
#endif
	/*
	 * Ensure the corresponding space_info object is created and
	 * assigned to our block group. We want our bg to be added to the rbtree
	 * with its ->space_info set.
	 */
	cache->space_info = btrfs_find_space_info(fs_info, cache->flags);
	ASSERT(cache->space_info);

	ret = btrfs_add_block_group_cache(fs_info, cache);
	if (ret) {
		btrfs_remove_free_space_cache(cache);
		btrfs_put_block_group(cache);
		return ret;
	}

	/*
	 * Now that our block group has its ->space_info set and is inserted in
	 * the rbtree, update the space info's counters.
	 */
	trace_btrfs_add_block_group(fs_info, cache, 1);
	btrfs_update_space_info(fs_info, cache->flags, size, bytes_used,
				cache->bytes_super, &cache->space_info);
	btrfs_update_global_block_rsv(fs_info);

	link_block_group(cache);

	list_add_tail(&cache->bg_list, &trans->new_bgs);
	trans->delayed_ref_updates++;
	btrfs_update_delayed_refs_rsv(trans);

	set_avail_alloc_bits(fs_info, type);
	return 0;
}
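
/*
 * Compute the target profile when relocating a block group: honour a running
 * restripe target if one is set, otherwise degrade or upgrade the RAID
 * profile based on the number of writeable devices (e.g. mirroring becomes
 * DUP on a single device, DUP becomes RAID1 with multiple devices).
 */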
static u64 update_block_group_flags(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 num_devices;
	u64 stripped;

	/*
	 * if restripe for this chunk_type is on pick target profile and
	 * return, otherwise do the usual balance
	 */
	stripped = get_restripe_target(fs_info, flags);
	if (stripped)
		return extended_to_chunk(stripped);

	num_devices = fs_info->fs_devices->rw_devices;

	stripped = BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID56_MASK |
		BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10;

	if (num_devices == 1) {
		stripped |= BTRFS_BLOCK_GROUP_DUP;
		stripped = flags & ~stripped;

		/* turn raid0 into single device chunks */
		if (flags & BTRFS_BLOCK_GROUP_RAID0)
			return stripped;

		/* turn mirroring into duplication */
		if (flags & (BTRFS_BLOCK_GROUP_RAID1_MASK |
			     BTRFS_BLOCK_GROUP_RAID10))
			return stripped | BTRFS_BLOCK_GROUP_DUP;
	} else {
		/* they already had raid on here, just return */
		if (flags & stripped)
			return flags;

		stripped |= BTRFS_BLOCK_GROUP_DUP;
		stripped = flags & ~stripped;

		/* switch duplicated blocks with raid1 */
		if (flags & BTRFS_BLOCK_GROUP_DUP)
			return stripped | BTRFS_BLOCK_GROUP_RAID1;

		/* this is drive concat, leave it alone */
	}

	return flags;
}
/*
 * Mark one block group RO, can be called several times for the same block
 * group.
 *
 * @cache:		the destination block group
 * @do_chunk_alloc:	whether need to do chunk pre-allocation, this is to
 *			ensure we still have some free space after marking this
 *			block group RO.
 */
int btrfs_inc_block_group_ro(struct btrfs_block_group *cache,
			     bool do_chunk_alloc)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_trans_handle *trans;
	u64 alloc_flags;
	int ret;

again:
	trans = btrfs_join_transaction(fs_info->extent_root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	/*
	 * we're not allowed to set block groups readonly after the dirty
	 * block groups cache has started writing.  If it already started,
	 * back off and let this transaction commit
	 */
	mutex_lock(&fs_info->ro_block_group_mutex);
	if (test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &trans->transaction->flags)) {
		u64 transid = trans->transid;

		mutex_unlock(&fs_info->ro_block_group_mutex);
		btrfs_end_transaction(trans);

		ret = btrfs_wait_for_commit(fs_info, transid);
		if (ret)
			return ret;
		goto again;
	}

	if (do_chunk_alloc) {
		/*
		 * If we are changing raid levels, try to allocate a
		 * corresponding block group with the new raid level.
		 */
		alloc_flags = update_block_group_flags(fs_info, cache->flags);
		if (alloc_flags != cache->flags) {
			ret = btrfs_chunk_alloc(trans, alloc_flags,
						CHUNK_ALLOC_FORCE);
			/*
			 * ENOSPC is allowed here, we may have enough space
			 * already allocated at the new raid level to carry on
			 */
			if (ret == -ENOSPC)
				ret = 0;
			if (ret < 0)
				goto out;
		}
	}

	ret = inc_block_group_ro(cache, 0);
	if (!do_chunk_alloc)
		goto unlock_out;
	if (!ret)
		goto out;
	alloc_flags = btrfs_get_alloc_profile(fs_info, cache->space_info->flags);
	ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
	if (ret < 0)
		goto out;
	ret = inc_block_group_ro(cache, 0);
out:
	if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
		alloc_flags = update_block_group_flags(fs_info, cache->flags);
		mutex_lock(&fs_info->chunk_mutex);
		check_system_chunk(trans, alloc_flags);
		mutex_unlock(&fs_info->chunk_mutex);
	}
unlock_out:
	mutex_unlock(&fs_info->ro_block_group_mutex);

	btrfs_end_transaction(trans);
	return ret;
}
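
/*
 * Undo a previous btrfs_inc_block_group_ro(): drop the read-only counter and,
 * once it reaches zero, return the unused portion of the block group to the
 * space_info's read-write accounting and take it off the read-only list.
 */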
void btrfs_dec_block_group_ro(struct btrfs_block_group *cache)
{
	struct btrfs_space_info *sinfo = cache->space_info;
	u64 num_bytes;

	BUG_ON(!cache->ro);

	spin_lock(&sinfo->lock);
	spin_lock(&cache->lock);
	if (!--cache->ro) {
		num_bytes = cache->length - cache->reserved -
			    cache->pinned - cache->bytes_super - cache->used;
		sinfo->bytes_readonly -= num_bytes;
		list_del_init(&cache->ro_list);
	}
	spin_unlock(&cache->lock);
	spin_unlock(&sinfo->lock);
}
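
/*
 * Write the current in-memory state of a block group (used bytes, chunk
 * objectid and flags) into its block group item in the extent tree.
 */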
static int write_one_cache_group(struct btrfs_trans_handle *trans,
				 struct btrfs_path *path,
				 struct btrfs_block_group *cache)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int ret;
	struct btrfs_root *extent_root = fs_info->extent_root;
	unsigned long bi;
	struct extent_buffer *leaf;
	struct btrfs_block_group_item bgi;
	struct btrfs_key key;

	key.objectid = cache->start;
	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	key.offset = cache->length;

	ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 1);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		goto fail;
	}

	leaf = path->nodes[0];
	bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
	btrfs_set_stack_block_group_used(&bgi, cache->used);
	btrfs_set_stack_block_group_chunk_objectid(&bgi,
			BTRFS_FIRST_CHUNK_TREE_OBJECTID);
	btrfs_set_stack_block_group_flags(&bgi, cache->flags);
	write_extent_buffer(leaf, &bgi, bi, sizeof(bgi));
	btrfs_mark_buffer_dirty(leaf);
fail:
	btrfs_release_path(path);
	return ret;
}
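
/*
 * Prepare the free space cache inode of a block group for this transaction:
 * create or truncate the inode and preallocate room for the cache so the
 * later writeout does not have to allocate metadata.  Small block groups and
 * filesystems without the v1 space cache enabled are skipped.
 */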
static int cache_save_setup(struct btrfs_block_group *block_group,
			    struct btrfs_trans_handle *trans,
			    struct btrfs_path *path)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_root *root = fs_info->tree_root;
	struct inode *inode = NULL;
	struct extent_changeset *data_reserved = NULL;
	u64 alloc_hint = 0;
	int dcs = BTRFS_DC_ERROR;
	u64 num_pages = 0;
	int retries = 0;
	int ret = 0;

	/*
	 * If this block group is smaller than 100 megs don't bother caching the
	 * block group.
	 */
	if (block_group->length < (100 * SZ_1M)) {
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_WRITTEN;
		spin_unlock(&block_group->lock);
		return 0;
	}

	if (trans->aborted)
		return 0;
again:
	inode = lookup_free_space_inode(block_group, path);
	if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
		ret = PTR_ERR(inode);
		btrfs_release_path(path);
		goto out;
	}

	if (IS_ERR(inode)) {
		BUG_ON(retries);
		retries++;

		if (block_group->ro)
			goto out_free;

		ret = create_free_space_inode(trans, block_group, path);
		if (ret)
			goto out_free;
		goto again;
	}

	/*
	 * We want to set the generation to 0, that way if anything goes wrong
	 * from here on out we know not to trust this cache when we load up next
	 * time.
	 */
	BTRFS_I(inode)->generation = 0;
	ret = btrfs_update_inode(trans, root, inode);
	if (ret) {
		/*
		 * So theoretically we could recover from this, simply set the
		 * super cache generation to 0 so we know to invalidate the
		 * cache, but then we'd have to keep track of the block groups
		 * that fail this way so we know we _have_ to reset this cache
		 * before the next commit or risk reading stale cache.  So to
		 * limit our exposure to horrible edge cases lets just abort the
		 * transaction, this only happens in really bad situations
		 * anyway.
		 */
		btrfs_abort_transaction(trans, ret);
		goto out_put;
	}
	WARN_ON(ret);

	/* We've already setup this transaction, go ahead and exit */
	if (block_group->cache_generation == trans->transid &&
	    i_size_read(inode)) {
		dcs = BTRFS_DC_SETUP;
		goto out_put;
	}

	if (i_size_read(inode) > 0) {
		ret = btrfs_check_trunc_cache_free_space(fs_info,
					&fs_info->global_block_rsv);
		if (ret)
			goto out_put;

		ret = btrfs_truncate_free_space_cache(trans, NULL, inode);
		if (ret)
			goto out_put;
	}

	spin_lock(&block_group->lock);
	if (block_group->cached != BTRFS_CACHE_FINISHED ||
	    !btrfs_test_opt(fs_info, SPACE_CACHE)) {
		/*
		 * don't bother trying to write stuff out _if_
		 * a) we're not cached,
		 * b) we're with nospace_cache mount option,
		 * c) we're with v2 space_cache (FREE_SPACE_TREE).
		 */
		dcs = BTRFS_DC_WRITTEN;
		spin_unlock(&block_group->lock);
		goto out_put;
	}
	spin_unlock(&block_group->lock);

	/*
	 * We hit an ENOSPC when setting up the cache in this transaction, just
	 * skip doing the setup, we've already cleared the cache so we're safe.
	 */
	if (test_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags)) {
		ret = -ENOSPC;
		goto out_put;
	}

	/*
	 * Try to preallocate enough space based on how big the block group is.
	 * Keep in mind this has to include any pinned space which could end up
	 * taking up quite a bit since it's not folded into the other space
	 * cache.
	 */
	num_pages = div_u64(block_group->length, SZ_256M);
	if (!num_pages)
		num_pages = 1;

	num_pages *= 16;
	num_pages *= PAGE_SIZE;

	ret = btrfs_check_data_free_space(inode, &data_reserved, 0, num_pages);
	if (ret)
		goto out_put;

	ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
					      num_pages, num_pages,
					      &alloc_hint);
	/*
	 * Our cache requires contiguous chunks so that we don't modify a bunch
	 * of metadata or split extents when writing the cache out, which means
	 * we can enospc if we are heavily fragmented in addition to just normal
	 * out of space conditions.  So if we hit this just skip setting up any
	 * other block groups for this transaction, maybe we'll unpin enough
	 * space the next time around.
	 */
	if (!ret)
		dcs = BTRFS_DC_SETUP;
	else if (ret == -ENOSPC)
		set_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags);

out_put:
	iput(inode);
out_free:
	btrfs_release_path(path);
out:
	spin_lock(&block_group->lock);
	if (!ret && dcs == BTRFS_DC_SETUP)
		block_group->cache_generation = trans->transid;
	block_group->disk_cache_state = dcs;
	spin_unlock(&block_group->lock);

	extent_changeset_free(data_reserved);
	return ret;
}
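
/*
 * Run cache_save_setup() for every dirty block group whose free space cache
 * is still marked BTRFS_DC_CLEAR, so the cache inodes exist before the dirty
 * block groups are written out.
 */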
int btrfs_setup_space_cache(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_block_group *cache, *tmp;
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_path *path;

	if (list_empty(&cur_trans->dirty_bgs) ||
	    !btrfs_test_opt(fs_info, SPACE_CACHE))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* Could add new block groups, use _safe just in case */
	list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs,
				 dirty_list) {
		if (cache->disk_cache_state == BTRFS_DC_CLEAR)
			cache_save_setup(cache, trans, path);
	}

	btrfs_free_path(path);
	return 0;
}
/*
 * Transaction commit does final block group cache writeback during a critical
 * section where nothing is allowed to change the FS.  This is required in
 * order for the cache to actually match the block group, but can introduce a
 * lot of latency into the commit.
 *
 * So, btrfs_start_dirty_block_groups is here to kick off block group cache IO.
 * There's a chance we'll have to redo some of it if the block group changes
 * again during the commit, but it greatly reduces the commit latency by
 * getting rid of the easy block groups while we're still allowing others to
 * join the commit.
 */
int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_block_group *cache;
	struct btrfs_transaction *cur_trans = trans->transaction;
	int ret = 0;
	int should_put;
	struct btrfs_path *path = NULL;
	LIST_HEAD(dirty);
	struct list_head *io = &cur_trans->io_bgs;
	int num_started = 0;
	int loops = 0;

	spin_lock(&cur_trans->dirty_bgs_lock);
	if (list_empty(&cur_trans->dirty_bgs)) {
		spin_unlock(&cur_trans->dirty_bgs_lock);
		return 0;
	}
	list_splice_init(&cur_trans->dirty_bgs, &dirty);
	spin_unlock(&cur_trans->dirty_bgs_lock);

again:
	/* Make sure all the block groups on our dirty list actually exist */
	btrfs_create_pending_block_groups(trans);

	if (!path) {
		path = btrfs_alloc_path();
		if (!path)
			return -ENOMEM;
	}

	/*
	 * cache_write_mutex is here only to save us from balance or automatic
	 * removal of empty block groups deleting this block group while we are
	 * writing out the cache
	 */
	mutex_lock(&trans->transaction->cache_write_mutex);
	while (!list_empty(&dirty)) {
		bool drop_reserve = true;

		cache = list_first_entry(&dirty, struct btrfs_block_group,
					 dirty_list);
		/*
		 * This can happen if something re-dirties a block group that
		 * is already under IO.  Just wait for it to finish and then do
		 * it all again
		 */
		if (!list_empty(&cache->io_list)) {
			list_del_init(&cache->io_list);
			btrfs_wait_cache_io(trans, cache, path);
			btrfs_put_block_group(cache);
		}

		/*
		 * btrfs_wait_cache_io uses the cache->dirty_list to decide if
		 * it should update the cache_state.  Don't delete until after
		 * we wait.
		 *
		 * Since we're not running in the commit critical section
		 * we need the dirty_bgs_lock to protect from update_block_group
		 */
		spin_lock(&cur_trans->dirty_bgs_lock);
		list_del_init(&cache->dirty_list);
		spin_unlock(&cur_trans->dirty_bgs_lock);

		should_put = 1;

		cache_save_setup(cache, trans, path);

		if (cache->disk_cache_state == BTRFS_DC_SETUP) {
			cache->io_ctl.inode = NULL;
			ret = btrfs_write_out_cache(trans, cache, path);
			if (ret == 0 && cache->io_ctl.inode) {
				num_started++;
				should_put = 0;

				/*
				 * The cache_write_mutex is protecting the
				 * io_list, also refer to the definition of
				 * btrfs_transaction::io_bgs for more details
				 */
				list_add_tail(&cache->io_list, io);
			} else {
				/*
				 * If we failed to write the cache, the
				 * generation will be bad and life goes on
				 */
				ret = 0;
			}
		}
		if (!ret) {
			ret = write_one_cache_group(trans, path, cache);
			/*
			 * Our block group might still be attached to the list
			 * of new block groups in the transaction handle of some
			 * other task (struct btrfs_trans_handle->new_bgs). This
			 * means its block group item isn't yet in the extent
			 * tree. If this happens ignore the error, as we will
			 * try again later in the critical section of the
			 * transaction commit.
			 */
			if (ret == -ENOENT) {
				ret = 0;
				spin_lock(&cur_trans->dirty_bgs_lock);
				if (list_empty(&cache->dirty_list)) {
					list_add_tail(&cache->dirty_list,
						      &cur_trans->dirty_bgs);
					btrfs_get_block_group(cache);
					drop_reserve = false;
				}
				spin_unlock(&cur_trans->dirty_bgs_lock);
			} else if (ret) {
				btrfs_abort_transaction(trans, ret);
			}
		}

		/* If it's not on the io list, we need to put the block group */
		if (should_put)
			btrfs_put_block_group(cache);
		if (drop_reserve)
			btrfs_delayed_refs_rsv_release(fs_info, 1);

		if (ret)
			break;

		/*
		 * Avoid blocking other tasks for too long. It might even save
		 * us from writing caches for block groups that are going to be
		 * removed.
		 */
		mutex_unlock(&trans->transaction->cache_write_mutex);
		mutex_lock(&trans->transaction->cache_write_mutex);
	}
	mutex_unlock(&trans->transaction->cache_write_mutex);

	/*
	 * Go through delayed refs for all the stuff we've just kicked off
	 * and then loop back (just once)
	 */
	ret = btrfs_run_delayed_refs(trans, 0);
	if (!ret && loops == 0) {
		loops++;
		spin_lock(&cur_trans->dirty_bgs_lock);
		list_splice_init(&cur_trans->dirty_bgs, &dirty);
		/*
		 * dirty_bgs_lock protects us from concurrent block group
		 * deletes too (not just cache_write_mutex).
		 */
		if (!list_empty(&dirty)) {
			spin_unlock(&cur_trans->dirty_bgs_lock);
			goto again;
		}
		spin_unlock(&cur_trans->dirty_bgs_lock);
	} else if (ret < 0) {
		btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
	}

	btrfs_free_path(path);
	return ret;
}
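
/*
 * Final pass over the dirty block groups, run during the critical section of
 * the transaction commit: write out any remaining free space caches, update
 * the block group items and wait for the cache IO started here (and earlier
 * by btrfs_start_dirty_block_groups()) to finish.
 */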
int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_block_group *cache;
	struct btrfs_transaction *cur_trans = trans->transaction;
	int ret = 0;
	int should_put;
	struct btrfs_path *path;
	struct list_head *io = &cur_trans->io_bgs;
	int num_started = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * Even though we are in the critical section of the transaction commit,
	 * we can still have concurrent tasks adding elements to this
	 * transaction's list of dirty block groups. These tasks correspond to
	 * endio free space workers started when writeback finishes for a
	 * space cache, which run inode.c:btrfs_finish_ordered_io(), and can
	 * allocate new block groups as a result of COWing nodes of the root
	 * tree when updating the free space inode. The writeback for the space
	 * caches is triggered by an earlier call to
	 * btrfs_start_dirty_block_groups() and iterations of the following
	 * loop.
	 * Also we want to do the cache_save_setup first and then run the
	 * delayed refs to make sure we have the best chance at doing this all
	 * in one shot.
	 */
	spin_lock(&cur_trans->dirty_bgs_lock);
	while (!list_empty(&cur_trans->dirty_bgs)) {
		cache = list_first_entry(&cur_trans->dirty_bgs,
					 struct btrfs_block_group,
					 dirty_list);

		/*
		 * This can happen if cache_save_setup re-dirties a block group
		 * that is already under IO.  Just wait for it to finish and
		 * then do it all again
		 */
		if (!list_empty(&cache->io_list)) {
			spin_unlock(&cur_trans->dirty_bgs_lock);
			list_del_init(&cache->io_list);
			btrfs_wait_cache_io(trans, cache, path);
			btrfs_put_block_group(cache);
			spin_lock(&cur_trans->dirty_bgs_lock);
		}

		/*
		 * Don't remove from the dirty list until after we've waited on
		 * any pending IO
		 */
		list_del_init(&cache->dirty_list);
		spin_unlock(&cur_trans->dirty_bgs_lock);
		should_put = 1;

		cache_save_setup(cache, trans, path);

		if (!ret)
			ret = btrfs_run_delayed_refs(trans,
						     (unsigned long) -1);

		if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) {
			cache->io_ctl.inode = NULL;
			ret = btrfs_write_out_cache(trans, cache, path);
			if (ret == 0 && cache->io_ctl.inode) {
				num_started++;
				should_put = 0;
				list_add_tail(&cache->io_list, io);
			} else {
				/*
				 * If we failed to write the cache, the
				 * generation will be bad and life goes on
				 */
				ret = 0;
			}
		}
		if (!ret) {
			ret = write_one_cache_group(trans, path, cache);
			/*
			 * One of the free space endio workers might have
			 * created a new block group while updating a free space
			 * cache's inode (at inode.c:btrfs_finish_ordered_io())
			 * and hasn't released its transaction handle yet, in
			 * which case the new block group is still attached to
			 * its transaction handle and its creation has not
			 * finished yet (no block group item in the extent tree
			 * yet, etc). If this is the case, wait for all free
			 * space endio workers to finish and retry. This is a
			 * very rare case so no need for a more efficient and
			 * complex approach.
			 */
			if (ret == -ENOENT) {
				wait_event(cur_trans->writer_wait,
				   atomic_read(&cur_trans->num_writers) == 1);
				ret = write_one_cache_group(trans, path, cache);
			}
			if (ret)
				btrfs_abort_transaction(trans, ret);
		}

		/* If its not on the io list, we need to put the block group */
		if (should_put)
			btrfs_put_block_group(cache);
		btrfs_delayed_refs_rsv_release(fs_info, 1);
		spin_lock(&cur_trans->dirty_bgs_lock);
	}
	spin_unlock(&cur_trans->dirty_bgs_lock);

	/*
	 * Refer to the definition of io_bgs member for details why it's safe
	 * to use it without any locking
	 */
	while (!list_empty(io)) {
		cache = list_first_entry(io, struct btrfs_block_group,
					 io_list);
		list_del_init(&cache->io_list);
		btrfs_wait_cache_io(trans, cache, path);
		btrfs_put_block_group(cache);
	}

	btrfs_free_path(path);
	return ret;
}
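
/*
 * Account an extent allocation (alloc != 0) or free (alloc == 0) against the
 * block group(s) covering [bytenr, bytenr + num_bytes): adjust the used,
 * reserved and pinned counters, dirty the affected block groups and, when a
 * block group becomes empty, queue it as unused.
 */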
int btrfs_update_block_group(struct btrfs_trans_handle *trans,
			     u64 bytenr, u64 num_bytes, int alloc)
{
	struct btrfs_fs_info *info = trans->fs_info;
	struct btrfs_block_group *cache = NULL;
	u64 total = num_bytes;
	u64 old_val;
	u64 byte_in_group;
	int factor;
	int ret = 0;

	/* Block accounting for super block */
	spin_lock(&info->delalloc_root_lock);
	old_val = btrfs_super_bytes_used(info->super_copy);
	if (alloc)
		old_val += num_bytes;
	else
		old_val -= num_bytes;
	btrfs_set_super_bytes_used(info->super_copy, old_val);
	spin_unlock(&info->delalloc_root_lock);

	while (total) {
		cache = btrfs_lookup_block_group(info, bytenr);
		if (!cache) {
			ret = -ENOENT;
			break;
		}
		factor = btrfs_bg_type_to_factor(cache->flags);

		/*
		 * If this block group has free space cache written out, we
		 * need to make sure to load it if we are removing space.  This
		 * is because we need the unpinning stage to actually add the
		 * space back to the block group, otherwise we will leak space.
		 */
		if (!alloc && !btrfs_block_group_done(cache))
			btrfs_cache_block_group(cache, 1);

		byte_in_group = bytenr - cache->start;
		WARN_ON(byte_in_group > cache->length);

		spin_lock(&cache->space_info->lock);
		spin_lock(&cache->lock);

		if (btrfs_test_opt(info, SPACE_CACHE) &&
		    cache->disk_cache_state < BTRFS_DC_CLEAR)
			cache->disk_cache_state = BTRFS_DC_CLEAR;

		old_val = cache->used;
		num_bytes = min(total, cache->length - byte_in_group);
		if (alloc) {
			old_val += num_bytes;
			cache->used = old_val;
			cache->reserved -= num_bytes;
			cache->space_info->bytes_reserved -= num_bytes;
			cache->space_info->bytes_used += num_bytes;
			cache->space_info->disk_used += num_bytes * factor;
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);
		} else {
			old_val -= num_bytes;
			cache->used = old_val;
			cache->pinned += num_bytes;
			btrfs_space_info_update_bytes_pinned(info,
					cache->space_info, num_bytes);
			cache->space_info->bytes_used -= num_bytes;
			cache->space_info->disk_used -= num_bytes * factor;
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);

			percpu_counter_add_batch(
					&cache->space_info->total_bytes_pinned,
					num_bytes,
					BTRFS_TOTAL_BYTES_PINNED_BATCH);
			set_extent_dirty(info->pinned_extents,
					 bytenr, bytenr + num_bytes - 1,
					 GFP_NOFS | __GFP_NOFAIL);
		}

		spin_lock(&trans->transaction->dirty_bgs_lock);
		if (list_empty(&cache->dirty_list)) {
			list_add_tail(&cache->dirty_list,
				      &trans->transaction->dirty_bgs);
			trans->delayed_ref_updates++;
			btrfs_get_block_group(cache);
		}
		spin_unlock(&trans->transaction->dirty_bgs_lock);

		/*
		 * No longer have used bytes in this block group, queue it for
		 * deletion. We do this after adding the block group to the
		 * dirty list to avoid races between cleaner kthread and space
		 * cache writeout.
		 */
		if (!alloc && old_val == 0) {
			if (!btrfs_test_opt(info, DISCARD_ASYNC))
				btrfs_mark_bg_unused(cache);
		}

		btrfs_put_block_group(cache);
		total -= num_bytes;
		bytenr += num_bytes;
	}

	/* Modified block groups are accounted for in the delayed_refs_rsv. */
	btrfs_update_delayed_refs_rsv(trans);
	return ret;
}
/**
 * btrfs_add_reserved_bytes - update the block_group and space info counters
 * @cache:	The cache we are manipulating
 * @ram_bytes:  The number of bytes of file content, and will be same to
 *              @num_bytes except for the compress path.
 * @num_bytes:	The number of bytes in question
 * @delalloc:   The blocks are allocated for the delalloc write
 *
 * This is called by the allocator when it reserves space. If this is a
 * reservation and the block group has become read only we cannot make the
 * reservation and return -EAGAIN, otherwise this function always succeeds.
 */
int btrfs_add_reserved_bytes(struct btrfs_block_group *cache,
			     u64 ram_bytes, u64 num_bytes, int delalloc)
{
	struct btrfs_space_info *space_info = cache->space_info;
	int ret = 0;

	spin_lock(&space_info->lock);
	spin_lock(&cache->lock);
	if (cache->ro) {
		ret = -EAGAIN;
	} else {
		cache->reserved += num_bytes;
		space_info->bytes_reserved += num_bytes;
		trace_btrfs_space_reservation(cache->fs_info, "space_info",
					      space_info->flags, num_bytes, 1);
		btrfs_space_info_update_bytes_may_use(cache->fs_info,
						      space_info, -ram_bytes);
		if (delalloc)
			cache->delalloc_bytes += num_bytes;
	}
	spin_unlock(&cache->lock);
	spin_unlock(&space_info->lock);
	return ret;
}
/**
 * btrfs_free_reserved_bytes - update the block_group and space info counters
 * @cache:      The cache we are manipulating
 * @num_bytes:  The number of bytes in question
 * @delalloc:   The blocks are allocated for the delalloc write
 *
 * This is called by somebody who is freeing space that was never actually used
 * on disk.  For example if you reserve some space for a new leaf in transaction
 * A and before transaction A commits you free that leaf, you call this with
 * reserve set to 0 in order to clear the reservation.
 */
void btrfs_free_reserved_bytes(struct btrfs_block_group *cache,
			       u64 num_bytes, int delalloc)
{
	struct btrfs_space_info *space_info = cache->space_info;

	spin_lock(&space_info->lock);
	spin_lock(&cache->lock);
	if (cache->ro)
		space_info->bytes_readonly += num_bytes;
	cache->reserved -= num_bytes;
	space_info->bytes_reserved -= num_bytes;
	space_info->max_extent_size = 0;

	if (delalloc)
		cache->delalloc_bytes -= num_bytes;
	spin_unlock(&cache->lock);
	spin_unlock(&space_info->lock);
}
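
/*
 * Force the next metadata chunk allocation for every metadata space_info,
 * used to keep a minimum ratio of metadata to data chunks when the
 * metadata_ratio mount option is set.
 */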
static void force_metadata_allocation(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
			found->force_alloc = CHUNK_ALLOC_FORCE;
	}
	rcu_read_unlock();
}
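
/*
 * Decide whether a new chunk should be allocated for @sinfo given the force
 * level: always for CHUNK_ALLOC_FORCE, when free space drops below roughly 1%
 * of the filesystem for CHUNK_ALLOC_LIMITED, and otherwise only once the
 * space_info is close to fully used.
 */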
static int should_alloc_chunk(struct btrfs_fs_info *fs_info,
			      struct btrfs_space_info *sinfo, int force)
{
	u64 bytes_used = btrfs_space_info_used(sinfo, false);
	u64 thresh;

	if (force == CHUNK_ALLOC_FORCE)
		return 1;

	/*
	 * in limited mode, we want to have some free space up to
	 * about 1% of the FS size.
	 */
	if (force == CHUNK_ALLOC_LIMITED) {
		thresh = btrfs_super_total_bytes(fs_info->super_copy);
		thresh = max_t(u64, SZ_64M, div_factor_fine(thresh, 1));

		if (sinfo->total_bytes - bytes_used < thresh)
			return 1;
	}

	if (bytes_used + SZ_2M < div_factor(sinfo->total_bytes, 8))
		return 0;
	return 1;
}
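
/*
 * Convenience wrapper that forces allocation of a new chunk of the given type
 * using the currently allowed profile for that type.
 */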
int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type)
{
	u64 alloc_flags = btrfs_get_alloc_profile(trans->fs_info, type);

	return btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
}
/*
 * If force is CHUNK_ALLOC_FORCE:
 *    - return 1 if it successfully allocates a chunk,
 *    - return errors including -ENOSPC otherwise.
 * If force is NOT CHUNK_ALLOC_FORCE:
 *    - return 0 if it doesn't need to allocate a new chunk,
 *    - return 1 if it successfully allocates a chunk,
 *    - return errors including -ENOSPC otherwise.
 */
int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
		      enum btrfs_chunk_alloc_enum force)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_space_info *space_info;
	bool wait_for_alloc = false;
	bool should_alloc = false;
	int ret = 0;

	/* Don't re-enter if we're already allocating a chunk */
	if (trans->allocating_chunk)
		return -ENOSPC;

	space_info = btrfs_find_space_info(fs_info, flags);
	ASSERT(space_info);

	do {
		spin_lock(&space_info->lock);
		if (force < space_info->force_alloc)
			force = space_info->force_alloc;
		should_alloc = should_alloc_chunk(fs_info, space_info, force);
		if (space_info->full) {
			/* No more free physical space */
			if (should_alloc)
				ret = -ENOSPC;
			else
				ret = 0;
			spin_unlock(&space_info->lock);
			return ret;
		} else if (!should_alloc) {
			spin_unlock(&space_info->lock);
			return 0;
		} else if (space_info->chunk_alloc) {
			/*
			 * Someone is already allocating, so we need to block
			 * until this someone is finished and then loop to
			 * recheck if we should continue with our allocation
			 * attempt.
			 */
			wait_for_alloc = true;
			spin_unlock(&space_info->lock);
			mutex_lock(&fs_info->chunk_mutex);
			mutex_unlock(&fs_info->chunk_mutex);
		} else {
			/* Proceed with allocation */
			space_info->chunk_alloc = 1;
			wait_for_alloc = false;
			spin_unlock(&space_info->lock);
		}

		cond_resched();
	} while (wait_for_alloc);

	mutex_lock(&fs_info->chunk_mutex);
	trans->allocating_chunk = true;

	/*
	 * If we have mixed data/metadata chunks we want to make sure we keep
	 * allocating mixed chunks instead of individual chunks.
	 */
	if (btrfs_mixed_space_info(space_info))
		flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);

	/*
	 * if we're doing a data chunk, go ahead and make sure that
	 * we keep a reasonable number of metadata chunks allocated in the
	 * FS as well.
	 */
	if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
		fs_info->data_chunk_allocations++;
		if (!(fs_info->data_chunk_allocations %
		      fs_info->metadata_ratio))
			force_metadata_allocation(fs_info);
	}

	/*
	 * Check if we have enough space in SYSTEM chunk because we may need
	 * to update devices.
	 */
	check_system_chunk(trans, flags);

	ret = btrfs_alloc_chunk(trans, flags);
	trans->allocating_chunk = false;

	spin_lock(&space_info->lock);
	if (ret < 0) {
		if (ret == -ENOSPC)
			space_info->full = 1;
		else
			goto out;
	} else {
		ret = 1;
		space_info->max_extent_size = 0;
	}

	space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
out:
	space_info->chunk_alloc = 0;
	spin_unlock(&space_info->lock);
	mutex_unlock(&fs_info->chunk_mutex);
	/*
	 * When we allocate a new chunk we reserve space in the chunk block
	 * reserve to make sure we can COW nodes/leafs in the chunk tree or
	 * add new nodes/leafs to it if we end up needing to do it when
	 * inserting the chunk item and updating device items as part of the
	 * second phase of chunk allocation, performed by
	 * btrfs_finish_chunk_alloc(). So make sure we don't accumulate a
	 * large number of new block groups to create in our transaction
	 * handle's new_bgs list to avoid exhausting the chunk block reserve
	 * in extreme cases - like having a single transaction create many new
	 * block groups when starting to write out the free space caches of all
	 * the block groups that were made dirty during the lifetime of the
	 * transaction.
	 */
	if (trans->chunk_bytes_reserved >= (u64)SZ_2M)
		btrfs_create_pending_block_groups(trans);

	return ret;
}
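
/*
 * Return the number of devices whose items may need updating when allocating
 * or removing a chunk of the given type: the profile's devs_max, or the
 * number of writeable devices when the profile has no fixed maximum.
 */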
static u64 get_profile_num_devs(struct btrfs_fs_info *fs_info, u64 type)
{
	u64 num_dev;

	num_dev = btrfs_raid_array[btrfs_bg_flags_to_raid_index(type)].devs_max;
	if (!num_dev)
		num_dev = fs_info->fs_devices->rw_devices;

	return num_dev;
}
/*
 * Reserve space in the system space for allocating or removing a chunk
 */
void check_system_chunk(struct btrfs_trans_handle *trans, u64 type)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_space_info *info;
	u64 left;
	u64 thresh;
	int ret = 0;
	u64 num_devs;

	/*
	 * Needed because we can end up allocating a system chunk and for an
	 * atomic and race free space reservation in the chunk block reserve.
	 */
	lockdep_assert_held(&fs_info->chunk_mutex);

	info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
	spin_lock(&info->lock);
	left = info->total_bytes - btrfs_space_info_used(info, true);
	spin_unlock(&info->lock);

	num_devs = get_profile_num_devs(fs_info, type);

	/* num_devs device items to update and 1 chunk item to add or remove */
	thresh = btrfs_calc_metadata_size(fs_info, num_devs) +
		btrfs_calc_insert_metadata_size(fs_info, 1);

	if (left < thresh && btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
		btrfs_info(fs_info, "left=%llu, need=%llu, flags=%llu",
			   left, thresh, type);
		btrfs_dump_space_info(fs_info, info, 0, 0);
	}

	if (left < thresh) {
		u64 flags = btrfs_system_alloc_profile(fs_info);

		/*
		 * Ignore failure to create system chunk. We might end up not
		 * needing it, as we might not need to COW all nodes/leafs from
		 * the paths we visit in the chunk tree (they were already COWed
		 * or created in the current transaction for example).
		 */
		ret = btrfs_alloc_chunk(trans, flags);
	}

	if (!ret) {
		ret = btrfs_block_rsv_add(fs_info->chunk_root,
					  &fs_info->chunk_block_rsv,
					  thresh, BTRFS_RESERVE_NO_FLUSH);
		if (!ret)
			trans->chunk_bytes_reserved += thresh;
	}
}
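
/*
 * Drop the inode references that block groups keep on their free space cache
 * inodes so the inodes can be evicted, typically during unmount or when
 * tearing down a failed mount.
 */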
void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
{
	struct btrfs_block_group *block_group;
	u64 last = 0;

	while (1) {
		struct inode *inode;

		block_group = btrfs_lookup_first_block_group(info, last);
		while (block_group) {
			btrfs_wait_block_group_cache_done(block_group);
			spin_lock(&block_group->lock);
			if (block_group->iref)
				break;
			spin_unlock(&block_group->lock);
			block_group = btrfs_next_block_group(block_group);
		}
		if (!block_group) {
			if (last == 0)
				break;
			last = 0;
			continue;
		}

		inode = block_group->inode;
		block_group->iref = 0;
		block_group->inode = NULL;
		spin_unlock(&block_group->lock);
		ASSERT(block_group->io_ctl.inode == NULL);
		iput(inode);
		last = block_group->start + block_group->length;
		btrfs_put_block_group(block_group);
	}
}
/*
 * Must be called only after stopping all workers, since we could have block
 * group caching kthreads running, and therefore they could race with us if we
 * freed the block groups before stopping them.
 */
int btrfs_free_block_groups(struct btrfs_fs_info *info)
{
	struct btrfs_block_group *block_group;
	struct btrfs_space_info *space_info;
	struct btrfs_caching_control *caching_ctl;
	struct rb_node *n;

	down_write(&info->commit_root_sem);
	while (!list_empty(&info->caching_block_groups)) {
		caching_ctl = list_entry(info->caching_block_groups.next,
					 struct btrfs_caching_control, list);
		list_del(&caching_ctl->list);
		btrfs_put_caching_control(caching_ctl);
	}
	up_write(&info->commit_root_sem);

	spin_lock(&info->unused_bgs_lock);
	while (!list_empty(&info->unused_bgs)) {
		block_group = list_first_entry(&info->unused_bgs,
					       struct btrfs_block_group,
					       bg_list);
		list_del_init(&block_group->bg_list);
		btrfs_put_block_group(block_group);
	}
	spin_unlock(&info->unused_bgs_lock);

	spin_lock(&info->block_group_cache_lock);
	while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
		block_group = rb_entry(n, struct btrfs_block_group,
				       cache_node);
		rb_erase(&block_group->cache_node,
			 &info->block_group_cache_tree);
		RB_CLEAR_NODE(&block_group->cache_node);
		spin_unlock(&info->block_group_cache_lock);

		down_write(&block_group->space_info->groups_sem);
		list_del(&block_group->list);
		up_write(&block_group->space_info->groups_sem);

		/*
		 * We haven't cached this block group, which means we could
		 * possibly have excluded extents on this block group.
		 */
		if (block_group->cached == BTRFS_CACHE_NO ||
		    block_group->cached == BTRFS_CACHE_ERROR)
			btrfs_free_excluded_extents(block_group);

		btrfs_remove_free_space_cache(block_group);
		ASSERT(block_group->cached != BTRFS_CACHE_STARTED);
		ASSERT(list_empty(&block_group->dirty_list));
		ASSERT(list_empty(&block_group->io_list));
		ASSERT(list_empty(&block_group->bg_list));
		ASSERT(atomic_read(&block_group->count) == 1);
		btrfs_put_block_group(block_group);

		spin_lock(&info->block_group_cache_lock);
	}
	spin_unlock(&info->block_group_cache_lock);

	/*
	 * Now that all the block groups are freed, go through and free all the
	 * space_info structs.  This is only called during the final stages of
	 * unmount, and so we know nobody is using them.  We call
	 * synchronize_rcu() once before we start, just to be on the safe side.
	 */
	synchronize_rcu();

	btrfs_release_global_block_rsv(info);

	while (!list_empty(&info->space_info)) {
		space_info = list_entry(info->space_info.next,
					struct btrfs_space_info,
					list);

		/*
		 * Do not hide this behind enospc_debug, this is actually
		 * important and indicates a real bug if this happens.
		 */
		if (WARN_ON(space_info->bytes_pinned > 0 ||
			    space_info->bytes_reserved > 0 ||
			    space_info->bytes_may_use > 0))
			btrfs_dump_space_info(info, space_info, 0, 0);
		list_del(&space_info->list);
		btrfs_sysfs_remove_space_info(space_info);