// SPDX-License-Identifier: GPL-2.0

#include "space-info.h"
#include "free-space-cache.h"
#include "ordered-data.h"
#include "transaction.h"
#include "block-group.h"
/*
 * HOW DOES SPACE RESERVATION WORK
 *
 * If you want to know about delalloc specifically, there is a separate comment
 * for that with the delalloc code. This comment is about how the whole system
 * works generally.
 *
 * BASIC CONCEPTS
 *
 *   1) space_info. This is the ultimate arbiter of how much space we can use.
 *   There's a description of the bytes_ fields with the struct declaration,
 *   refer to that for specifics on each field. Suffice it to say that for
 *   reservations we care about total_bytes - SUM(space_info->bytes_) when
 *   determining if there is space to make an allocation. There is a space_info
 *   for METADATA, SYSTEM, and DATA areas.
 *
 *   2) block_rsv's. These are basically buckets for every different type of
 *   metadata reservation we have. You can see the comment in the block_rsv
 *   code on the rules for each type, but generally block_rsv->reserved is how
 *   much space is accounted for in space_info->bytes_may_use.
 *
 *   3) btrfs_calc*_size. These are the worst case calculations we use, based
 *   on the number of items we will want to modify. We have one for changing
 *   items, and one for inserting new items. Generally we use these helpers to
 *   determine the size of the block reserves, and then use the actual bytes
 *   values to adjust the space_info counters; a sketch follows this list.
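 *
 *   As a rough illustration (a sketch, not a helper in this file), the free
 *   space the reservation code reasons about boils down to:
 *
 *	free = space_info->total_bytes - (bytes_used + bytes_reserved +
 *					  bytes_pinned + bytes_readonly +
 *					  bytes_may_use);
 *
 *   btrfs_space_info_used() below computes the SUM() part of this.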
 *
 * MAKING RESERVATIONS, THE NORMAL CASE
 *
 *   We call into either btrfs_reserve_data_bytes() or
 *   btrfs_reserve_metadata_bytes(), depending on which we're looking for, with
 *   num_bytes we want to reserve.
 *
 *   ->reserve
 *     space_info->bytes_may_use += num_bytes
 *
 *   ->extent allocation
 *     Call btrfs_add_reserved_bytes() which does
 *     space_info->bytes_may_use -= num_bytes
 *     space_info->bytes_reserved += extent_bytes
 *
 *   ->insert reference
 *     Call btrfs_update_block_group() which does
 *     space_info->bytes_reserved -= extent_bytes
 *     space_info->bytes_used += extent_bytes
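 *
 *   A worked example of the flow above (sizes made up, and note that
 *   extent_bytes need not equal the worst case num_bytes we reserved):
 *
 *	reserve:           bytes_may_use  += 16K
 *	extent allocation: bytes_may_use  -= 16K
 *	                   bytes_reserved += 16K
 *	insert reference:  bytes_reserved -= 16K
 *	                   bytes_used     += 16K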
 *
 * MAKING RESERVATIONS, FLUSHING NORMALLY (non-priority)
 *
 *   Assume we are unable to simply make the reservation because we do not have
 *   enough space.
 *
 *   -> __reserve_metadata_bytes
 *     create a reserve_ticket with ->bytes set to our reservation, add it to
 *     the tail of space_info->tickets, kick async flush thread
 *
 *   ->handle_reserve_ticket
 *     wait on ticket->wait for ->bytes to be reduced to 0, or ->error to be
 *     set on the ticket.
 *
 *   -> btrfs_async_reclaim_metadata_space/btrfs_async_reclaim_data_space
 *     Flushes various things attempting to free up space.
 *
 *   -> btrfs_try_granting_tickets()
 *     This is called by anything that either subtracts space from
 *     space_info->bytes_may_use, ->bytes_pinned, etc, or adds to the
 *     space_info->total_bytes. This loops through the ->priority_tickets and
 *     then the ->tickets list checking to see if the reservation can be
 *     completed. If it can the space is added to space_info->bytes_may_use
 *     and the ticket is woken up.
 *
 *   -> ticket wakeup
 *     Check if ->bytes == 0, if it does we got our reservation and we can
 *     carry on, if not return the appropriate error (ENOSPC, but can be EINTR
 *     if we were interrupted); a sketch of this lifecycle follows.
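 *
 *   Condensed sketch of that ticket lifecycle (illustrative only, locking and
 *   error handling elided; __reserve_metadata_bytes() below is the real code):
 *
 *	ticket.bytes = num_bytes;
 *	list_add_tail(&ticket.list, &space_info->tickets);
 *	queue_work(system_unbound_wq, &fs_info->async_reclaim_work);
 *	wait until the flusher reduces ticket.bytes to 0 or sets ticket.error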
 *
 * MAKING RESERVATIONS, FLUSHING HIGH PRIORITY
 *
 *   Same as the above, except we add ourselves to the
 *   space_info->priority_tickets, and we do not use ticket->wait, we simply
 *   call flush_space() ourselves for the states that are safe for us to call
 *   without deadlocking and hope for the best.
 *
 * THE FLUSHING STATES
 *
 *   Generally speaking we will have two cases for each state, a "nice" state
 *   and an "ALL THE THINGS" state. In btrfs we delay a lot of work in order to
 *   reduce the locking overhead on the various trees, and even to keep from
 *   doing any work at all in the case of delayed refs. Each of these delayed
 *   things however hold reservations, and so letting them run allows us to
 *   reclaim space so we can make new reservations.
 *
 *   FLUSH_DELAYED_ITEMS
 *     Every inode has a delayed item to update the inode. Take a simple write
 *     for example, we would update the inode item at write time to update the
 *     mtime, and then again at finish_ordered_io() time in order to update the
 *     isize or bytes. We keep these delayed items to coalesce these operations
 *     into a single operation done on demand. These are an easy way to reclaim
 *     metadata space.
 *
 *   FLUSH_DELALLOC
 *     Look at the delalloc comment to get an idea of how much space is reserved
 *     for delayed allocation. We can reclaim some of this space simply by
 *     running delalloc, but usually we need to wait for ordered extents to
 *     reclaim the bulk of this space.
 *
 *   FLUSH_DELAYED_REFS
 *     We have a block reserve for the outstanding delayed refs space, and every
 *     delayed ref operation holds a reservation. Running these is a quick way
 *     to reclaim space, but we want to hold this until the end because COW can
 *     churn a lot and we can avoid making some extent tree modifications if we
 *     are able to delay for as long as possible.
 *
 *   ALLOC_CHUNK
 *     We will skip this the first time through space reservation, because of
 *     overcommit, and we don't want to have a lot of useless metadata space
 *     when our worst case reservations will likely never come true.
 *
 *   RUN_DELAYED_IPUTS
 *     If we're freeing inodes we're likely freeing checksums, file extent
 *     items, and extent tree items. Loads of space could be freed up by these
 *     operations, however they won't be usable until the transaction commits.
 *
 *   COMMIT_TRANS
 *     may_commit_transaction() is the ultimate arbiter on whether we commit the
 *     transaction or not. In order to avoid constantly churning we do all the
 *     above flushing first and then commit the transaction as the last resort.
 *     However we need to take into account things like pinned space that would
 *     be freed, plus any delayed work we may not have gotten rid of in the
 *     case of metadata.
 *
 * OVERCOMMITTING
 *
 *   Because we hold so many reservations for metadata we will allow you to
 *   reserve more space than is currently free in the currently allocated
 *   metadata space. This only happens with metadata, data does not allow
 *   overcommitting.
 *
 *   You can see the current logic for when we allow overcommit in
 *   btrfs_can_overcommit(), but it only applies to unallocated space. If there
 *   is no unallocated space to be had, all reservations are kept within the
 *   free space in the allocated metadata chunks.
 *
 *   Because of overcommitting, you generally want to use the
 *   btrfs_can_overcommit() logic for metadata allocations, as it does the right
 *   thing with or without extra unallocated space.
 */
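
/*
 * Illustrative overcommit example (numbers made up, single device profile
 * assumed so the raid factor is 1): with 8G of allocated metadata chunks, 7G
 * accounted across the bytes_ counters and 4G of unallocated disk, a 1G
 * metadata reservation under BTRFS_RESERVE_FLUSH_ALL still succeeds:
 * calc_available_free_space() below grants 1/8th of the unallocated space
 * (512M), and 7G + 1G < 8G + 512M.
 */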
u64 __pure btrfs_space_info_used(struct btrfs_space_info *s_info,
				 bool may_use_included)
{
	return s_info->bytes_used + s_info->bytes_reserved +
		s_info->bytes_pinned + s_info->bytes_readonly +
		(may_use_included ? s_info->bytes_may_use : 0);
}
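
/*
 * Example (hypothetical caller): the free space that reservations can still
 * claim is typically derived as
 *
 *	u64 used = btrfs_space_info_used(space_info, true);
 *	u64 free = (used < space_info->total_bytes) ?
 *		   space_info->total_bytes - used : 0;
 *
 * which is the same clamped computation may_commit_transaction() does below.
 */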
/*
 * After adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list)
		found->full = 0;
	rcu_read_unlock();
}
static int create_space_info(struct btrfs_fs_info *info, u64 flags)
{
	struct btrfs_space_info *space_info;
	int i;
	int ret;

	space_info = kzalloc(sizeof(*space_info), GFP_NOFS);
	if (!space_info)
		return -ENOMEM;

	ret = percpu_counter_init(&space_info->total_bytes_pinned, 0,
				  GFP_KERNEL);
	if (ret) {
		kfree(space_info);
		return ret;
	}

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		INIT_LIST_HEAD(&space_info->block_groups[i]);
	init_rwsem(&space_info->groups_sem);
	spin_lock_init(&space_info->lock);
	space_info->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
	space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
	INIT_LIST_HEAD(&space_info->ro_bgs);
	INIT_LIST_HEAD(&space_info->tickets);
	INIT_LIST_HEAD(&space_info->priority_tickets);

	ret = btrfs_sysfs_add_space_info_type(info, space_info);
	if (ret)
		return ret;

	list_add_rcu(&space_info->list, &info->space_info);
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		info->data_sinfo = space_info;

	return ret;
}
int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
{
	struct btrfs_super_block *disk_super;
	u64 features;
	u64 flags;
	int mixed = 0;
	int ret;

	disk_super = fs_info->super_copy;
	if (!btrfs_super_root(disk_super))
		return -EINVAL;

	features = btrfs_super_incompat_flags(disk_super);
	if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
		mixed = 1;

	flags = BTRFS_BLOCK_GROUP_SYSTEM;
	ret = create_space_info(fs_info, flags);
	if (ret)
		goto out;

	if (mixed) {
		flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
		ret = create_space_info(fs_info, flags);
	} else {
		flags = BTRFS_BLOCK_GROUP_METADATA;
		ret = create_space_info(fs_info, flags);
		if (ret)
			goto out;

		flags = BTRFS_BLOCK_GROUP_DATA;
		ret = create_space_info(fs_info, flags);
	}
out:
	return ret;
}
void btrfs_update_space_info(struct btrfs_fs_info *info, u64 flags,
			     u64 total_bytes, u64 bytes_used,
			     u64 bytes_readonly,
			     struct btrfs_space_info **space_info)
{
	struct btrfs_space_info *found;
	int factor;

	factor = btrfs_bg_type_to_factor(flags);

	found = btrfs_find_space_info(info, flags);
	ASSERT(found);
	spin_lock(&found->lock);
	found->total_bytes += total_bytes;
	found->disk_total += total_bytes * factor;
	found->bytes_used += bytes_used;
	found->disk_used += bytes_used * factor;
	found->bytes_readonly += bytes_readonly;
	if (total_bytes > 0)
		found->full = 0;
	btrfs_try_granting_tickets(info, found);
	spin_unlock(&found->lock);
	*space_info = found;
}
struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info,
					       u64 flags)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags & flags) {
			rcu_read_unlock();
			return found;
		}
	}
	rcu_read_unlock();
	return NULL;
}
static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
{
	return (global->size << 1);
}
static u64 calc_available_free_space(struct btrfs_fs_info *fs_info,
				     struct btrfs_space_info *space_info,
				     enum btrfs_reserve_flush_enum flush)
{
	u64 profile;
	u64 avail;
	u64 factor;

	if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM)
		profile = btrfs_system_alloc_profile(fs_info);
	else
		profile = btrfs_metadata_alloc_profile(fs_info);

	avail = atomic64_read(&fs_info->free_chunk_space);

	/*
	 * If we have dup, raid1 or raid10 then only half of the free
	 * space is actually usable. For raid56, the space info used
	 * doesn't include the parity drive, so we don't have to
	 * change the math.
	 */
	factor = btrfs_bg_type_to_factor(profile);
	avail = div_u64(avail, factor);

	/*
	 * If we aren't flushing all things, let us overcommit up to
	 * half of the space. If we can flush, don't let us overcommit
	 * too much, let it overcommit up to 1/8 of the space.
	 */
	if (flush == BTRFS_RESERVE_FLUSH_ALL)
		avail >>= 3;
	else
		avail >>= 1;

	return avail;
}
int btrfs_can_overcommit(struct btrfs_fs_info *fs_info,
			 struct btrfs_space_info *space_info, u64 bytes,
			 enum btrfs_reserve_flush_enum flush)
{
	u64 avail;
	u64 used;

	/* Don't overcommit when in mixed mode */
	if (space_info->flags & BTRFS_BLOCK_GROUP_DATA)
		return 0;

	used = btrfs_space_info_used(space_info, true);
	avail = calc_available_free_space(fs_info, space_info, flush);

	if (used + bytes < space_info->total_bytes + avail)
		return 1;
	return 0;
}
static void remove_ticket(struct btrfs_space_info *space_info,
			  struct reserve_ticket *ticket)
{
	if (!list_empty(&ticket->list)) {
		list_del_init(&ticket->list);
		ASSERT(space_info->reclaim_size >= ticket->bytes);
		space_info->reclaim_size -= ticket->bytes;
	}
}
/*
 * This is for space we already have accounted in space_info->bytes_may_use, so
 * basically when we're returning space from block_rsv's.
 */
void btrfs_try_granting_tickets(struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info)
{
	struct list_head *head;
	enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_NO_FLUSH;

	lockdep_assert_held(&space_info->lock);

	head = &space_info->priority_tickets;
again:
	while (!list_empty(head)) {
		struct reserve_ticket *ticket;
		u64 used = btrfs_space_info_used(space_info, true);

		ticket = list_first_entry(head, struct reserve_ticket, list);

		/* Check and see if our ticket can be satisfied now. */
		if ((used + ticket->bytes <= space_info->total_bytes) ||
		    btrfs_can_overcommit(fs_info, space_info, ticket->bytes,
					 flush)) {
			btrfs_space_info_update_bytes_may_use(fs_info,
							      space_info,
							      ticket->bytes);
			remove_ticket(space_info, ticket);
			ticket->bytes = 0;
			space_info->tickets_id++;
			wake_up(&ticket->wait);
		} else {
			break;
		}
	}

	if (head == &space_info->priority_tickets) {
		head = &space_info->tickets;
		flush = BTRFS_RESERVE_FLUSH_ALL;
		goto again;
	}
}
#define DUMP_BLOCK_RSV(fs_info, rsv_name)				\
do {									\
	struct btrfs_block_rsv *__rsv = &(fs_info)->rsv_name;		\
	spin_lock(&__rsv->lock);					\
	btrfs_info(fs_info, #rsv_name ": size %llu reserved %llu",	\
		   __rsv->size, __rsv->reserved);			\
	spin_unlock(&__rsv->lock);					\
} while (0)
static void __btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
				    struct btrfs_space_info *info)
{
	lockdep_assert_held(&info->lock);

	btrfs_info(fs_info, "space_info %llu has %llu free, is %sfull",
		   info->flags,
		   info->total_bytes - btrfs_space_info_used(info, true),
		   info->full ? "" : "not ");
	btrfs_info(fs_info,
		"space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu",
		info->total_bytes, info->bytes_used, info->bytes_pinned,
		info->bytes_reserved, info->bytes_may_use,
		info->bytes_readonly);

	DUMP_BLOCK_RSV(fs_info, global_block_rsv);
	DUMP_BLOCK_RSV(fs_info, trans_block_rsv);
	DUMP_BLOCK_RSV(fs_info, chunk_block_rsv);
	DUMP_BLOCK_RSV(fs_info, delayed_block_rsv);
	DUMP_BLOCK_RSV(fs_info, delayed_refs_rsv);
}
void btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
			   struct btrfs_space_info *info, u64 bytes,
			   int dump_block_groups)
{
	struct btrfs_block_group *cache;
	int index = 0;

	spin_lock(&info->lock);
	__btrfs_dump_space_info(fs_info, info);
	spin_unlock(&info->lock);

	if (!dump_block_groups)
		return;

	down_read(&info->groups_sem);
again:
	list_for_each_entry(cache, &info->block_groups[index], list) {
		spin_lock(&cache->lock);
		btrfs_info(fs_info,
			"block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %s",
			cache->start, cache->length, cache->used, cache->pinned,
			cache->reserved, cache->ro ? "[readonly]" : "");
		btrfs_dump_free_space(cache, bytes);
		spin_unlock(&cache->lock);
	}
	if (++index < BTRFS_NR_RAID_TYPES)
		goto again;
	up_read(&info->groups_sem);
}
static void btrfs_writeback_inodes_sb_nr(struct btrfs_fs_info *fs_info,
					 unsigned long nr_pages, int nr_items)
{
	struct super_block *sb = fs_info->sb;

	if (down_read_trylock(&sb->s_umount)) {
		writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
		up_read(&sb->s_umount);
	} else {
		/*
		 * We needn't worry about the filesystem going from r/w to r/o
		 * even though we don't acquire ->s_umount, because the
		 * filesystem should guarantee the delalloc inode list is empty
		 * after the filesystem becomes read-only (all dirty pages are
		 * written to disk).
		 */
		btrfs_start_delalloc_roots(fs_info, nr_items);
		if (!current->journal_info)
			btrfs_wait_ordered_roots(fs_info, nr_items, 0, (u64)-1);
	}
}
static inline u64 calc_reclaim_items_nr(struct btrfs_fs_info *fs_info,
					u64 to_reclaim)
{
	u64 bytes;
	u64 nr;

	bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
	nr = div64_u64(to_reclaim, bytes);
	if (!nr)
		nr = 1;
	return nr;
}

#define EXTENT_SIZE_PER_ITEM	SZ_256K
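
/*
 * Worked example for the two knobs above (assuming a 16K nodesize and
 * BTRFS_MAX_LEVEL of 8): btrfs_calc_insert_metadata_size(fs_info, 1) charges
 * 2 * nodesize * BTRFS_MAX_LEVEL = 256K of worst case tree space per item, so
 * asking shrink_delalloc() below to reclaim 1M yields items = 4, which is
 * scaled back up to items * EXTENT_SIZE_PER_ITEM = 1M of dirty data to flush.
 */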
/*
 * Shrink metadata reservation for delalloc
 */
static void shrink_delalloc(struct btrfs_fs_info *fs_info, u64 to_reclaim,
			    u64 orig, bool wait_ordered)
{
	struct btrfs_space_info *space_info;
	struct btrfs_trans_handle *trans;
	u64 delalloc_bytes;
	u64 dio_bytes;
	u64 async_pages;
	u64 items;
	long time_left;
	unsigned long nr_pages;
	int loops;

	/* Calc the number of the pages we need flush for space reservation */
	items = calc_reclaim_items_nr(fs_info, to_reclaim);
	to_reclaim = items * EXTENT_SIZE_PER_ITEM;

	trans = (struct btrfs_trans_handle *)current->journal_info;
	space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);

	delalloc_bytes = percpu_counter_sum_positive(
						&fs_info->delalloc_bytes);
	dio_bytes = percpu_counter_sum_positive(&fs_info->dio_bytes);
	if (delalloc_bytes == 0 && dio_bytes == 0) {
		if (trans)
			return;
		if (wait_ordered)
			btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
		return;
	}

	/*
	 * If we are doing more ordered than delalloc we need to just wait on
	 * ordered extents, otherwise we'll waste time trying to flush delalloc
	 * that likely won't give us the space back we need.
	 */
	if (dio_bytes > delalloc_bytes)
		wait_ordered = true;

	loops = 0;
	while ((delalloc_bytes || dio_bytes) && loops < 3) {
		nr_pages = min(delalloc_bytes, to_reclaim) >> PAGE_SHIFT;

		/*
		 * Triggers inode writeback for up to nr_pages. This will invoke
		 * ->writepages callback and trigger delalloc filling
		 * (btrfs_run_delalloc_range()).
		 */
		btrfs_writeback_inodes_sb_nr(fs_info, nr_pages, items);

		/*
		 * We need to wait for the compressed pages to start before
		 * we continue.
		 */
		async_pages = atomic_read(&fs_info->async_delalloc_pages);
		if (!async_pages)
			goto skip_async;

		/*
		 * Calculate how many compressed pages we want to be written
		 * before we continue. I.e if there are more async pages than we
		 * require, wait_event will wait until nr_pages are written.
		 */
		if (async_pages <= nr_pages)
			async_pages = 0;
		else
			async_pages -= nr_pages;

		wait_event(fs_info->async_submit_wait,
			   atomic_read(&fs_info->async_delalloc_pages) <=
			   (int)async_pages);
skip_async:
		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets) &&
		    list_empty(&space_info->priority_tickets)) {
			spin_unlock(&space_info->lock);
			break;
		}
		spin_unlock(&space_info->lock);

		loops++;
		if (wait_ordered && !trans) {
			btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
		} else {
			time_left = schedule_timeout_killable(1);
			if (time_left)
				break;
		}
		delalloc_bytes = percpu_counter_sum_positive(
						&fs_info->delalloc_bytes);
		dio_bytes = percpu_counter_sum_positive(&fs_info->dio_bytes);
	}
}
/*
 * may_commit_transaction - possibly commit the transaction if it's OK to
 * @fs_info - the fs_info for this filesystem
 * @space_info - the space_info we're allocating for
 *
 * This will check to make sure that committing the transaction will actually
 * get us somewhere and then commit the transaction if it does. Otherwise it
 * will return -ENOSPC.
 */
static int may_commit_transaction(struct btrfs_fs_info *fs_info,
				  struct btrfs_space_info *space_info)
{
	struct reserve_ticket *ticket = NULL;
	struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_block_rsv;
	struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
	struct btrfs_block_rsv *trans_rsv = &fs_info->trans_block_rsv;
	struct btrfs_trans_handle *trans;
	u64 bytes_needed;
	u64 reclaim_bytes = 0;
	u64 cur_free_bytes = 0;

	trans = (struct btrfs_trans_handle *)current->journal_info;
	if (trans)
		return -EAGAIN;

	spin_lock(&space_info->lock);
	cur_free_bytes = btrfs_space_info_used(space_info, true);
	if (cur_free_bytes < space_info->total_bytes)
		cur_free_bytes = space_info->total_bytes - cur_free_bytes;
	else
		cur_free_bytes = 0;

	if (!list_empty(&space_info->priority_tickets))
		ticket = list_first_entry(&space_info->priority_tickets,
					  struct reserve_ticket, list);
	else if (!list_empty(&space_info->tickets))
		ticket = list_first_entry(&space_info->tickets,
					  struct reserve_ticket, list);
	bytes_needed = (ticket) ? ticket->bytes : 0;

	if (bytes_needed > cur_free_bytes)
		bytes_needed -= cur_free_bytes;
	else
		bytes_needed = 0;
	spin_unlock(&space_info->lock);

	if (!bytes_needed)
		return 0;

	trans = btrfs_join_transaction(fs_info->extent_root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	/*
	 * See if there is enough pinned space to make this reservation, or if
	 * we have block groups that are going to be freed, allowing us to
	 * possibly do a chunk allocation the next loop through.
	 */
	if (test_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags) ||
	    __percpu_counter_compare(&space_info->total_bytes_pinned,
				     bytes_needed,
				     BTRFS_TOTAL_BYTES_PINNED_BATCH) >= 0)
		goto commit;

	/*
	 * See if there is some space in the delayed insertion reservation for
	 * this reservation.
	 */
	if (space_info != delayed_rsv->space_info)
		goto enospc;

	spin_lock(&delayed_rsv->lock);
	reclaim_bytes += delayed_rsv->reserved;
	spin_unlock(&delayed_rsv->lock);

	spin_lock(&delayed_refs_rsv->lock);
	reclaim_bytes += delayed_refs_rsv->reserved;
	spin_unlock(&delayed_refs_rsv->lock);

	spin_lock(&trans_rsv->lock);
	reclaim_bytes += trans_rsv->reserved;
	spin_unlock(&trans_rsv->lock);

	if (reclaim_bytes >= bytes_needed)
		goto commit;
	bytes_needed -= reclaim_bytes;

	if (__percpu_counter_compare(&space_info->total_bytes_pinned,
				     bytes_needed,
				     BTRFS_TOTAL_BYTES_PINNED_BATCH) < 0)
		goto enospc;

commit:
	return btrfs_commit_transaction(trans);
enospc:
	btrfs_end_transaction(trans);
	return -ENOSPC;
}
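
/*
 * Illustrative decision example for may_commit_transaction() (numbers made
 * up): if the first ticket needs 8M, 3M of the space_info is currently free
 * and the delayed/trans reserves together hold 2M, then bytes_needed ends up
 * 8M - 3M - 2M = 3M and the commit only proceeds if total_bytes_pinned is at
 * least that remaining 3M.
 */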
/*
 * Try to flush some data based on policy set by @state. This is only advisory
 * and may fail for various reasons. The caller is supposed to examine the
 * state of @space_info to detect the outcome.
 */
static void flush_space(struct btrfs_fs_info *fs_info,
			struct btrfs_space_info *space_info, u64 num_bytes,
			int state)
{
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_trans_handle *trans;
	int nr;
	int ret = 0;

	switch (state) {
	case FLUSH_DELAYED_ITEMS_NR:
	case FLUSH_DELAYED_ITEMS:
		if (state == FLUSH_DELAYED_ITEMS_NR)
			nr = calc_reclaim_items_nr(fs_info, num_bytes) * 2;
		else
			nr = -1;

		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		ret = btrfs_run_delayed_items_nr(trans, nr);
		btrfs_end_transaction(trans);
		break;
	case FLUSH_DELALLOC:
	case FLUSH_DELALLOC_WAIT:
		shrink_delalloc(fs_info, num_bytes * 2, num_bytes,
				state == FLUSH_DELALLOC_WAIT);
		break;
	case FLUSH_DELAYED_REFS_NR:
	case FLUSH_DELAYED_REFS:
		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		if (state == FLUSH_DELAYED_REFS_NR)
			nr = calc_reclaim_items_nr(fs_info, num_bytes);
		else
			nr = 0;
		btrfs_run_delayed_refs(trans, nr);
		btrfs_end_transaction(trans);
		break;
	case ALLOC_CHUNK:
	case ALLOC_CHUNK_FORCE:
		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		ret = btrfs_chunk_alloc(trans,
					btrfs_metadata_alloc_profile(fs_info),
					(state == ALLOC_CHUNK) ?
					CHUNK_ALLOC_NO_FORCE : CHUNK_ALLOC_FORCE);
		btrfs_end_transaction(trans);
		if (ret > 0 || ret == -ENOSPC)
			ret = 0;
		break;
	case RUN_DELAYED_IPUTS:
		/*
		 * If we have pending delayed iputs then we could free up a
		 * bunch of pinned space, so make sure we run the iputs before
		 * we do our pinned bytes check below.
		 */
		btrfs_run_delayed_iputs(fs_info);
		btrfs_wait_on_delayed_iputs(fs_info);
		break;
	case COMMIT_TRANS:
		ret = may_commit_transaction(fs_info, space_info);
		break;
	default:
		ret = -ENOSPC;
		break;
	}

	trace_btrfs_flush_space(fs_info, space_info->flags, num_bytes, state,
				ret);
}
static u64
btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
				 struct btrfs_space_info *space_info)
{
	u64 used;
	u64 avail;
	u64 expected;
	u64 to_reclaim = space_info->reclaim_size;

	lockdep_assert_held(&space_info->lock);

	avail = calc_available_free_space(fs_info, space_info,
					  BTRFS_RESERVE_FLUSH_ALL);
	used = btrfs_space_info_used(space_info, true);

	/*
	 * We may be flushing because suddenly we have less space than we had
	 * before, and now we're well over-committed based on our current free
	 * space. If that's the case add in our overage so we make sure to put
	 * appropriate pressure on the flushing state machine.
	 */
	if (space_info->total_bytes + avail < used)
		to_reclaim += used - (space_info->total_bytes + avail);

	if (to_reclaim)
		return to_reclaim;

	to_reclaim = min_t(u64, num_online_cpus() * SZ_1M, SZ_16M);
	if (btrfs_can_overcommit(fs_info, space_info, to_reclaim,
				 BTRFS_RESERVE_FLUSH_ALL))
		return 0;

	used = btrfs_space_info_used(space_info, true);
	if (btrfs_can_overcommit(fs_info, space_info, SZ_1M,
				 BTRFS_RESERVE_FLUSH_ALL))
		expected = div_factor_fine(space_info->total_bytes, 95);
	else
		expected = div_factor_fine(space_info->total_bytes, 90);

	if (used > expected)
		to_reclaim = used - expected;
	else
		to_reclaim = 0;
	to_reclaim = min(to_reclaim, space_info->bytes_may_use +
				     space_info->bytes_reserved);
	return to_reclaim;
}
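
/*
 * Worked example for the overage logic above (numbers made up): with
 * total_bytes = 8G, avail = 512M and used = 9G we are overcommitted by 512M,
 * so that overage is added on top of the queued reclaim_size to keep real
 * pressure on the flushing state machine.
 */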
static inline int need_do_async_reclaim(struct btrfs_fs_info *fs_info,
					struct btrfs_space_info *space_info,
					u64 used)
{
	u64 thresh = div_factor_fine(space_info->total_bytes, 98);

	/* If we're just plain full then async reclaim just slows us down. */
	if ((space_info->bytes_used + space_info->bytes_reserved) >= thresh)
		return 0;

	if (!btrfs_calc_reclaim_metadata_size(fs_info, space_info))
		return 0;

	return (used >= thresh && !btrfs_fs_closing(fs_info) &&
		!test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
}
static bool steal_from_global_rsv(struct btrfs_fs_info *fs_info,
				  struct btrfs_space_info *space_info,
				  struct reserve_ticket *ticket)
{
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	u64 min_bytes;

	if (global_rsv->space_info != space_info)
		return false;

	spin_lock(&global_rsv->lock);
	min_bytes = div_factor(global_rsv->size, 1);
	if (global_rsv->reserved < min_bytes + ticket->bytes) {
		spin_unlock(&global_rsv->lock);
		return false;
	}
	global_rsv->reserved -= ticket->bytes;
	ticket->bytes = 0;
	list_del_init(&ticket->list);
	wake_up(&ticket->wait);
	space_info->tickets_id++;
	if (global_rsv->reserved < global_rsv->size)
		global_rsv->full = 0;
	spin_unlock(&global_rsv->lock);

	return true;
}
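
/*
 * Worked example for the check above (illustrative numbers): with a 512M
 * global reserve, min_bytes = div_factor(size, 1) is ~10% of it (51.2M), so
 * stealing is refused unless at least that much would remain in the reserve
 * after subtracting ticket->bytes.
 */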
/*
 * maybe_fail_all_tickets - we've exhausted our flushing, start failing tickets
 * @fs_info - fs_info for this fs
 * @space_info - the space info we were flushing
 *
 * We call this when we've exhausted our flushing ability and haven't made
 * progress in satisfying tickets. The reservation code handles tickets in
 * order, so if there is a large ticket first and then smaller ones we could
 * very well satisfy the smaller tickets. This will attempt to wake up any
 * tickets in the list to catch this case.
 *
 * This function returns true if it was able to make progress by clearing out
 * other tickets, or if it stumbles across a ticket that was smaller than the
 * first ticket.
 */
static bool maybe_fail_all_tickets(struct btrfs_fs_info *fs_info,
				   struct btrfs_space_info *space_info)
{
	struct reserve_ticket *ticket;
	u64 tickets_id = space_info->tickets_id;
	u64 first_ticket_bytes = 0;

	if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
		btrfs_info(fs_info, "cannot satisfy tickets, dumping space info");
		__btrfs_dump_space_info(fs_info, space_info);
	}

	while (!list_empty(&space_info->tickets) &&
	       tickets_id == space_info->tickets_id) {
		ticket = list_first_entry(&space_info->tickets,
					  struct reserve_ticket, list);

		if (ticket->steal &&
		    steal_from_global_rsv(fs_info, space_info, ticket))
			return true;

		/*
		 * may_commit_transaction will avoid committing the transaction
		 * if it doesn't feel like the space reclaimed by the commit
		 * would result in the ticket succeeding. However if we have a
		 * smaller ticket in the queue it may be small enough to be
		 * satisfied by committing the transaction, so if any
		 * subsequent ticket is smaller than the first ticket go ahead
		 * and send us back for another loop through the enospc flushing
		 * code.
		 */
		if (first_ticket_bytes == 0)
			first_ticket_bytes = ticket->bytes;
		else if (first_ticket_bytes > ticket->bytes)
			return true;

		if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
			btrfs_info(fs_info, "failing ticket with %llu bytes",
				   ticket->bytes);

		remove_ticket(space_info, ticket);
		ticket->error = -ENOSPC;
		wake_up(&ticket->wait);

		/*
		 * We're just throwing tickets away, so more flushing may not
		 * trip over btrfs_try_granting_tickets, so we need to call it
		 * here to see if we can make progress with the next ticket in
		 * the list.
		 */
		btrfs_try_granting_tickets(fs_info, space_info);
	}
	return (tickets_id != space_info->tickets_id);
}
/*
 * This is for normal flushers, we can wait all goddamned day if we want to. We
 * will loop and continuously try to flush as long as we are making progress.
 * We count progress as clearing off tickets each time we have to loop.
 */
static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_space_info *space_info;
	u64 to_reclaim;
	u64 last_tickets_id;
	int flush_state;
	int commit_cycles = 0;

	fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
	space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);

	spin_lock(&space_info->lock);
	to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);
	if (!to_reclaim) {
		space_info->flush = 0;
		spin_unlock(&space_info->lock);
		return;
	}
	last_tickets_id = space_info->tickets_id;
	spin_unlock(&space_info->lock);

	flush_state = FLUSH_DELAYED_ITEMS_NR;
	do {
		flush_space(fs_info, space_info, to_reclaim, flush_state);
		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets)) {
			space_info->flush = 0;
			spin_unlock(&space_info->lock);
			return;
		}
		to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info,
							      space_info);
		if (last_tickets_id == space_info->tickets_id) {
			flush_state++;
		} else {
			last_tickets_id = space_info->tickets_id;
			flush_state = FLUSH_DELAYED_ITEMS_NR;
			if (commit_cycles)
				commit_cycles--;
		}

		/*
		 * We don't want to force a chunk allocation until we've tried
		 * pretty hard to reclaim space. Think of the case where we
		 * freed up a bunch of space and so have a lot of pinned space
		 * to reclaim. We would rather use that than possibly create an
		 * underutilized metadata chunk. So if this is our first run
		 * through the flushing state machine skip ALLOC_CHUNK_FORCE and
		 * commit the transaction. If nothing has changed the next go
		 * around then we can force a chunk allocation.
		 */
		if (flush_state == ALLOC_CHUNK_FORCE && !commit_cycles)
			flush_state++;

		if (flush_state > COMMIT_TRANS) {
			commit_cycles++;
			if (commit_cycles > 2) {
				if (maybe_fail_all_tickets(fs_info, space_info)) {
					flush_state = FLUSH_DELAYED_ITEMS_NR;
					commit_cycles--;
				} else {
					space_info->flush = 0;
				}
			} else {
				flush_state = FLUSH_DELAYED_ITEMS_NR;
			}
		}
		spin_unlock(&space_info->lock);
	} while (flush_state <= COMMIT_TRANS);
}
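
/*
 * For reference, the loop above walks enum btrfs_flush_state in order:
 * FLUSH_DELAYED_ITEMS_NR, FLUSH_DELAYED_ITEMS, FLUSH_DELALLOC,
 * FLUSH_DELALLOC_WAIT, FLUSH_DELAYED_REFS_NR, FLUSH_DELAYED_REFS, ALLOC_CHUNK,
 * ALLOC_CHUNK_FORCE, RUN_DELAYED_IPUTS and finally COMMIT_TRANS, restarting
 * from the top whenever a pass makes progress (space_info->tickets_id moved).
 */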
void btrfs_init_async_reclaim_work(struct work_struct *work)
{
	INIT_WORK(work, btrfs_async_reclaim_metadata_space);
}
static const enum btrfs_flush_state priority_flush_states[] = {
	FLUSH_DELAYED_ITEMS_NR,
	FLUSH_DELAYED_ITEMS,
	ALLOC_CHUNK,
};

static const enum btrfs_flush_state evict_flush_states[] = {
	FLUSH_DELAYED_ITEMS_NR,
	FLUSH_DELAYED_ITEMS,
	FLUSH_DELAYED_REFS_NR,
	FLUSH_DELAYED_REFS,
	FLUSH_DELALLOC,
	FLUSH_DELALLOC_WAIT,
	ALLOC_CHUNK,
	COMMIT_TRANS,
};
static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info,
				struct reserve_ticket *ticket,
				const enum btrfs_flush_state *states,
				int states_nr)
{
	u64 to_reclaim;
	int flush_state;

	spin_lock(&space_info->lock);
	to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);
	if (!to_reclaim) {
		spin_unlock(&space_info->lock);
		return;
	}
	spin_unlock(&space_info->lock);

	flush_state = 0;
	do {
		flush_space(fs_info, space_info, to_reclaim, states[flush_state]);
		flush_state++;
		spin_lock(&space_info->lock);
		if (ticket->bytes == 0) {
			spin_unlock(&space_info->lock);
			return;
		}
		spin_unlock(&space_info->lock);
	} while (flush_state < states_nr);
}
static void wait_reserve_ticket(struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info,
				struct reserve_ticket *ticket)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	spin_lock(&space_info->lock);
	while (ticket->bytes > 0 && ticket->error == 0) {
		ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE);
		if (ret) {
			/*
			 * Delete us from the list. After we unlock the space
			 * info, we don't want the async reclaim job to reserve
			 * space for this ticket. If that would happen, then the
			 * ticket's task would not know that space was reserved
			 * despite getting an error, resulting in a space leak
			 * (bytes_may_use counter of our space_info).
			 */
			remove_ticket(space_info, ticket);
			ticket->error = -EINTR;
			break;
		}
		spin_unlock(&space_info->lock);

		schedule();

		finish_wait(&ticket->wait, &wait);
		spin_lock(&space_info->lock);
	}
	spin_unlock(&space_info->lock);
}
/*
 * handle_reserve_ticket - do the appropriate flushing and waiting for a ticket
 * @fs_info - the fs_info for this filesystem
 * @space_info - the space_info for the reservation
 * @ticket - the ticket for the reservation
 * @flush - how much we can flush
 *
 * This does the work of figuring out how to flush for the ticket, waiting for
 * the reservation, and returning the appropriate error if there is one.
 */
static int handle_reserve_ticket(struct btrfs_fs_info *fs_info,
				 struct btrfs_space_info *space_info,
				 struct reserve_ticket *ticket,
				 enum btrfs_reserve_flush_enum flush)
{
	int ret;

	switch (flush) {
	case BTRFS_RESERVE_FLUSH_ALL:
	case BTRFS_RESERVE_FLUSH_ALL_STEAL:
		wait_reserve_ticket(fs_info, space_info, ticket);
		break;
	case BTRFS_RESERVE_FLUSH_LIMIT:
		priority_reclaim_metadata_space(fs_info, space_info, ticket,
						priority_flush_states,
						ARRAY_SIZE(priority_flush_states));
		break;
	case BTRFS_RESERVE_FLUSH_EVICT:
		priority_reclaim_metadata_space(fs_info, space_info, ticket,
						evict_flush_states,
						ARRAY_SIZE(evict_flush_states));
		break;
	default:
		ASSERT(0);
		break;
	}

	spin_lock(&space_info->lock);
	ret = ticket->error;
	if (ticket->bytes || ticket->error) {
		/*
		 * We were a priority ticket, so we need to delete ourselves
		 * from the list. Because we could have other priority tickets
		 * behind us that require less space, run
		 * btrfs_try_granting_tickets() to see if their reservations can
		 * now be satisfied.
		 */
		if (!list_empty(&ticket->list)) {
			remove_ticket(space_info, ticket);
			btrfs_try_granting_tickets(fs_info, space_info);
		}
		if (!ret)
			ret = -ENOSPC;
	}
	spin_unlock(&space_info->lock);
	ASSERT(list_empty(&ticket->list));
	/*
	 * Check that we can't have an error set if the reservation succeeded,
	 * as that would confuse tasks and lead them to error out without
	 * releasing reserved space (if an error happens the expectation is that
	 * space wasn't reserved at all).
	 */
	ASSERT(!(ticket->bytes == 0 && ticket->error));
	return ret;
}
/*
 * This returns true if this flush state will go through the ordinary flushing
 * states.
 */
static inline bool is_normal_flushing(enum btrfs_reserve_flush_enum flush)
{
	return (flush == BTRFS_RESERVE_FLUSH_ALL) ||
	       (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL);
}
/*
 * __reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
 * @fs_info - the fs_info for this filesystem
 * @space_info - the space info we want to allocate from
 * @orig_bytes - the number of bytes we want
 * @flush - whether or not we can flush to make our reservation
 *
 * This will reserve orig_bytes number of bytes from the space info associated
 * with the block_rsv. If there is not enough space it will make an attempt to
 * flush out space to make room. It will do this by flushing delalloc if
 * possible or committing the transaction. If @flush is BTRFS_RESERVE_NO_FLUSH
 * then no attempts to regain reservations will be made and this will fail if
 * there is not enough space already.
 */
static int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
				    struct btrfs_space_info *space_info,
				    u64 orig_bytes,
				    enum btrfs_reserve_flush_enum flush)
{
	struct reserve_ticket ticket;
	u64 used;
	int ret = 0;
	bool pending_tickets;

	ASSERT(orig_bytes);
	ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_ALL);

	spin_lock(&space_info->lock);
	ret = -ENOSPC;
	used = btrfs_space_info_used(space_info, true);

	/*
	 * We don't want NO_FLUSH allocations to jump everybody, they can
	 * generally handle ENOSPC in a different way, so treat them the same as
	 * normal flushers when it comes to skipping pending tickets.
	 */
	if (is_normal_flushing(flush) || (flush == BTRFS_RESERVE_NO_FLUSH))
		pending_tickets = !list_empty(&space_info->tickets) ||
			!list_empty(&space_info->priority_tickets);
	else
		pending_tickets = !list_empty(&space_info->priority_tickets);

	/*
	 * Carry on if we have enough space (short-circuit) OR call
	 * can_overcommit() to ensure we can overcommit to continue.
	 */
	if (!pending_tickets &&
	    ((used + orig_bytes <= space_info->total_bytes) ||
	     btrfs_can_overcommit(fs_info, space_info, orig_bytes, flush))) {
		btrfs_space_info_update_bytes_may_use(fs_info, space_info,
						      orig_bytes);
		ret = 0;
	}

	/*
	 * If we couldn't make a reservation then setup our reservation ticket
	 * and kick the async worker if it's not already running.
	 *
	 * If we are a priority flusher then we just need to add our ticket to
	 * the list and we will do our own flushing further down.
	 */
	if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
		ticket.bytes = orig_bytes;
		ticket.error = 0;
		space_info->reclaim_size += ticket.bytes;
		init_waitqueue_head(&ticket.wait);
		ticket.steal = (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL);
		if (flush == BTRFS_RESERVE_FLUSH_ALL ||
		    flush == BTRFS_RESERVE_FLUSH_ALL_STEAL) {
			list_add_tail(&ticket.list, &space_info->tickets);
			if (!space_info->flush) {
				space_info->flush = 1;
				trace_btrfs_trigger_flush(fs_info,
							  space_info->flags,
							  orig_bytes, flush,
							  "enospc");
				queue_work(system_unbound_wq,
					   &fs_info->async_reclaim_work);
			}
		} else {
			list_add_tail(&ticket.list,
				      &space_info->priority_tickets);
		}
	} else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
		used += orig_bytes;
		/*
		 * We will do the space reservation dance during log replay,
		 * which means we won't have fs_info->fs_root set, so don't do
		 * the async reclaim as we will panic.
		 */
		if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) &&
		    need_do_async_reclaim(fs_info, space_info, used) &&
		    !work_busy(&fs_info->async_reclaim_work)) {
			trace_btrfs_trigger_flush(fs_info, space_info->flags,
						  orig_bytes, flush, "preempt");
			queue_work(system_unbound_wq,
				   &fs_info->async_reclaim_work);
		}
	}
	spin_unlock(&space_info->lock);
	if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
		return ret;

	return handle_reserve_ticket(fs_info, space_info, &ticket, flush);
}
/*
 * btrfs_reserve_metadata_bytes - try to reserve bytes from the block_rsv's
 *				  space
 * @root - the root we're allocating for
 * @block_rsv - the block_rsv we're allocating for
 * @orig_bytes - the number of bytes we want
 * @flush - whether or not we can flush to make our reservation
 *
 * This will reserve orig_bytes number of bytes from the space info associated
 * with the block_rsv. If there is not enough space it will make an attempt to
 * flush out space to make room. It will do this by flushing delalloc if
 * possible or committing the transaction. If @flush is BTRFS_RESERVE_NO_FLUSH
 * then no attempts to regain reservations will be made and this will fail if
 * there is not enough space already.
 */
int btrfs_reserve_metadata_bytes(struct btrfs_root *root,
				 struct btrfs_block_rsv *block_rsv,
				 u64 orig_bytes,
				 enum btrfs_reserve_flush_enum flush)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	int ret;

	ret = __reserve_metadata_bytes(fs_info, block_rsv->space_info,
				       orig_bytes, flush);
	if (ret == -ENOSPC &&
	    unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
		if (block_rsv != global_rsv &&
		    !btrfs_block_rsv_use_bytes(global_rsv, orig_bytes))
			ret = 0;
	}
	if (ret == -ENOSPC) {
		trace_btrfs_space_reservation(fs_info, "space_info:enospc",
					      block_rsv->space_info->flags,
					      orig_bytes, 1);

		if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
			btrfs_dump_space_info(fs_info, block_rsv->space_info,
					      orig_bytes, 0);
	}
	return ret;
}