// SPDX-License-Identifier: GPL-2.0

#include "misc.h"
#include "ctree.h"
#include "block-rsv.h"
#include "space-info.h"
#include "transaction.h"
#include "block-group.h"
#include "disk-io.h"
#include "fs.h"
#include "accessors.h"

/*
 * HOW DO BLOCK RESERVES WORK
 *
 * Think of block_rsv's as buckets for logically grouped metadata
 * reservations.  Each block_rsv has a ->size and a ->reserved.  ->size is
 * how large we want our block rsv to be, ->reserved is how much space is
 * currently reserved for this block reserve.
 *
 * ->failfast exists for the truncate case, and is described below.
 *
 * NORMAL OPERATION
 *
 *   -> Reserve
 *     Entrance: btrfs_block_rsv_add, btrfs_block_rsv_refill
 *
 *     We call into btrfs_reserve_metadata_bytes() with our bytes, which is
 *     accounted for in space_info->bytes_may_use, and then add the bytes to
 *     ->reserved, and ->size in the case of btrfs_block_rsv_add.
 *
 *     ->size is an over-estimation of how much we may use for a particular
 *     operation.
 *
 *   -> Use
 *     Entrance: btrfs_use_block_rsv
 *
 *     When we do a btrfs_alloc_tree_block() we call into btrfs_use_block_rsv()
 *     to determine the appropriate block_rsv to use, and then verify that
 *     ->reserved has enough space for our tree block allocation.  Once
 *     successful we subtract fs_info->nodesize from ->reserved.
 *
 *   -> Finish
 *     Entrance: btrfs_block_rsv_release
 *
 *     We are finished with our operation, subtract our individual reservation
 *     from ->size, and then subtract ->size from ->reserved and free up the
 *     excess if there is any.
 *
 *     There is some logic here to refill the delayed refs rsv or the global
 *     rsv as needed, otherwise the excess is subtracted from
 *     space_info->bytes_may_use.
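 *
 *     As an illustrative sketch of that lifecycle (the flow around these
 *     calls is hypothetical glue; the entry points are the ones named
 *     above):
 *
 *	ret = btrfs_block_rsv_add(fs_info, rsv, bytes,
 *				  BTRFS_RESERVE_FLUSH_ALL);
 *	if (ret)
 *		return ret;		// nothing was reserved
 *	// ... allocate tree blocks; each successful btrfs_use_block_rsv()
 *	// subtracts fs_info->nodesize from rsv->reserved ...
 *	btrfs_block_rsv_release(fs_info, rsv, bytes, NULL);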
 *
 * TYPES OF BLOCK RESERVES
 *
 * BLOCK_RSV_TRANS, BLOCK_RSV_DELOPS, BLOCK_RSV_CHUNK
 *   These behave normally, as described above, just within the confines of the
 *   lifetime of their particular operation (transaction for the whole trans
 *   handle lifetime, for example).
 *
 * BLOCK_RSV_GLOBAL
 *   It is impossible to properly account for all the space that may be required
 *   to make our extent tree updates.  This block reserve acts as an overflow
 *   buffer in case our delayed refs reserve does not reserve enough space to
 *   update the extent tree.
 *
 *   We can steal from this in some cases as well, notably on evict() or
 *   truncate() in order to help users recover from ENOSPC conditions.
 *
 * BLOCK_RSV_DELALLOC
 *   The individual item sizes are determined by the per-inode size
 *   calculations, which are described with the delalloc code.  This is pretty
 *   straightforward, it's just that the calculation of ->size encodes a lot of
 *   different items, and thus it gets used when updating inodes, inserting file
 *   extents, and inserting checksums.
 *
 * BLOCK_RSV_DELREFS
 *   We keep a running tally of how many delayed refs we have on the system.
 *   We assume each one of these delayed refs is going to use a full
 *   reservation.  We use the transaction items and pre-reserve space for every
 *   operation, and use this reservation to refill any gap between ->size and
 *   ->reserved that may exist.
 *
 *   From there it's straightforward: removing a delayed ref means we remove its
 *   count from ->size and free up reservations as necessary.  Since this is
 *   the most dynamic block reserve in the system, we will try to refill this
 *   block reserve first with any excess returned by any other block reserve.
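 *
 *   A worked example with made-up numbers: if each delayed ref reserves 256K
 *   and we queue four refs, ->size grows by 1M.  Running one ref later drops
 *   ->size by 256K, and if ->reserved then exceeds ->size the 256K excess is
 *   freed (or handed to another reserve) by block_rsv_release_bytes() below.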
 *
 * BLOCK_RSV_EMPTY
 *   This is the fallback block reserve to make us try to reserve space if we
 *   don't have a specific bucket for this allocation.  It is mostly used for
 *   updating the device tree and such, since that is a separate pool we're
 *   content to just reserve space from the space_info on demand.
 *
 * BLOCK_RSV_TEMP
 *   This is used by things like truncate and iput.  We will temporarily
 *   allocate a block reserve, set it to some size, and then truncate bytes
 *   until we have no space left.  With ->failfast set we'll simply return
 *   ENOSPC from btrfs_use_block_rsv() to signal that we need to unwind and try
 *   to make a new reservation.  This is because these operations are
 *   unbounded, so we want to do as much work as we can, and then back off and
 *   re-reserve.
 */
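
/*
 * A minimal sketch of the BLOCK_RSV_TEMP pattern just described.  The loop
 * structure and the min_size/flush values are hypothetical glue; the btrfs_*
 * calls and the ->failfast behaviour are the real interface:
 *
 *	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
 *	rsv->failfast = true;
 *	while (items_remain) {
 *		ret = btrfs_block_rsv_refill(fs_info, rsv, min_size, flush);
 *		// drop items until btrfs_use_block_rsv() fails fast with
 *		// -ENOSPC, then unwind and loop around to re-reserve
 *	}
 *	btrfs_free_block_rsv(fs_info, rsv);
 */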

static u64 block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
				    struct btrfs_block_rsv *block_rsv,
				    struct btrfs_block_rsv *dest, u64 num_bytes,
				    u64 *qgroup_to_release_ret)
{
	struct btrfs_space_info *space_info = block_rsv->space_info;
	u64 qgroup_to_release = 0;
	u64 ret;

	spin_lock(&block_rsv->lock);
	if (num_bytes == (u64)-1) {
		num_bytes = block_rsv->size;
		qgroup_to_release = block_rsv->qgroup_rsv_size;
	}
	block_rsv->size -= num_bytes;
	if (block_rsv->reserved >= block_rsv->size) {
		num_bytes = block_rsv->reserved - block_rsv->size;
		block_rsv->reserved = block_rsv->size;
		block_rsv->full = true;
	} else {
		num_bytes = 0;
	}
	if (qgroup_to_release_ret &&
	    block_rsv->qgroup_rsv_reserved >= block_rsv->qgroup_rsv_size) {
		qgroup_to_release = block_rsv->qgroup_rsv_reserved -
				    block_rsv->qgroup_rsv_size;
		block_rsv->qgroup_rsv_reserved = block_rsv->qgroup_rsv_size;
	} else {
		qgroup_to_release = 0;
	}
	spin_unlock(&block_rsv->lock);

	ret = num_bytes;
	if (num_bytes > 0) {
		if (dest) {
			spin_lock(&dest->lock);
			if (!dest->full) {
				u64 bytes_to_add;

				bytes_to_add = dest->size - dest->reserved;
				bytes_to_add = min(num_bytes, bytes_to_add);
				dest->reserved += bytes_to_add;
				if (dest->reserved >= dest->size)
					dest->full = true;
				num_bytes -= bytes_to_add;
			}
			spin_unlock(&dest->lock);
		}
		if (num_bytes)
			btrfs_space_info_free_bytes_may_use(fs_info,
							    space_info,
							    num_bytes);
	}
	if (qgroup_to_release_ret)
		*qgroup_to_release_ret = qgroup_to_release;
	return ret;
}
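
/*
 * Move @num_bytes of reservation from @src to @dst, failing with -ENOSPC
 * if @src does not have that much reserved.  @update_size controls whether
 * @dst's ->size grows along with its ->reserved.
 */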
int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src,
			    struct btrfs_block_rsv *dst, u64 num_bytes,
			    bool update_size)
{
	int ret;

	ret = btrfs_block_rsv_use_bytes(src, num_bytes);
	if (ret)
		return ret;

	btrfs_block_rsv_add_bytes(dst, num_bytes, update_size);
	return 0;
}

void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, enum btrfs_rsv_type type)
{
	memset(rsv, 0, sizeof(*rsv));
	spin_lock_init(&rsv->lock);
	rsv->type = type;
}

void btrfs_init_metadata_block_rsv(struct btrfs_fs_info *fs_info,
				   struct btrfs_block_rsv *rsv,
				   enum btrfs_rsv_type type)
{
	btrfs_init_block_rsv(rsv, type);
	rsv->space_info = btrfs_find_space_info(fs_info,
						BTRFS_BLOCK_GROUP_METADATA);
}

struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_fs_info *fs_info,
					      enum btrfs_rsv_type type)
{
	struct btrfs_block_rsv *block_rsv;

	block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
	if (!block_rsv)
		return NULL;

	btrfs_init_metadata_block_rsv(fs_info, block_rsv, type);
	return block_rsv;
}

void btrfs_free_block_rsv(struct btrfs_fs_info *fs_info,
			  struct btrfs_block_rsv *rsv)
{
	if (!rsv)
		return;
	btrfs_block_rsv_release(fs_info, rsv, (u64)-1, NULL);
	kfree(rsv);
}
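
/*
 * The "Reserve" entrance described at the top of this file: reserve
 * @num_bytes from the underlying space_info and credit them to both
 * ->reserved and ->size of @block_rsv.
 */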
int btrfs_block_rsv_add(struct btrfs_fs_info *fs_info,
			struct btrfs_block_rsv *block_rsv, u64 num_bytes,
			enum btrfs_reserve_flush_enum flush)
{
	int ret;

	if (num_bytes == 0)
		return 0;

	ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv->space_info,
					   num_bytes, flush);
	if (!ret)
		btrfs_block_rsv_add_bytes(block_rsv, num_bytes, true);

	return ret;
}

int btrfs_block_rsv_check(struct btrfs_block_rsv *block_rsv, int min_percent)
{
	u64 num_bytes = 0;
	int ret = -ENOSPC;

	spin_lock(&block_rsv->lock);
	num_bytes = mult_perc(block_rsv->size, min_percent);
	if (block_rsv->reserved >= num_bytes)
		ret = 0;
	spin_unlock(&block_rsv->lock);

	return ret;
}

int btrfs_block_rsv_refill(struct btrfs_fs_info *fs_info,
			   struct btrfs_block_rsv *block_rsv, u64 num_bytes,
			   enum btrfs_reserve_flush_enum flush)
{
	int ret = -ENOSPC;

	if (!block_rsv)
		return 0;

	spin_lock(&block_rsv->lock);
	if (block_rsv->reserved >= num_bytes)
		ret = 0;
	else
		num_bytes -= block_rsv->reserved;
	spin_unlock(&block_rsv->lock);

	if (!ret)
		return 0;

	ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv->space_info,
					   num_bytes, flush);
	if (!ret) {
		btrfs_block_rsv_add_bytes(block_rsv, num_bytes, false);
		return 0;
	}

	return ret;
}
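
/*
 * The "Finish" entrance: drop @num_bytes from ->size ((u64)-1 means drop
 * everything) and return any excess ->reserved, first refilling the delayed
 * refs rsv or the global rsv before giving space back to
 * space_info->bytes_may_use.
 */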
u64 btrfs_block_rsv_release(struct btrfs_fs_info *fs_info,
			    struct btrfs_block_rsv *block_rsv, u64 num_bytes,
			    u64 *qgroup_to_release)
{
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
	struct btrfs_block_rsv *target = NULL;

	/*
	 * If we are a delayed block reserve then push to the global rsv,
	 * otherwise dump into the global delayed reserve if it is not full.
	 */
	if (block_rsv->type == BTRFS_BLOCK_RSV_DELOPS)
		target = global_rsv;
	else if (block_rsv != global_rsv && !btrfs_block_rsv_full(delayed_rsv))
		target = delayed_rsv;

	if (target && block_rsv->space_info != target->space_info)
		target = NULL;

	return block_rsv_release_bytes(fs_info, block_rsv, target, num_bytes,
				       qgroup_to_release);
}

int btrfs_block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv, u64 num_bytes)
{
	int ret = -ENOSPC;

	spin_lock(&block_rsv->lock);
	if (block_rsv->reserved >= num_bytes) {
		block_rsv->reserved -= num_bytes;
		if (block_rsv->reserved < block_rsv->size)
			block_rsv->full = false;
		ret = 0;
	}
	spin_unlock(&block_rsv->lock);
	return ret;
}

void btrfs_block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
			       u64 num_bytes, bool update_size)
{
	spin_lock(&block_rsv->lock);
	block_rsv->reserved += num_bytes;
	if (update_size)
		block_rsv->size += num_bytes;
	else if (block_rsv->reserved >= block_rsv->size)
		block_rsv->full = true;
	spin_unlock(&block_rsv->lock);
}

void btrfs_update_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
	struct btrfs_space_info *sinfo = block_rsv->space_info;
	struct btrfs_root *root, *tmp;
	u64 num_bytes = btrfs_root_used(&fs_info->tree_root->root_item);
	unsigned int min_items = 1;

	/*
	 * The global block rsv is based on the size of the extent tree, the
	 * checksum tree and the root tree.  If the fs is empty we want to set
	 * it to a minimal amount for safety.
	 *
	 * We also are going to need to modify the minimum of the tree root and
	 * any global roots we could touch.
	 */
	read_lock(&fs_info->global_root_lock);
	rbtree_postorder_for_each_entry_safe(root, tmp, &fs_info->global_root_tree,
					     rb_node) {
		if (btrfs_root_id(root) == BTRFS_EXTENT_TREE_OBJECTID ||
		    btrfs_root_id(root) == BTRFS_CSUM_TREE_OBJECTID ||
		    btrfs_root_id(root) == BTRFS_FREE_SPACE_TREE_OBJECTID) {
			num_bytes += btrfs_root_used(&root->root_item);
			min_items++;
		}
	}
	read_unlock(&fs_info->global_root_lock);

	if (btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE)) {
		num_bytes += btrfs_root_used(&fs_info->block_group_root->root_item);
		min_items++;
	}

	if (btrfs_fs_incompat(fs_info, RAID_STRIPE_TREE)) {
		num_bytes += btrfs_root_used(&fs_info->stripe_root->root_item);
		min_items++;
	}

	/*
	 * But we also want to reserve enough space so we can do the fallback
	 * global reserve for an unlink, which is an additional
	 * BTRFS_UNLINK_METADATA_UNITS items.
	 *
	 * But we also need space for the delayed ref updates from the unlink,
	 * so add BTRFS_UNLINK_METADATA_UNITS units for delayed refs, one for
	 * each unlink metadata item.
	 */
	min_items += BTRFS_UNLINK_METADATA_UNITS;

	num_bytes = max_t(u64, num_bytes,
			  btrfs_calc_insert_metadata_size(fs_info, min_items) +
			  btrfs_calc_delayed_ref_bytes(fs_info,
						       BTRFS_UNLINK_METADATA_UNITS));

	spin_lock(&sinfo->lock);
	spin_lock(&block_rsv->lock);

	block_rsv->size = min_t(u64, num_bytes, SZ_512M);

	if (block_rsv->reserved < block_rsv->size) {
		num_bytes = block_rsv->size - block_rsv->reserved;
		btrfs_space_info_update_bytes_may_use(fs_info, sinfo,
						      num_bytes);
		block_rsv->reserved = block_rsv->size;
	} else if (block_rsv->reserved > block_rsv->size) {
		num_bytes = block_rsv->reserved - block_rsv->size;
		btrfs_space_info_update_bytes_may_use(fs_info, sinfo,
						      -num_bytes);
		block_rsv->reserved = block_rsv->size;
		btrfs_try_granting_tickets(fs_info, sinfo);
	}

	block_rsv->full = (block_rsv->reserved == block_rsv->size);

	if (block_rsv->size >= sinfo->total_bytes)
		sinfo->force_alloc = CHUNK_ALLOC_FORCE;
	spin_unlock(&block_rsv->lock);
	spin_unlock(&sinfo->lock);
}

void btrfs_init_root_block_rsv(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	switch (btrfs_root_id(root)) {
	case BTRFS_CSUM_TREE_OBJECTID:
	case BTRFS_EXTENT_TREE_OBJECTID:
	case BTRFS_FREE_SPACE_TREE_OBJECTID:
	case BTRFS_BLOCK_GROUP_TREE_OBJECTID:
	case BTRFS_RAID_STRIPE_TREE_OBJECTID:
		root->block_rsv = &fs_info->delayed_refs_rsv;
		break;
	case BTRFS_ROOT_TREE_OBJECTID:
	case BTRFS_DEV_TREE_OBJECTID:
	case BTRFS_QUOTA_TREE_OBJECTID:
		root->block_rsv = &fs_info->global_block_rsv;
		break;
	case BTRFS_CHUNK_TREE_OBJECTID:
		root->block_rsv = &fs_info->chunk_block_rsv;
		break;
	default:
		root->block_rsv = NULL;
		break;
	}
}

void btrfs_init_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	struct btrfs_space_info *space_info;

	space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
	fs_info->chunk_block_rsv.space_info = space_info;

	space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
	fs_info->global_block_rsv.space_info = space_info;
	fs_info->trans_block_rsv.space_info = space_info;
	fs_info->empty_block_rsv.space_info = space_info;
	fs_info->delayed_block_rsv.space_info = space_info;
	fs_info->delayed_refs_rsv.space_info = space_info;

	btrfs_update_global_block_rsv(fs_info);
}

void btrfs_release_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	btrfs_block_rsv_release(fs_info, &fs_info->global_block_rsv, (u64)-1,
				NULL);
	WARN_ON(fs_info->trans_block_rsv.size > 0);
	WARN_ON(fs_info->trans_block_rsv.reserved > 0);
	WARN_ON(fs_info->chunk_block_rsv.size > 0);
	WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
	WARN_ON(fs_info->delayed_block_rsv.size > 0);
	WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
	WARN_ON(fs_info->delayed_refs_rsv.reserved > 0);
	WARN_ON(fs_info->delayed_refs_rsv.size > 0);
}
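
/*
 * Pick the block rsv that pays for a tree block allocation in @root: the
 * transaction's rsv for shareable roots (plus the uuid root and csum
 * insertion), then the root's own ->block_rsv, falling back to the empty
 * rsv.
 */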
static struct btrfs_block_rsv *get_block_rsv(
					const struct btrfs_trans_handle *trans,
					const struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *block_rsv = NULL;

	if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state) ||
	    (root == fs_info->uuid_root) ||
	    (trans->adding_csums &&
	     btrfs_root_id(root) == BTRFS_CSUM_TREE_OBJECTID))
		block_rsv = trans->block_rsv;

	if (!block_rsv)
		block_rsv = root->block_rsv;

	if (!block_rsv)
		block_rsv = &fs_info->empty_block_rsv;

	return block_rsv;
}

struct btrfs_block_rsv *btrfs_use_block_rsv(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    u32 blocksize)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *block_rsv;
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	int ret;
	bool global_updated = false;

	block_rsv = get_block_rsv(trans, root);

	if (unlikely(btrfs_block_rsv_size(block_rsv) == 0))
		goto try_reserve;
again:
	ret = btrfs_block_rsv_use_bytes(block_rsv, blocksize);
	if (!ret)
		return block_rsv;

	if (block_rsv->failfast)
		return ERR_PTR(ret);

	if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
		global_updated = true;
		btrfs_update_global_block_rsv(fs_info);
		goto again;
	}

	/*
	 * The global reserve still exists to save us from ourselves, so don't
	 * warn_on if we are short on our delayed refs reserve.
	 */
	if (block_rsv->type != BTRFS_BLOCK_RSV_DELREFS &&
	    btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
		static DEFINE_RATELIMIT_STATE(_rs,
					      DEFAULT_RATELIMIT_INTERVAL * 10,
					      /*DEFAULT_RATELIMIT_BURST*/ 1);
		if (__ratelimit(&_rs))
			trace_printk(KERN_DEBUG
				"BTRFS: block rsv %d returned %d\n",
				block_rsv->type, ret);
	}
try_reserve:
	ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv->space_info,
					   blocksize, BTRFS_RESERVE_NO_FLUSH);
	if (!ret)
		return block_rsv;
	/*
	 * If we couldn't reserve metadata bytes try and use some from
	 * the global reserve if its space type is the same as the global
	 * reservation.
	 */
	if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL &&
	    block_rsv->space_info == global_rsv->space_info) {
		ret = btrfs_block_rsv_use_bytes(global_rsv, blocksize);
		if (!ret)
			return global_rsv;
	}

	/*
	 * All hope is lost, but of course our reservations are overly
	 * pessimistic, so instead of possibly having an ENOSPC abort here, try
	 * one last time to force a reservation if there's enough actual space
	 * on disk to make the reservation.
	 */
	ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv->space_info, blocksize,
					   BTRFS_RESERVE_FLUSH_EMERGENCY);
	if (!ret)
		return block_rsv;

	return ERR_PTR(ret);
}

int btrfs_check_trunc_cache_free_space(const struct btrfs_fs_info *fs_info,
				       struct btrfs_block_rsv *rsv)
{
	u64 needed_bytes;
	int ret;

	/* 1 for slack space, 1 for updating the inode */
	needed_bytes = btrfs_calc_insert_metadata_size(fs_info, 1) +
		       btrfs_calc_metadata_size(fs_info, 1);

	spin_lock(&rsv->lock);
	if (rsv->reserved < needed_bytes)
		ret = -ENOSPC;
	else
		ret = 0;
	spin_unlock(&rsv->lock);
	return ret;
}