fs/xfs/libxfs/xfs_ag_resv.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_alloc.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_trans.h"
#include "xfs_rmap_btree.h"
#include "xfs_btree.h"
#include "xfs_refcount_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_ag.h"
#include "xfs_ag_resv.h"
/*
 * Per-AG Block Reservations
 *
 * For some kinds of allocation group metadata structures, it is advantageous
 * to reserve a small number of blocks in each AG so that future expansions of
 * that data structure do not encounter ENOSPC because errors during a btree
 * split cause the filesystem to go offline.
 *
 * Prior to the introduction of reflink, this wasn't an issue because the free
 * space btrees maintain a reserve of space (the AGFL) to handle any expansion
 * that may be necessary; and allocations of other metadata (inodes, BMBT,
 * dir/attr) aren't restricted to a single AG. However, with reflink it is
 * possible to allocate all the space in an AG, have subsequent reflink/CoW
 * activity expand the refcount btree, and discover that there's no space left
 * to handle that expansion. Since we can calculate the maximum size of the
 * refcount btree, we can reserve space for it and avoid ENOSPC.
 *
 * Handling per-AG reservations consists of four changes to the allocator's
 * behavior: First, because these reservations are always needed, we decrease
 * the ag_max_usable counter to reflect the size of the AG after the reserved
 * blocks are taken. Second, the reservations must be reflected in the
 * fdblocks count to maintain proper accounting. Third, each AG must maintain
 * its own reserved block counter so that we can calculate the amount of space
 * that must remain free to maintain the reservations. Fourth, the "remaining
 * reserved blocks" count must be used when calculating the length of the
 * longest free extent in an AG and to clamp maxlen in the per-AG allocation
 * functions. In other words, we maintain a virtual allocation via in-core
 * accounting tricks so that we don't have to clean up after a crash. :)
 *
 * Reserved blocks can be managed by passing one of the enum xfs_ag_resv_type
 * values via struct xfs_alloc_arg or directly to the xfs_free_extent
 * function. It might seem a little funny to maintain a reservoir of blocks
 * to feed another reservoir, but the AGFL only holds enough blocks to get
 * through the next transaction. The per-AG reservation is to ensure (we
 * hope) that each AG never runs out of blocks. Each data structure wanting
 * to use the reservation system should update ask/used in xfs_ag_resv_init.
 */
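
/*
 * Worked example (illustrative numbers only, not from any real filesystem):
 * if the metadata btrees ask for 100 blocks per AG and 40 of them are
 * already allocated to the trees, __xfs_ag_resv_init() below hides
 * ask - used = 60 blocks from fdblocks and records ar_reserved = 60. The
 * rmapbt reservation hides the full ask instead, because rmapbt blocks are
 * carved out of the free space and are never subtracted from fdblocks on
 * their own.
 */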
/*
 * Are we critically low on blocks? For now we'll define that as the number
 * of blocks we can get our hands on being less than 10% of what we reserved
 * or less than some arbitrary number (maximum btree height).
 */
bool
xfs_ag_resv_critical(
	struct xfs_perag	*pag,
	enum xfs_ag_resv_type	type)
{
	struct xfs_mount	*mp = pag_mount(pag);
	xfs_extlen_t		avail;
	xfs_extlen_t		orig;

	switch (type) {
	case XFS_AG_RESV_METADATA:
		avail = pag->pagf_freeblks - pag->pag_rmapbt_resv.ar_reserved;
		orig = pag->pag_meta_resv.ar_asked;
		break;
	case XFS_AG_RESV_RMAPBT:
		avail = pag->pagf_freeblks + pag->pagf_flcount -
			pag->pag_meta_resv.ar_reserved;
		orig = pag->pag_rmapbt_resv.ar_asked;
		break;
	default:
		ASSERT(0);
		return false;
	}

	trace_xfs_ag_resv_critical(pag, type, avail);

	/* Critically low if less than 10% or max btree height remains. */
	return XFS_TEST_ERROR(avail < orig / 10 ||
			      avail < mp->m_agbtree_maxlevels,
			mp, XFS_ERRTAG_AG_RESV_CRITICAL);
}
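
/*
 * A minimal usage sketch, not part of the original file: a caller scanning
 * for a suitable AG might skip any AG whose reservations are running
 * critically low. The helper name below is hypothetical.
 */
static inline bool
xfs_ag_resv_any_critical(
	struct xfs_perag	*pag)
{
	return xfs_ag_resv_critical(pag, XFS_AG_RESV_METADATA) ||
	       xfs_ag_resv_critical(pag, XFS_AG_RESV_RMAPBT);
}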
/*
 * How many blocks are reserved but not used, and therefore must not be
 * allocated away?
 */
xfs_extlen_t
xfs_ag_resv_needed(
	struct xfs_perag	*pag,
	enum xfs_ag_resv_type	type)
{
	xfs_extlen_t		len;

	len = pag->pag_meta_resv.ar_reserved + pag->pag_rmapbt_resv.ar_reserved;
	switch (type) {
	case XFS_AG_RESV_METADATA:
	case XFS_AG_RESV_RMAPBT:
		len -= xfs_perag_resv(pag, type)->ar_reserved;
		break;
	case XFS_AG_RESV_NONE:
		/* empty */
		break;
	default:
		ASSERT(0);
	}

	trace_xfs_ag_resv_needed(pag, type, len);

	return len;
}
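
/*
 * Usage sketch (hypothetical caller, not part of the original file): when
 * handing out blocks from the unreserved pool, clamp what the allocator may
 * take by whatever the reservations still need to keep back:
 *
 *	xfs_extlen_t	need = xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE);
 *	xfs_extlen_t	avail = pag->pagf_freeblks > need ?
 *				pag->pagf_freeblks - need : 0;
 */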
/* Clean out a reservation */
static void
__xfs_ag_resv_free(
	struct xfs_perag	*pag,
	enum xfs_ag_resv_type	type)
{
	struct xfs_ag_resv	*resv;
	xfs_extlen_t		oldresv;

	trace_xfs_ag_resv_free(pag, type, 0);

	resv = xfs_perag_resv(pag, type);
	if (pag_agno(pag) == 0)
		pag_mount(pag)->m_ag_max_usable += resv->ar_asked;
	/*
	 * RMAPBT blocks come from the AGFL and AGFL blocks are always
	 * considered "free", so whatever was reserved at mount time must be
	 * given back at umount.
	 */
	if (type == XFS_AG_RESV_RMAPBT)
		oldresv = resv->ar_orig_reserved;
	else
		oldresv = resv->ar_reserved;
	xfs_add_fdblocks(pag_mount(pag), oldresv);
	resv->ar_reserved = 0;
	resv->ar_asked = 0;
	resv->ar_orig_reserved = 0;
}

/* Free a per-AG reservation. */
void
xfs_ag_resv_free(
	struct xfs_perag	*pag)
{
	__xfs_ag_resv_free(pag, XFS_AG_RESV_RMAPBT);
	__xfs_ag_resv_free(pag, XFS_AG_RESV_METADATA);
}
static int
__xfs_ag_resv_init(
	struct xfs_perag		*pag,
	enum xfs_ag_resv_type		type,
	xfs_extlen_t			ask,
	xfs_extlen_t			used)
{
	struct xfs_mount		*mp = pag_mount(pag);
	struct xfs_ag_resv		*resv;
	int				error;
	xfs_extlen_t			hidden_space;

	if (used > ask)
		ask = used;

	switch (type) {
	case XFS_AG_RESV_RMAPBT:
		/*
		 * Space taken by the rmapbt is not subtracted from fdblocks
		 * because the rmapbt lives in the free space. Here we must
		 * subtract the entire reservation from fdblocks so that we
		 * always have blocks available for rmapbt expansion.
		 */
		hidden_space = ask;
		break;
	case XFS_AG_RESV_METADATA:
		/*
		 * Space taken by all other metadata btrees is accounted
		 * on-disk as used space. We therefore only hide the space
		 * that is reserved but not used by the trees.
		 */
		hidden_space = ask - used;
		break;
	default:
		ASSERT(0);
		return -EINVAL;
	}

	if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_AG_RESV_FAIL))
		error = -ENOSPC;
	else
		error = xfs_dec_fdblocks(mp, hidden_space, true);
	if (error) {
		trace_xfs_ag_resv_init_error(pag, error, _RET_IP_);
		xfs_warn(mp,
"Per-AG reservation for AG %u failed. Filesystem may run out of space.",
				pag_agno(pag));
		return error;
	}

	/*
	 * Reduce the maximum per-AG allocation length by however much we're
	 * trying to reserve for an AG. Since this is a filesystem-wide
	 * counter, we only make the adjustment for AG 0. This assumes that
	 * there aren't any AGs hungrier for per-AG reservation than AG 0.
	 */
	if (pag_agno(pag) == 0)
		mp->m_ag_max_usable -= ask;

	resv = xfs_perag_resv(pag, type);
	resv->ar_asked = ask;
	resv->ar_orig_reserved = hidden_space;
	resv->ar_reserved = ask - used;

	trace_xfs_ag_resv_init(pag, type, ask);
	return 0;
}
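
/*
 * After a successful __xfs_ag_resv_init(), the fields relate as follows (by
 * construction above): ar_reserved == ask - used, and ar_orig_reserved is
 * exactly the amount hidden from fdblocks, which is the full ask for the
 * RMAPBT reservation and ask - used for the METADATA reservation.
 */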
/* Create a per-AG block reservation. */
int
xfs_ag_resv_init(
	struct xfs_perag		*pag,
	struct xfs_trans		*tp)
{
	struct xfs_mount		*mp = pag_mount(pag);
	xfs_extlen_t			ask;
	xfs_extlen_t			used;
	int				error = 0, error2;
	bool				has_resv = false;

	/* Create the metadata reservation. */
	if (pag->pag_meta_resv.ar_asked == 0) {
		ask = used = 0;

		error = xfs_refcountbt_calc_reserves(mp, tp, pag, &ask, &used);
		if (error)
			goto out;

		error = xfs_finobt_calc_reserves(pag, tp, &ask, &used);
		if (error)
			goto out;

		error = __xfs_ag_resv_init(pag, XFS_AG_RESV_METADATA,
				ask, used);
		if (error) {
			/*
			 * Because we didn't have per-AG reservations when the
			 * finobt feature was added we might not be able to
			 * reserve all needed blocks. Warn and fall back to the
			 * old and potentially buggy code in that case, but
			 * ensure we do have the reservation for the refcountbt.
			 */
			ask = used = 0;

			mp->m_finobt_nores = true;

			error = xfs_refcountbt_calc_reserves(mp, tp, pag, &ask,
					&used);
			if (error)
				goto out;

			error = __xfs_ag_resv_init(pag, XFS_AG_RESV_METADATA,
					ask, used);
			if (error)
				goto out;
		}
		if (ask)
			has_resv = true;
	}

	/* Create the RMAPBT metadata reservation */
	if (pag->pag_rmapbt_resv.ar_asked == 0) {
		ask = used = 0;

		error = xfs_rmapbt_calc_reserves(mp, tp, pag, &ask, &used);
		if (error)
			goto out;

		error = __xfs_ag_resv_init(pag, XFS_AG_RESV_RMAPBT, ask, used);
		if (error)
			goto out;
		if (ask)
			has_resv = true;
	}

out:
	/*
	 * Initialize the pagf if we have at least one active reservation on the
	 * AG. This may have occurred already via reservation calculation, but
	 * fall back to an explicit init to ensure the in-core allocbt usage
	 * counters are initialized as soon as possible. This is important
	 * because filesystems with large perag reservations are susceptible to
	 * free space reservation problems that the allocbt counter is used to
	 * address.
	 */
	if (has_resv) {
		error2 = xfs_alloc_read_agf(pag, tp, 0, NULL);
		if (error2)
			return error2;

		/*
		 * If there isn't enough space in the AG to satisfy the
		 * reservation, let the caller know that there wasn't enough
		 * space. Callers are responsible for deciding what to do
		 * next, since (in theory) we can stumble along with
		 * insufficient reservation if data blocks are being freed to
		 * replenish the AG's free space.
		 */
		if (!error &&
		    xfs_perag_resv(pag, XFS_AG_RESV_METADATA)->ar_reserved +
		    xfs_perag_resv(pag, XFS_AG_RESV_RMAPBT)->ar_reserved >
		    pag->pagf_freeblks + pag->pagf_flcount)
			error = -ENOSPC;
	}

	return error;
}
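
/*
 * A minimal caller sketch, not part of the original file: the reservations
 * are typically (re)established by walking every AG and calling
 * xfs_ag_resv_init() on each one, recording the first failure. The variable
 * names are illustrative.
 *
 *	for_each_perag(mp, agno, pag) {
 *		err2 = xfs_ag_resv_init(pag, NULL);
 *		if (err2 && !error)
 *			error = err2;
 *	}
 */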
/* Allocate a block from the reservation. */
void
xfs_ag_resv_alloc_extent(
	struct xfs_perag		*pag,
	enum xfs_ag_resv_type		type,
	struct xfs_alloc_arg		*args)
{
	struct xfs_ag_resv		*resv;
	xfs_extlen_t			len;
	uint				field;

	trace_xfs_ag_resv_alloc_extent(pag, type, args->len);

	switch (type) {
	case XFS_AG_RESV_AGFL:
		return;
	case XFS_AG_RESV_METADATA:
	case XFS_AG_RESV_RMAPBT:
		resv = xfs_perag_resv(pag, type);
		break;
	default:
		ASSERT(0);
		fallthrough;
	case XFS_AG_RESV_NONE:
		field = args->wasdel ? XFS_TRANS_SB_RES_FDBLOCKS :
				       XFS_TRANS_SB_FDBLOCKS;
		xfs_trans_mod_sb(args->tp, field, -(int64_t)args->len);
		return;
	}

	len = min_t(xfs_extlen_t, args->len, resv->ar_reserved);
	resv->ar_reserved -= len;
	if (type == XFS_AG_RESV_RMAPBT)
		return;
	/* Allocations of reserved blocks only need on-disk sb updates... */
	xfs_trans_mod_sb(args->tp, XFS_TRANS_SB_RES_FDBLOCKS, -(int64_t)len);
	/* ...but non-reserved blocks need in-core and on-disk updates. */
	if (args->len > len)
		xfs_trans_mod_sb(args->tp, XFS_TRANS_SB_FDBLOCKS,
				-((int64_t)args->len - len));
}
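
/*
 * Worked example (illustrative numbers only): with ar_reserved = 3 and
 * args->len = 5, the first 3 blocks come out of the reservation (an on-disk
 * superblock update only) and the remaining 2 are charged to fdblocks both
 * in-core and on-disk, matching the two xfs_trans_mod_sb() calls above. The
 * free path below mirrors this split in the opposite direction.
 */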
/* Free a block to the reservation. */
void
xfs_ag_resv_free_extent(
	struct xfs_perag		*pag,
	enum xfs_ag_resv_type		type,
	struct xfs_trans		*tp,
	xfs_extlen_t			len)
{
	xfs_extlen_t			leftover;
	struct xfs_ag_resv		*resv;

	trace_xfs_ag_resv_free_extent(pag, type, len);

	switch (type) {
	case XFS_AG_RESV_AGFL:
		return;
	case XFS_AG_RESV_METADATA:
	case XFS_AG_RESV_RMAPBT:
		resv = xfs_perag_resv(pag, type);
		break;
	default:
		ASSERT(0);
		fallthrough;
	case XFS_AG_RESV_NONE:
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, (int64_t)len);
		fallthrough;
	case XFS_AG_RESV_IGNORE:
		return;
	}

	leftover = min_t(xfs_extlen_t, len, resv->ar_asked - resv->ar_reserved);
	resv->ar_reserved += leftover;
	if (type == XFS_AG_RESV_RMAPBT)
		return;
	/* Freeing into the reserved pool only requires on-disk update... */
	xfs_trans_mod_sb(tp, XFS_TRANS_SB_RES_FDBLOCKS, len);
	/* ...but freeing beyond that requires in-core and on-disk update. */
	if (len > leftover)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, len - leftover);
}