/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2018 Red Hat, Inc.
 * All Rights Reserved.
 */

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_rmap_btree.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_rmap.h"
#include "xfs_ag.h"
#include "xfs_ag_resv.h"
#include "xfs_health.h"

static int
xfs_get_aghdr_buf(
	struct xfs_mount	*mp,
	xfs_daddr_t		blkno,
	size_t			numblks,
	struct xfs_buf		**bpp,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;
	int			error;

	error = xfs_buf_get_uncached(mp->m_ddev_targp, numblks, 0, &bp);
	if (error)
		return error;

	xfs_buf_zero(bp, 0, BBTOB(bp->b_length));
	bp->b_bn = blkno;
	bp->b_maps[0].bm_bn = blkno;
	bp->b_ops = ops;

	*bpp = bp;
	return 0;
}

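/*
 * Does this AG contain the filesystem's internal log? True only when the
 * superblock records a log start block and that block maps into this AG.
 */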
static inline bool is_log_ag(struct xfs_mount *mp, struct aghdr_init_data *id)
{
	return mp->m_sb.sb_logstart > 0 &&
	       id->agno == XFS_FSB_TO_AGNO(mp, mp->m_sb.sb_logstart);
}

/*
 * Generic btree root block init function
 */
static void
xfs_btroot_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	xfs_btree_init_block(mp, bp, id->type, 0, 0, id->agno);
}

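/*
 * A freshly initialised AG carries a single free space record that runs from
 * the end of the static AG header area (mp->m_ag_prealloc_blocks) to the end
 * of the AG. If this AG also hosts the internal log, the records are adjusted
 * below so that the log blocks are never exposed as free space.
 */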
/* Finish initializing a free space btree. */
static void
xfs_freesp_init_recs(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_alloc_rec	*arec;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);

	arec = XFS_ALLOC_REC_ADDR(mp, XFS_BUF_TO_BLOCK(bp), 1);
	arec->ar_startblock = cpu_to_be32(mp->m_ag_prealloc_blocks);

	if (is_log_ag(mp, id)) {
		struct xfs_alloc_rec	*nrec;
		xfs_agblock_t		start = XFS_FSB_TO_AGBNO(mp,
						mp->m_sb.sb_logstart);

		ASSERT(start >= mp->m_ag_prealloc_blocks);
		if (start != mp->m_ag_prealloc_blocks) {
			/*
			 * Modify first record to pad stripe align of log
			 */
			arec->ar_blockcount = cpu_to_be32(start -
					mp->m_ag_prealloc_blocks);
			nrec = arec + 1;

			/*
			 * Insert second record at start of internal log
			 * which then gets trimmed.
			 */
			nrec->ar_startblock = cpu_to_be32(
					be32_to_cpu(arec->ar_startblock) +
					be32_to_cpu(arec->ar_blockcount));
			arec = nrec;
			be16_add_cpu(&block->bb_numrecs, 1);
		}
		/*
		 * Change record start to after the internal log
		 */
		be32_add_cpu(&arec->ar_startblock, mp->m_sb.sb_logblocks);
	}

	/*
	 * Calculate the record block count and check for the case where
	 * the log might have consumed all available space in the AG. If
	 * so, reset the record count to 0 to avoid exposure of an invalid
	 * record start block.
	 */
	arec->ar_blockcount = cpu_to_be32(id->agsize -
					  be32_to_cpu(arec->ar_startblock));
	if (!arec->ar_blockcount)
		block->bb_numrecs = 0;
}

/*
 * Alloc btree root block init functions
 */
static void
xfs_bnoroot_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	xfs_btree_init_block(mp, bp, XFS_BTNUM_BNO, 0, 1, id->agno);
	xfs_freesp_init_recs(mp, bp, id);
}

static void
xfs_cntroot_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	xfs_btree_init_block(mp, bp, XFS_BTNUM_CNT, 0, 1, id->agno);
	xfs_freesp_init_recs(mp, bp, id);
}

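/*
 * The new rmap btree root starts out with one record for each piece of
 * metadata that already occupies space in the AG: the static AG headers, the
 * free space and inode btree roots, the rmap root itself, and, where present,
 * the refcount btree root and the internal log.
 */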
/*
 * Reverse map root block init
 */
static void
xfs_rmaproot_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_rmap_rec	*rrec;

	xfs_btree_init_block(mp, bp, XFS_BTNUM_RMAP, 0, 4, id->agno);

	/*
	 * Mark the AG header regions as static metadata. The BNO
	 * btree block is the first block after the headers, so
	 * its location defines the size of the region the static
	 * metadata consumes.
	 *
	 * Note: unlike mkfs, we never have to account for log
	 * space when growing the data regions
	 */
	rrec = XFS_RMAP_REC_ADDR(block, 1);
	rrec->rm_startblock = 0;
	rrec->rm_blockcount = cpu_to_be32(XFS_BNO_BLOCK(mp));
	rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_FS);
	rrec->rm_offset = 0;

	/* account freespace btree root blocks */
	rrec = XFS_RMAP_REC_ADDR(block, 2);
	rrec->rm_startblock = cpu_to_be32(XFS_BNO_BLOCK(mp));
	rrec->rm_blockcount = cpu_to_be32(2);
	rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_AG);
	rrec->rm_offset = 0;

	/* account inode btree root blocks */
	rrec = XFS_RMAP_REC_ADDR(block, 3);
	rrec->rm_startblock = cpu_to_be32(XFS_IBT_BLOCK(mp));
	rrec->rm_blockcount = cpu_to_be32(XFS_RMAP_BLOCK(mp) -
					  XFS_IBT_BLOCK(mp));
	rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_INOBT);
	rrec->rm_offset = 0;

	/* account for rmap btree root */
	rrec = XFS_RMAP_REC_ADDR(block, 4);
	rrec->rm_startblock = cpu_to_be32(XFS_RMAP_BLOCK(mp));
	rrec->rm_blockcount = cpu_to_be32(1);
	rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_AG);
	rrec->rm_offset = 0;

	/* account for refc btree root */
	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
		rrec = XFS_RMAP_REC_ADDR(block, 5);
		rrec->rm_startblock = cpu_to_be32(xfs_refc_block(mp));
		rrec->rm_blockcount = cpu_to_be32(1);
		rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_REFC);
		rrec->rm_offset = 0;
		be16_add_cpu(&block->bb_numrecs, 1);
	}

	/* account for the log space */
	if (is_log_ag(mp, id)) {
		rrec = XFS_RMAP_REC_ADDR(block,
				be16_to_cpu(block->bb_numrecs) + 1);
		rrec->rm_startblock = cpu_to_be32(
				XFS_FSB_TO_AGBNO(mp, mp->m_sb.sb_logstart));
		rrec->rm_blockcount = cpu_to_be32(mp->m_sb.sb_logblocks);
		rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_LOG);
		rrec->rm_offset = 0;
		be16_add_cpu(&block->bb_numrecs, 1);
	}
}

/*
 * Initialise new secondary superblocks with the pre-grow geometry, but mark
 * them as "in progress" so we know they haven't yet been activated. This will
 * get cleared when the update with the new geometry information is done after
 * changes to the primary are committed. This isn't strictly necessary, but we
 * get it for free with the delayed buffer write lists and it means we can tell
 * if a grow operation didn't complete properly after the fact.
 */
static void
xfs_sbblock_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_dsb		*dsb = bp->b_addr;

	xfs_sb_to_disk(dsb, &mp->m_sb);
	dsb->sb_inprogress = 1;
}

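/*
 * Set up a new AGF: record the AG's size, point the free space (and, if
 * enabled, rmap and refcount) btrees at their new root blocks, and start with
 * an empty free list and all remaining space recorded as a single free
 * extent. If this AG hosts the internal log, the log blocks are subtracted
 * from the free space accounting.
 */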
static void
xfs_agfblock_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_agf		*agf = bp->b_addr;
	xfs_extlen_t		tmpsize;

	agf->agf_magicnum = cpu_to_be32(XFS_AGF_MAGIC);
	agf->agf_versionnum = cpu_to_be32(XFS_AGF_VERSION);
	agf->agf_seqno = cpu_to_be32(id->agno);
	agf->agf_length = cpu_to_be32(id->agsize);
	agf->agf_roots[XFS_BTNUM_BNOi] = cpu_to_be32(XFS_BNO_BLOCK(mp));
	agf->agf_roots[XFS_BTNUM_CNTi] = cpu_to_be32(XFS_CNT_BLOCK(mp));
	agf->agf_levels[XFS_BTNUM_BNOi] = cpu_to_be32(1);
	agf->agf_levels[XFS_BTNUM_CNTi] = cpu_to_be32(1);
	if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
		agf->agf_roots[XFS_BTNUM_RMAPi] =
					cpu_to_be32(XFS_RMAP_BLOCK(mp));
		agf->agf_levels[XFS_BTNUM_RMAPi] = cpu_to_be32(1);
		agf->agf_rmap_blocks = cpu_to_be32(1);
	}

	agf->agf_flfirst = cpu_to_be32(1);
	agf->agf_fllast = 0;
	agf->agf_flcount = 0;
	tmpsize = id->agsize - mp->m_ag_prealloc_blocks;
	agf->agf_freeblks = cpu_to_be32(tmpsize);
	agf->agf_longest = cpu_to_be32(tmpsize);
	if (xfs_sb_version_hascrc(&mp->m_sb))
		uuid_copy(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid);
	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
		agf->agf_refcount_root = cpu_to_be32(xfs_refc_block(mp));
		agf->agf_refcount_level = cpu_to_be32(1);
		agf->agf_refcount_blocks = cpu_to_be32(1);
	}

	if (is_log_ag(mp, id)) {
		int64_t	logblocks = mp->m_sb.sb_logblocks;

		be32_add_cpu(&agf->agf_freeblks, -logblocks);
		agf->agf_longest = cpu_to_be32(id->agsize -
			XFS_FSB_TO_AGBNO(mp, mp->m_sb.sb_logstart) - logblocks);
	}
}

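/*
 * Set up a new AGFL: stamp the header fields on CRC-enabled filesystems and
 * mark every free list slot empty (NULLAGBLOCK).
 */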
static void
xfs_agflblock_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_agfl		*agfl = XFS_BUF_TO_AGFL(bp);
	__be32			*agfl_bno;
	int			bucket;

	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		agfl->agfl_magicnum = cpu_to_be32(XFS_AGFL_MAGIC);
		agfl->agfl_seqno = cpu_to_be32(id->agno);
		uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid);
	}

	agfl_bno = xfs_buf_to_agfl_bno(bp);
	for (bucket = 0; bucket < xfs_agfl_size(mp); bucket++)
		agfl_bno[bucket] = cpu_to_be32(NULLAGBLOCK);
}

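/*
 * Set up a new AGI: an empty inode btree root (and finobt root if enabled),
 * no allocated or free inodes, and all unlinked-list buckets terminated with
 * NULLAGINO.
 */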
static void
xfs_agiblock_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_agi		*agi = bp->b_addr;
	int			bucket;

	agi->agi_magicnum = cpu_to_be32(XFS_AGI_MAGIC);
	agi->agi_versionnum = cpu_to_be32(XFS_AGI_VERSION);
	agi->agi_seqno = cpu_to_be32(id->agno);
	agi->agi_length = cpu_to_be32(id->agsize);
	agi->agi_count = 0;
	agi->agi_root = cpu_to_be32(XFS_IBT_BLOCK(mp));
	agi->agi_level = cpu_to_be32(1);
	agi->agi_freecount = 0;
	agi->agi_newino = cpu_to_be32(NULLAGINO);
	agi->agi_dirino = cpu_to_be32(NULLAGINO);
	if (xfs_sb_version_hascrc(&mp->m_sb))
		uuid_copy(&agi->agi_uuid, &mp->m_sb.sb_meta_uuid);
	if (xfs_sb_version_hasfinobt(&mp->m_sb)) {
		agi->agi_free_root = cpu_to_be32(XFS_FIBT_BLOCK(mp));
		agi->agi_free_level = cpu_to_be32(1);
	}
	for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++)
		agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
	if (xfs_sb_version_hasinobtcounts(&mp->m_sb)) {
		agi->agi_iblocks = cpu_to_be32(1);
		if (xfs_sb_version_hasfinobt(&mp->m_sb))
			agi->agi_fblocks = cpu_to_be32(1);
	}
}

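/*
 * Each AG header type is written out by the same sequence: grab an uncached
 * buffer for the header's disk address, let a type-specific init callback
 * (aghdr_init_work_f) fill in the block, then queue the buffer on the
 * caller's delayed write list.
 */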
typedef void (*aghdr_init_work_f)(struct xfs_mount *mp, struct xfs_buf *bp,
				  struct aghdr_init_data *id);

static int
xfs_ag_init_hdr(
	struct xfs_mount	*mp,
	struct aghdr_init_data	*id,
	aghdr_init_work_f	work,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;
	int			error;

	error = xfs_get_aghdr_buf(mp, id->daddr, id->numblks, &bp, ops);
	if (error)
		return error;

	(*work)(mp, bp, id);

	xfs_buf_delwri_queue(bp, &id->buffer_list);
	xfs_buf_relse(bp);
	return 0;
}

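/*
 * Describes one ondisk block that must be initialised when growing into a new
 * AG: where it lives, how big it is, which verifier and init callback to use,
 * and whether the current feature set requires it at all.
 */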
struct xfs_aghdr_grow_data {
	xfs_daddr_t		daddr;
	size_t			numblks;
	const struct xfs_buf_ops *ops;
	aghdr_init_work_f	work;
	xfs_btnum_t		type;
	bool			need_init;
};

/*
 * Prepare new AG headers to be written to disk. We use uncached buffers here,
 * as it is assumed these new AG headers are beyond the currently valid
 * filesystem address space. Using cached buffers would trip over EOFS
 * corruption detection algorithms in the buffer cache lookup routines.
 *
 * This is a non-transactional function, but the prepared buffers are added to
 * a delayed write buffer list supplied by the caller so they can submit them
 * to disk and wait on them as required.
 */
int
xfs_ag_init_headers(
	struct xfs_mount	*mp,
	struct aghdr_init_data	*id)
{
	struct xfs_aghdr_grow_data aghdr_data[] = {
	{ /* SB */
		.daddr = XFS_AG_DADDR(mp, id->agno, XFS_SB_DADDR),
		.numblks = XFS_FSS_TO_BB(mp, 1),
		.ops = &xfs_sb_buf_ops,
		.work = &xfs_sbblock_init,
		.need_init = true
	},
	{ /* AGF */
		.daddr = XFS_AG_DADDR(mp, id->agno, XFS_AGF_DADDR(mp)),
		.numblks = XFS_FSS_TO_BB(mp, 1),
		.ops = &xfs_agf_buf_ops,
		.work = &xfs_agfblock_init,
		.need_init = true
	},
	{ /* AGFL */
		.daddr = XFS_AG_DADDR(mp, id->agno, XFS_AGFL_DADDR(mp)),
		.numblks = XFS_FSS_TO_BB(mp, 1),
		.ops = &xfs_agfl_buf_ops,
		.work = &xfs_agflblock_init,
		.need_init = true
	},
	{ /* AGI */
		.daddr = XFS_AG_DADDR(mp, id->agno, XFS_AGI_DADDR(mp)),
		.numblks = XFS_FSS_TO_BB(mp, 1),
		.ops = &xfs_agi_buf_ops,
		.work = &xfs_agiblock_init,
		.need_init = true
	},
	{ /* BNO root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_BNO_BLOCK(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_bnobt_buf_ops,
		.work = &xfs_bnoroot_init,
		.need_init = true
	},
	{ /* CNT root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_CNT_BLOCK(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_cntbt_buf_ops,
		.work = &xfs_cntroot_init,
		.need_init = true
	},
	{ /* INO root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_IBT_BLOCK(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_inobt_buf_ops,
		.work = &xfs_btroot_init,
		.type = XFS_BTNUM_INO,
		.need_init = true
	},
	{ /* FINO root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_FIBT_BLOCK(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_finobt_buf_ops,
		.work = &xfs_btroot_init,
		.type = XFS_BTNUM_FINO,
		.need_init = xfs_sb_version_hasfinobt(&mp->m_sb)
	},
	{ /* RMAP root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_RMAP_BLOCK(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_rmapbt_buf_ops,
		.work = &xfs_rmaproot_init,
		.need_init = xfs_sb_version_hasrmapbt(&mp->m_sb)
	},
	{ /* REFC root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, xfs_refc_block(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_refcountbt_buf_ops,
		.work = &xfs_btroot_init,
		.type = XFS_BTNUM_REFC,
		.need_init = xfs_sb_version_hasreflink(&mp->m_sb)
	},
	{ /* NULL terminating block */
		.daddr = XFS_BUF_DADDR_NULL,
	}
	};
	struct xfs_aghdr_grow_data *dp;
	int			error = 0;

	/* Account for AG free space in new AG */
	id->nfree += id->agsize - mp->m_ag_prealloc_blocks;
	for (dp = &aghdr_data[0]; dp->daddr != XFS_BUF_DADDR_NULL; dp++) {
		if (!dp->need_init)
			continue;

		id->daddr = dp->daddr;
		id->numblks = dp->numblks;
		id->type = dp->type;
		error = xfs_ag_init_hdr(mp, id, dp->work, dp->ops);
		if (error)
			break;
	}
	return error;
}

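/*
 * The expected calling pattern (an illustrative sketch, not code from this
 * file) is the growfs path: the caller fills in an aghdr_init_data for each
 * new AG, accumulates the headers on the delwri list, and then writes them
 * all back in one go, e.g.:
 *
 *	INIT_LIST_HEAD(&id.buffer_list);
 *	for (id.agno = nagcount - 1; id.agno >= oagcount; id.agno--) {
 *		id.agsize = ...;
 *		error = xfs_ag_init_headers(mp, &id);
 *		if (error)
 *			break;
 *	}
 *	error = xfs_buf_delwri_submit(&id.buffer_list);
 */
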
/*
 * Extend the AG indicated by @id by the length passed in.
 */
int
xfs_ag_extend_space(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct aghdr_init_data	*id,
	xfs_extlen_t		len)
{
	struct xfs_buf		*bp;
	struct xfs_agi		*agi;
	struct xfs_agf		*agf;
	int			error;

	/*
	 * Change the agi length.
	 */
	error = xfs_ialloc_read_agi(mp, tp, id->agno, &bp);
	if (error)
		return error;

	agi = bp->b_addr;
	be32_add_cpu(&agi->agi_length, len);
	ASSERT(id->agno == mp->m_sb.sb_agcount - 1 ||
	       be32_to_cpu(agi->agi_length) == mp->m_sb.sb_agblocks);
	xfs_ialloc_log_agi(tp, bp, XFS_AGI_LENGTH);

	/*
	 * Change the agf length.
	 */
	error = xfs_alloc_read_agf(mp, tp, id->agno, 0, &bp);
	if (error)
		return error;

	agf = bp->b_addr;
	be32_add_cpu(&agf->agf_length, len);
	ASSERT(agf->agf_length == agi->agi_length);
	xfs_alloc_log_agf(tp, bp, XFS_AGF_LENGTH);

	/*
	 * Free the new space.
	 *
	 * XFS_RMAP_OINFO_SKIP_UPDATE is used here to tell the rmap btree that
	 * this doesn't actually exist in the rmap btree.
	 */
	error = xfs_rmap_free(tp, bp, id->agno,
				be32_to_cpu(agf->agf_length) - len,
				len, &XFS_RMAP_OINFO_SKIP_UPDATE);
	if (error)
		return error;

	return xfs_free_extent(tp, XFS_AGB_TO_FSB(mp, id->agno,
					be32_to_cpu(agf->agf_length) - len),
				len, &XFS_RMAP_OINFO_SKIP_UPDATE,
				XFS_AG_RESV_NONE);
}

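/*
 * The geometry reported here is assembled from the on-disk AGI and AGF
 * headers plus the in-memory per-AG counters; the free block count is
 * reported net of space held back by per-AG reservations.
 */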
/* Retrieve AG geometry. */
int
xfs_ag_get_geometry(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	struct xfs_ag_geometry	*ageo)
{
	struct xfs_buf		*agi_bp;
	struct xfs_buf		*agf_bp;
	struct xfs_agi		*agi;
	struct xfs_agf		*agf;
	struct xfs_perag	*pag;
	unsigned int		freeblks;
	int			error;

	if (agno >= mp->m_sb.sb_agcount)
		return -EINVAL;

	/* Lock the AG headers. */
	error = xfs_ialloc_read_agi(mp, NULL, agno, &agi_bp);
	if (error)
		return error;
	error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agf_bp);
	if (error)
		goto out_agi;

	pag = agi_bp->b_pag;

	/* Fill out form. */
	memset(ageo, 0, sizeof(*ageo));
	ageo->ag_number = agno;

	agi = agi_bp->b_addr;
	ageo->ag_icount = be32_to_cpu(agi->agi_count);
	ageo->ag_ifree = be32_to_cpu(agi->agi_freecount);

	agf = agf_bp->b_addr;
	ageo->ag_length = be32_to_cpu(agf->agf_length);
	freeblks = pag->pagf_freeblks +
		   pag->pagf_flcount +
		   pag->pagf_btreeblks -
		   xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE);
	ageo->ag_freeblks = freeblks;
	xfs_ag_geom_health(pag, ageo);

	/* Release resources. */
	xfs_buf_relse(agf_bp);
out_agi:
	xfs_buf_relse(agi_bp);
	return error;
}