/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2018 Red Hat, Inc.
 * All rights reserved.
 */

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_rmap_btree.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_rmap.h"
#include "xfs_ag.h"
static struct xfs_buf *
xfs_get_aghdr_buf(
	struct xfs_mount	*mp,
	xfs_daddr_t		blkno,
	size_t			numblks,
	int			flags,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;

	bp = xfs_buf_get_uncached(mp->m_ddev_targp, numblks, flags);
	if (!bp)
		return NULL;

	xfs_buf_zero(bp, 0, BBTOB(bp->b_length));
	bp->b_bn = blkno;
	bp->b_maps[0].bm_bn = blkno;
	bp->b_ops = ops;

	return bp;
}
/*
 * Generic btree root block init function
 */
static void
xfs_btroot_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	xfs_btree_init_block(mp, bp, id->type, 0, 0, id->agno, 0);
}
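
/*
 * Reading aid (added commentary, not in the original source): in the
 * xfs_btree_init_block() call above the two zeros are level and numrecs,
 * so the root starts life as an empty leaf. The btree type is carried in
 * id->type, which lets this one helper initialise the INO, FINO and REFC
 * root blocks from the table in xfs_ag_init_headers() below.
 */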
/*
 * Alloc btree root block init functions
 */
static void
xfs_bnoroot_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_alloc_rec	*arec;

	xfs_btree_init_block(mp, bp, XFS_BTNUM_BNO, 0, 1, id->agno, 0);
	arec = XFS_ALLOC_REC_ADDR(mp, XFS_BUF_TO_BLOCK(bp), 1);
	arec->ar_startblock = cpu_to_be32(mp->m_ag_prealloc_blocks);
	arec->ar_blockcount = cpu_to_be32(id->agsize -
					  be32_to_cpu(arec->ar_startblock));
}
static void
xfs_cntroot_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_alloc_rec	*arec;

	xfs_btree_init_block(mp, bp, XFS_BTNUM_CNT, 0, 1, id->agno, 0);
	arec = XFS_ALLOC_REC_ADDR(mp, XFS_BUF_TO_BLOCK(bp), 1);
	arec->ar_startblock = cpu_to_be32(mp->m_ag_prealloc_blocks);
	arec->ar_blockcount = cpu_to_be32(id->agsize -
					  be32_to_cpu(arec->ar_startblock));
}
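
/*
 * Worked example for the two helpers above (illustrative numbers, not from
 * the original source): if m_ag_prealloc_blocks is 4 and the new AG has
 * agsize = 1024 blocks, both roots get the single record
 *
 *	{ ar_startblock = 4, ar_blockcount = 1020 }
 *
 * i.e. everything past the preallocated header area is one free extent.
 * With only one record the by-block (BNO) and by-size (CNT) trees are
 * identical; they only diverge once free space fragments.
 */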
/*
 * Reverse map root block init
 */
static void
xfs_rmaproot_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_rmap_rec	*rrec;

	xfs_btree_init_block(mp, bp, XFS_BTNUM_RMAP, 0, 4, id->agno, 0);

	/*
	 * Mark the AG header regions as static metadata. The BNO
	 * btree block is the first block after the headers, so
	 * its location defines the size of the region the static
	 * metadata consumes.
	 *
	 * Note: unlike mkfs, we never have to account for log
	 * space when growing the data regions.
	 */
	rrec = XFS_RMAP_REC_ADDR(block, 1);
	rrec->rm_startblock = 0;
	rrec->rm_blockcount = cpu_to_be32(XFS_BNO_BLOCK(mp));
	rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_FS);
	rrec->rm_offset = 0;

	/* account freespace btree root blocks */
	rrec = XFS_RMAP_REC_ADDR(block, 2);
	rrec->rm_startblock = cpu_to_be32(XFS_BNO_BLOCK(mp));
	rrec->rm_blockcount = cpu_to_be32(2);
	rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_AG);
	rrec->rm_offset = 0;

	/* account inode btree root blocks */
	rrec = XFS_RMAP_REC_ADDR(block, 3);
	rrec->rm_startblock = cpu_to_be32(XFS_IBT_BLOCK(mp));
	rrec->rm_blockcount = cpu_to_be32(XFS_RMAP_BLOCK(mp) -
					  XFS_IBT_BLOCK(mp));
	rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_INOBT);
	rrec->rm_offset = 0;

	/* account for rmap btree root */
	rrec = XFS_RMAP_REC_ADDR(block, 4);
	rrec->rm_startblock = cpu_to_be32(XFS_RMAP_BLOCK(mp));
	rrec->rm_blockcount = cpu_to_be32(1);
	rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_AG);
	rrec->rm_offset = 0;

	/* account for refc btree root */
	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
		rrec = XFS_RMAP_REC_ADDR(block, 5);
		rrec->rm_startblock = cpu_to_be32(xfs_refc_block(mp));
		rrec->rm_blockcount = cpu_to_be32(1);
		rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_REFC);
		rrec->rm_offset = 0;
		be16_add_cpu(&block->bb_numrecs, 1);
	}
}
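
/*
 * For orientation (added commentary, not from the original source), the
 * root block built above describes the preallocated area of the new AG as:
 *
 *	[0, XFS_BNO_BLOCK)              OWN_FS     AG headers
 *	[XFS_BNO_BLOCK, +2)             OWN_AG     BNO and CNT roots
 *	[XFS_IBT_BLOCK, XFS_RMAP_BLOCK) OWN_INOBT  INO (and FINO) roots
 *	[XFS_RMAP_BLOCK, +1)            OWN_AG     the RMAP root itself
 *	[xfs_refc_block, +1)            OWN_REFC   REFC root, reflink only
 *
 * so every preallocated block is claimed by exactly one owner record.
 */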
/*
 * Initialise new secondary superblocks with the pre-grow geometry, but mark
 * them as "in progress" so we know they haven't yet been activated. This will
 * get cleared when the update with the new geometry information is done after
 * changes to the primary are committed. This isn't strictly necessary, but we
 * get it for free with the delayed buffer write lists and it means we can tell
 * if a grow operation didn't complete properly after the fact.
 */
static void
xfs_sbblock_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_dsb		*dsb = XFS_BUF_TO_SBP(bp);

	xfs_sb_to_disk(dsb, &mp->m_sb);
	dsb->sb_inprogress = 1;
}
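
/*
 * A sketch of how sb_inprogress pays off later (an assumed consumer,
 * mirroring what verification/repair tooling can do rather than code in
 * this file): any secondary superblock still carrying the flag after the
 * fact marks a grow that never committed its geometry update, e.g.
 *
 *	if (dsb->sb_inprogress)
 *		...	// secondary never activated; grow did not complete
 */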
static void
xfs_agfblock_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(bp);
	xfs_extlen_t		tmpsize;

	agf->agf_magicnum = cpu_to_be32(XFS_AGF_MAGIC);
	agf->agf_versionnum = cpu_to_be32(XFS_AGF_VERSION);
	agf->agf_seqno = cpu_to_be32(id->agno);
	agf->agf_length = cpu_to_be32(id->agsize);
	agf->agf_roots[XFS_BTNUM_BNOi] = cpu_to_be32(XFS_BNO_BLOCK(mp));
	agf->agf_roots[XFS_BTNUM_CNTi] = cpu_to_be32(XFS_CNT_BLOCK(mp));
	agf->agf_levels[XFS_BTNUM_BNOi] = cpu_to_be32(1);
	agf->agf_levels[XFS_BTNUM_CNTi] = cpu_to_be32(1);
	if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
		agf->agf_roots[XFS_BTNUM_RMAPi] =
					cpu_to_be32(XFS_RMAP_BLOCK(mp));
		agf->agf_levels[XFS_BTNUM_RMAPi] = cpu_to_be32(1);
		agf->agf_rmap_blocks = cpu_to_be32(1);
	}

	agf->agf_flfirst = cpu_to_be32(1);
	agf->agf_fllast = 0;
	agf->agf_flcount = 0;
	tmpsize = id->agsize - mp->m_ag_prealloc_blocks;
	agf->agf_freeblks = cpu_to_be32(tmpsize);
	agf->agf_longest = cpu_to_be32(tmpsize);
	if (xfs_sb_version_hascrc(&mp->m_sb))
		uuid_copy(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid);
	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
		agf->agf_refcount_root = cpu_to_be32(
				xfs_refc_block(mp));
		agf->agf_refcount_level = cpu_to_be32(1);
		agf->agf_refcount_blocks = cpu_to_be32(1);
	}
}
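
/*
 * A note on the freelist setup above (added commentary, not from the
 * original source): agf_flfirst = 1 with agf_fllast = 0 and agf_flcount = 0
 * is the empty state of the circular AGFL, "first" sitting one slot past
 * "last". The two free space counters both start at agsize minus the
 * preallocated header blocks, matching the single extent recorded in the
 * BNO and CNT roots above.
 */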
static void
xfs_agflblock_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_agfl		*agfl = XFS_BUF_TO_AGFL(bp);
	__be32			*agfl_bno;
	int			bucket;

	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		agfl->agfl_magicnum = cpu_to_be32(XFS_AGFL_MAGIC);
		agfl->agfl_seqno = cpu_to_be32(id->agno);
		uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid);
	}

	agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, bp);
	for (bucket = 0; bucket < xfs_agfl_size(mp); bucket++)
		agfl_bno[bucket] = cpu_to_be32(NULLAGBLOCK);
}
static void
xfs_agiblock_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_agi		*agi = XFS_BUF_TO_AGI(bp);
	int			bucket;

	agi->agi_magicnum = cpu_to_be32(XFS_AGI_MAGIC);
	agi->agi_versionnum = cpu_to_be32(XFS_AGI_VERSION);
	agi->agi_seqno = cpu_to_be32(id->agno);
	agi->agi_length = cpu_to_be32(id->agsize);
	agi->agi_count = 0;
	agi->agi_root = cpu_to_be32(XFS_IBT_BLOCK(mp));
	agi->agi_level = cpu_to_be32(1);
	agi->agi_freecount = 0;
	agi->agi_newino = cpu_to_be32(NULLAGINO);
	agi->agi_dirino = cpu_to_be32(NULLAGINO);
	if (xfs_sb_version_hascrc(&mp->m_sb))
		uuid_copy(&agi->agi_uuid, &mp->m_sb.sb_meta_uuid);
	if (xfs_sb_version_hasfinobt(&mp->m_sb)) {
		agi->agi_free_root = cpu_to_be32(XFS_FIBT_BLOCK(mp));
		agi->agi_free_level = cpu_to_be32(1);
	}
	for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++)
		agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
}
typedef void (*aghdr_init_work_f)(struct xfs_mount *mp, struct xfs_buf *bp,
				  struct aghdr_init_data *id);
static int
xfs_ag_init_hdr(
	struct xfs_mount	*mp,
	struct aghdr_init_data	*id,
	aghdr_init_work_f	work,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;

	bp = xfs_get_aghdr_buf(mp, id->daddr, id->numblks, 0, ops);
	if (!bp)
		return -ENOMEM;

	(*work)(mp, bp, id);

	xfs_buf_delwri_queue(bp, &id->buffer_list);
	xfs_buf_relse(bp);
	return 0;
}
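
/*
 * How the delwri list gets consumed (a hedged sketch of the caller side;
 * the real call site is the growfs path and its details may differ):
 *
 *	struct aghdr_init_data	id = {};
 *	int			error;
 *
 *	INIT_LIST_HEAD(&id.buffer_list);
 *	id.agno = agno;
 *	id.agsize = agsize;
 *	error = xfs_ag_init_headers(mp, &id);
 *	if (error) {
 *		xfs_buf_delwri_cancel(&id.buffer_list);
 *		return error;
 *	}
 *	error = xfs_buf_delwri_submit(&id.buffer_list);
 *
 * Nothing here touches the disk directly; each prepared header is queued
 * and the caller writes and waits on the whole batch at once.
 */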
struct xfs_aghdr_grow_data {
	xfs_daddr_t		daddr;
	size_t			numblks;
	const struct xfs_buf_ops *ops;
	aghdr_init_work_f	work;
	xfs_btnum_t		type;
	bool			need_init;
};
/*
 * Prepare new AG headers to be written to disk. We use uncached buffers here,
 * as it is assumed these new AG headers lie beyond the currently valid
 * filesystem address space. Using cached buffers would trip over EOFS
 * corruption detection algorithms in the buffer cache lookup routines.
 *
 * This is a non-transactional function, but the prepared buffers are added to
 * a delayed write buffer list supplied by the caller so the caller can submit
 * them to disk and wait on them as required.
 */
int
xfs_ag_init_headers(
	struct xfs_mount	*mp,
	struct aghdr_init_data	*id)
{
	struct xfs_aghdr_grow_data aghdr_data[] = {
	{ /* SB */
		.daddr = XFS_AG_DADDR(mp, id->agno, XFS_SB_DADDR),
		.numblks = XFS_FSS_TO_BB(mp, 1),
		.ops = &xfs_sb_buf_ops,
		.work = &xfs_sbblock_init,
		.need_init = true
	},
	{ /* AGF */
		.daddr = XFS_AG_DADDR(mp, id->agno, XFS_AGF_DADDR(mp)),
		.numblks = XFS_FSS_TO_BB(mp, 1),
		.ops = &xfs_agf_buf_ops,
		.work = &xfs_agfblock_init,
		.need_init = true
	},
	{ /* AGFL */
		.daddr = XFS_AG_DADDR(mp, id->agno, XFS_AGFL_DADDR(mp)),
		.numblks = XFS_FSS_TO_BB(mp, 1),
		.ops = &xfs_agfl_buf_ops,
		.work = &xfs_agflblock_init,
		.need_init = true
	},
	{ /* AGI */
		.daddr = XFS_AG_DADDR(mp, id->agno, XFS_AGI_DADDR(mp)),
		.numblks = XFS_FSS_TO_BB(mp, 1),
		.ops = &xfs_agi_buf_ops,
		.work = &xfs_agiblock_init,
		.need_init = true
	},
	{ /* BNO root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_BNO_BLOCK(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_allocbt_buf_ops,
		.work = &xfs_bnoroot_init,
		.need_init = true
	},
	{ /* CNT root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_CNT_BLOCK(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_allocbt_buf_ops,
		.work = &xfs_cntroot_init,
		.need_init = true
	},
	{ /* INO root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_IBT_BLOCK(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_inobt_buf_ops,
		.work = &xfs_btroot_init,
		.type = XFS_BTNUM_INO,
		.need_init = true
	},
	{ /* FINO root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_FIBT_BLOCK(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_inobt_buf_ops,
		.work = &xfs_btroot_init,
		.type = XFS_BTNUM_FINO,
		.need_init = xfs_sb_version_hasfinobt(&mp->m_sb)
	},
	{ /* RMAP root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_RMAP_BLOCK(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_rmapbt_buf_ops,
		.work = &xfs_rmaproot_init,
		.need_init = xfs_sb_version_hasrmapbt(&mp->m_sb)
	},
	{ /* REFC root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, xfs_refc_block(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_refcountbt_buf_ops,
		.work = &xfs_btroot_init,
		.type = XFS_BTNUM_REFC,
		.need_init = xfs_sb_version_hasreflink(&mp->m_sb)
	},
	{ /* NULL terminating block */
		.daddr = XFS_BUF_DADDR_NULL,
	}
	};
	struct xfs_aghdr_grow_data *dp;
	int			error = 0;

	/* Account for AG free space in new AG */
	id->nfree += id->agsize - mp->m_ag_prealloc_blocks;
	for (dp = &aghdr_data[0]; dp->daddr != XFS_BUF_DADDR_NULL; dp++) {
		if (!dp->need_init)
			continue;

		id->daddr = dp->daddr;
		id->numblks = dp->numblks;
		id->type = dp->type;
		error = xfs_ag_init_hdr(mp, id, dp->work, dp->ops);
		if (error)
			break;
	}
	return error;
}
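
/*
 * Design note (added commentary): the table above is data-driven dispatch.
 * A sentinel entry with daddr == XFS_BUF_DADDR_NULL terminates the walk,
 * and .need_init lets the optional feature btrees (finobt, rmap, refcount)
 * share the same loop rather than growing feature-specific branches.
 */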
/*
 * Extend the AG indicated by @id by the length passed in.
 */
int
xfs_ag_extend_space(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct aghdr_init_data	*id,
	xfs_extlen_t		len)
{
	struct xfs_owner_info	oinfo;
	struct xfs_buf		*bp;
	struct xfs_agi		*agi;
	struct xfs_agf		*agf;
	int			error;

	/*
	 * Change the agi length.
	 */
	error = xfs_ialloc_read_agi(mp, tp, id->agno, &bp);
	if (error)
		return error;

	agi = XFS_BUF_TO_AGI(bp);
	be32_add_cpu(&agi->agi_length, len);
	ASSERT(id->agno == mp->m_sb.sb_agcount - 1 ||
	       be32_to_cpu(agi->agi_length) == mp->m_sb.sb_agblocks);
	xfs_ialloc_log_agi(tp, bp, XFS_AGI_LENGTH);

	/*
	 * Change the agf length.
	 */
	error = xfs_alloc_read_agf(mp, tp, id->agno, 0, &bp);
	if (error)
		return error;

	agf = XFS_BUF_TO_AGF(bp);
	be32_add_cpu(&agf->agf_length, len);
	ASSERT(agf->agf_length == agi->agi_length);
	xfs_alloc_log_agf(tp, bp, XFS_AGF_LENGTH);

	/*
	 * Free the new space.
	 *
	 * XFS_RMAP_OWN_NULL is used here to tell the rmap btree that
	 * this doesn't actually exist in the rmap btree.
	 */
	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_NULL);
	error = xfs_rmap_free(tp, bp, id->agno,
				be32_to_cpu(agf->agf_length) - len,
				len, &oinfo);
	if (error)
		return error;

	return xfs_free_extent(tp, XFS_AGB_TO_FSB(mp, id->agno,
					be32_to_cpu(agf->agf_length) - len),
				len, &oinfo, XFS_AG_RESV_NONE);
}
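
/*
 * Worked example (illustrative numbers, not from the original source):
 * growing the last AG from agf_length 1000 to 1024 blocks passes len = 24,
 * so the extent [1000, 1024) is handed to xfs_free_extent() and lands in
 * the BNO/CNT trees as usable space. Per the comment above, the preceding
 * xfs_rmap_free() call with XFS_RMAP_OWN_NULL only informs the rmap btree
 * that this range has no owner record to remove.
 */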