// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
 */
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
13 #include "xfs_mount.h"
14 #include "xfs_btree.h"
15 #include "xfs_alloc_btree.h"
16 #include "xfs_alloc.h"
17 #include "xfs_extent_busy.h"
18 #include "xfs_error.h"
19 #include "xfs_trace.h"
20 #include "xfs_cksum.h"
21 #include "xfs_trans.h"
24 STATIC
struct xfs_btree_cur
*
25 xfs_allocbt_dup_cursor(
26 struct xfs_btree_cur
*cur
)
28 return xfs_allocbt_init_cursor(cur
->bc_mp
, cur
->bc_tp
,
29 cur
->bc_private
.a
.agbp
, cur
->bc_private
.a
.agno
,
35 struct xfs_btree_cur
*cur
,
36 union xfs_btree_ptr
*ptr
,
39 struct xfs_buf
*agbp
= cur
->bc_private
.a
.agbp
;
40 struct xfs_agf
*agf
= XFS_BUF_TO_AGF(agbp
);
41 xfs_agnumber_t seqno
= be32_to_cpu(agf
->agf_seqno
);
42 int btnum
= cur
->bc_btnum
;
43 struct xfs_perag
*pag
= xfs_perag_get(cur
->bc_mp
, seqno
);
47 agf
->agf_roots
[btnum
] = ptr
->s
;
48 be32_add_cpu(&agf
->agf_levels
[btnum
], inc
);
49 pag
->pagf_levels
[btnum
] += inc
;
52 xfs_alloc_log_agf(cur
->bc_tp
, agbp
, XFS_AGF_ROOTS
| XFS_AGF_LEVELS
);
56 xfs_allocbt_alloc_block(
57 struct xfs_btree_cur
*cur
,
58 union xfs_btree_ptr
*start
,
59 union xfs_btree_ptr
*new,
65 /* Allocate the new block from the freelist. If we can't, give up. */
66 error
= xfs_alloc_get_freelist(cur
->bc_tp
, cur
->bc_private
.a
.agbp
,
71 if (bno
== NULLAGBLOCK
) {
76 xfs_extent_busy_reuse(cur
->bc_mp
, cur
->bc_private
.a
.agno
, bno
, 1, false);
78 xfs_trans_agbtree_delta(cur
->bc_tp
, 1);
79 new->s
= cpu_to_be32(bno
);
86 xfs_allocbt_free_block(
87 struct xfs_btree_cur
*cur
,
90 struct xfs_buf
*agbp
= cur
->bc_private
.a
.agbp
;
91 struct xfs_agf
*agf
= XFS_BUF_TO_AGF(agbp
);
95 bno
= xfs_daddr_to_agbno(cur
->bc_mp
, XFS_BUF_ADDR(bp
));
96 error
= xfs_alloc_put_freelist(cur
->bc_tp
, agbp
, NULL
, bno
, 1);
100 xfs_extent_busy_insert(cur
->bc_tp
, be32_to_cpu(agf
->agf_seqno
), bno
, 1,
101 XFS_EXTENT_BUSY_SKIP_DISCARD
);
102 xfs_trans_agbtree_delta(cur
->bc_tp
, -1);
107 * Update the longest extent in the AGF
110 xfs_allocbt_update_lastrec(
111 struct xfs_btree_cur
*cur
,
112 struct xfs_btree_block
*block
,
113 union xfs_btree_rec
*rec
,
117 struct xfs_agf
*agf
= XFS_BUF_TO_AGF(cur
->bc_private
.a
.agbp
);
118 xfs_agnumber_t seqno
= be32_to_cpu(agf
->agf_seqno
);
119 struct xfs_perag
*pag
;
123 ASSERT(cur
->bc_btnum
== XFS_BTNUM_CNT
);
128 * If this is the last leaf block and it's the last record,
129 * then update the size of the longest extent in the AG.
131 if (ptr
!= xfs_btree_get_numrecs(block
))
133 len
= rec
->alloc
.ar_blockcount
;
136 if (be32_to_cpu(rec
->alloc
.ar_blockcount
) <=
137 be32_to_cpu(agf
->agf_longest
))
139 len
= rec
->alloc
.ar_blockcount
;
142 numrecs
= xfs_btree_get_numrecs(block
);
145 ASSERT(ptr
== numrecs
+ 1);
148 xfs_alloc_rec_t
*rrp
;
150 rrp
= XFS_ALLOC_REC_ADDR(cur
->bc_mp
, block
, numrecs
);
151 len
= rrp
->ar_blockcount
;
162 agf
->agf_longest
= len
;
163 pag
= xfs_perag_get(cur
->bc_mp
, seqno
);
164 pag
->pagf_longest
= be32_to_cpu(len
);
166 xfs_alloc_log_agf(cur
->bc_tp
, cur
->bc_private
.a
.agbp
, XFS_AGF_LONGEST
);
170 xfs_allocbt_get_minrecs(
171 struct xfs_btree_cur
*cur
,
174 return cur
->bc_mp
->m_alloc_mnr
[level
!= 0];
178 xfs_allocbt_get_maxrecs(
179 struct xfs_btree_cur
*cur
,
182 return cur
->bc_mp
->m_alloc_mxr
[level
!= 0];
186 xfs_allocbt_init_key_from_rec(
187 union xfs_btree_key
*key
,
188 union xfs_btree_rec
*rec
)
190 key
->alloc
.ar_startblock
= rec
->alloc
.ar_startblock
;
191 key
->alloc
.ar_blockcount
= rec
->alloc
.ar_blockcount
;
195 xfs_bnobt_init_high_key_from_rec(
196 union xfs_btree_key
*key
,
197 union xfs_btree_rec
*rec
)
201 x
= be32_to_cpu(rec
->alloc
.ar_startblock
);
202 x
+= be32_to_cpu(rec
->alloc
.ar_blockcount
) - 1;
203 key
->alloc
.ar_startblock
= cpu_to_be32(x
);
204 key
->alloc
.ar_blockcount
= 0;
208 xfs_cntbt_init_high_key_from_rec(
209 union xfs_btree_key
*key
,
210 union xfs_btree_rec
*rec
)
212 key
->alloc
.ar_blockcount
= rec
->alloc
.ar_blockcount
;
213 key
->alloc
.ar_startblock
= 0;
217 xfs_allocbt_init_rec_from_cur(
218 struct xfs_btree_cur
*cur
,
219 union xfs_btree_rec
*rec
)
221 rec
->alloc
.ar_startblock
= cpu_to_be32(cur
->bc_rec
.a
.ar_startblock
);
222 rec
->alloc
.ar_blockcount
= cpu_to_be32(cur
->bc_rec
.a
.ar_blockcount
);
226 xfs_allocbt_init_ptr_from_cur(
227 struct xfs_btree_cur
*cur
,
228 union xfs_btree_ptr
*ptr
)
230 struct xfs_agf
*agf
= XFS_BUF_TO_AGF(cur
->bc_private
.a
.agbp
);
232 ASSERT(cur
->bc_private
.a
.agno
== be32_to_cpu(agf
->agf_seqno
));
234 ptr
->s
= agf
->agf_roots
[cur
->bc_btnum
];
239 struct xfs_btree_cur
*cur
,
240 union xfs_btree_key
*key
)
242 xfs_alloc_rec_incore_t
*rec
= &cur
->bc_rec
.a
;
243 xfs_alloc_key_t
*kp
= &key
->alloc
;
245 return (int64_t)be32_to_cpu(kp
->ar_startblock
) - rec
->ar_startblock
;
250 struct xfs_btree_cur
*cur
,
251 union xfs_btree_key
*key
)
253 xfs_alloc_rec_incore_t
*rec
= &cur
->bc_rec
.a
;
254 xfs_alloc_key_t
*kp
= &key
->alloc
;
257 diff
= (int64_t)be32_to_cpu(kp
->ar_blockcount
) - rec
->ar_blockcount
;
261 return (int64_t)be32_to_cpu(kp
->ar_startblock
) - rec
->ar_startblock
;
265 xfs_bnobt_diff_two_keys(
266 struct xfs_btree_cur
*cur
,
267 union xfs_btree_key
*k1
,
268 union xfs_btree_key
*k2
)
270 return (int64_t)be32_to_cpu(k1
->alloc
.ar_startblock
) -
271 be32_to_cpu(k2
->alloc
.ar_startblock
);
275 xfs_cntbt_diff_two_keys(
276 struct xfs_btree_cur
*cur
,
277 union xfs_btree_key
*k1
,
278 union xfs_btree_key
*k2
)
282 diff
= be32_to_cpu(k1
->alloc
.ar_blockcount
) -
283 be32_to_cpu(k2
->alloc
.ar_blockcount
);
287 return be32_to_cpu(k1
->alloc
.ar_startblock
) -
288 be32_to_cpu(k2
->alloc
.ar_startblock
);
291 static xfs_failaddr_t
295 struct xfs_mount
*mp
= bp
->b_target
->bt_mount
;
296 struct xfs_btree_block
*block
= XFS_BUF_TO_BLOCK(bp
);
297 struct xfs_perag
*pag
= bp
->b_pag
;
302 * magic number and level verification
304 * During growfs operations, we can't verify the exact level or owner as
305 * the perag is not fully initialised and hence not attached to the
306 * buffer. In this case, check against the maximum tree depth.
308 * Similarly, during log recovery we will have a perag structure
309 * attached, but the agf information will not yet have been initialised
310 * from the on disk AGF. Again, we can only check against maximum limits
313 level
= be16_to_cpu(block
->bb_level
);
314 switch (block
->bb_magic
) {
315 case cpu_to_be32(XFS_ABTB_CRC_MAGIC
):
316 fa
= xfs_btree_sblock_v5hdr_verify(bp
);
320 case cpu_to_be32(XFS_ABTB_MAGIC
):
321 if (pag
&& pag
->pagf_init
) {
322 if (level
>= pag
->pagf_levels
[XFS_BTNUM_BNOi
])
323 return __this_address
;
324 } else if (level
>= mp
->m_ag_maxlevels
)
325 return __this_address
;
327 case cpu_to_be32(XFS_ABTC_CRC_MAGIC
):
328 fa
= xfs_btree_sblock_v5hdr_verify(bp
);
332 case cpu_to_be32(XFS_ABTC_MAGIC
):
333 if (pag
&& pag
->pagf_init
) {
334 if (level
>= pag
->pagf_levels
[XFS_BTNUM_CNTi
])
335 return __this_address
;
336 } else if (level
>= mp
->m_ag_maxlevels
)
337 return __this_address
;
340 return __this_address
;
343 return xfs_btree_sblock_verify(bp
, mp
->m_alloc_mxr
[level
!= 0]);
347 xfs_allocbt_read_verify(
352 if (!xfs_btree_sblock_verify_crc(bp
))
353 xfs_verifier_error(bp
, -EFSBADCRC
, __this_address
);
355 fa
= xfs_allocbt_verify(bp
);
357 xfs_verifier_error(bp
, -EFSCORRUPTED
, fa
);
361 trace_xfs_btree_corrupt(bp
, _RET_IP_
);
365 xfs_allocbt_write_verify(
370 fa
= xfs_allocbt_verify(bp
);
372 trace_xfs_btree_corrupt(bp
, _RET_IP_
);
373 xfs_verifier_error(bp
, -EFSCORRUPTED
, fa
);
376 xfs_btree_sblock_calc_crc(bp
);
380 const struct xfs_buf_ops xfs_allocbt_buf_ops
= {
381 .name
= "xfs_allocbt",
382 .verify_read
= xfs_allocbt_read_verify
,
383 .verify_write
= xfs_allocbt_write_verify
,
384 .verify_struct
= xfs_allocbt_verify
,
389 xfs_bnobt_keys_inorder(
390 struct xfs_btree_cur
*cur
,
391 union xfs_btree_key
*k1
,
392 union xfs_btree_key
*k2
)
394 return be32_to_cpu(k1
->alloc
.ar_startblock
) <
395 be32_to_cpu(k2
->alloc
.ar_startblock
);
399 xfs_bnobt_recs_inorder(
400 struct xfs_btree_cur
*cur
,
401 union xfs_btree_rec
*r1
,
402 union xfs_btree_rec
*r2
)
404 return be32_to_cpu(r1
->alloc
.ar_startblock
) +
405 be32_to_cpu(r1
->alloc
.ar_blockcount
) <=
406 be32_to_cpu(r2
->alloc
.ar_startblock
);
410 xfs_cntbt_keys_inorder(
411 struct xfs_btree_cur
*cur
,
412 union xfs_btree_key
*k1
,
413 union xfs_btree_key
*k2
)
415 return be32_to_cpu(k1
->alloc
.ar_blockcount
) <
416 be32_to_cpu(k2
->alloc
.ar_blockcount
) ||
417 (k1
->alloc
.ar_blockcount
== k2
->alloc
.ar_blockcount
&&
418 be32_to_cpu(k1
->alloc
.ar_startblock
) <
419 be32_to_cpu(k2
->alloc
.ar_startblock
));
423 xfs_cntbt_recs_inorder(
424 struct xfs_btree_cur
*cur
,
425 union xfs_btree_rec
*r1
,
426 union xfs_btree_rec
*r2
)
428 return be32_to_cpu(r1
->alloc
.ar_blockcount
) <
429 be32_to_cpu(r2
->alloc
.ar_blockcount
) ||
430 (r1
->alloc
.ar_blockcount
== r2
->alloc
.ar_blockcount
&&
431 be32_to_cpu(r1
->alloc
.ar_startblock
) <
432 be32_to_cpu(r2
->alloc
.ar_startblock
));
435 static const struct xfs_btree_ops xfs_bnobt_ops
= {
436 .rec_len
= sizeof(xfs_alloc_rec_t
),
437 .key_len
= sizeof(xfs_alloc_key_t
),
439 .dup_cursor
= xfs_allocbt_dup_cursor
,
440 .set_root
= xfs_allocbt_set_root
,
441 .alloc_block
= xfs_allocbt_alloc_block
,
442 .free_block
= xfs_allocbt_free_block
,
443 .update_lastrec
= xfs_allocbt_update_lastrec
,
444 .get_minrecs
= xfs_allocbt_get_minrecs
,
445 .get_maxrecs
= xfs_allocbt_get_maxrecs
,
446 .init_key_from_rec
= xfs_allocbt_init_key_from_rec
,
447 .init_high_key_from_rec
= xfs_bnobt_init_high_key_from_rec
,
448 .init_rec_from_cur
= xfs_allocbt_init_rec_from_cur
,
449 .init_ptr_from_cur
= xfs_allocbt_init_ptr_from_cur
,
450 .key_diff
= xfs_bnobt_key_diff
,
451 .buf_ops
= &xfs_allocbt_buf_ops
,
452 .diff_two_keys
= xfs_bnobt_diff_two_keys
,
453 .keys_inorder
= xfs_bnobt_keys_inorder
,
454 .recs_inorder
= xfs_bnobt_recs_inorder
,
457 static const struct xfs_btree_ops xfs_cntbt_ops
= {
458 .rec_len
= sizeof(xfs_alloc_rec_t
),
459 .key_len
= sizeof(xfs_alloc_key_t
),
461 .dup_cursor
= xfs_allocbt_dup_cursor
,
462 .set_root
= xfs_allocbt_set_root
,
463 .alloc_block
= xfs_allocbt_alloc_block
,
464 .free_block
= xfs_allocbt_free_block
,
465 .update_lastrec
= xfs_allocbt_update_lastrec
,
466 .get_minrecs
= xfs_allocbt_get_minrecs
,
467 .get_maxrecs
= xfs_allocbt_get_maxrecs
,
468 .init_key_from_rec
= xfs_allocbt_init_key_from_rec
,
469 .init_high_key_from_rec
= xfs_cntbt_init_high_key_from_rec
,
470 .init_rec_from_cur
= xfs_allocbt_init_rec_from_cur
,
471 .init_ptr_from_cur
= xfs_allocbt_init_ptr_from_cur
,
472 .key_diff
= xfs_cntbt_key_diff
,
473 .buf_ops
= &xfs_allocbt_buf_ops
,
474 .diff_two_keys
= xfs_cntbt_diff_two_keys
,
475 .keys_inorder
= xfs_cntbt_keys_inorder
,
476 .recs_inorder
= xfs_cntbt_recs_inorder
,
480 * Allocate a new allocation btree cursor.
482 struct xfs_btree_cur
* /* new alloc btree cursor */
483 xfs_allocbt_init_cursor(
484 struct xfs_mount
*mp
, /* file system mount point */
485 struct xfs_trans
*tp
, /* transaction pointer */
486 struct xfs_buf
*agbp
, /* buffer for agf structure */
487 xfs_agnumber_t agno
, /* allocation group number */
488 xfs_btnum_t btnum
) /* btree identifier */
490 struct xfs_agf
*agf
= XFS_BUF_TO_AGF(agbp
);
491 struct xfs_btree_cur
*cur
;
493 ASSERT(btnum
== XFS_BTNUM_BNO
|| btnum
== XFS_BTNUM_CNT
);
495 cur
= kmem_zone_zalloc(xfs_btree_cur_zone
, KM_NOFS
);
499 cur
->bc_btnum
= btnum
;
500 cur
->bc_blocklog
= mp
->m_sb
.sb_blocklog
;
502 if (btnum
== XFS_BTNUM_CNT
) {
503 cur
->bc_statoff
= XFS_STATS_CALC_INDEX(xs_abtc_2
);
504 cur
->bc_ops
= &xfs_cntbt_ops
;
505 cur
->bc_nlevels
= be32_to_cpu(agf
->agf_levels
[XFS_BTNUM_CNT
]);
506 cur
->bc_flags
= XFS_BTREE_LASTREC_UPDATE
;
508 cur
->bc_statoff
= XFS_STATS_CALC_INDEX(xs_abtb_2
);
509 cur
->bc_ops
= &xfs_bnobt_ops
;
510 cur
->bc_nlevels
= be32_to_cpu(agf
->agf_levels
[XFS_BTNUM_BNO
]);
513 cur
->bc_private
.a
.agbp
= agbp
;
514 cur
->bc_private
.a
.agno
= agno
;
516 if (xfs_sb_version_hascrc(&mp
->m_sb
))
517 cur
->bc_flags
|= XFS_BTREE_CRC_BLOCKS
;
523 * Calculate number of records in an alloc btree block.
527 struct xfs_mount
*mp
,
531 blocklen
-= XFS_ALLOC_BLOCK_LEN(mp
);
534 return blocklen
/ sizeof(xfs_alloc_rec_t
);
535 return blocklen
/ (sizeof(xfs_alloc_key_t
) + sizeof(xfs_alloc_ptr_t
));
538 /* Calculate the freespace btree size for some records. */
540 xfs_allocbt_calc_size(
541 struct xfs_mount
*mp
,
542 unsigned long long len
)
544 return xfs_btree_calc_size(mp
->m_alloc_mnr
, len
);