/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 *
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_bmap.h"
#include "xfs_refcount_btree.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_cksum.h"
#include "xfs_trans.h"
#include "xfs_bit.h"
#include "xfs_rmap.h"
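
/*
 * Reference count btree (refcountbt) callbacks.  Each record in this
 * per-AG btree describes a run of shared blocks: a starting AG block,
 * a block count, and the number of mappings that reference those blocks.
 * The root of the tree is recorded in the AGF, and the helpers below are
 * handed to the generic btree code through xfs_refcountbt_ops.
 */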

static struct xfs_btree_cur *
xfs_refcountbt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	return xfs_refcountbt_init_cursor(cur->bc_mp, cur->bc_tp,
			cur->bc_private.a.agbp, cur->bc_private.a.agno,
			cur->bc_private.a.dfops);
}

STATIC void
xfs_refcountbt_set_root(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	int			inc)
{
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	xfs_agnumber_t		seqno = be32_to_cpu(agf->agf_seqno);
	struct xfs_perag	*pag = xfs_perag_get(cur->bc_mp, seqno);

	ASSERT(ptr->s != 0);

	agf->agf_refcount_root = ptr->s;
	be32_add_cpu(&agf->agf_refcount_level, inc);
	pag->pagf_refcount_level += inc;
	xfs_perag_put(pag);

	xfs_alloc_log_agf(cur->bc_tp, agbp,
			XFS_AGF_REFCOUNT_ROOT | XFS_AGF_REFCOUNT_LEVEL);
}

STATIC int
xfs_refcountbt_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start,
	union xfs_btree_ptr	*new,
	int			*stat)
{
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	struct xfs_alloc_arg	args;		/* block allocation args */
	int			error;		/* error return value */

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);

	memset(&args, 0, sizeof(args));
	args.tp = cur->bc_tp;
	args.mp = cur->bc_mp;
	args.type = XFS_ALLOCTYPE_NEAR_BNO;
	args.fsbno = XFS_AGB_TO_FSB(cur->bc_mp, cur->bc_private.a.agno,
			xfs_refc_block(args.mp));
	args.firstblock = args.fsbno;
	xfs_rmap_ag_owner(&args.oinfo, XFS_RMAP_OWN_REFC);
	args.minlen = args.maxlen = args.prod = 1;
	args.resv = XFS_AG_RESV_METADATA;

	error = xfs_alloc_vextent(&args);
	if (error)
		goto out_error;
	trace_xfs_refcountbt_alloc_block(cur->bc_mp, cur->bc_private.a.agno,
			args.agbno, 1);
	if (args.fsbno == NULLFSBLOCK) {
		XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
		*stat = 0;
		return 0;
	}
	ASSERT(args.agno == cur->bc_private.a.agno);
	ASSERT(args.len == 1);

	new->s = cpu_to_be32(args.agbno);
	be32_add_cpu(&agf->agf_refcount_blocks, 1);
	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_REFCOUNT_BLOCKS);

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);

	*stat = 1;
	return 0;

out_error:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
	return error;
}
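
/*
 * Note on the allocation above: new refcount btree blocks are charged to
 * the per-AG metadata block reservation (XFS_AG_RESV_METADATA) and the
 * allocator is asked for blocks near xfs_refc_block(), the btree's home
 * block in the AG, so the tree stays compact and the allocation should
 * not fail for lack of free space once the reservation has been made.
 */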

STATIC int
xfs_refcountbt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = cur->bc_mp;
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	xfs_fsblock_t		fsbno = XFS_DADDR_TO_FSB(mp, XFS_BUF_ADDR(bp));
	struct xfs_owner_info	oinfo;
	int			error;

	trace_xfs_refcountbt_free_block(cur->bc_mp, cur->bc_private.a.agno,
			XFS_FSB_TO_AGBNO(cur->bc_mp, fsbno), 1);
	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_REFC);
	be32_add_cpu(&agf->agf_refcount_blocks, -1);
	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_REFCOUNT_BLOCKS);
	error = xfs_free_extent(cur->bc_tp, fsbno, 1, &oinfo,
			XFS_AG_RESV_METADATA);

	return error;
}

STATIC int
xfs_refcountbt_get_minrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_refc_mnr[level != 0];
}

STATIC int
xfs_refcountbt_get_maxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_refc_mxr[level != 0];
}
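
/*
 * m_refc_mnr[] and m_refc_mxr[] each have two entries: index 0 holds the
 * record limits for leaf blocks and index 1 the limits for interior node
 * blocks, so "level != 0" picks the node limits for any non-leaf level.
 */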

STATIC void
xfs_refcountbt_init_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	key->refc.rc_startblock = rec->refc.rc_startblock;
}

STATIC void
xfs_refcountbt_init_high_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	__u32			x;

	x = be32_to_cpu(rec->refc.rc_startblock);
	x += be32_to_cpu(rec->refc.rc_blockcount) - 1;
	key->refc.rc_startblock = cpu_to_be32(x);
}
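
/*
 * The "high key" computed above is the last AG block covered by the
 * record, i.e. rc_startblock + rc_blockcount - 1; the generic btree
 * range query code compares it against the query interval when walking
 * records.
 */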

STATIC void
xfs_refcountbt_init_rec_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	rec->refc.rc_startblock = cpu_to_be32(cur->bc_rec.rc.rc_startblock);
	rec->refc.rc_blockcount = cpu_to_be32(cur->bc_rec.rc.rc_blockcount);
	rec->refc.rc_refcount = cpu_to_be32(cur->bc_rec.rc.rc_refcount);
}

STATIC void
xfs_refcountbt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);

	ASSERT(cur->bc_private.a.agno == be32_to_cpu(agf->agf_seqno));
	ASSERT(agf->agf_refcount_root != 0);

	ptr->s = agf->agf_refcount_root;
}

STATIC int64_t
xfs_refcountbt_key_diff(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*key)
{
	struct xfs_refcount_irec	*rec = &cur->bc_rec.rc;
	struct xfs_refcount_key		*kp = &key->refc;

	return (int64_t)be32_to_cpu(kp->rc_startblock) - rec->rc_startblock;
}

STATIC int64_t
xfs_refcountbt_diff_two_keys(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	return (int64_t)be32_to_cpu(k1->refc.rc_startblock) -
			  be32_to_cpu(k2->refc.rc_startblock);
}
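
/*
 * Both comparison helpers follow the usual btree convention: the result
 * is negative, zero, or positive depending on whether the search key (or
 * k1) sorts before, equal to, or after the cursor record (or k2), with
 * records ordered by starting AG block.
 */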

STATIC bool
xfs_refcountbt_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_perag	*pag = bp->b_pag;
	unsigned int		level;

	if (block->bb_magic != cpu_to_be32(XFS_REFC_CRC_MAGIC))
		return false;

	if (!xfs_sb_version_hasreflink(&mp->m_sb))
		return false;
	if (!xfs_btree_sblock_v5hdr_verify(bp))
		return false;

	level = be16_to_cpu(block->bb_level);
	if (pag && pag->pagf_init) {
		if (level >= pag->pagf_refcount_level)
			return false;
	} else if (level >= mp->m_refc_maxlevels)
		return false;

	return xfs_btree_sblock_verify(bp, mp->m_refc_mxr[level != 0]);
}
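
/*
 * In short, a refcountbt block passes verification only if it carries the
 * refcountbt magic, the filesystem has the reflink feature enabled, the
 * v5 short-form block header is sane, the level is below the recorded (or
 * theoretical) maximum, and the record count and sibling pointers are
 * plausible for the block.
 */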

STATIC void
xfs_refcountbt_read_verify(
	struct xfs_buf	*bp)
{
	if (!xfs_btree_sblock_verify_crc(bp))
		xfs_buf_ioerror(bp, -EFSBADCRC);
	else if (!xfs_refcountbt_verify(bp))
		xfs_buf_ioerror(bp, -EFSCORRUPTED);

	if (bp->b_error) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_verifier_error(bp);
	}
}

STATIC void
xfs_refcountbt_write_verify(
	struct xfs_buf	*bp)
{
	if (!xfs_refcountbt_verify(bp)) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_buf_ioerror(bp, -EFSCORRUPTED);
		xfs_verifier_error(bp);
		return;
	}
	xfs_btree_sblock_calc_crc(bp);
}

const struct xfs_buf_ops xfs_refcountbt_buf_ops = {
	.name			= "xfs_refcountbt",
	.verify_read		= xfs_refcountbt_read_verify,
	.verify_write		= xfs_refcountbt_write_verify,
};
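
/*
 * These buffer ops are attached to every refcountbt buffer, so blocks are
 * CRC-checked and structurally verified when read from disk, and verified
 * and re-checksummed before being written back.
 */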

STATIC int
xfs_refcountbt_keys_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	return be32_to_cpu(k1->refc.rc_startblock) <
	       be32_to_cpu(k2->refc.rc_startblock);
}

STATIC int
xfs_refcountbt_recs_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*r1,
	union xfs_btree_rec	*r2)
{
	return be32_to_cpu(r1->refc.rc_startblock) +
		be32_to_cpu(r1->refc.rc_blockcount) <=
		be32_to_cpu(r2->refc.rc_startblock);
}

static const struct xfs_btree_ops xfs_refcountbt_ops = {
	.rec_len		= sizeof(struct xfs_refcount_rec),
	.key_len		= sizeof(struct xfs_refcount_key),

	.dup_cursor		= xfs_refcountbt_dup_cursor,
	.set_root		= xfs_refcountbt_set_root,
	.alloc_block		= xfs_refcountbt_alloc_block,
	.free_block		= xfs_refcountbt_free_block,
	.get_minrecs		= xfs_refcountbt_get_minrecs,
	.get_maxrecs		= xfs_refcountbt_get_maxrecs,
	.init_key_from_rec	= xfs_refcountbt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_refcountbt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_refcountbt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_refcountbt_init_ptr_from_cur,
	.key_diff		= xfs_refcountbt_key_diff,
	.buf_ops		= &xfs_refcountbt_buf_ops,
	.diff_two_keys		= xfs_refcountbt_diff_two_keys,
	.keys_inorder		= xfs_refcountbt_keys_inorder,
	.recs_inorder		= xfs_refcountbt_recs_inorder,
};

/*
 * Allocate a new refcount btree cursor.
 */
struct xfs_btree_cur *
xfs_refcountbt_init_cursor(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	xfs_agnumber_t		agno,
	struct xfs_defer_ops	*dfops)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	struct xfs_btree_cur	*cur;

	ASSERT(agno != NULLAGNUMBER);
	ASSERT(agno < mp->m_sb.sb_agcount);
	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS);

	cur->bc_tp = tp;
	cur->bc_mp = mp;
	cur->bc_btnum = XFS_BTNUM_REFC;
	cur->bc_blocklog = mp->m_sb.sb_blocklog;
	cur->bc_ops = &xfs_refcountbt_ops;
	cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_refcbt_2);

	cur->bc_nlevels = be32_to_cpu(agf->agf_refcount_level);

	cur->bc_private.a.agbp = agbp;
	cur->bc_private.a.agno = agno;
	cur->bc_private.a.dfops = dfops;
	cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;

	cur->bc_private.a.priv.refc.nr_ops = 0;
	cur->bc_private.a.priv.refc.shape_changes = 0;

	return cur;
}
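
/*
 * Usage sketch (not part of the original file): a caller that wants to
 * work on the refcount btree of one AG would typically read and lock that
 * AG's AGF, build a cursor, do its lookups or updates, and then tear the
 * cursor down again, roughly like this:
 *
 *	error = xfs_alloc_read_agf(mp, tp, agno, 0, &agbp);
 *	if (error)
 *		return error;
 *	cur = xfs_refcountbt_init_cursor(mp, tp, agbp, agno, dfops);
 *	... lookups/updates via the generic xfs_btree / xfs_refcount code ...
 *	xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
 */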

/*
 * Calculate the number of records in a refcount btree block.
 */
int
xfs_refcountbt_maxrecs(
	struct xfs_mount	*mp,
	int			blocklen,
	bool			leaf)
{
	blocklen -= XFS_REFCOUNT_BLOCK_LEN;

	if (leaf)
		return blocklen / sizeof(struct xfs_refcount_rec);
	return blocklen / (sizeof(struct xfs_refcount_key) +
			   sizeof(xfs_refcount_ptr_t));
}
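
/*
 * Worked example (assuming the usual on-disk sizes: a 56-byte CRC-enabled
 * short-form block header, 12-byte records, 4-byte keys and pointers):
 * with 4096-byte blocks a leaf holds (4096 - 56) / 12 = 336 records and a
 * node holds (4096 - 56) / (4 + 4) = 505 key/pointer pairs.
 */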

/* Compute the maximum height of a refcount btree. */
void
xfs_refcountbt_compute_maxlevels(
	struct xfs_mount	*mp)
{
	mp->m_refc_maxlevels = xfs_btree_compute_maxlevels(mp,
			mp->m_refc_mnr, mp->m_sb.sb_agblocks);
}

/* Calculate the refcount btree size for some records. */
xfs_extlen_t
xfs_refcountbt_calc_size(
	struct xfs_mount	*mp,
	unsigned long long	len)
{
	return xfs_btree_calc_size(mp, mp->m_refc_mnr, len);
}

/*
 * Calculate the maximum refcount btree size.
 */
xfs_extlen_t
xfs_refcountbt_max_size(
	struct xfs_mount	*mp,
	xfs_agblock_t		agblocks)
{
	/* Bail out if we're uninitialized, which can happen in mkfs. */
	if (mp->m_refc_mxr[0] == 0)
		return 0;

	return xfs_refcountbt_calc_size(mp, agblocks);
}

/*
 * Figure out how many blocks to reserve and how many are used by this btree.
 */
int
xfs_refcountbt_calc_reserves(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_extlen_t		*ask,
	xfs_extlen_t		*used)
{
	struct xfs_buf		*agbp;
	struct xfs_agf		*agf;
	xfs_agblock_t		agblocks;
	xfs_extlen_t		tree_len;
	int			error;

	if (!xfs_sb_version_hasreflink(&mp->m_sb))
		return 0;

	error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
	if (error)
		return error;

	agf = XFS_BUF_TO_AGF(agbp);
	agblocks = be32_to_cpu(agf->agf_length);
	tree_len = be32_to_cpu(agf->agf_refcount_blocks);
	xfs_buf_relse(agbp);

	*ask += xfs_refcountbt_max_size(mp, agblocks);