/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 *
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_bmap.h"
#include "xfs_refcount_btree.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_cksum.h"
#include "xfs_trans.h"
#include "xfs_bit.h"
#include "xfs_rmap.h"
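
/*
 * Duplicate this btree cursor, pointing the new cursor at the same
 * transaction, AGF buffer, and deferred ops list as the original.
 */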
static struct xfs_btree_cur *
xfs_refcountbt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	return xfs_refcountbt_init_cursor(cur->bc_mp, cur->bc_tp,
			cur->bc_private.a.agbp, cur->bc_private.a.agno,
			cur->bc_private.a.dfops);
}
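
/*
 * Record a new refcount btree root block in the AGF and adjust the
 * recorded tree height by 'inc'.
 */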
STATIC void
xfs_refcountbt_set_root(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	int			inc)	/* level change for new root */
{
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	xfs_agnumber_t		seqno = be32_to_cpu(agf->agf_seqno);
	struct xfs_perag	*pag = xfs_perag_get(cur->bc_mp, seqno);

	ASSERT(ptr->s != 0);

	agf->agf_refcount_root = ptr->s;
	be32_add_cpu(&agf->agf_refcount_level, inc);
	pag->pagf_refcount_level += inc;
	/* drop the perag reference taken above */
	xfs_perag_put(pag);

	xfs_alloc_log_agf(cur->bc_tp, agbp,
			XFS_AGF_REFCOUNT_ROOT | XFS_AGF_REFCOUNT_LEVEL);
}
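
/*
 * Allocate a block for the refcount btree from the per-AG metadata
 * reservation and account for it in the AGF.
 */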
STATIC int
xfs_refcountbt_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start,
	union xfs_btree_ptr	*new,
	int			*stat)
{
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	struct xfs_alloc_arg	args;		/* block allocation args */
	int			error;		/* error return value */

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);

	memset(&args, 0, sizeof(args));
	args.tp = cur->bc_tp;
	args.mp = cur->bc_mp;
	args.type = XFS_ALLOCTYPE_NEAR_BNO;
	args.fsbno = XFS_AGB_TO_FSB(cur->bc_mp, cur->bc_private.a.agno,
			xfs_refc_block(args.mp));
	args.firstblock = args.fsbno;
	xfs_rmap_ag_owner(&args.oinfo, XFS_RMAP_OWN_REFC);
	args.minlen = args.maxlen = args.prod = 1;
	args.resv = XFS_AG_RESV_METADATA;

	error = xfs_alloc_vextent(&args);
	if (error)
		goto out_error;
	trace_xfs_refcountbt_alloc_block(cur->bc_mp, cur->bc_private.a.agno,
			args.agbno, 1);
	if (args.fsbno == NULLFSBLOCK) {
		XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
		*stat = 0;
		return 0;
	}
	ASSERT(args.agno == cur->bc_private.a.agno);
	ASSERT(args.len == 1);

	new->s = cpu_to_be32(args.agbno);
	be32_add_cpu(&agf->agf_refcount_blocks, 1);
	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_REFCOUNT_BLOCKS);

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 1;
	return 0;

out_error:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
	return error;
}
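
/*
 * Free a refcount btree block, returning the space to the AG's
 * metadata reservation and updating the AGF block count.
 */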
STATIC int
xfs_refcountbt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = cur->bc_mp;
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	xfs_fsblock_t		fsbno = XFS_DADDR_TO_FSB(mp, XFS_BUF_ADDR(bp));
	struct xfs_owner_info	oinfo;
	int			error;

	trace_xfs_refcountbt_free_block(cur->bc_mp, cur->bc_private.a.agno,
			XFS_FSB_TO_AGBNO(cur->bc_mp, fsbno), 1);
	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_REFC);
	be32_add_cpu(&agf->agf_refcount_blocks, -1);
	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_REFCOUNT_BLOCKS);
	error = xfs_free_extent(cur->bc_tp, fsbno, 1, &oinfo,
			XFS_AG_RESV_METADATA);

	return error;
}
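
/* Minimum number of records in a refcount btree block (leaf vs. node). */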
STATIC int
xfs_refcountbt_get_minrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_refc_mnr[level != 0];
}
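
/* Maximum number of records in a refcount btree block (leaf vs. node). */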
STATIC int
xfs_refcountbt_get_maxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_refc_mxr[level != 0];
}
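
/* Build a low key from the start block of a refcount record. */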
STATIC void
xfs_refcountbt_init_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	key->refc.rc_startblock = rec->refc.rc_startblock;
}
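
/* Build a high key from the last block covered by a refcount record. */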
STATIC void
xfs_refcountbt_init_high_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	__u32			x;

	x = be32_to_cpu(rec->refc.rc_startblock);
	x += be32_to_cpu(rec->refc.rc_blockcount) - 1;
	key->refc.rc_startblock = cpu_to_be32(x);
}
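
/* Convert the in-core record in the cursor to its on-disk format. */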
STATIC void
xfs_refcountbt_init_rec_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	rec->refc.rc_startblock = cpu_to_be32(cur->bc_rec.rc.rc_startblock);
	rec->refc.rc_blockcount = cpu_to_be32(cur->bc_rec.rc.rc_blockcount);
	rec->refc.rc_refcount = cpu_to_be32(cur->bc_rec.rc.rc_refcount);
}
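
/* Read the refcount btree root pointer out of the AGF. */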
STATIC void
xfs_refcountbt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);

	ASSERT(cur->bc_private.a.agno == be32_to_cpu(agf->agf_seqno));
	ASSERT(agf->agf_refcount_root != 0);

	ptr->s = agf->agf_refcount_root;
}
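
/* Difference between the given key and the record in the cursor. */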
STATIC __int64_t
xfs_refcountbt_key_diff(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*key)
{
	struct xfs_refcount_irec	*rec = &cur->bc_rec.rc;
	struct xfs_refcount_key		*kp = &key->refc;

	return (__int64_t)be32_to_cpu(kp->rc_startblock) - rec->rc_startblock;
}
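
/* Compare the start blocks of two keys. */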
STATIC __int64_t
xfs_refcountbt_diff_two_keys(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	return (__int64_t)be32_to_cpu(k1->refc.rc_startblock) -
			  be32_to_cpu(k2->refc.rc_startblock);
}
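
/*
 * Check the magic number, v5 header, and tree level of a refcount
 * btree block before we trust its contents.
 */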
STATIC bool
xfs_refcountbt_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_perag	*pag = bp->b_pag;
	unsigned int		level;

	if (block->bb_magic != cpu_to_be32(XFS_REFC_CRC_MAGIC))
		return false;

	if (!xfs_sb_version_hasreflink(&mp->m_sb))
		return false;
	if (!xfs_btree_sblock_v5hdr_verify(bp))
		return false;

	level = be16_to_cpu(block->bb_level);
	if (pag && pag->pagf_init) {
		if (level >= pag->pagf_refcount_level)
			return false;
	} else if (level >= mp->m_refc_maxlevels)
		return false;

	return xfs_btree_sblock_verify(bp, mp->m_refc_mxr[level != 0]);
}
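
/* Verify a refcount btree block after it has been read from disk. */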
STATIC void
xfs_refcountbt_read_verify(
	struct xfs_buf	*bp)
{
	if (!xfs_btree_sblock_verify_crc(bp))
		xfs_buf_ioerror(bp, -EFSBADCRC);
	else if (!xfs_refcountbt_verify(bp))
		xfs_buf_ioerror(bp, -EFSCORRUPTED);

	if (bp->b_error) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_verifier_error(bp);
	}
}
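
/* Verify a refcount btree block and compute its CRC before it is written. */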
STATIC void
xfs_refcountbt_write_verify(
	struct xfs_buf	*bp)
{
	if (!xfs_refcountbt_verify(bp)) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_buf_ioerror(bp, -EFSCORRUPTED);
		xfs_verifier_error(bp);
		return;
	}
	xfs_btree_sblock_calc_crc(bp);
}
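
/* Buffer verifier hooks for refcount btree blocks. */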
const struct xfs_buf_ops xfs_refcountbt_buf_ops = {
	.name			= "xfs_refcountbt",
	.verify_read		= xfs_refcountbt_read_verify,
	.verify_write		= xfs_refcountbt_write_verify,
};

#if defined(DEBUG) || defined(XFS_WARN)
/* Check that two keys are in ascending start-block order. */
STATIC int
xfs_refcountbt_keys_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	return be32_to_cpu(k1->refc.rc_startblock) <
	       be32_to_cpu(k2->refc.rc_startblock);
}

/* Check that two records are in order and do not overlap. */
STATIC int
xfs_refcountbt_recs_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*r1,
	union xfs_btree_rec	*r2)
{
	return  be32_to_cpu(r1->refc.rc_startblock) +
		be32_to_cpu(r1->refc.rc_blockcount) <=
		be32_to_cpu(r2->refc.rc_startblock);
}
#endif
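
/* Cursor operations for the refcount btree. */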
static const struct xfs_btree_ops xfs_refcountbt_ops = {
	.rec_len		= sizeof(struct xfs_refcount_rec),
	.key_len		= sizeof(struct xfs_refcount_key),

	.dup_cursor		= xfs_refcountbt_dup_cursor,
	.set_root		= xfs_refcountbt_set_root,
	.alloc_block		= xfs_refcountbt_alloc_block,
	.free_block		= xfs_refcountbt_free_block,
	.get_minrecs		= xfs_refcountbt_get_minrecs,
	.get_maxrecs		= xfs_refcountbt_get_maxrecs,
	.init_key_from_rec	= xfs_refcountbt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_refcountbt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_refcountbt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_refcountbt_init_ptr_from_cur,
	.key_diff		= xfs_refcountbt_key_diff,
	.buf_ops		= &xfs_refcountbt_buf_ops,
	.diff_two_keys		= xfs_refcountbt_diff_two_keys,
#if defined(DEBUG) || defined(XFS_WARN)
	.keys_inorder		= xfs_refcountbt_keys_inorder,
	.recs_inorder		= xfs_refcountbt_recs_inorder,
#endif
};

/*
 * Allocate a new refcount btree cursor.
 */
struct xfs_btree_cur *
xfs_refcountbt_init_cursor(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	xfs_agnumber_t		agno,
	struct xfs_defer_ops	*dfops)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	struct xfs_btree_cur	*cur;

	ASSERT(agno != NULLAGNUMBER);
	ASSERT(agno < mp->m_sb.sb_agcount);
	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS);

	cur->bc_tp = tp;
	cur->bc_mp = mp;
	cur->bc_btnum = XFS_BTNUM_REFC;
	cur->bc_blocklog = mp->m_sb.sb_blocklog;
	cur->bc_ops = &xfs_refcountbt_ops;

	cur->bc_nlevels = be32_to_cpu(agf->agf_refcount_level);

	cur->bc_private.a.agbp = agbp;
	cur->bc_private.a.agno = agno;
	cur->bc_private.a.dfops = dfops;
	cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;

	cur->bc_private.a.priv.refc.nr_ops = 0;
	cur->bc_private.a.priv.refc.shape_changes = 0;

	return cur;
}

/*
 * Calculate the number of records in a refcount btree block.
 */
int
xfs_refcountbt_maxrecs(
	struct xfs_mount	*mp,
	int			blocklen,
	bool			leaf)
{
	blocklen -= XFS_REFCOUNT_BLOCK_LEN;

	if (leaf)
		return blocklen / sizeof(struct xfs_refcount_rec);
	return blocklen / (sizeof(struct xfs_refcount_key) +
			   sizeof(xfs_refcount_ptr_t));
}

/* Compute the maximum height of a refcount btree. */
void
xfs_refcountbt_compute_maxlevels(
	struct xfs_mount	*mp)
{
	mp->m_refc_maxlevels = xfs_btree_compute_maxlevels(mp,
			mp->m_refc_mnr, mp->m_sb.sb_agblocks);
}

/* Calculate the refcount btree size for some records. */
xfs_extlen_t
xfs_refcountbt_calc_size(
	struct xfs_mount	*mp,
	unsigned long long	len)
{
	return xfs_btree_calc_size(mp, mp->m_refc_mnr, len);
}

/*
 * Calculate the maximum refcount btree size.
 */
xfs_extlen_t
xfs_refcountbt_max_size(
	struct xfs_mount	*mp,
	xfs_agblock_t		agblocks)
{
	/* Bail out if we're uninitialized, which can happen in mkfs. */
	if (mp->m_refc_mxr[0] == 0)
		return 0;

	return xfs_refcountbt_calc_size(mp, agblocks);
}

/*
 * Figure out how many blocks to reserve and how many are used by this btree.
 */
int
xfs_refcountbt_calc_reserves(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_extlen_t		*ask,
	xfs_extlen_t		*used)
{
	struct xfs_buf		*agbp;
	struct xfs_agf		*agf;
	xfs_agblock_t		agblocks;
	xfs_extlen_t		tree_len;
	int			error;

	if (!xfs_sb_version_hasreflink(&mp->m_sb))
		return 0;

	error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
	if (error)
		return error;

	agf = XFS_BUF_TO_AGF(agbp);
	agblocks = be32_to_cpu(agf->agf_length);
	tree_len = be32_to_cpu(agf->agf_refcount_blocks);
	xfs_buf_relse(agbp);

	*ask += xfs_refcountbt_max_size(mp, agblocks);