fs/xfs/xfs_bmap_btree.c

/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_btree_trace.h"
#include "xfs_itable.h"
#include "xfs_bmap.h"
#include "xfs_error.h"
#include "xfs_quota.h"

/*
 * Determine the extent state.
 */
/* ARGSUSED */
STATIC xfs_exntst_t
xfs_extent_state(
        xfs_filblks_t   blks,
        int             extent_flag)
{
        if (extent_flag) {
                ASSERT(blks != 0);      /* saved for DMIG */
                return XFS_EXT_UNWRITTEN;
        }
        return XFS_EXT_NORM;
}

/*
 * Convert on-disk form of btree root to in-memory form.
 */
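/*
 * Note: the "bmdr" form is the btree root as packed into the inode fork
 * (an xfs_bmdr_block_t header with no magic number or sibling pointers,
 * followed by the keys and pointers), while the "bmbt" form is the full
 * struct xfs_btree_block layout used for the in-memory root buffer.
 */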
void
xfs_bmdr_to_bmbt(
        struct xfs_mount        *mp,
        xfs_bmdr_block_t        *dblock,
        int                     dblocklen,
        struct xfs_btree_block  *rblock,
        int                     rblocklen)
{
        int                     dmxr;
        xfs_bmbt_key_t          *fkp;
        __be64                  *fpp;
        xfs_bmbt_key_t          *tkp;
        __be64                  *tpp;

        rblock->bb_magic = cpu_to_be32(XFS_BMAP_MAGIC);
        rblock->bb_level = dblock->bb_level;
        ASSERT(be16_to_cpu(rblock->bb_level) > 0);
        rblock->bb_numrecs = dblock->bb_numrecs;
        rblock->bb_u.l.bb_leftsib = cpu_to_be64(NULLDFSBNO);
        rblock->bb_u.l.bb_rightsib = cpu_to_be64(NULLDFSBNO);
        dmxr = xfs_bmdr_maxrecs(mp, dblocklen, 0);
        fkp = XFS_BMDR_KEY_ADDR(dblock, 1);
        tkp = XFS_BMBT_KEY_ADDR(mp, rblock, 1);
        fpp = XFS_BMDR_PTR_ADDR(dblock, 1, dmxr);
        tpp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, rblocklen);
        dmxr = be16_to_cpu(dblock->bb_numrecs);
        memcpy(tkp, fkp, sizeof(*fkp) * dmxr);
        memcpy(tpp, fpp, sizeof(*fpp) * dmxr);
}

/*
 * Convert a compressed bmap extent record to an uncompressed form.
 * This code must be in sync with the routines xfs_bmbt_get_startoff,
 * xfs_bmbt_get_startblock, xfs_bmbt_get_blockcount and xfs_bmbt_get_state.
 */
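/*
 * Packed layout of the two 64-bit record words, as implied by the shifts
 * and masks below:
 *
 *      l0: bit  63     extent flag (1 = unwritten)
 *          bits 62-9   startoff (54 bits)
 *          bits 8-0    startblock, high 9 bits
 *      l1: bits 63-21  startblock, low 43 bits
 *          bits 20-0   blockcount (21 bits)
 */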
STATIC void
__xfs_bmbt_get_all(
                __uint64_t l0,
                __uint64_t l1,
                xfs_bmbt_irec_t *s)
{
        int     ext_flag;
        xfs_exntst_t st;

        ext_flag = (int)(l0 >> (64 - BMBT_EXNTFLAG_BITLEN));
        s->br_startoff = ((xfs_fileoff_t)l0 &
                           xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
#if XFS_BIG_BLKNOS
        s->br_startblock = (((xfs_fsblock_t)l0 & xfs_mask64lo(9)) << 43) |
                           (((xfs_fsblock_t)l1) >> 21);
#else
#ifdef DEBUG
        {
                xfs_dfsbno_t    b;

                b = (((xfs_dfsbno_t)l0 & xfs_mask64lo(9)) << 43) |
                    (((xfs_dfsbno_t)l1) >> 21);
                ASSERT((b >> 32) == 0 || isnulldstartblock(b));
                s->br_startblock = (xfs_fsblock_t)b;
        }
#else   /* !DEBUG */
        s->br_startblock = (xfs_fsblock_t)(((xfs_dfsbno_t)l1) >> 21);
#endif  /* DEBUG */
#endif  /* XFS_BIG_BLKNOS */
        s->br_blockcount = (xfs_filblks_t)(l1 & xfs_mask64lo(21));
        /* This is xfs_extent_state() in-line */
        if (ext_flag) {
                ASSERT(s->br_blockcount != 0);  /* saved for DMIG */
                st = XFS_EXT_UNWRITTEN;
        } else
                st = XFS_EXT_NORM;
        s->br_state = st;
}

void
xfs_bmbt_get_all(
        xfs_bmbt_rec_host_t *r,
        xfs_bmbt_irec_t *s)
{
        __xfs_bmbt_get_all(r->l0, r->l1, s);
}

/*
 * Extract the blockcount field from an in memory bmap extent record.
 */
xfs_filblks_t
xfs_bmbt_get_blockcount(
        xfs_bmbt_rec_host_t     *r)
{
        return (xfs_filblks_t)(r->l1 & xfs_mask64lo(21));
}

/*
 * Extract the startblock field from an in memory bmap extent record.
 */
xfs_fsblock_t
xfs_bmbt_get_startblock(
        xfs_bmbt_rec_host_t     *r)
{
#if XFS_BIG_BLKNOS
        return (((xfs_fsblock_t)r->l0 & xfs_mask64lo(9)) << 43) |
               (((xfs_fsblock_t)r->l1) >> 21);
#else
#ifdef DEBUG
        xfs_dfsbno_t    b;

        b = (((xfs_dfsbno_t)r->l0 & xfs_mask64lo(9)) << 43) |
            (((xfs_dfsbno_t)r->l1) >> 21);
        ASSERT((b >> 32) == 0 || isnulldstartblock(b));
        return (xfs_fsblock_t)b;
#else   /* !DEBUG */
        return (xfs_fsblock_t)(((xfs_dfsbno_t)r->l1) >> 21);
#endif  /* DEBUG */
#endif  /* XFS_BIG_BLKNOS */
}

/*
 * Extract the startoff field from an in memory bmap extent record.
 */
xfs_fileoff_t
xfs_bmbt_get_startoff(
        xfs_bmbt_rec_host_t     *r)
{
        return ((xfs_fileoff_t)r->l0 &
                 xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
}

xfs_exntst_t
xfs_bmbt_get_state(
        xfs_bmbt_rec_host_t     *r)
{
        int     ext_flag;

        ext_flag = (int)((r->l0) >> (64 - BMBT_EXNTFLAG_BITLEN));
        return xfs_extent_state(xfs_bmbt_get_blockcount(r),
                                ext_flag);
}

/*
 * Extract the blockcount field from an on disk bmap extent record.
 */
xfs_filblks_t
xfs_bmbt_disk_get_blockcount(
        xfs_bmbt_rec_t  *r)
{
        return (xfs_filblks_t)(be64_to_cpu(r->l1) & xfs_mask64lo(21));
}

/*
 * Extract the startoff field from a disk format bmap extent record.
 */
xfs_fileoff_t
xfs_bmbt_disk_get_startoff(
        xfs_bmbt_rec_t  *r)
{
        return ((xfs_fileoff_t)be64_to_cpu(r->l0) &
                 xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
}

/*
 * Set all the fields in a bmap extent record from the arguments.
 */
void
xfs_bmbt_set_allf(
        xfs_bmbt_rec_host_t     *r,
        xfs_fileoff_t           startoff,
        xfs_fsblock_t           startblock,
        xfs_filblks_t           blockcount,
        xfs_exntst_t            state)
{
        int             extent_flag = (state == XFS_EXT_NORM) ? 0 : 1;

        ASSERT(state == XFS_EXT_NORM || state == XFS_EXT_UNWRITTEN);
        ASSERT((startoff & xfs_mask64hi(64-BMBT_STARTOFF_BITLEN)) == 0);
        ASSERT((blockcount & xfs_mask64hi(64-BMBT_BLOCKCOUNT_BITLEN)) == 0);

#if XFS_BIG_BLKNOS
        ASSERT((startblock & xfs_mask64hi(64-BMBT_STARTBLOCK_BITLEN)) == 0);

        r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) |
                ((xfs_bmbt_rec_base_t)startoff << 9) |
                ((xfs_bmbt_rec_base_t)startblock >> 43);
        r->l1 = ((xfs_bmbt_rec_base_t)startblock << 21) |
                ((xfs_bmbt_rec_base_t)blockcount &
                (xfs_bmbt_rec_base_t)xfs_mask64lo(21));
#else   /* !XFS_BIG_BLKNOS */
        if (isnullstartblock(startblock)) {
                r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) |
                        ((xfs_bmbt_rec_base_t)startoff << 9) |
                        (xfs_bmbt_rec_base_t)xfs_mask64lo(9);
                r->l1 = xfs_mask64hi(11) |
                        ((xfs_bmbt_rec_base_t)startblock << 21) |
                        ((xfs_bmbt_rec_base_t)blockcount &
                        (xfs_bmbt_rec_base_t)xfs_mask64lo(21));
        } else {
                r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) |
                        ((xfs_bmbt_rec_base_t)startoff << 9);
                r->l1 = ((xfs_bmbt_rec_base_t)startblock << 21) |
                        ((xfs_bmbt_rec_base_t)blockcount &
                        (xfs_bmbt_rec_base_t)xfs_mask64lo(21));
        }
#endif  /* XFS_BIG_BLKNOS */
}

/*
 * Set all the fields in a bmap extent record from the uncompressed form.
 */
void
xfs_bmbt_set_all(
        xfs_bmbt_rec_host_t *r,
        xfs_bmbt_irec_t *s)
{
        xfs_bmbt_set_allf(r, s->br_startoff, s->br_startblock,
                             s->br_blockcount, s->br_state);
}

/*
 * Set all the fields in a disk format bmap extent record from the arguments.
 */
void
xfs_bmbt_disk_set_allf(
        xfs_bmbt_rec_t          *r,
        xfs_fileoff_t           startoff,
        xfs_fsblock_t           startblock,
        xfs_filblks_t           blockcount,
        xfs_exntst_t            state)
{
        int                     extent_flag = (state == XFS_EXT_NORM) ? 0 : 1;

        ASSERT(state == XFS_EXT_NORM || state == XFS_EXT_UNWRITTEN);
        ASSERT((startoff & xfs_mask64hi(64-BMBT_STARTOFF_BITLEN)) == 0);
        ASSERT((blockcount & xfs_mask64hi(64-BMBT_BLOCKCOUNT_BITLEN)) == 0);

#if XFS_BIG_BLKNOS
        ASSERT((startblock & xfs_mask64hi(64-BMBT_STARTBLOCK_BITLEN)) == 0);

        r->l0 = cpu_to_be64(
                ((xfs_bmbt_rec_base_t)extent_flag << 63) |
                 ((xfs_bmbt_rec_base_t)startoff << 9) |
                 ((xfs_bmbt_rec_base_t)startblock >> 43));
        r->l1 = cpu_to_be64(
                ((xfs_bmbt_rec_base_t)startblock << 21) |
                 ((xfs_bmbt_rec_base_t)blockcount &
                  (xfs_bmbt_rec_base_t)xfs_mask64lo(21)));
#else   /* !XFS_BIG_BLKNOS */
        if (isnullstartblock(startblock)) {
                r->l0 = cpu_to_be64(
                        ((xfs_bmbt_rec_base_t)extent_flag << 63) |
                         ((xfs_bmbt_rec_base_t)startoff << 9) |
                          (xfs_bmbt_rec_base_t)xfs_mask64lo(9));
                r->l1 = cpu_to_be64(xfs_mask64hi(11) |
                          ((xfs_bmbt_rec_base_t)startblock << 21) |
                          ((xfs_bmbt_rec_base_t)blockcount &
                           (xfs_bmbt_rec_base_t)xfs_mask64lo(21)));
        } else {
                r->l0 = cpu_to_be64(
                        ((xfs_bmbt_rec_base_t)extent_flag << 63) |
                         ((xfs_bmbt_rec_base_t)startoff << 9));
                r->l1 = cpu_to_be64(
                        ((xfs_bmbt_rec_base_t)startblock << 21) |
                         ((xfs_bmbt_rec_base_t)blockcount &
                          (xfs_bmbt_rec_base_t)xfs_mask64lo(21)));
        }
#endif  /* XFS_BIG_BLKNOS */
}

/*
 * Set all the fields in a disk format bmap extent record from the
 * uncompressed form.
 */
STATIC void
xfs_bmbt_disk_set_all(
        xfs_bmbt_rec_t  *r,
        xfs_bmbt_irec_t *s)
{
        xfs_bmbt_disk_set_allf(r, s->br_startoff, s->br_startblock,
                                  s->br_blockcount, s->br_state);
}

/*
 * Set the blockcount field in a bmap extent record.
 */
void
xfs_bmbt_set_blockcount(
        xfs_bmbt_rec_host_t *r,
        xfs_filblks_t   v)
{
        ASSERT((v & xfs_mask64hi(43)) == 0);
        r->l1 = (r->l1 & (xfs_bmbt_rec_base_t)xfs_mask64hi(43)) |
                  (xfs_bmbt_rec_base_t)(v & xfs_mask64lo(21));
}

/*
 * Set the startblock field in a bmap extent record.
 */
void
xfs_bmbt_set_startblock(
        xfs_bmbt_rec_host_t *r,
        xfs_fsblock_t   v)
{
#if XFS_BIG_BLKNOS
        ASSERT((v & xfs_mask64hi(12)) == 0);
        r->l0 = (r->l0 & (xfs_bmbt_rec_base_t)xfs_mask64hi(55)) |
                  (xfs_bmbt_rec_base_t)(v >> 43);
        r->l1 = (r->l1 & (xfs_bmbt_rec_base_t)xfs_mask64lo(21)) |
                  (xfs_bmbt_rec_base_t)(v << 21);
#else   /* !XFS_BIG_BLKNOS */
        if (isnullstartblock(v)) {
                r->l0 |= (xfs_bmbt_rec_base_t)xfs_mask64lo(9);
                r->l1 = (xfs_bmbt_rec_base_t)xfs_mask64hi(11) |
                          ((xfs_bmbt_rec_base_t)v << 21) |
                          (r->l1 & (xfs_bmbt_rec_base_t)xfs_mask64lo(21));
        } else {
                r->l0 &= ~(xfs_bmbt_rec_base_t)xfs_mask64lo(9);
                r->l1 = ((xfs_bmbt_rec_base_t)v << 21) |
                          (r->l1 & (xfs_bmbt_rec_base_t)xfs_mask64lo(21));
        }
#endif  /* XFS_BIG_BLKNOS */
}

/*
 * Set the startoff field in a bmap extent record.
 */
void
xfs_bmbt_set_startoff(
        xfs_bmbt_rec_host_t *r,
        xfs_fileoff_t   v)
{
        ASSERT((v & xfs_mask64hi(9)) == 0);
        r->l0 = (r->l0 & (xfs_bmbt_rec_base_t) xfs_mask64hi(1)) |
                ((xfs_bmbt_rec_base_t)v << 9) |
                  (r->l0 & (xfs_bmbt_rec_base_t)xfs_mask64lo(9));
}

/*
 * Set the extent state field in a bmap extent record.
 */
void
xfs_bmbt_set_state(
        xfs_bmbt_rec_host_t *r,
        xfs_exntst_t    v)
{
        ASSERT(v == XFS_EXT_NORM || v == XFS_EXT_UNWRITTEN);
        if (v == XFS_EXT_NORM)
                r->l0 &= xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN);
        else
                r->l0 |= xfs_mask64hi(BMBT_EXNTFLAG_BITLEN);
}

/*
 * Convert in-memory form of btree root to on-disk form.
 */
void
xfs_bmbt_to_bmdr(
        struct xfs_mount        *mp,
        struct xfs_btree_block  *rblock,
        int                     rblocklen,
        xfs_bmdr_block_t        *dblock,
        int                     dblocklen)
{
        int                     dmxr;
        xfs_bmbt_key_t          *fkp;
        __be64                  *fpp;
        xfs_bmbt_key_t          *tkp;
        __be64                  *tpp;

        ASSERT(be32_to_cpu(rblock->bb_magic) == XFS_BMAP_MAGIC);
        ASSERT(be64_to_cpu(rblock->bb_u.l.bb_leftsib) == NULLDFSBNO);
        ASSERT(be64_to_cpu(rblock->bb_u.l.bb_rightsib) == NULLDFSBNO);
        ASSERT(be16_to_cpu(rblock->bb_level) > 0);
        dblock->bb_level = rblock->bb_level;
        dblock->bb_numrecs = rblock->bb_numrecs;
        dmxr = xfs_bmdr_maxrecs(mp, dblocklen, 0);
        fkp = XFS_BMBT_KEY_ADDR(mp, rblock, 1);
        tkp = XFS_BMDR_KEY_ADDR(dblock, 1);
        fpp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, rblocklen);
        tpp = XFS_BMDR_PTR_ADDR(dblock, 1, dmxr);
        dmxr = be16_to_cpu(dblock->bb_numrecs);
        memcpy(tkp, fkp, sizeof(*fkp) * dmxr);
        memcpy(tpp, fpp, sizeof(*fpp) * dmxr);
}

/*
 * Check extent records, which have just been read, for
 * any bit in the extent flag field. ASSERT on debug
 * kernels, as this condition should not occur.
 * Return an error condition (1) if any flags found,
 * otherwise return 0.
 */
int
xfs_check_nostate_extents(
        xfs_ifork_t             *ifp,
        xfs_extnum_t            idx,
        xfs_extnum_t            num)
{
        for (; num > 0; num--, idx++) {
                xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, idx);
                if ((ep->l0 >>
                     (64 - BMBT_EXNTFLAG_BITLEN)) != 0) {
                        ASSERT(0);
                        return 1;
                }
        }
        return 0;
}

STATIC struct xfs_btree_cur *
xfs_bmbt_dup_cursor(
        struct xfs_btree_cur    *cur)
{
        struct xfs_btree_cur    *new;

        new = xfs_bmbt_init_cursor(cur->bc_mp, cur->bc_tp,
                        cur->bc_private.b.ip, cur->bc_private.b.whichfork);

        /*
         * Copy the firstblock, flist, and flags values,
         * since init cursor doesn't get them.
         */
        new->bc_private.b.firstblock = cur->bc_private.b.firstblock;
        new->bc_private.b.flist = cur->bc_private.b.flist;
        new->bc_private.b.flags = cur->bc_private.b.flags;

        return new;
}

STATIC void
xfs_bmbt_update_cursor(
        struct xfs_btree_cur    *src,
        struct xfs_btree_cur    *dst)
{
        ASSERT((dst->bc_private.b.firstblock != NULLFSBLOCK) ||
               (dst->bc_private.b.ip->i_d.di_flags & XFS_DIFLAG_REALTIME));
        ASSERT(dst->bc_private.b.flist == src->bc_private.b.flist);

        dst->bc_private.b.allocated += src->bc_private.b.allocated;
        dst->bc_private.b.firstblock = src->bc_private.b.firstblock;

        src->bc_private.b.allocated = 0;
}

STATIC int
xfs_bmbt_alloc_block(
        struct xfs_btree_cur    *cur,
        union xfs_btree_ptr     *start,
        union xfs_btree_ptr     *new,
        int                     length,
        int                     *stat)
{
        xfs_alloc_arg_t         args;           /* block allocation args */
        int                     error;          /* error return value */

        memset(&args, 0, sizeof(args));
        args.tp = cur->bc_tp;
        args.mp = cur->bc_mp;
        args.fsbno = cur->bc_private.b.firstblock;
        args.firstblock = args.fsbno;

        if (args.fsbno == NULLFSBLOCK) {
                args.fsbno = be64_to_cpu(start->l);
                args.type = XFS_ALLOCTYPE_START_BNO;
                /*
                 * Make sure there is sufficient room left in the AG to
                 * complete a full tree split for an extent insert.  If
                 * we are converting the middle part of an extent then
                 * we may need space for two tree splits.
                 *
                 * We are relying on the caller to make the correct block
                 * reservation for this operation to succeed.  If the
                 * reservation amount is insufficient then we may fail a
                 * block allocation here and corrupt the filesystem.
                 */
                args.minleft = xfs_trans_get_block_res(args.tp);
        } else if (cur->bc_private.b.flist->xbf_low) {
                args.type = XFS_ALLOCTYPE_START_BNO;
        } else {
                args.type = XFS_ALLOCTYPE_NEAR_BNO;
        }

        args.minlen = args.maxlen = args.prod = 1;
        args.wasdel = cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL;
        if (!args.wasdel && xfs_trans_get_block_res(args.tp) == 0) {
                error = XFS_ERROR(ENOSPC);
                goto error0;
        }
        error = xfs_alloc_vextent(&args);
        if (error)
                goto error0;

        if (args.fsbno == NULLFSBLOCK && args.minleft) {
                /*
                 * Could not find an AG with enough free space to satisfy
                 * a full btree split.  Try again without minleft and if
                 * successful activate the lowspace algorithm.
                 */
                args.fsbno = 0;
                args.type = XFS_ALLOCTYPE_FIRST_AG;
                args.minleft = 0;
                error = xfs_alloc_vextent(&args);
                if (error)
                        goto error0;
                cur->bc_private.b.flist->xbf_low = 1;
        }
        if (args.fsbno == NULLFSBLOCK) {
                XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
                *stat = 0;
                return 0;
        }
        ASSERT(args.len == 1);
        cur->bc_private.b.firstblock = args.fsbno;
        cur->bc_private.b.allocated++;
        cur->bc_private.b.ip->i_d.di_nblocks++;
        xfs_trans_log_inode(args.tp, cur->bc_private.b.ip, XFS_ILOG_CORE);
        xfs_trans_mod_dquot_byino(args.tp, cur->bc_private.b.ip,
                        XFS_TRANS_DQ_BCOUNT, 1L);

        new->l = cpu_to_be64(args.fsbno);

        XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
        *stat = 1;
        return 0;

 error0:
        XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
        return error;
}

STATIC int
xfs_bmbt_free_block(
        struct xfs_btree_cur    *cur,
        struct xfs_buf          *bp)
{
        struct xfs_mount        *mp = cur->bc_mp;
        struct xfs_inode        *ip = cur->bc_private.b.ip;
        struct xfs_trans        *tp = cur->bc_tp;
        xfs_fsblock_t           fsbno = XFS_DADDR_TO_FSB(mp, XFS_BUF_ADDR(bp));

        xfs_bmap_add_free(fsbno, 1, cur->bc_private.b.flist, mp);
        ip->i_d.di_nblocks--;

        xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
        xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
        xfs_trans_binval(tp, bp);
        return 0;
}

STATIC int
xfs_bmbt_get_minrecs(
        struct xfs_btree_cur    *cur,
        int                     level)
{
        if (level == cur->bc_nlevels - 1) {
                struct xfs_ifork        *ifp;

                ifp = XFS_IFORK_PTR(cur->bc_private.b.ip,
                                    cur->bc_private.b.whichfork);

                return xfs_bmbt_maxrecs(cur->bc_mp,
                                        ifp->if_broot_bytes, level == 0) / 2;
        }

        return cur->bc_mp->m_bmap_dmnr[level != 0];
}

STATIC int
xfs_bmbt_get_maxrecs(
        struct xfs_btree_cur    *cur,
        int                     level)
{
        if (level == cur->bc_nlevels - 1) {
                struct xfs_ifork        *ifp;

                ifp = XFS_IFORK_PTR(cur->bc_private.b.ip,
                                    cur->bc_private.b.whichfork);

                return xfs_bmbt_maxrecs(cur->bc_mp,
                                        ifp->if_broot_bytes, level == 0);
        }

        return cur->bc_mp->m_bmap_dmxr[level != 0];
}

/*
 * Get the maximum records we could store in the on-disk format.
 *
 * For non-root nodes this is equivalent to xfs_bmbt_get_maxrecs, but
 * for the root node this checks the available space in the dinode fork
 * so that we can resize the in-memory buffer to match it.  After a
 * resize to the maximum size this function returns the same value
 * as xfs_bmbt_get_maxrecs for the root node, too.
 */
STATIC int
xfs_bmbt_get_dmaxrecs(
        struct xfs_btree_cur    *cur,
        int                     level)
{
        if (level != cur->bc_nlevels - 1)
                return cur->bc_mp->m_bmap_dmxr[level != 0];
        return xfs_bmdr_maxrecs(cur->bc_mp, cur->bc_private.b.forksize,
                                level == 0);
}

STATIC void
xfs_bmbt_init_key_from_rec(
        union xfs_btree_key     *key,
        union xfs_btree_rec     *rec)
{
        key->bmbt.br_startoff =
                cpu_to_be64(xfs_bmbt_disk_get_startoff(&rec->bmbt));
}

STATIC void
xfs_bmbt_init_rec_from_key(
        union xfs_btree_key     *key,
        union xfs_btree_rec     *rec)
{
        ASSERT(key->bmbt.br_startoff != 0);

        xfs_bmbt_disk_set_allf(&rec->bmbt, be64_to_cpu(key->bmbt.br_startoff),
                               0, 0, XFS_EXT_NORM);
}

STATIC void
xfs_bmbt_init_rec_from_cur(
        struct xfs_btree_cur    *cur,
        union xfs_btree_rec     *rec)
{
        xfs_bmbt_disk_set_all(&rec->bmbt, &cur->bc_rec.b);
}

STATIC void
xfs_bmbt_init_ptr_from_cur(
        struct xfs_btree_cur    *cur,
        union xfs_btree_ptr     *ptr)
{
        ptr->l = 0;
}

STATIC __int64_t
xfs_bmbt_key_diff(
        struct xfs_btree_cur    *cur,
        union xfs_btree_key     *key)
{
        return (__int64_t)be64_to_cpu(key->bmbt.br_startoff) -
                                      cur->bc_rec.b.br_startoff;
}

#ifdef DEBUG
STATIC int
xfs_bmbt_keys_inorder(
        struct xfs_btree_cur    *cur,
        union xfs_btree_key     *k1,
        union xfs_btree_key     *k2)
{
        return be64_to_cpu(k1->bmbt.br_startoff) <
                be64_to_cpu(k2->bmbt.br_startoff);
}

STATIC int
xfs_bmbt_recs_inorder(
        struct xfs_btree_cur    *cur,
        union xfs_btree_rec     *r1,
        union xfs_btree_rec     *r2)
{
        return xfs_bmbt_disk_get_startoff(&r1->bmbt) +
                xfs_bmbt_disk_get_blockcount(&r1->bmbt) <=
                xfs_bmbt_disk_get_startoff(&r2->bmbt);
}
#endif  /* DEBUG */

#ifdef XFS_BTREE_TRACE
ktrace_t        *xfs_bmbt_trace_buf;

STATIC void
xfs_bmbt_trace_enter(
        struct xfs_btree_cur    *cur,
        const char              *func,
        char                    *s,
        int                     type,
        int                     line,
        __psunsigned_t          a0,
        __psunsigned_t          a1,
        __psunsigned_t          a2,
        __psunsigned_t          a3,
        __psunsigned_t          a4,
        __psunsigned_t          a5,
        __psunsigned_t          a6,
        __psunsigned_t          a7,
        __psunsigned_t          a8,
        __psunsigned_t          a9,
        __psunsigned_t          a10)
{
        struct xfs_inode        *ip = cur->bc_private.b.ip;
        int                     whichfork = cur->bc_private.b.whichfork;

        ktrace_enter(xfs_bmbt_trace_buf,
                (void *)((__psint_t)type | (whichfork << 8) | (line << 16)),
                (void *)func, (void *)s, (void *)ip, (void *)cur,
                (void *)a0, (void *)a1, (void *)a2, (void *)a3,
                (void *)a4, (void *)a5, (void *)a6, (void *)a7,
                (void *)a8, (void *)a9, (void *)a10);
}

STATIC void
xfs_bmbt_trace_cursor(
        struct xfs_btree_cur    *cur,
        __uint32_t              *s0,
        __uint64_t              *l0,
        __uint64_t              *l1)
{
        struct xfs_bmbt_rec_host r;

        xfs_bmbt_set_all(&r, &cur->bc_rec.b);

        *s0 = (cur->bc_nlevels << 24) |
              (cur->bc_private.b.flags << 16) |
               cur->bc_private.b.allocated;
        *l0 = r.l0;
        *l1 = r.l1;
}

STATIC void
xfs_bmbt_trace_key(
        struct xfs_btree_cur    *cur,
        union xfs_btree_key     *key,
        __uint64_t              *l0,
        __uint64_t              *l1)
{
        *l0 = be64_to_cpu(key->bmbt.br_startoff);
        *l1 = 0;
}

/* Endian flipping versions of the bmbt extraction functions */
STATIC void
xfs_bmbt_disk_get_all(
        xfs_bmbt_rec_t  *r,
        xfs_bmbt_irec_t *s)
{
        __xfs_bmbt_get_all(get_unaligned_be64(&r->l0),
                                get_unaligned_be64(&r->l1), s);
}

STATIC void
xfs_bmbt_trace_record(
        struct xfs_btree_cur    *cur,
        union xfs_btree_rec     *rec,
        __uint64_t              *l0,
        __uint64_t              *l1,
        __uint64_t              *l2)
{
        struct xfs_bmbt_irec    irec;

        xfs_bmbt_disk_get_all(&rec->bmbt, &irec);
        *l0 = irec.br_startoff;
        *l1 = irec.br_startblock;
        *l2 = irec.br_blockcount;
}
#endif /* XFS_BTREE_TRACE */

static const struct xfs_btree_ops xfs_bmbt_ops = {
        .rec_len                = sizeof(xfs_bmbt_rec_t),
        .key_len                = sizeof(xfs_bmbt_key_t),

        .dup_cursor             = xfs_bmbt_dup_cursor,
        .update_cursor          = xfs_bmbt_update_cursor,
        .alloc_block            = xfs_bmbt_alloc_block,
        .free_block             = xfs_bmbt_free_block,
        .get_maxrecs            = xfs_bmbt_get_maxrecs,
        .get_minrecs            = xfs_bmbt_get_minrecs,
        .get_dmaxrecs           = xfs_bmbt_get_dmaxrecs,
        .init_key_from_rec      = xfs_bmbt_init_key_from_rec,
        .init_rec_from_key      = xfs_bmbt_init_rec_from_key,
        .init_rec_from_cur      = xfs_bmbt_init_rec_from_cur,
        .init_ptr_from_cur      = xfs_bmbt_init_ptr_from_cur,
        .key_diff               = xfs_bmbt_key_diff,

#ifdef DEBUG
        .keys_inorder           = xfs_bmbt_keys_inorder,
        .recs_inorder           = xfs_bmbt_recs_inorder,
#endif

#ifdef XFS_BTREE_TRACE
        .trace_enter            = xfs_bmbt_trace_enter,
        .trace_cursor           = xfs_bmbt_trace_cursor,
        .trace_key              = xfs_bmbt_trace_key,
        .trace_record           = xfs_bmbt_trace_record,
#endif
};
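
/*
 * The generic btree code drives the bmap btree entirely through the ops
 * vector above; xfs_bmbt_init_cursor() below hooks it up via cur->bc_ops.
 */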

/*
 * Allocate a new bmap btree cursor.
 */
struct xfs_btree_cur *                          /* new bmap btree cursor */
xfs_bmbt_init_cursor(
        struct xfs_mount        *mp,            /* file system mount point */
        struct xfs_trans        *tp,            /* transaction pointer */
        struct xfs_inode        *ip,            /* inode owning the btree */
        int                     whichfork)      /* data or attr fork */
{
        struct xfs_ifork        *ifp = XFS_IFORK_PTR(ip, whichfork);
        struct xfs_btree_cur    *cur;

        cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_SLEEP);

        cur->bc_tp = tp;
        cur->bc_mp = mp;
        cur->bc_nlevels = be16_to_cpu(ifp->if_broot->bb_level) + 1;
        cur->bc_btnum = XFS_BTNUM_BMAP;
        cur->bc_blocklog = mp->m_sb.sb_blocklog;

        cur->bc_ops = &xfs_bmbt_ops;
        cur->bc_flags = XFS_BTREE_LONG_PTRS | XFS_BTREE_ROOT_IN_INODE;

        cur->bc_private.b.forksize = XFS_IFORK_SIZE(ip, whichfork);
        cur->bc_private.b.ip = ip;
        cur->bc_private.b.firstblock = NULLFSBLOCK;
        cur->bc_private.b.flist = NULL;
        cur->bc_private.b.allocated = 0;
        cur->bc_private.b.flags = 0;
        cur->bc_private.b.whichfork = whichfork;

        return cur;
}
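
/*
 * Typical usage (illustrative sketch only; the real call sites live in
 * xfs_bmap.c and additionally set the cursor's firstblock/flist state):
 *
 *      cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
 *      ... operate on the tree through the generic xfs_btree_* helpers ...
 *      xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
 */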

/*
 * Calculate number of records in a bmap btree block.
 */
int
xfs_bmbt_maxrecs(
        struct xfs_mount        *mp,
        int                     blocklen,
        int                     leaf)
{
        blocklen -= XFS_BMBT_BLOCK_LEN(mp);

        if (leaf)
                return blocklen / sizeof(xfs_bmbt_rec_t);
        return blocklen / (sizeof(xfs_bmbt_key_t) + sizeof(xfs_bmbt_ptr_t));
}
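
/*
 * Illustrative example (assuming a 4096-byte block and the 24-byte
 * long-format block header accounted for by XFS_BMBT_BLOCK_LEN() here):
 * a leaf holds (4096 - 24) / 16 = 254 records, and a node holds
 * (4096 - 24) / (8 + 8) = 254 key/pointer pairs.
 */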

/*
 * Calculate number of records in a bmap btree inode root.
 */
int
xfs_bmdr_maxrecs(
        struct xfs_mount        *mp,
        int                     blocklen,
        int                     leaf)
{
        blocklen -= sizeof(xfs_bmdr_block_t);

        if (leaf)
                return blocklen / sizeof(xfs_bmdr_rec_t);
        return blocklen / (sizeof(xfs_bmdr_key_t) + sizeof(xfs_bmdr_ptr_t));
}
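
/*
 * Illustrative example (with a hypothetical 256-byte root area in the
 * inode fork, a 4-byte xfs_bmdr_block_t header, 16-byte records and
 * 8-byte keys/pointers): a leaf root holds (256 - 4) / 16 = 15 records,
 * and a node root holds (256 - 4) / (8 + 8) = 15 key/pointer pairs.
 */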