fs/xfs/libxfs/xfs_bmap.c
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_dir2.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_extfree_item.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_buf_item.h"
#include "xfs_trace.h"
#include "xfs_symlink.h"
#include "xfs_attr_leaf.h"
#include "xfs_filestream.h"
#include "xfs_rmap.h"
#include "xfs_ag_resv.h"
#include "xfs_refcount.h"
#include "xfs_icache.h"
kmem_zone_t		*xfs_bmap_free_item_zone;

/*
 * Miscellaneous helper functions
 */

/*
 * Compute and fill in the value of the maximum depth of a bmap btree
 * in this filesystem.  Done once, during mount.
 */
void
xfs_bmap_compute_maxlevels(
	xfs_mount_t	*mp,		/* file system mount structure */
	int		whichfork)	/* data or attr fork */
{
	int		level;		/* btree level */
	uint		maxblocks;	/* max blocks at this level */
	uint		maxleafents;	/* max leaf entries possible */
	int		maxrootrecs;	/* max records in root block */
	int		minleafrecs;	/* min records in leaf block */
	int		minnoderecs;	/* min records in node block */
	int		sz;		/* root block size */

	/*
	 * The maximum number of extents in a file, hence the maximum
	 * number of leaf entries, is controlled by the type of di_nextents
	 * (a signed 32-bit number, xfs_extnum_t), or by di_anextents
	 * (a signed 16-bit number, xfs_aextnum_t).
	 *
	 * Note that we can no longer assume that if we are in ATTR1 that
	 * the fork offset of all the inodes will be
	 * (xfs_default_attroffset(ip) >> 3) because we could have mounted
	 * with ATTR2 and then mounted back with ATTR1, keeping the
	 * di_forkoff's fixed but probably at various positions. Therefore,
	 * for both ATTR1 and ATTR2 we have to assume the worst case scenario
	 * of a minimum size available.
	 */
	if (whichfork == XFS_DATA_FORK) {
		maxleafents = MAXEXTNUM;
		sz = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
	} else {
		maxleafents = MAXAEXTNUM;
		sz = XFS_BMDR_SPACE_CALC(MINABTPTRS);
	}
	maxrootrecs = xfs_bmdr_maxrecs(sz, 0);
	minleafrecs = mp->m_bmap_dmnr[0];
	minnoderecs = mp->m_bmap_dmnr[1];
	maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
	for (level = 1; maxblocks > 1; level++) {
		if (maxblocks <= maxrootrecs)
			maxblocks = 1;
		else
			maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
	}
	mp->m_bm_maxlevels[whichfork] = level;
}
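/*
 * In effect, the loop above charges one leaf level for the extent records
 * themselves and then one interior level per division by the minimum node
 * fanout, so the result is roughly
 *	maxlevels ~= 1 + ceil(log_minnoderecs(maxleafents / minleafrecs)),
 * stopping early once the remaining blocks fit under a single root of
 * maxrootrecs entries.
 */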
STATIC int				/* error */
xfs_bmbt_lookup_eq(
	struct xfs_btree_cur	*cur,
	xfs_fileoff_t		off,
	xfs_fsblock_t		bno,
	xfs_filblks_t		len,
	int			*stat)	/* success/failure */
{
	cur->bc_rec.b.br_startoff = off;
	cur->bc_rec.b.br_startblock = bno;
	cur->bc_rec.b.br_blockcount = len;
	return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
}

STATIC int				/* error */
xfs_bmbt_lookup_ge(
	struct xfs_btree_cur	*cur,
	xfs_fileoff_t		off,
	xfs_fsblock_t		bno,
	xfs_filblks_t		len,
	int			*stat)	/* success/failure */
{
	cur->bc_rec.b.br_startoff = off;
	cur->bc_rec.b.br_startblock = bno;
	cur->bc_rec.b.br_blockcount = len;
	return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
}
/*
 * Check if the inode needs to be converted to btree format.
 */
static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork)
{
	return whichfork != XFS_COW_FORK &&
		XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
		XFS_IFORK_NEXTENTS(ip, whichfork) >
			XFS_IFORK_MAXEXT(ip, whichfork);
}

/*
 * Check if the inode should be converted to extent format.
 */
static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork)
{
	return whichfork != XFS_COW_FORK &&
		XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE &&
		XFS_IFORK_NEXTENTS(ip, whichfork) <=
			XFS_IFORK_MAXEXT(ip, whichfork);
}
/*
 * Update the record referred to by cur to the value given
 * by [off, bno, len, state].
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */
STATIC int
xfs_bmbt_update(
	struct xfs_btree_cur	*cur,
	xfs_fileoff_t		off,
	xfs_fsblock_t		bno,
	xfs_filblks_t		len,
	xfs_exntst_t		state)
{
	union xfs_btree_rec	rec;

	xfs_bmbt_disk_set_allf(&rec.bmbt, off, bno, len, state);
	return xfs_btree_update(cur, &rec);
}
/*
 * Compute the worst-case number of indirect blocks that will be used
 * for ip's delayed extent of length "len".
 */
STATIC xfs_filblks_t
xfs_bmap_worst_indlen(
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_filblks_t	len)		/* delayed extent length */
{
	int		level;		/* btree level number */
	int		maxrecs;	/* maximum record count at this level */
	xfs_mount_t	*mp;		/* mount structure */
	xfs_filblks_t	rval;		/* return value */

	mp = ip->i_mount;
	maxrecs = mp->m_bmap_dmxr[0];
	for (level = 0, rval = 0;
	     level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK);
	     level++) {
		len += maxrecs - 1;
		do_div(len, maxrecs);
		rval += len;
		if (len == 1)
			return rval + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
				level - 1;
		if (level == 0)
			maxrecs = mp->m_bmap_dmxr[1];
	}
	return rval;
}
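/*
 * The worst case costed above is a delayed extent that fragments into
 * single-block extents, one bmbt record per block: the first division
 * charges one leaf block per m_bmap_dmxr[0] records, later divisions one
 * node block per m_bmap_dmxr[1] children, and once a level needs only a
 * single block each remaining level up to the root costs exactly one more
 * block (the early-return case).
 */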
213 * Calculate the default attribute fork offset for newly created inodes.
215 uint
216 xfs_default_attroffset(
217 struct xfs_inode *ip)
219 struct xfs_mount *mp = ip->i_mount;
220 uint offset;
222 if (mp->m_sb.sb_inodesize == 256) {
223 offset = XFS_LITINO(mp, ip->i_d.di_version) -
224 XFS_BMDR_SPACE_CALC(MINABTPTRS);
225 } else {
226 offset = XFS_BMDR_SPACE_CALC(6 * MINABTPTRS);
229 ASSERT(offset < XFS_LITINO(mp, ip->i_d.di_version));
230 return offset;
234 * Helper routine to reset inode di_forkoff field when switching
235 * attribute fork from local to extent format - we reset it where
236 * possible to make space available for inline data fork extents.
238 STATIC void
239 xfs_bmap_forkoff_reset(
240 xfs_inode_t *ip,
241 int whichfork)
243 if (whichfork == XFS_ATTR_FORK &&
244 ip->i_d.di_format != XFS_DINODE_FMT_DEV &&
245 ip->i_d.di_format != XFS_DINODE_FMT_UUID &&
246 ip->i_d.di_format != XFS_DINODE_FMT_BTREE) {
247 uint dfl_forkoff = xfs_default_attroffset(ip) >> 3;
249 if (dfl_forkoff > ip->i_d.di_forkoff)
250 ip->i_d.di_forkoff = dfl_forkoff;
254 #ifdef DEBUG
255 STATIC struct xfs_buf *
256 xfs_bmap_get_bp(
257 struct xfs_btree_cur *cur,
258 xfs_fsblock_t bno)
260 struct xfs_log_item_desc *lidp;
261 int i;
263 if (!cur)
264 return NULL;
266 for (i = 0; i < XFS_BTREE_MAXLEVELS; i++) {
267 if (!cur->bc_bufs[i])
268 break;
269 if (XFS_BUF_ADDR(cur->bc_bufs[i]) == bno)
270 return cur->bc_bufs[i];
273 /* Chase down all the log items to see if the bp is there */
274 list_for_each_entry(lidp, &cur->bc_tp->t_items, lid_trans) {
275 struct xfs_buf_log_item *bip;
276 bip = (struct xfs_buf_log_item *)lidp->lid_item;
277 if (bip->bli_item.li_type == XFS_LI_BUF &&
278 XFS_BUF_ADDR(bip->bli_buf) == bno)
279 return bip->bli_buf;
282 return NULL;
285 STATIC void
286 xfs_check_block(
287 struct xfs_btree_block *block,
288 xfs_mount_t *mp,
289 int root,
290 short sz)
292 int i, j, dmxr;
293 __be64 *pp, *thispa; /* pointer to block address */
294 xfs_bmbt_key_t *prevp, *keyp;
296 ASSERT(be16_to_cpu(block->bb_level) > 0);
298 prevp = NULL;
299 for( i = 1; i <= xfs_btree_get_numrecs(block); i++) {
300 dmxr = mp->m_bmap_dmxr[0];
301 keyp = XFS_BMBT_KEY_ADDR(mp, block, i);
303 if (prevp) {
304 ASSERT(be64_to_cpu(prevp->br_startoff) <
305 be64_to_cpu(keyp->br_startoff));
307 prevp = keyp;
310 * Compare the block numbers to see if there are dups.
312 if (root)
313 pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, i, sz);
314 else
315 pp = XFS_BMBT_PTR_ADDR(mp, block, i, dmxr);
317 for (j = i+1; j <= be16_to_cpu(block->bb_numrecs); j++) {
318 if (root)
319 thispa = XFS_BMAP_BROOT_PTR_ADDR(mp, block, j, sz);
320 else
321 thispa = XFS_BMBT_PTR_ADDR(mp, block, j, dmxr);
322 if (*thispa == *pp) {
323 xfs_warn(mp, "%s: thispa(%d) == pp(%d) %Ld",
324 __func__, j, i,
325 (unsigned long long)be64_to_cpu(*thispa));
326 panic("%s: ptrs are equal in node\n",
327 __func__);
334 * Check that the extents for the inode ip are in the right order in all
335  * btree leaves. This becomes prohibitively expensive for large extent count
336 * files, so don't bother with inodes that have more than 10,000 extents in
337 * them. The btree record ordering checks will still be done, so for such large
338 * bmapbt constructs that is going to catch most corruptions.
340 STATIC void
341 xfs_bmap_check_leaf_extents(
342 xfs_btree_cur_t *cur, /* btree cursor or null */
343 xfs_inode_t *ip, /* incore inode pointer */
344 int whichfork) /* data or attr fork */
346 struct xfs_btree_block *block; /* current btree block */
347 xfs_fsblock_t bno; /* block # of "block" */
348 xfs_buf_t *bp; /* buffer for "block" */
349 int error; /* error return value */
350 xfs_extnum_t i=0, j; /* index into the extents list */
351 xfs_ifork_t *ifp; /* fork structure */
352 int level; /* btree level, for checking */
353 xfs_mount_t *mp; /* file system mount structure */
354 __be64 *pp; /* pointer to block address */
355 xfs_bmbt_rec_t *ep; /* pointer to current extent */
356 xfs_bmbt_rec_t last = {0, 0}; /* last extent in prev block */
357 xfs_bmbt_rec_t *nextp; /* pointer to next extent */
358 int bp_release = 0;
360 if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) {
361 return;
364 /* skip large extent count inodes */
365 if (ip->i_d.di_nextents > 10000)
366 return;
368 bno = NULLFSBLOCK;
369 mp = ip->i_mount;
370 ifp = XFS_IFORK_PTR(ip, whichfork);
371 block = ifp->if_broot;
373 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
375 level = be16_to_cpu(block->bb_level);
376 ASSERT(level > 0);
377 xfs_check_block(block, mp, 1, ifp->if_broot_bytes);
378 pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
379 bno = be64_to_cpu(*pp);
381 ASSERT(bno != NULLFSBLOCK);
382 ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
383 ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
386 * Go down the tree until leaf level is reached, following the first
387 * pointer (leftmost) at each level.
389 while (level-- > 0) {
390 /* See if buf is in cur first */
391 bp_release = 0;
392 bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
393 if (!bp) {
394 bp_release = 1;
395 error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
396 XFS_BMAP_BTREE_REF,
397 &xfs_bmbt_buf_ops);
398 if (error)
399 goto error_norelse;
401 block = XFS_BUF_TO_BLOCK(bp);
402 if (level == 0)
403 break;
406 * Check this block for basic sanity (increasing keys and
407 * no duplicate blocks).
410 xfs_check_block(block, mp, 0, 0);
411 pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
412 bno = be64_to_cpu(*pp);
413 XFS_WANT_CORRUPTED_GOTO(mp,
414 XFS_FSB_SANITY_CHECK(mp, bno), error0);
415 if (bp_release) {
416 bp_release = 0;
417 xfs_trans_brelse(NULL, bp);
422 * Here with bp and block set to the leftmost leaf node in the tree.
424 i = 0;
427 * Loop over all leaf nodes checking that all extents are in the right order.
429 for (;;) {
430 xfs_fsblock_t nextbno;
431 xfs_extnum_t num_recs;
434 num_recs = xfs_btree_get_numrecs(block);
437 * Read-ahead the next leaf block, if any.
440 nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
443 * Check all the extents to make sure they are OK.
444 * If we had a previous block, the last entry should
445 * conform with the first entry in this one.
448 ep = XFS_BMBT_REC_ADDR(mp, block, 1);
449 if (i) {
450 ASSERT(xfs_bmbt_disk_get_startoff(&last) +
451 xfs_bmbt_disk_get_blockcount(&last) <=
452 xfs_bmbt_disk_get_startoff(ep));
454 for (j = 1; j < num_recs; j++) {
455 nextp = XFS_BMBT_REC_ADDR(mp, block, j + 1);
456 ASSERT(xfs_bmbt_disk_get_startoff(ep) +
457 xfs_bmbt_disk_get_blockcount(ep) <=
458 xfs_bmbt_disk_get_startoff(nextp));
459 ep = nextp;
462 last = *ep;
463 i += num_recs;
464 if (bp_release) {
465 bp_release = 0;
466 xfs_trans_brelse(NULL, bp);
468 bno = nextbno;
470 * If we've reached the end, stop.
472 if (bno == NULLFSBLOCK)
473 break;
475 bp_release = 0;
476 bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
477 if (!bp) {
478 bp_release = 1;
479 error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
480 XFS_BMAP_BTREE_REF,
481 &xfs_bmbt_buf_ops);
482 if (error)
483 goto error_norelse;
485 block = XFS_BUF_TO_BLOCK(bp);
488 return;
490 error0:
491 xfs_warn(mp, "%s: at error0", __func__);
492 if (bp_release)
493 xfs_trans_brelse(NULL, bp);
494 error_norelse:
495 xfs_warn(mp, "%s: BAD after btree leaves for %d extents",
496 __func__, i);
497 panic("%s: CORRUPTED BTREE OR SOMETHING", __func__);
498 return;
502 * Add bmap trace insert entries for all the contents of the extent records.
504 void
505 xfs_bmap_trace_exlist(
506 xfs_inode_t *ip, /* incore inode pointer */
507 xfs_extnum_t cnt, /* count of entries in the list */
508 int whichfork, /* data or attr or cow fork */
509 unsigned long caller_ip)
511 xfs_extnum_t idx; /* extent record index */
512 xfs_ifork_t *ifp; /* inode fork pointer */
513 int state = 0;
515 if (whichfork == XFS_ATTR_FORK)
516 state |= BMAP_ATTRFORK;
517 else if (whichfork == XFS_COW_FORK)
518 state |= BMAP_COWFORK;
520 ifp = XFS_IFORK_PTR(ip, whichfork);
521 ASSERT(cnt == xfs_iext_count(ifp));
522 for (idx = 0; idx < cnt; idx++)
523 trace_xfs_extlist(ip, idx, state, caller_ip);
527 * Validate that the bmbt_irecs being returned from bmapi are valid
528 * given the caller's original parameters. Specifically check the
529 * ranges of the returned irecs to ensure that they only extend beyond
530 * the given parameters if the XFS_BMAPI_ENTIRE flag was set.
532 STATIC void
533 xfs_bmap_validate_ret(
534 xfs_fileoff_t bno,
535 xfs_filblks_t len,
536 int flags,
537 xfs_bmbt_irec_t *mval,
538 int nmap,
539 int ret_nmap)
541 int i; /* index to map values */
543 ASSERT(ret_nmap <= nmap);
545 for (i = 0; i < ret_nmap; i++) {
546 ASSERT(mval[i].br_blockcount > 0);
547 if (!(flags & XFS_BMAPI_ENTIRE)) {
548 ASSERT(mval[i].br_startoff >= bno);
549 ASSERT(mval[i].br_blockcount <= len);
550 ASSERT(mval[i].br_startoff + mval[i].br_blockcount <=
551 bno + len);
552 } else {
553 ASSERT(mval[i].br_startoff < bno + len);
554 ASSERT(mval[i].br_startoff + mval[i].br_blockcount >
555 bno);
557 ASSERT(i == 0 ||
558 mval[i - 1].br_startoff + mval[i - 1].br_blockcount ==
559 mval[i].br_startoff);
560 ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK &&
561 mval[i].br_startblock != HOLESTARTBLOCK);
562 ASSERT(mval[i].br_state == XFS_EXT_NORM ||
563 mval[i].br_state == XFS_EXT_UNWRITTEN);
567 #else
568 #define xfs_bmap_check_leaf_extents(cur, ip, whichfork) do { } while (0)
569 #define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do { } while (0)
570 #endif /* DEBUG */
573 * bmap free list manipulation functions
577 * Add the extent to the list of extents to be free at transaction end.
578 * The list is maintained sorted (by block number).
580 void
581 xfs_bmap_add_free(
582 struct xfs_mount *mp,
583 struct xfs_defer_ops *dfops,
584 xfs_fsblock_t bno,
585 xfs_filblks_t len,
586 struct xfs_owner_info *oinfo)
588 struct xfs_extent_free_item *new; /* new element */
589 #ifdef DEBUG
590 xfs_agnumber_t agno;
591 xfs_agblock_t agbno;
593 ASSERT(bno != NULLFSBLOCK);
594 ASSERT(len > 0);
595 ASSERT(len <= MAXEXTLEN);
596 ASSERT(!isnullstartblock(bno));
597 agno = XFS_FSB_TO_AGNO(mp, bno);
598 agbno = XFS_FSB_TO_AGBNO(mp, bno);
599 ASSERT(agno < mp->m_sb.sb_agcount);
600 ASSERT(agbno < mp->m_sb.sb_agblocks);
601 ASSERT(len < mp->m_sb.sb_agblocks);
602 ASSERT(agbno + len <= mp->m_sb.sb_agblocks);
603 #endif
604 ASSERT(xfs_bmap_free_item_zone != NULL);
606 new = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP);
607 new->xefi_startblock = bno;
608 new->xefi_blockcount = (xfs_extlen_t)len;
609 if (oinfo)
610 new->xefi_oinfo = *oinfo;
611 else
612 xfs_rmap_skip_owner_update(&new->xefi_oinfo);
613 trace_xfs_bmap_free_defer(mp, XFS_FSB_TO_AGNO(mp, bno), 0,
614 XFS_FSB_TO_AGBNO(mp, bno), len);
615 xfs_defer_add(dfops, XFS_DEFER_OPS_TYPE_FREE, &new->xefi_list);
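/*
 * Note that nothing is freed here; the extent is only queued as deferred
 * work, and the actual free happens when the caller finishes the deferred
 * ops at transaction roll/commit time.
 */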
619 * Inode fork format manipulation functions
623 * Transform a btree format file with only one leaf node, where the
624 * extents list will fit in the inode, into an extents format file.
625 * Since the file extents are already in-core, all we have to do is
626 * give up the space for the btree root and pitch the leaf block.
628 STATIC int /* error */
629 xfs_bmap_btree_to_extents(
630 xfs_trans_t *tp, /* transaction pointer */
631 xfs_inode_t *ip, /* incore inode pointer */
632 xfs_btree_cur_t *cur, /* btree cursor */
633 int *logflagsp, /* inode logging flags */
634 int whichfork) /* data or attr fork */
636 /* REFERENCED */
637 struct xfs_btree_block *cblock;/* child btree block */
638 xfs_fsblock_t cbno; /* child block number */
639 xfs_buf_t *cbp; /* child block's buffer */
640 int error; /* error return value */
641 xfs_ifork_t *ifp; /* inode fork data */
642 xfs_mount_t *mp; /* mount point structure */
643 __be64 *pp; /* ptr to block address */
644 struct xfs_btree_block *rblock;/* root btree block */
645 struct xfs_owner_info oinfo;
647 mp = ip->i_mount;
648 ifp = XFS_IFORK_PTR(ip, whichfork);
649 ASSERT(whichfork != XFS_COW_FORK);
650 ASSERT(ifp->if_flags & XFS_IFEXTENTS);
651 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
652 rblock = ifp->if_broot;
653 ASSERT(be16_to_cpu(rblock->bb_level) == 1);
654 ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
655 ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0) == 1);
656 pp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, ifp->if_broot_bytes);
657 cbno = be64_to_cpu(*pp);
658 *logflagsp = 0;
659 #ifdef DEBUG
660 if ((error = xfs_btree_check_lptr(cur, cbno, 1)))
661 return error;
662 #endif
663 error = xfs_btree_read_bufl(mp, tp, cbno, 0, &cbp, XFS_BMAP_BTREE_REF,
664 &xfs_bmbt_buf_ops);
665 if (error)
666 return error;
667 cblock = XFS_BUF_TO_BLOCK(cbp);
668 if ((error = xfs_btree_check_block(cur, cblock, 0, cbp)))
669 return error;
670 xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
671 xfs_bmap_add_free(mp, cur->bc_private.b.dfops, cbno, 1, &oinfo);
672 ip->i_d.di_nblocks--;
673 xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
674 xfs_trans_binval(tp, cbp);
675 if (cur->bc_bufs[0] == cbp)
676 cur->bc_bufs[0] = NULL;
677 xfs_iroot_realloc(ip, -1, whichfork);
678 ASSERT(ifp->if_broot == NULL);
679 ASSERT((ifp->if_flags & XFS_IFBROOT) == 0);
680 XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
681 *logflagsp = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
682 return 0;
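/*
 * In short: the leaf's records are already incore, so the collapse above
 * only queues the single child block for freeing, invalidates its buffer,
 * shrinks the incore root away and flips the fork format back to extents.
 */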
686 * Convert an extents-format file into a btree-format file.
687 * The new file will have a root block (in the inode) and a single child block.
689 STATIC int /* error */
690 xfs_bmap_extents_to_btree(
691 xfs_trans_t *tp, /* transaction pointer */
692 xfs_inode_t *ip, /* incore inode pointer */
693 xfs_fsblock_t *firstblock, /* first-block-allocated */
694 struct xfs_defer_ops *dfops, /* blocks freed in xaction */
695 xfs_btree_cur_t **curp, /* cursor returned to caller */
696 int wasdel, /* converting a delayed alloc */
697 int *logflagsp, /* inode logging flags */
698 int whichfork) /* data or attr fork */
700 struct xfs_btree_block *ablock; /* allocated (child) bt block */
701 xfs_buf_t *abp; /* buffer for ablock */
702 xfs_alloc_arg_t args; /* allocation arguments */
703 xfs_bmbt_rec_t *arp; /* child record pointer */
704 struct xfs_btree_block *block; /* btree root block */
705 xfs_btree_cur_t *cur; /* bmap btree cursor */
706 xfs_bmbt_rec_host_t *ep; /* extent record pointer */
707 int error; /* error return value */
708 xfs_extnum_t i, cnt; /* extent record index */
709 xfs_ifork_t *ifp; /* inode fork pointer */
710 xfs_bmbt_key_t *kp; /* root block key pointer */
711 xfs_mount_t *mp; /* mount structure */
712 xfs_extnum_t nextents; /* number of file extents */
713 xfs_bmbt_ptr_t *pp; /* root block address pointer */
715 mp = ip->i_mount;
716 ASSERT(whichfork != XFS_COW_FORK);
717 ifp = XFS_IFORK_PTR(ip, whichfork);
718 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS);
721 * Make space in the inode incore.
723 xfs_iroot_realloc(ip, 1, whichfork);
724 ifp->if_flags |= XFS_IFBROOT;
727 * Fill in the root.
729 block = ifp->if_broot;
730 xfs_btree_init_block_int(mp, block, XFS_BUF_DADDR_NULL,
731 XFS_BTNUM_BMAP, 1, 1, ip->i_ino,
732 XFS_BTREE_LONG_PTRS);
734 * Need a cursor. Can't allocate until bb_level is filled in.
736 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
737 cur->bc_private.b.firstblock = *firstblock;
738 cur->bc_private.b.dfops = dfops;
739 cur->bc_private.b.flags = wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
741 * Convert to a btree with two levels, one record in root.
743 XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_BTREE);
744 memset(&args, 0, sizeof(args));
745 args.tp = tp;
746 args.mp = mp;
747 xfs_rmap_ino_bmbt_owner(&args.oinfo, ip->i_ino, whichfork);
748 args.firstblock = *firstblock;
749 if (*firstblock == NULLFSBLOCK) {
750 args.type = XFS_ALLOCTYPE_START_BNO;
751 args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino);
752 } else if (dfops->dop_low) {
753 args.type = XFS_ALLOCTYPE_START_BNO;
754 args.fsbno = *firstblock;
755 } else {
756 args.type = XFS_ALLOCTYPE_NEAR_BNO;
757 args.fsbno = *firstblock;
759 args.minlen = args.maxlen = args.prod = 1;
760 args.wasdel = wasdel;
761 *logflagsp = 0;
762 if ((error = xfs_alloc_vextent(&args))) {
763 xfs_iroot_realloc(ip, -1, whichfork);
764 ASSERT(ifp->if_broot == NULL);
765 XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
766 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
767 return error;
770 if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
771 xfs_iroot_realloc(ip, -1, whichfork);
772 ASSERT(ifp->if_broot == NULL);
773 XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
774 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
775 return -ENOSPC;
778 * Allocation can't fail, the space was reserved.
780 ASSERT(*firstblock == NULLFSBLOCK ||
781 args.agno >= XFS_FSB_TO_AGNO(mp, *firstblock));
782 *firstblock = cur->bc_private.b.firstblock = args.fsbno;
783 cur->bc_private.b.allocated++;
784 ip->i_d.di_nblocks++;
785 xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
786 abp = xfs_btree_get_bufl(mp, tp, args.fsbno, 0);
788 * Fill in the child block.
790 abp->b_ops = &xfs_bmbt_buf_ops;
791 ablock = XFS_BUF_TO_BLOCK(abp);
792 xfs_btree_init_block_int(mp, ablock, abp->b_bn,
793 XFS_BTNUM_BMAP, 0, 0, ip->i_ino,
794 XFS_BTREE_LONG_PTRS);
796 arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
797 nextents = xfs_iext_count(ifp);
798 for (cnt = i = 0; i < nextents; i++) {
799 ep = xfs_iext_get_ext(ifp, i);
800 if (!isnullstartblock(xfs_bmbt_get_startblock(ep))) {
801 arp->l0 = cpu_to_be64(ep->l0);
802 arp->l1 = cpu_to_be64(ep->l1);
803 arp++; cnt++;
806 ASSERT(cnt == XFS_IFORK_NEXTENTS(ip, whichfork));
807 xfs_btree_set_numrecs(ablock, cnt);
810 * Fill in the root key and pointer.
812 kp = XFS_BMBT_KEY_ADDR(mp, block, 1);
813 arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
814 kp->br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(arp));
815 pp = XFS_BMBT_PTR_ADDR(mp, block, 1, xfs_bmbt_get_maxrecs(cur,
816 be16_to_cpu(block->bb_level)));
817 *pp = cpu_to_be64(args.fsbno);
820 * Do all this logging at the end so that
821 * the root is at the right level.
823 xfs_btree_log_block(cur, abp, XFS_BB_ALL_BITS);
824 xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs));
825 ASSERT(*curp == NULL);
826 *curp = cur;
827 *logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork);
828 return 0;
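/*
 * Summary of the conversion above: grow an incore root (level 1, one
 * record), allocate exactly one block for the child leaf, copy every real
 * (non-delalloc) incore extent record into it, point the root's single
 * key/pointer at the new block, and log the child last so the root is at
 * its final level.
 */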
832 * Convert a local file to an extents file.
833 * This code is out of bounds for data forks of regular files,
834 * since the file data needs to get logged so things will stay consistent.
835 * (The bmap-level manipulations are ok, though).
837 void
838 xfs_bmap_local_to_extents_empty(
839 struct xfs_inode *ip,
840 int whichfork)
842 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
844 ASSERT(whichfork != XFS_COW_FORK);
845 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
846 ASSERT(ifp->if_bytes == 0);
847 ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) == 0);
849 xfs_bmap_forkoff_reset(ip, whichfork);
850 ifp->if_flags &= ~XFS_IFINLINE;
851 ifp->if_flags |= XFS_IFEXTENTS;
852 XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
856 STATIC int /* error */
857 xfs_bmap_local_to_extents(
858 xfs_trans_t *tp, /* transaction pointer */
859 xfs_inode_t *ip, /* incore inode pointer */
860 xfs_fsblock_t *firstblock, /* first block allocated in xaction */
861 xfs_extlen_t total, /* total blocks needed by transaction */
862 int *logflagsp, /* inode logging flags */
863 int whichfork,
864 void (*init_fn)(struct xfs_trans *tp,
865 struct xfs_buf *bp,
866 struct xfs_inode *ip,
867 struct xfs_ifork *ifp))
869 int error = 0;
870 int flags; /* logging flags returned */
871 xfs_ifork_t *ifp; /* inode fork pointer */
872 xfs_alloc_arg_t args; /* allocation arguments */
873 xfs_buf_t *bp; /* buffer for extent block */
874 struct xfs_bmbt_irec rec;
877 * We don't want to deal with the case of keeping inode data inline yet.
878 * So sending the data fork of a regular inode is invalid.
880 ASSERT(!(S_ISREG(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK));
881 ifp = XFS_IFORK_PTR(ip, whichfork);
882 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
884 if (!ifp->if_bytes) {
885 xfs_bmap_local_to_extents_empty(ip, whichfork);
886 flags = XFS_ILOG_CORE;
887 goto done;
890 flags = 0;
891 error = 0;
892 ASSERT((ifp->if_flags & (XFS_IFINLINE|XFS_IFEXTENTS|XFS_IFEXTIREC)) ==
893 XFS_IFINLINE);
894 memset(&args, 0, sizeof(args));
895 args.tp = tp;
896 args.mp = ip->i_mount;
897 xfs_rmap_ino_owner(&args.oinfo, ip->i_ino, whichfork, 0);
898 args.firstblock = *firstblock;
900 * Allocate a block. We know we need only one, since the
901 * file currently fits in an inode.
903 if (*firstblock == NULLFSBLOCK) {
904 args.fsbno = XFS_INO_TO_FSB(args.mp, ip->i_ino);
905 args.type = XFS_ALLOCTYPE_START_BNO;
906 } else {
907 args.fsbno = *firstblock;
908 args.type = XFS_ALLOCTYPE_NEAR_BNO;
910 args.total = total;
911 args.minlen = args.maxlen = args.prod = 1;
912 error = xfs_alloc_vextent(&args);
913 if (error)
914 goto done;
916 /* Can't fail, the space was reserved. */
917 ASSERT(args.fsbno != NULLFSBLOCK);
918 ASSERT(args.len == 1);
919 *firstblock = args.fsbno;
920 bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0);
923 * Initialize the block, copy the data and log the remote buffer.
925 * The callout is responsible for logging because the remote format
926 * might differ from the local format and thus we don't know how much to
927 * log here. Note that init_fn must also set the buffer log item type
928 * correctly.
930 init_fn(tp, bp, ip, ifp);
932 /* account for the change in fork size */
933 xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
934 xfs_bmap_local_to_extents_empty(ip, whichfork);
935 flags |= XFS_ILOG_CORE;
937 rec.br_startoff = 0;
938 rec.br_startblock = args.fsbno;
939 rec.br_blockcount = 1;
940 rec.br_state = XFS_EXT_NORM;
941 xfs_iext_insert(ip, 0, 1, &rec, 0);
943 trace_xfs_bmap_post_update(ip, 0,
944 whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0,
945 _THIS_IP_);
946 XFS_IFORK_NEXT_SET(ip, whichfork, 1);
947 ip->i_d.di_nblocks = 1;
948 xfs_trans_mod_dquot_byino(tp, ip,
949 XFS_TRANS_DQ_BCOUNT, 1L);
950 flags |= xfs_ilog_fext(whichfork);
952 done:
953 *logflagsp = flags;
954 return error;
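/*
 * To summarise: allocate the single block, let init_fn format and log it,
 * shrink the inline data away, and record one extent for file offset 0 in
 * the now extents-format fork.
 */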
958 * Called from xfs_bmap_add_attrfork to handle btree format files.
960 STATIC int /* error */
961 xfs_bmap_add_attrfork_btree(
962 xfs_trans_t *tp, /* transaction pointer */
963 xfs_inode_t *ip, /* incore inode pointer */
964 xfs_fsblock_t *firstblock, /* first block allocated */
965 struct xfs_defer_ops *dfops, /* blocks to free at commit */
966 int *flags) /* inode logging flags */
968 xfs_btree_cur_t *cur; /* btree cursor */
969 int error; /* error return value */
970 xfs_mount_t *mp; /* file system mount struct */
971 int stat; /* newroot status */
973 mp = ip->i_mount;
974 if (ip->i_df.if_broot_bytes <= XFS_IFORK_DSIZE(ip))
975 *flags |= XFS_ILOG_DBROOT;
976 else {
977 cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
978 cur->bc_private.b.dfops = dfops;
979 cur->bc_private.b.firstblock = *firstblock;
980 if ((error = xfs_bmbt_lookup_ge(cur, 0, 0, 0, &stat)))
981 goto error0;
982 /* must be at least one entry */
983 XFS_WANT_CORRUPTED_GOTO(mp, stat == 1, error0);
984 if ((error = xfs_btree_new_iroot(cur, flags, &stat)))
985 goto error0;
986 if (stat == 0) {
987 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
988 return -ENOSPC;
990 *firstblock = cur->bc_private.b.firstblock;
991 cur->bc_private.b.allocated = 0;
992 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
994 return 0;
995 error0:
996 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
997 return error;
1001 * Called from xfs_bmap_add_attrfork to handle extents format files.
1003 STATIC int /* error */
1004 xfs_bmap_add_attrfork_extents(
1005 xfs_trans_t *tp, /* transaction pointer */
1006 xfs_inode_t *ip, /* incore inode pointer */
1007 xfs_fsblock_t *firstblock, /* first block allocated */
1008 struct xfs_defer_ops *dfops, /* blocks to free at commit */
1009 int *flags) /* inode logging flags */
1011 xfs_btree_cur_t *cur; /* bmap btree cursor */
1012 int error; /* error return value */
1014 if (ip->i_d.di_nextents * sizeof(xfs_bmbt_rec_t) <= XFS_IFORK_DSIZE(ip))
1015 return 0;
1016 cur = NULL;
1017 error = xfs_bmap_extents_to_btree(tp, ip, firstblock, dfops, &cur, 0,
1018 flags, XFS_DATA_FORK);
1019 if (cur) {
1020 cur->bc_private.b.allocated = 0;
1021 xfs_btree_del_cursor(cur,
1022 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
1024 return error;
1028 * Called from xfs_bmap_add_attrfork to handle local format files. Each
1029 * different data fork content type needs a different callout to do the
1030 * conversion. Some are basic and only require special block initialisation
1031  * callouts for the data formatting, others (directories) are so specialised they
1032  * handle everything themselves.
1034  * XXX (dgc): investigate whether directory conversion can use the generic
1035  * formatting callout. It should be possible - it's just a very complex
1036  * formatter.
1038 STATIC int /* error */
1039 xfs_bmap_add_attrfork_local(
1040 xfs_trans_t *tp, /* transaction pointer */
1041 xfs_inode_t *ip, /* incore inode pointer */
1042 xfs_fsblock_t *firstblock, /* first block allocated */
1043 struct xfs_defer_ops *dfops, /* blocks to free at commit */
1044 int *flags) /* inode logging flags */
1046 xfs_da_args_t dargs; /* args for dir/attr code */
1048 if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip))
1049 return 0;
1051 if (S_ISDIR(VFS_I(ip)->i_mode)) {
1052 memset(&dargs, 0, sizeof(dargs));
1053 dargs.geo = ip->i_mount->m_dir_geo;
1054 dargs.dp = ip;
1055 dargs.firstblock = firstblock;
1056 dargs.dfops = dfops;
1057 dargs.total = dargs.geo->fsbcount;
1058 dargs.whichfork = XFS_DATA_FORK;
1059 dargs.trans = tp;
1060 return xfs_dir2_sf_to_block(&dargs);
1063 if (S_ISLNK(VFS_I(ip)->i_mode))
1064 return xfs_bmap_local_to_extents(tp, ip, firstblock, 1,
1065 flags, XFS_DATA_FORK,
1066 xfs_symlink_local_to_remote);
1068 /* should only be called for types that support local format data */
1069 ASSERT(0);
1070 return -EFSCORRUPTED;
1074 * Convert inode from non-attributed to attributed.
1075 * Must not be in a transaction, ip must not be locked.
1077 int /* error code */
1078 xfs_bmap_add_attrfork(
1079 xfs_inode_t *ip, /* incore inode pointer */
1080 int size, /* space new attribute needs */
1081 int rsvd) /* xact may use reserved blks */
1083 xfs_fsblock_t firstblock; /* 1st block/ag allocated */
1084 struct xfs_defer_ops dfops; /* freed extent records */
1085 xfs_mount_t *mp; /* mount structure */
1086 xfs_trans_t *tp; /* transaction pointer */
1087 int blks; /* space reservation */
1088 int version = 1; /* superblock attr version */
1089 int logflags; /* logging flags */
1090 int error; /* error return value */
1092 ASSERT(XFS_IFORK_Q(ip) == 0);
1094 mp = ip->i_mount;
1095 ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
1097 blks = XFS_ADDAFORK_SPACE_RES(mp);
1099 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_addafork, blks, 0,
1100 rsvd ? XFS_TRANS_RESERVE : 0, &tp);
1101 if (error)
1102 return error;
1104 xfs_ilock(ip, XFS_ILOCK_EXCL);
1105 error = xfs_trans_reserve_quota_nblks(tp, ip, blks, 0, rsvd ?
1106 XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
1107 XFS_QMOPT_RES_REGBLKS);
1108 if (error)
1109 goto trans_cancel;
1110 if (XFS_IFORK_Q(ip))
1111 goto trans_cancel;
1112 if (ip->i_d.di_anextents != 0) {
1113 error = -EFSCORRUPTED;
1114 goto trans_cancel;
1116 if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS) {
1118 * For inodes coming from pre-6.2 filesystems.
1120 ASSERT(ip->i_d.di_aformat == 0);
1121 ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
1124 xfs_trans_ijoin(tp, ip, 0);
1125 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1127 switch (ip->i_d.di_format) {
1128 case XFS_DINODE_FMT_DEV:
1129 ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
1130 break;
1131 case XFS_DINODE_FMT_UUID:
1132 ip->i_d.di_forkoff = roundup(sizeof(uuid_t), 8) >> 3;
1133 break;
1134 case XFS_DINODE_FMT_LOCAL:
1135 case XFS_DINODE_FMT_EXTENTS:
1136 case XFS_DINODE_FMT_BTREE:
1137 ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size);
1138 if (!ip->i_d.di_forkoff)
1139 ip->i_d.di_forkoff = xfs_default_attroffset(ip) >> 3;
1140 else if (mp->m_flags & XFS_MOUNT_ATTR2)
1141 version = 2;
1142 break;
1143 default:
1144 ASSERT(0);
1145 error = -EINVAL;
1146 goto trans_cancel;
1149 ASSERT(ip->i_afp == NULL);
1150 ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
1151 ip->i_afp->if_flags = XFS_IFEXTENTS;
1152 logflags = 0;
1153 xfs_defer_init(&dfops, &firstblock);
1154 switch (ip->i_d.di_format) {
1155 case XFS_DINODE_FMT_LOCAL:
1156 error = xfs_bmap_add_attrfork_local(tp, ip, &firstblock, &dfops,
1157 &logflags);
1158 break;
1159 case XFS_DINODE_FMT_EXTENTS:
1160 error = xfs_bmap_add_attrfork_extents(tp, ip, &firstblock,
1161 &dfops, &logflags);
1162 break;
1163 case XFS_DINODE_FMT_BTREE:
1164 error = xfs_bmap_add_attrfork_btree(tp, ip, &firstblock, &dfops,
1165 &logflags);
1166 break;
1167 default:
1168 error = 0;
1169 break;
1171 if (logflags)
1172 xfs_trans_log_inode(tp, ip, logflags);
1173 if (error)
1174 goto bmap_cancel;
1175 if (!xfs_sb_version_hasattr(&mp->m_sb) ||
1176 (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2)) {
1177 bool log_sb = false;
1179 spin_lock(&mp->m_sb_lock);
1180 if (!xfs_sb_version_hasattr(&mp->m_sb)) {
1181 xfs_sb_version_addattr(&mp->m_sb);
1182 log_sb = true;
1184 if (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2) {
1185 xfs_sb_version_addattr2(&mp->m_sb);
1186 log_sb = true;
1188 spin_unlock(&mp->m_sb_lock);
1189 if (log_sb)
1190 xfs_log_sb(tp);
1193 error = xfs_defer_finish(&tp, &dfops);
1194 if (error)
1195 goto bmap_cancel;
1196 error = xfs_trans_commit(tp);
1197 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1198 return error;
1200 bmap_cancel:
1201 xfs_defer_cancel(&dfops);
1202 trans_cancel:
1203 xfs_trans_cancel(tp);
1204 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1205 return error;
1209 * Internal and external extent tree search functions.
1213 * Read in the extents to if_extents.
1214 * All inode fields are set up by caller, we just traverse the btree
1215 * and copy the records in. If the file system cannot contain unwritten
1216 * extents, the records are checked for no "state" flags.
1218 int /* error */
1219 xfs_bmap_read_extents(
1220 xfs_trans_t *tp, /* transaction pointer */
1221 xfs_inode_t *ip, /* incore inode */
1222 int whichfork) /* data or attr fork */
1224 struct xfs_btree_block *block; /* current btree block */
1225 xfs_fsblock_t bno; /* block # of "block" */
1226 xfs_buf_t *bp; /* buffer for "block" */
1227 int error; /* error return value */
1228 xfs_extnum_t i, j; /* index into the extents list */
1229 xfs_ifork_t *ifp; /* fork structure */
1230 int level; /* btree level, for checking */
1231 xfs_mount_t *mp; /* file system mount structure */
1232 __be64 *pp; /* pointer to block address */
1233 /* REFERENCED */
1234 xfs_extnum_t room; /* number of entries there's room for */
1236 mp = ip->i_mount;
1237 ifp = XFS_IFORK_PTR(ip, whichfork);
1238 block = ifp->if_broot;
1240 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
1242 level = be16_to_cpu(block->bb_level);
1243 ASSERT(level > 0);
1244 pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
1245 bno = be64_to_cpu(*pp);
1248 * Go down the tree until leaf level is reached, following the first
1249 * pointer (leftmost) at each level.
1251 while (level-- > 0) {
1252 error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
1253 XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
1254 if (error)
1255 return error;
1256 block = XFS_BUF_TO_BLOCK(bp);
1257 if (level == 0)
1258 break;
1259 pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
1260 bno = be64_to_cpu(*pp);
1261 XFS_WANT_CORRUPTED_GOTO(mp,
1262 XFS_FSB_SANITY_CHECK(mp, bno), error0);
1263 xfs_trans_brelse(tp, bp);
1266 * Here with bp and block set to the leftmost leaf node in the tree.
1268 room = xfs_iext_count(ifp);
1269 i = 0;
1271 * Loop over all leaf nodes. Copy information to the extent records.
1273 for (;;) {
1274 xfs_bmbt_rec_t *frp;
1275 xfs_fsblock_t nextbno;
1276 xfs_extnum_t num_recs;
1278 num_recs = xfs_btree_get_numrecs(block);
1279 if (unlikely(i + num_recs > room)) {
1280 ASSERT(i + num_recs <= room);
1281 xfs_warn(ip->i_mount,
1282 "corrupt dinode %Lu, (btree extents).",
1283 (unsigned long long) ip->i_ino);
1284 XFS_CORRUPTION_ERROR("xfs_bmap_read_extents(1)",
1285 XFS_ERRLEVEL_LOW, ip->i_mount, block);
1286 goto error0;
1289 * Read-ahead the next leaf block, if any.
1291 nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
1292 if (nextbno != NULLFSBLOCK)
1293 xfs_btree_reada_bufl(mp, nextbno, 1,
1294 &xfs_bmbt_buf_ops);
1296 * Copy records into the extent records.
1298 frp = XFS_BMBT_REC_ADDR(mp, block, 1);
1299 for (j = 0; j < num_recs; j++, i++, frp++) {
1300 xfs_bmbt_rec_host_t *trp = xfs_iext_get_ext(ifp, i);
1301 trp->l0 = be64_to_cpu(frp->l0);
1302 trp->l1 = be64_to_cpu(frp->l1);
1303 if (!xfs_bmbt_validate_extent(mp, whichfork, trp)) {
1304 XFS_ERROR_REPORT("xfs_bmap_read_extents(2)",
1305 XFS_ERRLEVEL_LOW, mp);
1306 goto error0;
1309 xfs_trans_brelse(tp, bp);
1310 bno = nextbno;
1312 * If we've reached the end, stop.
1314 if (bno == NULLFSBLOCK)
1315 break;
1316 error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
1317 XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
1318 if (error)
1319 return error;
1320 block = XFS_BUF_TO_BLOCK(bp);
1322 if (i != XFS_IFORK_NEXTENTS(ip, whichfork))
1323 return -EFSCORRUPTED;
1324 ASSERT(i == xfs_iext_count(ifp));
1325 XFS_BMAP_TRACE_EXLIST(ip, i, whichfork);
1326 return 0;
1327 error0:
1328 xfs_trans_brelse(tp, bp);
1329 return -EFSCORRUPTED;
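/*
 * The walk above descends the leftmost path to the first leaf and then
 * follows bb_rightsib from leaf to leaf, validating and copying each
 * record into the incore extent list and issuing read-ahead for the next
 * leaf as it goes.
 */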
1333 * Returns the file-relative block number of the first unused block(s)
1334 * in the file with at least "len" logically contiguous blocks free.
1335 * This is the lowest-address hole if the file has holes, else the first block
1336 * past the end of file.
1337 * Return 0 if the file is currently local (in-inode).
1339 int /* error */
1340 xfs_bmap_first_unused(
1341 xfs_trans_t *tp, /* transaction pointer */
1342 xfs_inode_t *ip, /* incore inode */
1343 xfs_extlen_t len, /* size of hole to find */
1344 xfs_fileoff_t *first_unused, /* unused block */
1345 int whichfork) /* data or attr fork */
1347 int error; /* error return value */
1348 int idx; /* extent record index */
1349 xfs_ifork_t *ifp; /* inode fork pointer */
1350 xfs_fileoff_t lastaddr; /* last block number seen */
1351 xfs_fileoff_t lowest; /* lowest useful block */
1352 xfs_fileoff_t max; /* starting useful block */
1353 xfs_extnum_t nextents; /* number of extent entries */
1355 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE ||
1356 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ||
1357 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
1358 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
1359 *first_unused = 0;
1360 return 0;
1362 ifp = XFS_IFORK_PTR(ip, whichfork);
1363 if (!(ifp->if_flags & XFS_IFEXTENTS) &&
1364 (error = xfs_iread_extents(tp, ip, whichfork)))
1365 return error;
1366 lowest = *first_unused;
1367 nextents = xfs_iext_count(ifp);
1368 for (idx = 0, lastaddr = 0, max = lowest; idx < nextents; idx++) {
1369 struct xfs_bmbt_irec got;
1371 xfs_iext_get_extent(ifp, idx, &got);
1374 * See if the hole before this extent will work.
1376 if (got.br_startoff >= lowest + len &&
1377 got.br_startoff - max >= len) {
1378 *first_unused = max;
1379 return 0;
1381 lastaddr = got.br_startoff + got.br_blockcount;
1382 max = XFS_FILEOFF_MAX(lastaddr, lowest);
1384 *first_unused = max;
1385 return 0;
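/*
 * The scan above tracks "max" as the lowest offset not yet known to be in
 * use (never below the caller's hint in *first_unused); a hole is taken as
 * soon as the next extent starts at least "len" blocks beyond it,
 * otherwise the result is the offset just past the last extent.
 */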
1389 * Returns the file-relative block number of the last block - 1 before
1390 * last_block (input value) in the file.
1391 * This is not based on i_size, it is based on the extent records.
1392 * Returns 0 for local files, as they do not have extent records.
1394 int /* error */
1395 xfs_bmap_last_before(
1396 struct xfs_trans *tp, /* transaction pointer */
1397 struct xfs_inode *ip, /* incore inode */
1398 xfs_fileoff_t *last_block, /* last block */
1399 int whichfork) /* data or attr fork */
1401 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
1402 struct xfs_bmbt_irec got;
1403 xfs_extnum_t idx;
1404 int error;
1406 switch (XFS_IFORK_FORMAT(ip, whichfork)) {
1407 case XFS_DINODE_FMT_LOCAL:
1408 *last_block = 0;
1409 return 0;
1410 case XFS_DINODE_FMT_BTREE:
1411 case XFS_DINODE_FMT_EXTENTS:
1412 break;
1413 default:
1414 return -EIO;
1417 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
1418 error = xfs_iread_extents(tp, ip, whichfork);
1419 if (error)
1420 return error;
1423 if (xfs_iext_lookup_extent(ip, ifp, *last_block - 1, &idx, &got)) {
1424 if (got.br_startoff <= *last_block - 1)
1425 return 0;
1428 if (xfs_iext_get_extent(ifp, idx - 1, &got)) {
1429 *last_block = got.br_startoff + got.br_blockcount;
1430 return 0;
1433 *last_block = 0;
1434 return 0;
1438 xfs_bmap_last_extent(
1439 struct xfs_trans *tp,
1440 struct xfs_inode *ip,
1441 int whichfork,
1442 struct xfs_bmbt_irec *rec,
1443 int *is_empty)
1445 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
1446 int error;
1447 int nextents;
1449 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
1450 error = xfs_iread_extents(tp, ip, whichfork);
1451 if (error)
1452 return error;
1455 nextents = xfs_iext_count(ifp);
1456 if (nextents == 0) {
1457 *is_empty = 1;
1458 return 0;
1461 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, nextents - 1), rec);
1462 *is_empty = 0;
1463 return 0;
1467 * Check the last inode extent to determine whether this allocation will result
1468 * in blocks being allocated at the end of the file. When we allocate new data
1469 * blocks at the end of the file which do not start at the previous data block,
1470 * we will try to align the new blocks at stripe unit boundaries.
1472 * Returns 1 in bma->aeof if the file (fork) is empty as any new write will be
1473 * at, or past the EOF.
1475 STATIC int
1476 xfs_bmap_isaeof(
1477 struct xfs_bmalloca *bma,
1478 int whichfork)
1480 struct xfs_bmbt_irec rec;
1481 int is_empty;
1482 int error;
1484 bma->aeof = false;
1485 error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec,
1486 &is_empty);
1487 if (error)
1488 return error;
1490 if (is_empty) {
1491 bma->aeof = true;
1492 return 0;
1496  * Check if we are allocating at or past the last extent, or at least into
1497 * the last delayed allocated extent.
1499 bma->aeof = bma->offset >= rec.br_startoff + rec.br_blockcount ||
1500 (bma->offset >= rec.br_startoff &&
1501 isnullstartblock(rec.br_startblock));
1502 return 0;
1506 * Returns the file-relative block number of the first block past eof in
1507 * the file. This is not based on i_size, it is based on the extent records.
1508 * Returns 0 for local files, as they do not have extent records.
1511 xfs_bmap_last_offset(
1512 struct xfs_inode *ip,
1513 xfs_fileoff_t *last_block,
1514 int whichfork)
1516 struct xfs_bmbt_irec rec;
1517 int is_empty;
1518 int error;
1520 *last_block = 0;
1522 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL)
1523 return 0;
1525 if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
1526 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
1527 return -EIO;
1529 error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty);
1530 if (error || is_empty)
1531 return error;
1533 *last_block = rec.br_startoff + rec.br_blockcount;
1534 return 0;
1538 * Returns whether the selected fork of the inode has exactly one
1539 * block or not. For the data fork we check this matches di_size,
1540 * implying the file's range is 0..bsize-1.
1542 int /* 1=>1 block, 0=>otherwise */
1543 xfs_bmap_one_block(
1544 xfs_inode_t *ip, /* incore inode */
1545 int whichfork) /* data or attr fork */
1547 xfs_bmbt_rec_host_t *ep; /* ptr to fork's extent */
1548 xfs_ifork_t *ifp; /* inode fork pointer */
1549 int rval; /* return value */
1550 xfs_bmbt_irec_t s; /* internal version of extent */
1552 #ifndef DEBUG
1553 if (whichfork == XFS_DATA_FORK)
1554 return XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize;
1555 #endif /* !DEBUG */
1556 if (XFS_IFORK_NEXTENTS(ip, whichfork) != 1)
1557 return 0;
1558 if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
1559 return 0;
1560 ifp = XFS_IFORK_PTR(ip, whichfork);
1561 ASSERT(ifp->if_flags & XFS_IFEXTENTS);
1562 ep = xfs_iext_get_ext(ifp, 0);
1563 xfs_bmbt_get_all(ep, &s);
1564 rval = s.br_startoff == 0 && s.br_blockcount == 1;
1565 if (rval && whichfork == XFS_DATA_FORK)
1566 ASSERT(XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize);
1567 return rval;
1571 * Extent tree manipulation functions used during allocation.
1575 * Convert a delayed allocation to a real allocation.
1577 STATIC int /* error */
1578 xfs_bmap_add_extent_delay_real(
1579 struct xfs_bmalloca *bma,
1580 int whichfork)
1582 struct xfs_bmbt_irec *new = &bma->got;
1583 int diff; /* temp value */
1584 xfs_bmbt_rec_host_t *ep; /* extent entry for idx */
1585 int error; /* error return value */
1586 int i; /* temp state */
1587 xfs_ifork_t *ifp; /* inode fork pointer */
1588 xfs_fileoff_t new_endoff; /* end offset of new entry */
1589 xfs_bmbt_irec_t r[3]; /* neighbor extent entries */
1590 /* left is 0, right is 1, prev is 2 */
1591 int rval=0; /* return value (logging flags) */
1592 int state = 0;/* state bits, accessed thru macros */
1593 xfs_filblks_t da_new; /* new count del alloc blocks used */
1594 xfs_filblks_t da_old; /* old count del alloc blocks used */
1595 xfs_filblks_t temp=0; /* value for da_new calculations */
1596 xfs_filblks_t temp2=0;/* value for da_new calculations */
1597 int tmp_rval; /* partial logging flags */
1598 struct xfs_mount *mp;
1599 xfs_extnum_t *nextents;
1601 mp = bma->ip->i_mount;
1602 ifp = XFS_IFORK_PTR(bma->ip, whichfork);
1603 ASSERT(whichfork != XFS_ATTR_FORK);
1604 nextents = (whichfork == XFS_COW_FORK ? &bma->ip->i_cnextents :
1605 &bma->ip->i_d.di_nextents);
1607 ASSERT(bma->idx >= 0);
1608 ASSERT(bma->idx <= xfs_iext_count(ifp));
1609 ASSERT(!isnullstartblock(new->br_startblock));
1610 ASSERT(!bma->cur ||
1611 (bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));
1613 XFS_STATS_INC(mp, xs_add_exlist);
1615 #define LEFT r[0]
1616 #define RIGHT r[1]
1617 #define PREV r[2]
1619 if (whichfork == XFS_COW_FORK)
1620 state |= BMAP_COWFORK;
1623 * Set up a bunch of variables to make the tests simpler.
1625 ep = xfs_iext_get_ext(ifp, bma->idx);
1626 xfs_bmbt_get_all(ep, &PREV);
1627 new_endoff = new->br_startoff + new->br_blockcount;
1628 ASSERT(PREV.br_startoff <= new->br_startoff);
1629 ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
1631 da_old = startblockval(PREV.br_startblock);
1632 da_new = 0;
1635 * Set flags determining what part of the previous delayed allocation
1636 * extent is being replaced by a real allocation.
1638 if (PREV.br_startoff == new->br_startoff)
1639 state |= BMAP_LEFT_FILLING;
1640 if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
1641 state |= BMAP_RIGHT_FILLING;
1644 * Check and set flags if this segment has a left neighbor.
1645 * Don't set contiguous if the combined extent would be too large.
1647 if (bma->idx > 0) {
1648 state |= BMAP_LEFT_VALID;
1649 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1), &LEFT);
1651 if (isnullstartblock(LEFT.br_startblock))
1652 state |= BMAP_LEFT_DELAY;
1655 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
1656 LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
1657 LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
1658 LEFT.br_state == new->br_state &&
1659 LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
1660 state |= BMAP_LEFT_CONTIG;
1663 * Check and set flags if this segment has a right neighbor.
1664 * Don't set contiguous if the combined extent would be too large.
1665 * Also check for all-three-contiguous being too large.
1667 if (bma->idx < xfs_iext_count(ifp) - 1) {
1668 state |= BMAP_RIGHT_VALID;
1669 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx + 1), &RIGHT);
1671 if (isnullstartblock(RIGHT.br_startblock))
1672 state |= BMAP_RIGHT_DELAY;
1675 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
1676 new_endoff == RIGHT.br_startoff &&
1677 new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
1678 new->br_state == RIGHT.br_state &&
1679 new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
1680 ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
1681 BMAP_RIGHT_FILLING)) !=
1682 (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
1683 BMAP_RIGHT_FILLING) ||
1684 LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
1685 <= MAXEXTLEN))
1686 state |= BMAP_RIGHT_CONTIG;
1688 error = 0;
1690 * Switch out based on the FILLING and CONTIG state bits.
1692 switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
1693 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
1694 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
1695 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1697 * Filling in all of a previously delayed allocation extent.
1698 * The left and right neighbors are both contiguous with new.
1700 bma->idx--;
1701 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
1702 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
1703 LEFT.br_blockcount + PREV.br_blockcount +
1704 RIGHT.br_blockcount);
1705 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
1707 xfs_iext_remove(bma->ip, bma->idx + 1, 2, state);
1708 (*nextents)--;
1709 if (bma->cur == NULL)
1710 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1711 else {
1712 rval = XFS_ILOG_CORE;
1713 error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff,
1714 RIGHT.br_startblock,
1715 RIGHT.br_blockcount, &i);
1716 if (error)
1717 goto done;
1718 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1719 error = xfs_btree_delete(bma->cur, &i);
1720 if (error)
1721 goto done;
1722 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1723 error = xfs_btree_decrement(bma->cur, 0, &i);
1724 if (error)
1725 goto done;
1726 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1727 error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
1728 LEFT.br_startblock,
1729 LEFT.br_blockcount +
1730 PREV.br_blockcount +
1731 RIGHT.br_blockcount, LEFT.br_state);
1732 if (error)
1733 goto done;
1735 break;
1737 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
1739 * Filling in all of a previously delayed allocation extent.
1740 * The left neighbor is contiguous, the right is not.
1742 bma->idx--;
1744 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
1745 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
1746 LEFT.br_blockcount + PREV.br_blockcount);
1747 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
1749 xfs_iext_remove(bma->ip, bma->idx + 1, 1, state);
1750 if (bma->cur == NULL)
1751 rval = XFS_ILOG_DEXT;
1752 else {
1753 rval = 0;
1754 error = xfs_bmbt_lookup_eq(bma->cur, LEFT.br_startoff,
1755 LEFT.br_startblock, LEFT.br_blockcount,
1756 &i);
1757 if (error)
1758 goto done;
1759 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1760 error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
1761 LEFT.br_startblock,
1762 LEFT.br_blockcount +
1763 PREV.br_blockcount, LEFT.br_state);
1764 if (error)
1765 goto done;
1767 break;
1769 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1771 * Filling in all of a previously delayed allocation extent.
1772 * The right neighbor is contiguous, the left is not.
1774 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
1775 xfs_bmbt_set_startblock(ep, new->br_startblock);
1776 xfs_bmbt_set_blockcount(ep,
1777 PREV.br_blockcount + RIGHT.br_blockcount);
1778 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
1780 xfs_iext_remove(bma->ip, bma->idx + 1, 1, state);
1781 if (bma->cur == NULL)
1782 rval = XFS_ILOG_DEXT;
1783 else {
1784 rval = 0;
1785 error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff,
1786 RIGHT.br_startblock,
1787 RIGHT.br_blockcount, &i);
1788 if (error)
1789 goto done;
1790 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1791 error = xfs_bmbt_update(bma->cur, PREV.br_startoff,
1792 new->br_startblock,
1793 PREV.br_blockcount +
1794 RIGHT.br_blockcount, PREV.br_state);
1795 if (error)
1796 goto done;
1798 break;
1800 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
1802 * Filling in all of a previously delayed allocation extent.
1803 * Neither the left nor right neighbors are contiguous with
1804 * the new one.
1806 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
1807 xfs_bmbt_set_startblock(ep, new->br_startblock);
1808 xfs_bmbt_set_state(ep, new->br_state);
1809 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
1811 (*nextents)++;
1812 if (bma->cur == NULL)
1813 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1814 else {
1815 rval = XFS_ILOG_CORE;
1816 error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
1817 new->br_startblock, new->br_blockcount,
1818 &i);
1819 if (error)
1820 goto done;
1821 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
1822 bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
1823 error = xfs_btree_insert(bma->cur, &i);
1824 if (error)
1825 goto done;
1826 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1828 break;
1830 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
1832 * Filling in the first part of a previous delayed allocation.
1833 * The left neighbor is contiguous.
1835 trace_xfs_bmap_pre_update(bma->ip, bma->idx - 1, state, _THIS_IP_);
1836 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx - 1),
1837 LEFT.br_blockcount + new->br_blockcount);
1838 xfs_bmbt_set_startoff(ep,
1839 PREV.br_startoff + new->br_blockcount);
1840 trace_xfs_bmap_post_update(bma->ip, bma->idx - 1, state, _THIS_IP_);
1842 temp = PREV.br_blockcount - new->br_blockcount;
1843 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
1844 xfs_bmbt_set_blockcount(ep, temp);
1845 if (bma->cur == NULL)
1846 rval = XFS_ILOG_DEXT;
1847 else {
1848 rval = 0;
1849 error = xfs_bmbt_lookup_eq(bma->cur, LEFT.br_startoff,
1850 LEFT.br_startblock, LEFT.br_blockcount,
1851 &i);
1852 if (error)
1853 goto done;
1854 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1855 error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
1856 LEFT.br_startblock,
1857 LEFT.br_blockcount +
1858 new->br_blockcount,
1859 LEFT.br_state);
1860 if (error)
1861 goto done;
1863 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
1864 startblockval(PREV.br_startblock));
1865 xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
1866 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
1868 bma->idx--;
1869 break;
1871 case BMAP_LEFT_FILLING:
1873 * Filling in the first part of a previous delayed allocation.
1874 * The left neighbor is not contiguous.
1876 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
1877 xfs_bmbt_set_startoff(ep, new_endoff);
1878 temp = PREV.br_blockcount - new->br_blockcount;
1879 xfs_bmbt_set_blockcount(ep, temp);
1880 xfs_iext_insert(bma->ip, bma->idx, 1, new, state);
1881 (*nextents)++;
1882 if (bma->cur == NULL)
1883 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1884 else {
1885 rval = XFS_ILOG_CORE;
1886 error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
1887 new->br_startblock, new->br_blockcount,
1888 &i);
1889 if (error)
1890 goto done;
1891 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
1892 bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
1893 error = xfs_btree_insert(bma->cur, &i);
1894 if (error)
1895 goto done;
1896 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1899 if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
1900 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
1901 bma->firstblock, bma->dfops,
1902 &bma->cur, 1, &tmp_rval, whichfork);
1903 rval |= tmp_rval;
1904 if (error)
1905 goto done;
1907 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
1908 startblockval(PREV.br_startblock) -
1909 (bma->cur ? bma->cur->bc_private.b.allocated : 0));
1910 ep = xfs_iext_get_ext(ifp, bma->idx + 1);
1911 xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
1912 trace_xfs_bmap_post_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
1913 break;
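/*
 * In the non-contiguous first-part case above, inserting "new" at idx
 * shifts the remaining delayed extent to idx + 1, which is why ep is
 * re-read before its startblock is rewritten.  The extent count grew, so
 * the fork may have to be converted to btree format here, and any blocks
 * the new cursor consumed (bc_private.b.allocated) are deducted from what
 * the delayed remainder is allowed to keep reserved.
 */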
1915 case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1917 * Filling in the last part of a previous delayed allocation.
1918 * The right neighbor is contiguous with the new allocation.
1920 temp = PREV.br_blockcount - new->br_blockcount;
1921 trace_xfs_bmap_pre_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
1922 xfs_bmbt_set_blockcount(ep, temp);
1923 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, bma->idx + 1),
1924 new->br_startoff, new->br_startblock,
1925 new->br_blockcount + RIGHT.br_blockcount,
1926 RIGHT.br_state);
1927 trace_xfs_bmap_post_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
1928 if (bma->cur == NULL)
1929 rval = XFS_ILOG_DEXT;
1930 else {
1931 rval = 0;
1932 error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff,
1933 RIGHT.br_startblock,
1934 RIGHT.br_blockcount, &i);
1935 if (error)
1936 goto done;
1937 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1938 error = xfs_bmbt_update(bma->cur, new->br_startoff,
1939 new->br_startblock,
1940 new->br_blockcount +
1941 RIGHT.br_blockcount,
1942 RIGHT.br_state);
1943 if (error)
1944 goto done;
1947 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
1948 startblockval(PREV.br_startblock));
1949 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
1950 xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
1951 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
1953 bma->idx++;
1954 break;
1956 case BMAP_RIGHT_FILLING:
1958 * Filling in the last part of a previous delayed allocation.
1959 * The right neighbor is not contiguous.
1961 temp = PREV.br_blockcount - new->br_blockcount;
1962 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
1963 xfs_bmbt_set_blockcount(ep, temp);
1964 xfs_iext_insert(bma->ip, bma->idx + 1, 1, new, state);
1965 (*nextents)++;
1966 if (bma->cur == NULL)
1967 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1968 else {
1969 rval = XFS_ILOG_CORE;
1970 error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
1971 new->br_startblock, new->br_blockcount,
1972 &i);
1973 if (error)
1974 goto done;
1975 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
1976 bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
1977 error = xfs_btree_insert(bma->cur, &i);
1978 if (error)
1979 goto done;
1980 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1983 if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
1984 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
1985 bma->firstblock, bma->dfops, &bma->cur, 1,
1986 &tmp_rval, whichfork);
1987 rval |= tmp_rval;
1988 if (error)
1989 goto done;
1991 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
1992 startblockval(PREV.br_startblock) -
1993 (bma->cur ? bma->cur->bc_private.b.allocated : 0));
1994 ep = xfs_iext_get_ext(ifp, bma->idx);
1995 xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
1996 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
1998 bma->idx++;
1999 break;
2001 case 0:
2003 * Filling in the middle part of a previous delayed allocation.
2004 * Contiguity is impossible here.
2005 * This case is avoided almost all the time.
2007 * We start with a delayed allocation:
2009 * +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+
2010 *  PREV @ idx
2012 * and we are allocating:
2013 *                     +rrrrrrrrrrrrrrrrr+
2014 *                            new
2016 * and we set it up for insertion as:
2017 * +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+
2018 *                            new
2019 *  PREV @ idx          LEFT              RIGHT
2020 *                      inserted at idx + 1
2022 temp = new->br_startoff - PREV.br_startoff;
2023 temp2 = PREV.br_startoff + PREV.br_blockcount - new_endoff;
2024 trace_xfs_bmap_pre_update(bma->ip, bma->idx, 0, _THIS_IP_);
2025 xfs_bmbt_set_blockcount(ep, temp); /* truncate PREV */
2026 LEFT = *new;
2027 RIGHT.br_state = PREV.br_state;
2028 RIGHT.br_startblock = nullstartblock(
2029 (int)xfs_bmap_worst_indlen(bma->ip, temp2));
2030 RIGHT.br_startoff = new_endoff;
2031 RIGHT.br_blockcount = temp2;
2032 /* insert LEFT (r[0]) and RIGHT (r[1]) at the same time */
2033 xfs_iext_insert(bma->ip, bma->idx + 1, 2, &LEFT, state);
2034 (*nextents)++;
2035 if (bma->cur == NULL)
2036 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2037 else {
2038 rval = XFS_ILOG_CORE;
2039 error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
2040 new->br_startblock, new->br_blockcount,
2041 &i);
2042 if (error)
2043 goto done;
2044 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
2045 bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
2046 error = xfs_btree_insert(bma->cur, &i);
2047 if (error)
2048 goto done;
2049 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2052 if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
2053 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
2054 bma->firstblock, bma->dfops, &bma->cur,
2055 1, &tmp_rval, whichfork);
2056 rval |= tmp_rval;
2057 if (error)
2058 goto done;
2060 temp = xfs_bmap_worst_indlen(bma->ip, temp);
2061 temp2 = xfs_bmap_worst_indlen(bma->ip, temp2);
2062 diff = (int)(temp + temp2 -
2063 (startblockval(PREV.br_startblock) -
2064 (bma->cur ?
2065 bma->cur->bc_private.b.allocated : 0)));
2066 if (diff > 0) {
2067 error = xfs_mod_fdblocks(bma->ip->i_mount,
2068 -((int64_t)diff), false);
2069 ASSERT(!error);
2070 if (error)
2071 goto done;
2074 ep = xfs_iext_get_ext(ifp, bma->idx);
2075 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
2076 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
2077 trace_xfs_bmap_pre_update(bma->ip, bma->idx + 2, state, _THIS_IP_);
2078 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, bma->idx + 2),
2079 nullstartblock((int)temp2));
2080 trace_xfs_bmap_post_update(bma->ip, bma->idx + 2, state, _THIS_IP_);
2082 bma->idx++;
2083 da_new = temp + temp2;
2084 break;
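/*
 * Worked example for the middle-split accounting (illustrative numbers):
 * suppose PREV reserved startblockval(PREV) = 8 indirect blocks, and the
 * two delayed pieces left over now each need a worst case of 5, so
 * diff = (5 + 5) - 8 = 2.  Those two extra blocks are taken straight from
 * the global free-block counter by the negative xfs_mod_fdblocks() call
 * above; if diff were zero or negative, nothing is taken here and any
 * surplus is handed back by the common tail below.
 */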
2086 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2087 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2088 case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
2089 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
2090 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2091 case BMAP_LEFT_CONTIG:
2092 case BMAP_RIGHT_CONTIG:
2094 * These cases are all impossible.
2096 ASSERT(0);
2099 /* add reverse mapping */
2100 error = xfs_rmap_map_extent(mp, bma->dfops, bma->ip, whichfork, new);
2101 if (error)
2102 goto done;
2104 /* convert to a btree if necessary */
2105 if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
2106 int tmp_logflags; /* partial log flag return val */
2108 ASSERT(bma->cur == NULL);
2109 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
2110 bma->firstblock, bma->dfops, &bma->cur,
2111 da_old > 0, &tmp_logflags, whichfork);
2112 bma->logflags |= tmp_logflags;
2113 if (error)
2114 goto done;
2117 /* adjust for changes in reserved delayed indirect blocks */
2118 if (da_old || da_new) {
2119 temp = da_new;
2120 if (bma->cur)
2121 temp += bma->cur->bc_private.b.allocated;
2122 if (temp < da_old)
2123 xfs_mod_fdblocks(bma->ip->i_mount,
2124 (int64_t)(da_old - temp), false);
2127 /* clear out the allocated field, done with it now in any case. */
2128 if (bma->cur)
2129 bma->cur->bc_private.b.allocated = 0;
2131 xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork);
2132 done:
2133 if (whichfork != XFS_COW_FORK)
2134 bma->logflags |= rval;
2135 return error;
2136 #undef LEFT
2137 #undef RIGHT
2138 #undef PREV
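/*
 * Every case above funnels into the same tail: add a reverse-mapping
 * record for the new real extent, convert the fork to btree format if
 * xfs_bmap_needs_btree() says the extent list has outgrown the inode,
 * give back whatever part of the old indirect reservation (da_old) is no
 * longer needed by the remaining delayed pieces or the btree cursor, and
 * finally fold rval into bma->logflags -- but only for non-COW forks.
 */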
2142 * Convert an unwritten allocation to a real allocation or vice versa.
2144 STATIC int /* error */
2145 xfs_bmap_add_extent_unwritten_real(
2146 struct xfs_trans *tp,
2147 xfs_inode_t *ip, /* incore inode pointer */
2148 int whichfork,
2149 xfs_extnum_t *idx, /* extent number to update/insert */
2150 xfs_btree_cur_t **curp, /* if *curp is null, not a btree */
2151 xfs_bmbt_irec_t *new, /* new data to add to file extents */
2152 xfs_fsblock_t *first, /* pointer to firstblock variable */
2153 struct xfs_defer_ops *dfops, /* list of extents to be freed */
2154 int *logflagsp) /* inode logging flags */
2156 xfs_btree_cur_t *cur; /* btree cursor */
2157 xfs_bmbt_rec_host_t *ep; /* extent entry for idx */
2158 int error; /* error return value */
2159 int i; /* temp state */
2160 xfs_ifork_t *ifp; /* inode fork pointer */
2161 xfs_fileoff_t new_endoff; /* end offset of new entry */
2162 xfs_exntst_t newext; /* new extent state */
2163 xfs_exntst_t oldext; /* old extent state */
2164 xfs_bmbt_irec_t r[3]; /* neighbor extent entries */
2165 /* left is 0, right is 1, prev is 2 */
2166 int rval=0; /* return value (logging flags) */
2167 int state = 0;/* state bits, accessed thru macros */
2168 struct xfs_mount *mp = ip->i_mount;
2170 *logflagsp = 0;
2172 cur = *curp;
2173 ifp = XFS_IFORK_PTR(ip, whichfork);
2174 if (whichfork == XFS_COW_FORK)
2175 state |= BMAP_COWFORK;
2177 ASSERT(*idx >= 0);
2178 ASSERT(*idx <= xfs_iext_count(ifp));
2179 ASSERT(!isnullstartblock(new->br_startblock));
2181 XFS_STATS_INC(mp, xs_add_exlist);
2183 #define LEFT r[0]
2184 #define RIGHT r[1]
2185 #define PREV r[2]
2188 * Set up a bunch of variables to make the tests simpler.
2190 error = 0;
2191 ep = xfs_iext_get_ext(ifp, *idx);
2192 xfs_bmbt_get_all(ep, &PREV);
2193 newext = new->br_state;
2194 oldext = (newext == XFS_EXT_UNWRITTEN) ?
2195 XFS_EXT_NORM : XFS_EXT_UNWRITTEN;
2196 ASSERT(PREV.br_state == oldext);
2197 new_endoff = new->br_startoff + new->br_blockcount;
2198 ASSERT(PREV.br_startoff <= new->br_startoff);
2199 ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
2202 * Set flags determining what part of the previous oldext allocation
2203 * extent is being replaced by a newext allocation.
2205 if (PREV.br_startoff == new->br_startoff)
2206 state |= BMAP_LEFT_FILLING;
2207 if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
2208 state |= BMAP_RIGHT_FILLING;
2211 * Check and set flags if this segment has a left neighbor.
2212 * Don't set contiguous if the combined extent would be too large.
2214 if (*idx > 0) {
2215 state |= BMAP_LEFT_VALID;
2216 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &LEFT);
2218 if (isnullstartblock(LEFT.br_startblock))
2219 state |= BMAP_LEFT_DELAY;
2222 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
2223 LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
2224 LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
2225 LEFT.br_state == newext &&
2226 LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
2227 state |= BMAP_LEFT_CONTIG;
2230 * Check and set flags if this segment has a right neighbor.
2231 * Don't set contiguous if the combined extent would be too large.
2232 * Also check for all-three-contiguous being too large.
2234 if (*idx < xfs_iext_count(ifp) - 1) {
2235 state |= BMAP_RIGHT_VALID;
2236 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx + 1), &RIGHT);
2237 if (isnullstartblock(RIGHT.br_startblock))
2238 state |= BMAP_RIGHT_DELAY;
2241 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
2242 new_endoff == RIGHT.br_startoff &&
2243 new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
2244 newext == RIGHT.br_state &&
2245 new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
2246 ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
2247 BMAP_RIGHT_FILLING)) !=
2248 (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
2249 BMAP_RIGHT_FILLING) ||
2250 LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
2251 <= MAXEXTLEN))
2252 state |= BMAP_RIGHT_CONTIG;
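/*
 * The four state bits drive the switch below: BMAP_LEFT_FILLING and
 * BMAP_RIGHT_FILLING say whether the range being converted starts and/or
 * ends exactly at the boundaries of PREV, while BMAP_LEFT_CONTIG and
 * BMAP_RIGHT_CONTIG say whether the neighbouring extent can be merged
 * with the converted range (same state, adjacent offsets and blocks, and
 * a merged length no larger than MAXEXTLEN).  For example, all four bits
 * set means PREV is converted in full and glued to both neighbours,
 * collapsing three extent records into one.
 */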
2255 * Switch out based on the FILLING and CONTIG state bits.
2257 switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
2258 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
2259 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
2260 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2262 * Setting all of a previous oldext extent to newext.
2263 * The left and right neighbors are both contiguous with new.
2265 --*idx;
2267 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2268 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
2269 LEFT.br_blockcount + PREV.br_blockcount +
2270 RIGHT.br_blockcount);
2271 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2273 xfs_iext_remove(ip, *idx + 1, 2, state);
2274 XFS_IFORK_NEXT_SET(ip, whichfork,
2275 XFS_IFORK_NEXTENTS(ip, whichfork) - 2);
2276 if (cur == NULL)
2277 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2278 else {
2279 rval = XFS_ILOG_CORE;
2280 if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
2281 RIGHT.br_startblock,
2282 RIGHT.br_blockcount, &i)))
2283 goto done;
2284 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2285 if ((error = xfs_btree_delete(cur, &i)))
2286 goto done;
2287 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2288 if ((error = xfs_btree_decrement(cur, 0, &i)))
2289 goto done;
2290 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2291 if ((error = xfs_btree_delete(cur, &i)))
2292 goto done;
2293 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2294 if ((error = xfs_btree_decrement(cur, 0, &i)))
2295 goto done;
2296 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2297 if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
2298 LEFT.br_startblock,
2299 LEFT.br_blockcount + PREV.br_blockcount +
2300 RIGHT.br_blockcount, LEFT.br_state)))
2301 goto done;
2303 break;
2305 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
2307 * Setting all of a previous oldext extent to newext.
2308 * The left neighbor is contiguous, the right is not.
2310 --*idx;
2312 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2313 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
2314 LEFT.br_blockcount + PREV.br_blockcount);
2315 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2317 xfs_iext_remove(ip, *idx + 1, 1, state);
2318 XFS_IFORK_NEXT_SET(ip, whichfork,
2319 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
2320 if (cur == NULL)
2321 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2322 else {
2323 rval = XFS_ILOG_CORE;
2324 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
2325 PREV.br_startblock, PREV.br_blockcount,
2326 &i)))
2327 goto done;
2328 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2329 if ((error = xfs_btree_delete(cur, &i)))
2330 goto done;
2331 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2332 if ((error = xfs_btree_decrement(cur, 0, &i)))
2333 goto done;
2334 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2335 if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
2336 LEFT.br_startblock,
2337 LEFT.br_blockcount + PREV.br_blockcount,
2338 LEFT.br_state)))
2339 goto done;
2341 break;
2343 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2345 * Setting all of a previous oldext extent to newext.
2346 * The right neighbor is contiguous, the left is not.
2348 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2349 xfs_bmbt_set_blockcount(ep,
2350 PREV.br_blockcount + RIGHT.br_blockcount);
2351 xfs_bmbt_set_state(ep, newext);
2352 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2353 xfs_iext_remove(ip, *idx + 1, 1, state);
2354 XFS_IFORK_NEXT_SET(ip, whichfork,
2355 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
2356 if (cur == NULL)
2357 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2358 else {
2359 rval = XFS_ILOG_CORE;
2360 if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
2361 RIGHT.br_startblock,
2362 RIGHT.br_blockcount, &i)))
2363 goto done;
2364 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2365 if ((error = xfs_btree_delete(cur, &i)))
2366 goto done;
2367 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2368 if ((error = xfs_btree_decrement(cur, 0, &i)))
2369 goto done;
2370 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2371 if ((error = xfs_bmbt_update(cur, new->br_startoff,
2372 new->br_startblock,
2373 new->br_blockcount + RIGHT.br_blockcount,
2374 newext)))
2375 goto done;
2377 break;
2379 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
2381 * Setting all of a previous oldext extent to newext.
2382 * Neither the left nor right neighbors are contiguous with
2383 * the new one.
2385 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2386 xfs_bmbt_set_state(ep, newext);
2387 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2389 if (cur == NULL)
2390 rval = XFS_ILOG_DEXT;
2391 else {
2392 rval = 0;
2393 if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
2394 new->br_startblock, new->br_blockcount,
2395 &i)))
2396 goto done;
2397 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2398 if ((error = xfs_bmbt_update(cur, new->br_startoff,
2399 new->br_startblock, new->br_blockcount,
2400 newext)))
2401 goto done;
2403 break;
2405 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
2407 * Setting the first part of a previous oldext extent to newext.
2408 * The left neighbor is contiguous.
2410 trace_xfs_bmap_pre_update(ip, *idx - 1, state, _THIS_IP_);
2411 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx - 1),
2412 LEFT.br_blockcount + new->br_blockcount);
2413 xfs_bmbt_set_startoff(ep,
2414 PREV.br_startoff + new->br_blockcount);
2415 trace_xfs_bmap_post_update(ip, *idx - 1, state, _THIS_IP_);
2417 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2418 xfs_bmbt_set_startblock(ep,
2419 new->br_startblock + new->br_blockcount);
2420 xfs_bmbt_set_blockcount(ep,
2421 PREV.br_blockcount - new->br_blockcount);
2422 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2424 --*idx;
2426 if (cur == NULL)
2427 rval = XFS_ILOG_DEXT;
2428 else {
2429 rval = 0;
2430 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
2431 PREV.br_startblock, PREV.br_blockcount,
2432 &i)))
2433 goto done;
2434 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2435 if ((error = xfs_bmbt_update(cur,
2436 PREV.br_startoff + new->br_blockcount,
2437 PREV.br_startblock + new->br_blockcount,
2438 PREV.br_blockcount - new->br_blockcount,
2439 oldext)))
2440 goto done;
2441 if ((error = xfs_btree_decrement(cur, 0, &i)))
2442 goto done;
2443 error = xfs_bmbt_update(cur, LEFT.br_startoff,
2444 LEFT.br_startblock,
2445 LEFT.br_blockcount + new->br_blockcount,
2446 LEFT.br_state);
2447 if (error)
2448 goto done;
2450 break;
2452 case BMAP_LEFT_FILLING:
2454 * Setting the first part of a previous oldext extent to newext.
2455 * The left neighbor is not contiguous.
2457 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2458 ASSERT(ep && xfs_bmbt_get_state(ep) == oldext);
2459 xfs_bmbt_set_startoff(ep, new_endoff);
2460 xfs_bmbt_set_blockcount(ep,
2461 PREV.br_blockcount - new->br_blockcount);
2462 xfs_bmbt_set_startblock(ep,
2463 new->br_startblock + new->br_blockcount);
2464 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2466 xfs_iext_insert(ip, *idx, 1, new, state);
2467 XFS_IFORK_NEXT_SET(ip, whichfork,
2468 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
2469 if (cur == NULL)
2470 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2471 else {
2472 rval = XFS_ILOG_CORE;
2473 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
2474 PREV.br_startblock, PREV.br_blockcount,
2475 &i)))
2476 goto done;
2477 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2478 if ((error = xfs_bmbt_update(cur,
2479 PREV.br_startoff + new->br_blockcount,
2480 PREV.br_startblock + new->br_blockcount,
2481 PREV.br_blockcount - new->br_blockcount,
2482 oldext)))
2483 goto done;
2484 cur->bc_rec.b = *new;
2485 if ((error = xfs_btree_insert(cur, &i)))
2486 goto done;
2487 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2489 break;
2491 case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2493 * Setting the last part of a previous oldext extent to newext.
2494 * The right neighbor is contiguous with the new allocation.
2496 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2497 xfs_bmbt_set_blockcount(ep,
2498 PREV.br_blockcount - new->br_blockcount);
2499 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2501 ++*idx;
2503 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2504 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx),
2505 new->br_startoff, new->br_startblock,
2506 new->br_blockcount + RIGHT.br_blockcount, newext);
2507 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2509 if (cur == NULL)
2510 rval = XFS_ILOG_DEXT;
2511 else {
2512 rval = 0;
2513 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
2514 PREV.br_startblock,
2515 PREV.br_blockcount, &i)))
2516 goto done;
2517 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2518 if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
2519 PREV.br_startblock,
2520 PREV.br_blockcount - new->br_blockcount,
2521 oldext)))
2522 goto done;
2523 if ((error = xfs_btree_increment(cur, 0, &i)))
2524 goto done;
2525 if ((error = xfs_bmbt_update(cur, new->br_startoff,
2526 new->br_startblock,
2527 new->br_blockcount + RIGHT.br_blockcount,
2528 newext)))
2529 goto done;
2531 break;
2533 case BMAP_RIGHT_FILLING:
2535 * Setting the last part of a previous oldext extent to newext.
2536 * The right neighbor is not contiguous.
2538 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2539 xfs_bmbt_set_blockcount(ep,
2540 PREV.br_blockcount - new->br_blockcount);
2541 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2543 ++*idx;
2544 xfs_iext_insert(ip, *idx, 1, new, state);
2546 XFS_IFORK_NEXT_SET(ip, whichfork,
2547 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
2548 if (cur == NULL)
2549 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2550 else {
2551 rval = XFS_ILOG_CORE;
2552 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
2553 PREV.br_startblock, PREV.br_blockcount,
2554 &i)))
2555 goto done;
2556 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2557 if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
2558 PREV.br_startblock,
2559 PREV.br_blockcount - new->br_blockcount,
2560 oldext)))
2561 goto done;
2562 if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
2563 new->br_startblock, new->br_blockcount,
2564 &i)))
2565 goto done;
2566 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
2567 cur->bc_rec.b.br_state = new->br_state;
2568 if ((error = xfs_btree_insert(cur, &i)))
2569 goto done;
2570 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2572 break;
2574 case 0:
2576 * Setting the middle part of a previous oldext extent to
2577 * newext. Contiguity is impossible here.
2578 * One extent becomes three extents.
2580 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2581 xfs_bmbt_set_blockcount(ep,
2582 new->br_startoff - PREV.br_startoff);
2583 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2585 r[0] = *new;
2586 r[1].br_startoff = new_endoff;
2587 r[1].br_blockcount =
2588 PREV.br_startoff + PREV.br_blockcount - new_endoff;
2589 r[1].br_startblock = new->br_startblock + new->br_blockcount;
2590 r[1].br_state = oldext;
2592 ++*idx;
2593 xfs_iext_insert(ip, *idx, 2, &r[0], state);
2595 XFS_IFORK_NEXT_SET(ip, whichfork,
2596 XFS_IFORK_NEXTENTS(ip, whichfork) + 2);
2597 if (cur == NULL)
2598 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2599 else {
2600 rval = XFS_ILOG_CORE;
2601 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
2602 PREV.br_startblock, PREV.br_blockcount,
2603 &i)))
2604 goto done;
2605 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2606 /* new right extent - oldext */
2607 if ((error = xfs_bmbt_update(cur, r[1].br_startoff,
2608 r[1].br_startblock, r[1].br_blockcount,
2609 r[1].br_state)))
2610 goto done;
2611 /* new left extent - oldext */
2612 cur->bc_rec.b = PREV;
2613 cur->bc_rec.b.br_blockcount =
2614 new->br_startoff - PREV.br_startoff;
2615 if ((error = xfs_btree_insert(cur, &i)))
2616 goto done;
2617 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2619 * Reset the cursor to the position of the new extent
2620 * we are about to insert as we can't trust it after
2621 * the previous insert.
2623 if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
2624 new->br_startblock, new->br_blockcount,
2625 &i)))
2626 goto done;
2627 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
2628 /* new middle extent - newext */
2629 cur->bc_rec.b.br_state = new->br_state;
2630 if ((error = xfs_btree_insert(cur, &i)))
2631 goto done;
2632 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2634 break;
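/*
 * Example of the one-becomes-three split above (illustrative numbers):
 * with PREV = {startoff 0, startblock 1000, 100 blocks, UNWRITTEN} and
 * new = {startoff 40, startblock 1040, 20 blocks, NORM}, PREV is trimmed
 * to 40 blocks, r[0] is the 20-block written middle piece, and r[1] is
 * {startoff 60, startblock 1060, 40 blocks, UNWRITTEN}; both are inserted
 * right after PREV and the fork's extent count goes up by two.
 */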
2636 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2637 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2638 case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
2639 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
2640 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2641 case BMAP_LEFT_CONTIG:
2642 case BMAP_RIGHT_CONTIG:
2644 * These cases are all impossible.
2646 ASSERT(0);
2649 /* update reverse mappings */
2650 error = xfs_rmap_convert_extent(mp, dfops, ip, whichfork, new);
2651 if (error)
2652 goto done;
2654 /* convert to a btree if necessary */
2655 if (xfs_bmap_needs_btree(ip, whichfork)) {
2656 int tmp_logflags; /* partial log flag return val */
2658 ASSERT(cur == NULL);
2659 error = xfs_bmap_extents_to_btree(tp, ip, first, dfops, &cur,
2660 0, &tmp_logflags, whichfork);
2661 *logflagsp |= tmp_logflags;
2662 if (error)
2663 goto done;
2666 /* clear out the allocated field, done with it now in any case. */
2667 if (cur) {
2668 cur->bc_private.b.allocated = 0;
2669 *curp = cur;
2672 xfs_bmap_check_leaf_extents(*curp, ip, whichfork);
2673 done:
2674 *logflagsp |= rval;
2675 return error;
2676 #undef LEFT
2677 #undef RIGHT
2678 #undef PREV
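/*
 * Unlike the delayed-allocation path there is no indirect reservation to
 * settle here: the tail only updates the reverse mapping for the converted
 * range, switches the fork to btree format if required, zeroes
 * cur->bc_private.b.allocated and hands the (possibly newly created)
 * cursor back through *curp, and ORs the logging flags into *logflagsp at
 * "done:" whether or not an error is being returned.
 */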
2682 * Convert a hole to a delayed allocation.
2684 STATIC void
2685 xfs_bmap_add_extent_hole_delay(
2686 xfs_inode_t *ip, /* incore inode pointer */
2687 int whichfork,
2688 xfs_extnum_t *idx, /* extent number to update/insert */
2689 xfs_bmbt_irec_t *new) /* new data to add to file extents */
2691 xfs_ifork_t *ifp; /* inode fork pointer */
2692 xfs_bmbt_irec_t left; /* left neighbor extent entry */
2693 xfs_filblks_t newlen=0; /* new indirect size */
2694 xfs_filblks_t oldlen=0; /* old indirect size */
2695 xfs_bmbt_irec_t right; /* right neighbor extent entry */
2696 int state; /* state bits, accessed thru macros */
2697 xfs_filblks_t temp=0; /* temp for indirect calculations */
2699 ifp = XFS_IFORK_PTR(ip, whichfork);
2700 state = 0;
2701 if (whichfork == XFS_COW_FORK)
2702 state |= BMAP_COWFORK;
2703 ASSERT(isnullstartblock(new->br_startblock));
2706 * Check and set flags if this segment has a left neighbor
2708 if (*idx > 0) {
2709 state |= BMAP_LEFT_VALID;
2710 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &left);
2712 if (isnullstartblock(left.br_startblock))
2713 state |= BMAP_LEFT_DELAY;
2717 * Check and set flags if the current (right) segment exists.
2718 * If it doesn't exist, we're converting the hole at end-of-file.
2720 if (*idx < xfs_iext_count(ifp)) {
2721 state |= BMAP_RIGHT_VALID;
2722 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx), &right);
2724 if (isnullstartblock(right.br_startblock))
2725 state |= BMAP_RIGHT_DELAY;
2729 * Set contiguity flags on the left and right neighbors.
2730 * Don't let extents get too large, even if the pieces are contiguous.
2732 if ((state & BMAP_LEFT_VALID) && (state & BMAP_LEFT_DELAY) &&
2733 left.br_startoff + left.br_blockcount == new->br_startoff &&
2734 left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
2735 state |= BMAP_LEFT_CONTIG;
2737 if ((state & BMAP_RIGHT_VALID) && (state & BMAP_RIGHT_DELAY) &&
2738 new->br_startoff + new->br_blockcount == right.br_startoff &&
2739 new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
2740 (!(state & BMAP_LEFT_CONTIG) ||
2741 (left.br_blockcount + new->br_blockcount +
2742 right.br_blockcount <= MAXEXTLEN)))
2743 state |= BMAP_RIGHT_CONTIG;
2746 * Switch out based on the contiguity flags.
2748 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
2749 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2751 * New allocation is contiguous with delayed allocations
2752 * on the left and on the right.
2753 * Merge all three into a single extent record.
2755 --*idx;
2756 temp = left.br_blockcount + new->br_blockcount +
2757 right.br_blockcount;
2759 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2760 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), temp);
2761 oldlen = startblockval(left.br_startblock) +
2762 startblockval(new->br_startblock) +
2763 startblockval(right.br_startblock);
2764 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2765 oldlen);
2766 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx),
2767 nullstartblock((int)newlen));
2768 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2770 xfs_iext_remove(ip, *idx + 1, 1, state);
2771 break;
2773 case BMAP_LEFT_CONTIG:
2775 * New allocation is contiguous with a delayed allocation
2776 * on the left.
2777 * Merge the new allocation with the left neighbor.
2779 --*idx;
2780 temp = left.br_blockcount + new->br_blockcount;
2782 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2783 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), temp);
2784 oldlen = startblockval(left.br_startblock) +
2785 startblockval(new->br_startblock);
2786 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2787 oldlen);
2788 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx),
2789 nullstartblock((int)newlen));
2790 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2791 break;
2793 case BMAP_RIGHT_CONTIG:
2795 * New allocation is contiguous with a delayed allocation
2796 * on the right.
2797 * Merge the new allocation with the right neighbor.
2799 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2800 temp = new->br_blockcount + right.br_blockcount;
2801 oldlen = startblockval(new->br_startblock) +
2802 startblockval(right.br_startblock);
2803 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2804 oldlen);
2805 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx),
2806 new->br_startoff,
2807 nullstartblock((int)newlen), temp, right.br_state);
2808 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2809 break;
2811 case 0:
2813 * New allocation is not contiguous with another
2814 * delayed allocation.
2815 * Insert a new entry.
2817 oldlen = newlen = 0;
2818 xfs_iext_insert(ip, *idx, 1, new, state);
2819 break;
2821 if (oldlen != newlen) {
2822 ASSERT(oldlen > newlen);
2823 xfs_mod_fdblocks(ip->i_mount, (int64_t)(oldlen - newlen),
2824 false);
2826 * Nothing to do for disk quota accounting here.
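/*
 * Reservation example for the merges above (illustrative numbers): if a
 * 50-block delayed extent holding 4 reserved indirect blocks is merged
 * with a new 50-block delayed extent also holding 4, oldlen = 8, but the
 * combined 100-block extent may only need, say, 5 in the worst case, so
 * newlen = 5 and the 3 surplus blocks are returned to the free-block
 * counter by the xfs_mod_fdblocks() call at the end of the function.
 */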
2832 * Convert a hole to a real allocation.
2834 STATIC int /* error */
2835 xfs_bmap_add_extent_hole_real(
2836 struct xfs_trans *tp,
2837 struct xfs_inode *ip,
2838 int whichfork,
2839 xfs_extnum_t *idx,
2840 struct xfs_btree_cur **curp,
2841 struct xfs_bmbt_irec *new,
2842 xfs_fsblock_t *first,
2843 struct xfs_defer_ops *dfops,
2844 int *logflagsp)
2846 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
2847 struct xfs_mount *mp = ip->i_mount;
2848 struct xfs_btree_cur *cur = *curp;
2849 int error; /* error return value */
2850 int i; /* temp state */
2851 xfs_bmbt_irec_t left; /* left neighbor extent entry */
2852 xfs_bmbt_irec_t right; /* right neighbor extent entry */
2853 int rval=0; /* return value (logging flags) */
2854 int state; /* state bits, accessed thru macros */
2856 ASSERT(*idx >= 0);
2857 ASSERT(*idx <= xfs_iext_count(ifp));
2858 ASSERT(!isnullstartblock(new->br_startblock));
2859 ASSERT(!cur || !(cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));
2861 XFS_STATS_INC(mp, xs_add_exlist);
2863 state = 0;
2864 if (whichfork == XFS_ATTR_FORK)
2865 state |= BMAP_ATTRFORK;
2866 if (whichfork == XFS_COW_FORK)
2867 state |= BMAP_COWFORK;
2870 * Check and set flags if this segment has a left neighbor.
2872 if (*idx > 0) {
2873 state |= BMAP_LEFT_VALID;
2874 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &left);
2875 if (isnullstartblock(left.br_startblock))
2876 state |= BMAP_LEFT_DELAY;
2880 * Check and set flags if this segment has a current value.
2881 * Not true if we're inserting into the "hole" at eof.
2883 if (*idx < xfs_iext_count(ifp)) {
2884 state |= BMAP_RIGHT_VALID;
2885 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx), &right);
2886 if (isnullstartblock(right.br_startblock))
2887 state |= BMAP_RIGHT_DELAY;
2891 * We're inserting a real allocation between "left" and "right".
2892 * Set the contiguity flags. Don't let extents get too large.
2894 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
2895 left.br_startoff + left.br_blockcount == new->br_startoff &&
2896 left.br_startblock + left.br_blockcount == new->br_startblock &&
2897 left.br_state == new->br_state &&
2898 left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
2899 state |= BMAP_LEFT_CONTIG;
2901 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
2902 new->br_startoff + new->br_blockcount == right.br_startoff &&
2903 new->br_startblock + new->br_blockcount == right.br_startblock &&
2904 new->br_state == right.br_state &&
2905 new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
2906 (!(state & BMAP_LEFT_CONTIG) ||
2907 left.br_blockcount + new->br_blockcount +
2908 right.br_blockcount <= MAXEXTLEN))
2909 state |= BMAP_RIGHT_CONTIG;
2911 error = 0;
2913 * Select which case we're in here, and implement it.
2915 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
2916 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2918 * New allocation is contiguous with real allocations on the
2919 * left and on the right.
2920 * Merge all three into a single extent record.
2922 --*idx;
2923 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2924 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
2925 left.br_blockcount + new->br_blockcount +
2926 right.br_blockcount);
2927 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2929 xfs_iext_remove(ip, *idx + 1, 1, state);
2931 XFS_IFORK_NEXT_SET(ip, whichfork,
2932 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
2933 if (cur == NULL) {
2934 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
2935 } else {
2936 rval = XFS_ILOG_CORE;
2937 error = xfs_bmbt_lookup_eq(cur, right.br_startoff,
2938 right.br_startblock, right.br_blockcount,
2939 &i);
2940 if (error)
2941 goto done;
2942 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2943 error = xfs_btree_delete(cur, &i);
2944 if (error)
2945 goto done;
2946 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2947 error = xfs_btree_decrement(cur, 0, &i);
2948 if (error)
2949 goto done;
2950 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2951 error = xfs_bmbt_update(cur, left.br_startoff,
2952 left.br_startblock,
2953 left.br_blockcount +
2954 new->br_blockcount +
2955 right.br_blockcount,
2956 left.br_state);
2957 if (error)
2958 goto done;
2960 break;
2962 case BMAP_LEFT_CONTIG:
2964 * New allocation is contiguous with a real allocation
2965 * on the left.
2966 * Merge the new allocation with the left neighbor.
2968 --*idx;
2969 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2970 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
2971 left.br_blockcount + new->br_blockcount);
2972 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2974 if (cur == NULL) {
2975 rval = xfs_ilog_fext(whichfork);
2976 } else {
2977 rval = 0;
2978 error = xfs_bmbt_lookup_eq(cur, left.br_startoff,
2979 left.br_startblock, left.br_blockcount,
2980 &i);
2981 if (error)
2982 goto done;
2983 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2984 error = xfs_bmbt_update(cur, left.br_startoff,
2985 left.br_startblock,
2986 left.br_blockcount +
2987 new->br_blockcount,
2988 left.br_state);
2989 if (error)
2990 goto done;
2992 break;
2994 case BMAP_RIGHT_CONTIG:
2996 * New allocation is contiguous with a real allocation
2997 * on the right.
2998 * Merge the new allocation with the right neighbor.
3000 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
3001 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx),
3002 new->br_startoff, new->br_startblock,
3003 new->br_blockcount + right.br_blockcount,
3004 right.br_state);
3005 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
3007 if (cur == NULL) {
3008 rval = xfs_ilog_fext(whichfork);
3009 } else {
3010 rval = 0;
3011 error = xfs_bmbt_lookup_eq(cur,
3012 right.br_startoff,
3013 right.br_startblock,
3014 right.br_blockcount, &i);
3015 if (error)
3016 goto done;
3017 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
3018 error = xfs_bmbt_update(cur, new->br_startoff,
3019 new->br_startblock,
3020 new->br_blockcount +
3021 right.br_blockcount,
3022 right.br_state);
3023 if (error)
3024 goto done;
3026 break;
3028 case 0:
3030 * New allocation is not contiguous with another
3031 * real allocation.
3032 * Insert a new entry.
3034 xfs_iext_insert(ip, *idx, 1, new, state);
3035 XFS_IFORK_NEXT_SET(ip, whichfork,
3036 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
3037 if (cur == NULL) {
3038 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
3039 } else {
3040 rval = XFS_ILOG_CORE;
3041 error = xfs_bmbt_lookup_eq(cur,
3042 new->br_startoff,
3043 new->br_startblock,
3044 new->br_blockcount, &i);
3045 if (error)
3046 goto done;
3047 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
3048 cur->bc_rec.b.br_state = new->br_state;
3049 error = xfs_btree_insert(cur, &i);
3050 if (error)
3051 goto done;
3052 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
3054 break;
3057 /* add reverse mapping */
3058 error = xfs_rmap_map_extent(mp, dfops, ip, whichfork, new);
3059 if (error)
3060 goto done;
3062 /* convert to a btree if necessary */
3063 if (xfs_bmap_needs_btree(ip, whichfork)) {
3064 int tmp_logflags; /* partial log flag return val */
3066 ASSERT(cur == NULL);
3067 error = xfs_bmap_extents_to_btree(tp, ip, first, dfops, curp,
3068 0, &tmp_logflags, whichfork);
3069 *logflagsp |= tmp_logflags;
3070 cur = *curp;
3071 if (error)
3072 goto done;
3075 /* clear out the allocated field, done with it now in any case. */
3076 if (cur)
3077 cur->bc_private.b.allocated = 0;
3079 xfs_bmap_check_leaf_extents(cur, ip, whichfork);
3080 done:
3081 *logflagsp |= rval;
3082 return error;
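/*
 * Logging summary for the hole-filling cases above: xfs_ilog_fext() maps
 * whichfork to the matching extent-list logging flag, and XFS_ILOG_CORE is
 * added exactly when the fork's extent count changes (a merge or an
 * insert), since that count lives in the inode core.  As in the other
 * add_extent helpers, the rmap update, the optional conversion to btree
 * format and the clearing of the cursor's allocated count are shared tail
 * work.
 */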
3086 * Functions used in the extent read, allocate and remove paths
3090 * Adjust the size of the new extent based on di_extsize and rt extsize.
3092 int
3093 xfs_bmap_extsize_align(
3094 xfs_mount_t *mp,
3095 xfs_bmbt_irec_t *gotp, /* next extent pointer */
3096 xfs_bmbt_irec_t *prevp, /* previous extent pointer */
3097 xfs_extlen_t extsz, /* align to this extent size */
3098 int rt, /* is this a realtime inode? */
3099 int eof, /* is extent at end-of-file? */
3100 int delay, /* creating delalloc extent? */
3101 int convert, /* overwriting unwritten extent? */
3102 xfs_fileoff_t *offp, /* in/out: aligned offset */
3103 xfs_extlen_t *lenp) /* in/out: aligned length */
3105 xfs_fileoff_t orig_off; /* original offset */
3106 xfs_extlen_t orig_alen; /* original length */
3107 xfs_fileoff_t orig_end; /* original off+len */
3108 xfs_fileoff_t nexto; /* next file offset */
3109 xfs_fileoff_t prevo; /* previous file offset */
3110 xfs_fileoff_t align_off; /* temp for offset */
3111 xfs_extlen_t align_alen; /* temp for length */
3112 xfs_extlen_t temp; /* temp for calculations */
3114 if (convert)
3115 return 0;
3117 orig_off = align_off = *offp;
3118 orig_alen = align_alen = *lenp;
3119 orig_end = orig_off + orig_alen;
3122 * If this request overlaps an existing extent, then don't
3123 * attempt to perform any additional alignment.
3125 if (!delay && !eof &&
3126 (orig_off >= gotp->br_startoff) &&
3127 (orig_end <= gotp->br_startoff + gotp->br_blockcount)) {
3128 return 0;
3132 * If the file offset is unaligned vs. the extent size
3133 * we need to align it. This will be possible unless
3134 * the file was previously written with a kernel that didn't
3135 * perform this alignment, or if a truncate shot us in the
3136 * foot.
3138 temp = do_mod(orig_off, extsz);
3139 if (temp) {
3140 align_alen += temp;
3141 align_off -= temp;
3144 /* Same adjustment for the end of the requested area. */
3145 temp = (align_alen % extsz);
3146 if (temp)
3147 align_alen += extsz - temp;
3150 * For large extent hint sizes, the aligned extent might be larger than
3151 * MAXEXTLEN. In that case, reduce the size by an extsz so that it pulls
3152 * the length back under MAXEXTLEN. The outer allocation loops handle
3153 * short allocation just fine, so it is safe to do this. We only want to
3154 * do it when we are forced to, though, because it means more allocation
3155 * operations are required.
3157 while (align_alen > MAXEXTLEN)
3158 align_alen -= extsz;
3159 ASSERT(align_alen <= MAXEXTLEN);
3162 * If the previous block overlaps with this proposed allocation
3163 * then move the start forward without adjusting the length.
3165 if (prevp->br_startoff != NULLFILEOFF) {
3166 if (prevp->br_startblock == HOLESTARTBLOCK)
3167 prevo = prevp->br_startoff;
3168 else
3169 prevo = prevp->br_startoff + prevp->br_blockcount;
3170 } else
3171 prevo = 0;
3172 if (align_off != orig_off && align_off < prevo)
3173 align_off = prevo;
3175 * If the next block overlaps with this proposed allocation
3176 * then move the start back without adjusting the length,
3177 * but not before offset 0.
3178 * This may of course make the start overlap previous block,
3179 * and if we hit the offset 0 limit then the next block
3180 * can still overlap too.
3182 if (!eof && gotp->br_startoff != NULLFILEOFF) {
3183 if ((delay && gotp->br_startblock == HOLESTARTBLOCK) ||
3184 (!delay && gotp->br_startblock == DELAYSTARTBLOCK))
3185 nexto = gotp->br_startoff + gotp->br_blockcount;
3186 else
3187 nexto = gotp->br_startoff;
3188 } else
3189 nexto = NULLFILEOFF;
3190 if (!eof &&
3191 align_off + align_alen != orig_end &&
3192 align_off + align_alen > nexto)
3193 align_off = nexto > align_alen ? nexto - align_alen : 0;
3195 * If we're now overlapping the next or previous extent that
3196 * means we can't fit an extsz piece in this hole. Just move
3197 * the start forward to the first valid spot and set
3198 * the length so we hit the end.
3200 if (align_off != orig_off && align_off < prevo)
3201 align_off = prevo;
3202 if (align_off + align_alen != orig_end &&
3203 align_off + align_alen > nexto &&
3204 nexto != NULLFILEOFF) {
3205 ASSERT(nexto > prevo);
3206 align_alen = nexto - align_off;
3210 * If realtime, and the result isn't a multiple of the realtime
3211 * extent size we need to remove blocks until it is.
3213 if (rt && (temp = (align_alen % mp->m_sb.sb_rextsize))) {
3215 * We're not covering the original request, or
3216 * we won't be able to once we fix the length.
3218 if (orig_off < align_off ||
3219 orig_end > align_off + align_alen ||
3220 align_alen - temp < orig_alen)
3221 return -EINVAL;
3223 * Try to fix it by moving the start up.
3225 if (align_off + temp <= orig_off) {
3226 align_alen -= temp;
3227 align_off += temp;
3230 * Try to fix it by moving the end in.
3232 else if (align_off + align_alen - temp >= orig_end)
3233 align_alen -= temp;
3235 * Set the start to the minimum then trim the length.
3237 else {
3238 align_alen -= orig_off - align_off;
3239 align_off = orig_off;
3240 align_alen -= align_alen % mp->m_sb.sb_rextsize;
3243 * Result doesn't cover the request, fail it.
3245 if (orig_off < align_off || orig_end > align_off + align_alen)
3246 return -EINVAL;
3247 } else {
3248 ASSERT(orig_off >= align_off);
3249 /* see MAXEXTLEN handling above */
3250 ASSERT(orig_end <= align_off + align_alen ||
3251 align_alen + extsz > MAXEXTLEN);
3254 #ifdef DEBUG
3255 if (!eof && gotp->br_startoff != NULLFILEOFF)
3256 ASSERT(align_off + align_alen <= gotp->br_startoff);
3257 if (prevp->br_startoff != NULLFILEOFF)
3258 ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount);
3259 #endif
3261 *lenp = align_alen;
3262 *offp = align_off;
3263 return 0;
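/*
 * Worked example (illustrative numbers): with an extent size hint of
 * extsz = 16 blocks and a request for offset 5, length 4, do_mod(5, 16)
 * = 5, so the start is pulled back to 0 and the length grows to 9; the
 * end is then rounded up by 16 - (9 % 16) = 7, giving an aligned request
 * of offset 0, length 16.  The prevo/nexto checks and the realtime fixups
 * afterwards only trim that back when a neighbouring extent or the rt
 * extent size gets in the way, failing with -EINVAL if the result can no
 * longer cover the original request.
 */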
3266 #define XFS_ALLOC_GAP_UNITS 4
3268 void
3269 xfs_bmap_adjacent(
3270 struct xfs_bmalloca *ap) /* bmap alloc argument struct */
3272 xfs_fsblock_t adjust; /* adjustment to block numbers */
3273 xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */
3274 xfs_mount_t *mp; /* mount point structure */
3275 int nullfb; /* true if ap->firstblock isn't set */
3276 int rt; /* true if inode is realtime */
3278 #define ISVALID(x,y) \
3279 (rt ? \
3280 (x) < mp->m_sb.sb_rblocks : \
3281 XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) && \
3282 XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \
3283 XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks)
3285 mp = ap->ip->i_mount;
3286 nullfb = *ap->firstblock == NULLFSBLOCK;
3287 rt = XFS_IS_REALTIME_INODE(ap->ip) &&
3288 xfs_alloc_is_userdata(ap->datatype);
3289 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock);
3291 * If allocating at eof, and there's a previous real block,
3292 * try to use its last block as our starting point.
3294 if (ap->eof && ap->prev.br_startoff != NULLFILEOFF &&
3295 !isnullstartblock(ap->prev.br_startblock) &&
3296 ISVALID(ap->prev.br_startblock + ap->prev.br_blockcount,
3297 ap->prev.br_startblock)) {
3298 ap->blkno = ap->prev.br_startblock + ap->prev.br_blockcount;
3300 * Adjust for the gap between prevp and us.
3302 adjust = ap->offset -
3303 (ap->prev.br_startoff + ap->prev.br_blockcount);
3304 if (adjust &&
3305 ISVALID(ap->blkno + adjust, ap->prev.br_startblock))
3306 ap->blkno += adjust;
3309 * If not at eof, then compare the two neighbor blocks.
3310 * Figure out whether either one gives us a good starting point,
3311 * and pick the better one.
3313 else if (!ap->eof) {
3314 xfs_fsblock_t gotbno; /* right side block number */
3315 xfs_fsblock_t gotdiff=0; /* right side difference */
3316 xfs_fsblock_t prevbno; /* left side block number */
3317 xfs_fsblock_t prevdiff=0; /* left side difference */
3320 * If there's a previous (left) block, select a requested
3321 * start block based on it.
3323 if (ap->prev.br_startoff != NULLFILEOFF &&
3324 !isnullstartblock(ap->prev.br_startblock) &&
3325 (prevbno = ap->prev.br_startblock +
3326 ap->prev.br_blockcount) &&
3327 ISVALID(prevbno, ap->prev.br_startblock)) {
3329 * Calculate gap to end of previous block.
3331 adjust = prevdiff = ap->offset -
3332 (ap->prev.br_startoff +
3333 ap->prev.br_blockcount);
3335 * Figure the startblock based on the previous block's
3336 * end and the gap size.
3337 * Heuristic!
3338 * If the gap is large relative to the piece we're
3339 * allocating, or using it gives us an invalid block
3340 * number, then just use the end of the previous block.
3342 if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
3343 ISVALID(prevbno + prevdiff,
3344 ap->prev.br_startblock))
3345 prevbno += adjust;
3346 else
3347 prevdiff += adjust;
3349 * If the firstblock forbids it, can't use it,
3350 * must use default.
3352 if (!rt && !nullfb &&
3353 XFS_FSB_TO_AGNO(mp, prevbno) != fb_agno)
3354 prevbno = NULLFSBLOCK;
3357 * No previous block or can't follow it, just default.
3359 else
3360 prevbno = NULLFSBLOCK;
3362 * If there's a following (right) block, select a requested
3363 * start block based on it.
3365 if (!isnullstartblock(ap->got.br_startblock)) {
3367 * Calculate gap to start of next block.
3369 adjust = gotdiff = ap->got.br_startoff - ap->offset;
3371 * Figure the startblock based on the next block's
3372 * start and the gap size.
3374 gotbno = ap->got.br_startblock;
3376 * Heuristic!
3377 * If the gap is large relative to the piece we're
3378 * allocating, or using it gives us an invalid block
3379 * number, then just use the start of the next block
3380 * offset by our length.
3382 if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
3383 ISVALID(gotbno - gotdiff, gotbno))
3384 gotbno -= adjust;
3385 else if (ISVALID(gotbno - ap->length, gotbno)) {
3386 gotbno -= ap->length;
3387 gotdiff += adjust - ap->length;
3388 } else
3389 gotdiff += adjust;
3391 * If the firstblock forbids it, can't use it,
3392 * must use default.
3394 if (!rt && !nullfb &&
3395 XFS_FSB_TO_AGNO(mp, gotbno) != fb_agno)
3396 gotbno = NULLFSBLOCK;
3399 * No next block, just default.
3401 else
3402 gotbno = NULLFSBLOCK;
3404 * If both valid, pick the better one, else the only good
3405 * one, else ap->blkno is already set (to 0 or the inode block).
3407 if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK)
3408 ap->blkno = prevdiff <= gotdiff ? prevbno : gotbno;
3409 else if (prevbno != NULLFSBLOCK)
3410 ap->blkno = prevbno;
3411 else if (gotbno != NULLFSBLOCK)
3412 ap->blkno = gotbno;
3414 #undef ISVALID
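/*
 * Heuristic example (illustrative numbers): allocating 8 blocks not at
 * EOF, with the previous extent ending just before fsb 1000 and a 3-block
 * gap between it and our offset, while the next mapped extent starts 10
 * file blocks beyond our offset at fsb 2000.  Both gaps are within
 * XFS_ALLOC_GAP_UNITS (4) times the length, so the candidates become
 * prevbno = 1003 and gotbno = 1990 with prevdiff = 3 and gotdiff = 10;
 * the smaller difference wins and ap->blkno is set to 1003.
 */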
3417 static int
3418 xfs_bmap_longest_free_extent(
3419 struct xfs_trans *tp,
3420 xfs_agnumber_t ag,
3421 xfs_extlen_t *blen,
3422 int *notinit)
3424 struct xfs_mount *mp = tp->t_mountp;
3425 struct xfs_perag *pag;
3426 xfs_extlen_t longest;
3427 int error = 0;
3429 pag = xfs_perag_get(mp, ag);
3430 if (!pag->pagf_init) {
3431 error = xfs_alloc_pagf_init(mp, tp, ag, XFS_ALLOC_FLAG_TRYLOCK);
3432 if (error)
3433 goto out;
3435 if (!pag->pagf_init) {
3436 *notinit = 1;
3437 goto out;
3441 longest = xfs_alloc_longest_free_extent(mp, pag,
3442 xfs_alloc_min_freelist(mp, pag),
3443 xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE));
3444 if (*blen < longest)
3445 *blen = longest;
3447 out:
3448 xfs_perag_put(pag);
3449 return error;
3452 static void
3453 xfs_bmap_select_minlen(
3454 struct xfs_bmalloca *ap,
3455 struct xfs_alloc_arg *args,
3456 xfs_extlen_t *blen,
3457 int notinit)
3459 if (notinit || *blen < ap->minlen) {
3461 * Since we did a BUF_TRYLOCK above, it is possible that
3462 * there is space for this request.
3464 args->minlen = ap->minlen;
3465 } else if (*blen < args->maxlen) {
3467 * If the best seen length is less than the request length,
3468 * use the best as the minimum.
3470 args->minlen = *blen;
3471 } else {
3473 * Otherwise we've seen an extent as big as maxlen, use that
3474 * as the minimum.
3476 args->minlen = args->maxlen;
3480 STATIC int
3481 xfs_bmap_btalloc_nullfb(
3482 struct xfs_bmalloca *ap,
3483 struct xfs_alloc_arg *args,
3484 xfs_extlen_t *blen)
3486 struct xfs_mount *mp = ap->ip->i_mount;
3487 xfs_agnumber_t ag, startag;
3488 int notinit = 0;
3489 int error;
3491 args->type = XFS_ALLOCTYPE_START_BNO;
3492 args->total = ap->total;
3494 startag = ag = XFS_FSB_TO_AGNO(mp, args->fsbno);
3495 if (startag == NULLAGNUMBER)
3496 startag = ag = 0;
3498 while (*blen < args->maxlen) {
3499 error = xfs_bmap_longest_free_extent(args->tp, ag, blen,
3500 &notinit);
3501 if (error)
3502 return error;
3504 if (++ag == mp->m_sb.sb_agcount)
3505 ag = 0;
3506 if (ag == startag)
3507 break;
3510 xfs_bmap_select_minlen(ap, args, blen, notinit);
3511 return 0;
3514 STATIC int
3515 xfs_bmap_btalloc_filestreams(
3516 struct xfs_bmalloca *ap,
3517 struct xfs_alloc_arg *args,
3518 xfs_extlen_t *blen)
3520 struct xfs_mount *mp = ap->ip->i_mount;
3521 xfs_agnumber_t ag;
3522 int notinit = 0;
3523 int error;
3525 args->type = XFS_ALLOCTYPE_NEAR_BNO;
3526 args->total = ap->total;
3528 ag = XFS_FSB_TO_AGNO(mp, args->fsbno);
3529 if (ag == NULLAGNUMBER)
3530 ag = 0;
3532 error = xfs_bmap_longest_free_extent(args->tp, ag, blen, &notinit);
3533 if (error)
3534 return error;
3536 if (*blen < args->maxlen) {
3537 error = xfs_filestream_new_ag(ap, &ag);
3538 if (error)
3539 return error;
3541 error = xfs_bmap_longest_free_extent(args->tp, ag, blen,
3542 &notinit);
3543 if (error)
3544 return error;
3548 xfs_bmap_select_minlen(ap, args, blen, notinit);
3551 * Set the failure fallback case to look in the selected AG as stream
3552 * may have moved.
3554 ap->blkno = args->fsbno = XFS_AGB_TO_FSB(mp, ag, 0);
3555 return 0;
3558 STATIC int
3559 xfs_bmap_btalloc(
3560 struct xfs_bmalloca *ap) /* bmap alloc argument struct */
3562 xfs_mount_t *mp; /* mount point structure */
3563 xfs_alloctype_t atype = 0; /* type for allocation routines */
3564 xfs_extlen_t align = 0; /* minimum allocation alignment */
3565 xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */
3566 xfs_agnumber_t ag;
3567 xfs_alloc_arg_t args;
3568 xfs_extlen_t blen;
3569 xfs_extlen_t nextminlen = 0;
3570 int nullfb; /* true if ap->firstblock isn't set */
3571 int isaligned;
3572 int tryagain;
3573 int error;
3574 int stripe_align;
3576 ASSERT(ap->length);
3578 mp = ap->ip->i_mount;
3580 /* stripe alignment for allocation is determined by mount parameters */
3581 stripe_align = 0;
3582 if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
3583 stripe_align = mp->m_swidth;
3584 else if (mp->m_dalign)
3585 stripe_align = mp->m_dalign;
3587 if (ap->flags & XFS_BMAPI_COWFORK)
3588 align = xfs_get_cowextsz_hint(ap->ip);
3589 else if (xfs_alloc_is_userdata(ap->datatype))
3590 align = xfs_get_extsz_hint(ap->ip);
3591 if (align) {
3592 error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
3593 align, 0, ap->eof, 0, ap->conv,
3594 &ap->offset, &ap->length);
3595 ASSERT(!error);
3596 ASSERT(ap->length);
3600 nullfb = *ap->firstblock == NULLFSBLOCK;
3601 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock);
3602 if (nullfb) {
3603 if (xfs_alloc_is_userdata(ap->datatype) &&
3604 xfs_inode_is_filestream(ap->ip)) {
3605 ag = xfs_filestream_lookup_ag(ap->ip);
3606 ag = (ag != NULLAGNUMBER) ? ag : 0;
3607 ap->blkno = XFS_AGB_TO_FSB(mp, ag, 0);
3608 } else {
3609 ap->blkno = XFS_INO_TO_FSB(mp, ap->ip->i_ino);
3611 } else
3612 ap->blkno = *ap->firstblock;
3614 xfs_bmap_adjacent(ap);
3617 * If allowed, use ap->blkno; otherwise must use firstblock since
3618 * it's in the right allocation group.
3620 if (nullfb || XFS_FSB_TO_AGNO(mp, ap->blkno) == fb_agno)
3621 ;
3622 else
3623 ap->blkno = *ap->firstblock;
3625 * Normal allocation, done through xfs_alloc_vextent.
3627 tryagain = isaligned = 0;
3628 memset(&args, 0, sizeof(args));
3629 args.tp = ap->tp;
3630 args.mp = mp;
3631 args.fsbno = ap->blkno;
3632 xfs_rmap_skip_owner_update(&args.oinfo);
3634 /* Trim the allocation back to the maximum an AG can fit. */
3635 args.maxlen = MIN(ap->length, mp->m_ag_max_usable);
3636 args.firstblock = *ap->firstblock;
3637 blen = 0;
3638 if (nullfb) {
3640 * Search for an allocation group with a single extent large
3641 * enough for the request. If one isn't found, then adjust
3642 * the minimum allocation size to the largest space found.
3644 if (xfs_alloc_is_userdata(ap->datatype) &&
3645 xfs_inode_is_filestream(ap->ip))
3646 error = xfs_bmap_btalloc_filestreams(ap, &args, &blen);
3647 else
3648 error = xfs_bmap_btalloc_nullfb(ap, &args, &blen);
3649 if (error)
3650 return error;
3651 } else if (ap->dfops->dop_low) {
3652 if (xfs_inode_is_filestream(ap->ip))
3653 args.type = XFS_ALLOCTYPE_FIRST_AG;
3654 else
3655 args.type = XFS_ALLOCTYPE_START_BNO;
3656 args.total = args.minlen = ap->minlen;
3657 } else {
3658 args.type = XFS_ALLOCTYPE_NEAR_BNO;
3659 args.total = ap->total;
3660 args.minlen = ap->minlen;
3662 /* apply extent size hints if obtained earlier */
3663 if (align) {
3664 args.prod = align;
3665 if ((args.mod = (xfs_extlen_t)do_mod(ap->offset, args.prod)))
3666 args.mod = (xfs_extlen_t)(args.prod - args.mod);
3667 } else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) {
3668 args.prod = 1;
3669 args.mod = 0;
3670 } else {
3671 args.prod = PAGE_SIZE >> mp->m_sb.sb_blocklog;
3672 if ((args.mod = (xfs_extlen_t)(do_mod(ap->offset, args.prod))))
3673 args.mod = (xfs_extlen_t)(args.prod - args.mod);
3676 * If we are not low on available data blocks, and the
3677 * underlying logical volume manager is a stripe, and
3678 * the file offset is zero then try to allocate data
3679 * blocks on stripe unit boundary.
3680 * NOTE: ap->aeof is only set if the allocation length
3681 * is >= the stripe unit and the allocation offset is
3682 * at the end of file.
3684 if (!ap->dfops->dop_low && ap->aeof) {
3685 if (!ap->offset) {
3686 args.alignment = stripe_align;
3687 atype = args.type;
3688 isaligned = 1;
3690 * Adjust for alignment
3692 if (blen > args.alignment && blen <= args.maxlen)
3693 args.minlen = blen - args.alignment;
3694 args.minalignslop = 0;
3695 } else {
3697 * First try an exact bno allocation.
3698 * If it fails then do a near or start bno
3699 * allocation with alignment turned on.
3701 atype = args.type;
3702 tryagain = 1;
3703 args.type = XFS_ALLOCTYPE_THIS_BNO;
3704 args.alignment = 1;
3706 * Compute the minlen+alignment for the
3707 * next case. Set slop so that the value
3708 * of minlen+alignment+slop doesn't go up
3709 * between the calls.
3711 if (blen > stripe_align && blen <= args.maxlen)
3712 nextminlen = blen - stripe_align;
3713 else
3714 nextminlen = args.minlen;
3715 if (nextminlen + stripe_align > args.minlen + 1)
3716 args.minalignslop =
3717 nextminlen + stripe_align -
3718 args.minlen - 1;
3719 else
3720 args.minalignslop = 0;
3722 } else {
3723 args.alignment = 1;
3724 args.minalignslop = 0;
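/*
 * minalignslop example (illustrative numbers): with stripe_align = 16 and
 * args.minlen = nextminlen = 8, the exact-bno attempt above runs with
 * alignment 1 but sets minalignslop = 8 + 16 - 8 - 1 = 15, so that
 * minlen + alignment + slop does not go up between the first
 * xfs_alloc_vextent() call and the aligned retry further down.
 */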
3726 args.minleft = ap->minleft;
3727 args.wasdel = ap->wasdel;
3728 args.resv = XFS_AG_RESV_NONE;
3729 args.datatype = ap->datatype;
3730 if (ap->datatype & XFS_ALLOC_USERDATA_ZERO)
3731 args.ip = ap->ip;
3733 error = xfs_alloc_vextent(&args);
3734 if (error)
3735 return error;
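/*
 * The retries below form a fallback ladder, summarising the code that
 * follows: if the exact-bno attempt failed, retry with stripe alignment;
 * if an aligned attempt failed, retry without alignment; if there is
 * still nothing and no block has been allocated in this transaction yet,
 * retry asking only for the caller's minimum length; and as a last
 * resort take the first AG with space and mark the dfops as low on
 * space.
 */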
3737 if (tryagain && args.fsbno == NULLFSBLOCK) {
3739 * Exact allocation failed. Now try with alignment
3740 * turned on.
3742 args.type = atype;
3743 args.fsbno = ap->blkno;
3744 args.alignment = stripe_align;
3745 args.minlen = nextminlen;
3746 args.minalignslop = 0;
3747 isaligned = 1;
3748 if ((error = xfs_alloc_vextent(&args)))
3749 return error;
3751 if (isaligned && args.fsbno == NULLFSBLOCK) {
3753 * allocation failed, so turn off alignment and
3754 * try again.
3756 args.type = atype;
3757 args.fsbno = ap->blkno;
3758 args.alignment = 0;
3759 if ((error = xfs_alloc_vextent(&args)))
3760 return error;
3762 if (args.fsbno == NULLFSBLOCK && nullfb &&
3763 args.minlen > ap->minlen) {
3764 args.minlen = ap->minlen;
3765 args.type = XFS_ALLOCTYPE_START_BNO;
3766 args.fsbno = ap->blkno;
3767 if ((error = xfs_alloc_vextent(&args)))
3768 return error;
3770 if (args.fsbno == NULLFSBLOCK && nullfb) {
3771 args.fsbno = 0;
3772 args.type = XFS_ALLOCTYPE_FIRST_AG;
3773 args.total = ap->minlen;
3774 if ((error = xfs_alloc_vextent(&args)))
3775 return error;
3776 ap->dfops->dop_low = true;
3778 if (args.fsbno != NULLFSBLOCK) {
3780 * Check that the allocation happened in the same or a higher AG
3781 * than the first block that was allocated.
3783 ASSERT(*ap->firstblock == NULLFSBLOCK ||
3784 XFS_FSB_TO_AGNO(mp, *ap->firstblock) <=
3785 XFS_FSB_TO_AGNO(mp, args.fsbno));
3787 ap->blkno = args.fsbno;
3788 if (*ap->firstblock == NULLFSBLOCK)
3789 *ap->firstblock = args.fsbno;
3790 ASSERT(nullfb || fb_agno <= args.agno);
3791 ap->length = args.len;
3792 if (!(ap->flags & XFS_BMAPI_COWFORK))
3793 ap->ip->i_d.di_nblocks += args.len;
3794 xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
3795 if (ap->wasdel)
3796 ap->ip->i_delayed_blks -= args.len;
3798 * Adjust the disk quota also. This was reserved
3799 * earlier.
3801 xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
3802 ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT :
3803 XFS_TRANS_DQ_BCOUNT,
3804 (long) args.len);
3805 } else {
3806 ap->blkno = NULLFSBLOCK;
3807 ap->length = 0;
3809 return 0;
3813 * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file.
3814 * It figures out where to ask the underlying allocator to put the new extent.
3816 STATIC int
3817 xfs_bmap_alloc(
3818 struct xfs_bmalloca *ap) /* bmap alloc argument struct */
3820 if (XFS_IS_REALTIME_INODE(ap->ip) &&
3821 xfs_alloc_is_userdata(ap->datatype))
3822 return xfs_bmap_rtalloc(ap);
3823 return xfs_bmap_btalloc(ap);
3826 /* Trim extent to fit a logical block range. */
3827 void
3828 xfs_trim_extent(
3829 struct xfs_bmbt_irec *irec,
3830 xfs_fileoff_t bno,
3831 xfs_filblks_t len)
3833 xfs_fileoff_t distance;
3834 xfs_fileoff_t end = bno + len;
3836 if (irec->br_startoff + irec->br_blockcount <= bno ||
3837 irec->br_startoff >= end) {
3838 irec->br_blockcount = 0;
3839 return;
3842 if (irec->br_startoff < bno) {
3843 distance = bno - irec->br_startoff;
3844 if (isnullstartblock(irec->br_startblock))
3845 irec->br_startblock = DELAYSTARTBLOCK;
3846 if (irec->br_startblock != DELAYSTARTBLOCK &&
3847 irec->br_startblock != HOLESTARTBLOCK)
3848 irec->br_startblock += distance;
3849 irec->br_startoff += distance;
3850 irec->br_blockcount -= distance;
3853 if (end < irec->br_startoff + irec->br_blockcount) {
3854 distance = irec->br_startoff + irec->br_blockcount - end;
3855 irec->br_blockcount -= distance;
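/*
 * A worked example with hypothetical values: an irec with startoff 10
 * and blockcount 20 (file blocks 10..29) trimmed to bno = 15, len = 10
 * first loses 5 blocks off the front (startoff 15, startblock advanced
 * by 5, blockcount 15) and then 5 off the back, leaving a mapping for
 * file blocks 15..24 with blockcount 10.
 */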
3859 /* trim extent to within eof */
3860 void
3861 xfs_trim_extent_eof(
3862 struct xfs_bmbt_irec *irec,
3863 struct xfs_inode *ip)
3866 xfs_trim_extent(irec, 0, XFS_B_TO_FSB(ip->i_mount,
3867 i_size_read(VFS_I(ip))));
3871 * Trim the returned map to the required bounds
3873 STATIC void
3874 xfs_bmapi_trim_map(
3875 struct xfs_bmbt_irec *mval,
3876 struct xfs_bmbt_irec *got,
3877 xfs_fileoff_t *bno,
3878 xfs_filblks_t len,
3879 xfs_fileoff_t obno,
3880 xfs_fileoff_t end,
3881 int n,
3882 int flags)
3884 if ((flags & XFS_BMAPI_ENTIRE) ||
3885 got->br_startoff + got->br_blockcount <= obno) {
3886 *mval = *got;
3887 if (isnullstartblock(got->br_startblock))
3888 mval->br_startblock = DELAYSTARTBLOCK;
3889 return;
3892 if (obno > *bno)
3893 *bno = obno;
3894 ASSERT((*bno >= obno) || (n == 0));
3895 ASSERT(*bno < end);
3896 mval->br_startoff = *bno;
3897 if (isnullstartblock(got->br_startblock))
3898 mval->br_startblock = DELAYSTARTBLOCK;
3899 else
3900 mval->br_startblock = got->br_startblock +
3901 (*bno - got->br_startoff);
3903 * Return the minimum of what we got and what we asked for as
3904 * the length. We can use the len variable here because it is
3905 * modified below, and we may already have been through this
3906 * code if the first part of the allocation didn't overlap what
3907 * was asked for.
3909 mval->br_blockcount = XFS_FILBLKS_MIN(end - *bno,
3910 got->br_blockcount - (*bno - got->br_startoff));
3911 mval->br_state = got->br_state;
3912 ASSERT(mval->br_blockcount <= len);
3913 return;
3917 * Update and validate the extent map to return
3919 STATIC void
3920 xfs_bmapi_update_map(
3921 struct xfs_bmbt_irec **map,
3922 xfs_fileoff_t *bno,
3923 xfs_filblks_t *len,
3924 xfs_fileoff_t obno,
3925 xfs_fileoff_t end,
3926 int *n,
3927 int flags)
3929 xfs_bmbt_irec_t *mval = *map;
3931 ASSERT((flags & XFS_BMAPI_ENTIRE) ||
3932 ((mval->br_startoff + mval->br_blockcount) <= end));
3933 ASSERT((flags & XFS_BMAPI_ENTIRE) || (mval->br_blockcount <= *len) ||
3934 (mval->br_startoff < obno));
3936 *bno = mval->br_startoff + mval->br_blockcount;
3937 *len = end - *bno;
3938 if (*n > 0 && mval->br_startoff == mval[-1].br_startoff) {
3939 /* update previous map with new information */
3940 ASSERT(mval->br_startblock == mval[-1].br_startblock);
3941 ASSERT(mval->br_blockcount > mval[-1].br_blockcount);
3942 ASSERT(mval->br_state == mval[-1].br_state);
3943 mval[-1].br_blockcount = mval->br_blockcount;
3944 mval[-1].br_state = mval->br_state;
3945 } else if (*n > 0 && mval->br_startblock != DELAYSTARTBLOCK &&
3946 mval[-1].br_startblock != DELAYSTARTBLOCK &&
3947 mval[-1].br_startblock != HOLESTARTBLOCK &&
3948 mval->br_startblock == mval[-1].br_startblock +
3949 mval[-1].br_blockcount &&
3950 ((flags & XFS_BMAPI_IGSTATE) ||
3951 mval[-1].br_state == mval->br_state)) {
3952 ASSERT(mval->br_startoff ==
3953 mval[-1].br_startoff + mval[-1].br_blockcount);
3954 mval[-1].br_blockcount += mval->br_blockcount;
3955 } else if (*n > 0 &&
3956 mval->br_startblock == DELAYSTARTBLOCK &&
3957 mval[-1].br_startblock == DELAYSTARTBLOCK &&
3958 mval->br_startoff ==
3959 mval[-1].br_startoff + mval[-1].br_blockcount) {
3960 mval[-1].br_blockcount += mval->br_blockcount;
3961 mval[-1].br_state = mval->br_state;
3962 } else if (!((*n == 0) &&
3963 ((mval->br_startoff + mval->br_blockcount) <=
3964 obno))) {
3965 mval++;
3966 (*n)++;
3968 *map = mval;
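/*
 * For example (hypothetical values): if the previous entry mapped file
 * blocks [0, 8) to filesystem block 100 and the new one maps [8, 12) to
 * block 108 with the same state, the second branch above folds them into
 * a single entry covering [0, 12) without consuming another map slot.
 */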
3972 * Map file blocks to filesystem blocks without allocation.
3975 xfs_bmapi_read(
3976 struct xfs_inode *ip,
3977 xfs_fileoff_t bno,
3978 xfs_filblks_t len,
3979 struct xfs_bmbt_irec *mval,
3980 int *nmap,
3981 int flags)
3983 struct xfs_mount *mp = ip->i_mount;
3984 struct xfs_ifork *ifp;
3985 struct xfs_bmbt_irec got;
3986 xfs_fileoff_t obno;
3987 xfs_fileoff_t end;
3988 xfs_extnum_t idx;
3989 int error;
3990 bool eof = false;
3991 int n = 0;
3992 int whichfork = xfs_bmapi_whichfork(flags);
3994 ASSERT(*nmap >= 1);
3995 ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK|XFS_BMAPI_ENTIRE|
3996 XFS_BMAPI_IGSTATE|XFS_BMAPI_COWFORK)));
3997 ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED|XFS_ILOCK_EXCL));
3999 if (unlikely(XFS_TEST_ERROR(
4000 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
4001 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
4002 mp, XFS_ERRTAG_BMAPIFORMAT))) {
4003 XFS_ERROR_REPORT("xfs_bmapi_read", XFS_ERRLEVEL_LOW, mp);
4004 return -EFSCORRUPTED;
4007 if (XFS_FORCED_SHUTDOWN(mp))
4008 return -EIO;
4010 XFS_STATS_INC(mp, xs_blk_mapr);
4012 ifp = XFS_IFORK_PTR(ip, whichfork);
4013 if (!ifp) {
4014 /* No CoW fork? Return a hole. */
4015 if (whichfork == XFS_COW_FORK) {
4016 mval->br_startoff = bno;
4017 mval->br_startblock = HOLESTARTBLOCK;
4018 mval->br_blockcount = len;
4019 mval->br_state = XFS_EXT_NORM;
4020 *nmap = 1;
4021 return 0;
4025 * A missing attr ifork implies that the inode says we're in
4026 * extents or btree format but failed to pass the inode fork
4027 * verifier while trying to load it. Treat that as a file
4028 * corruption too.
4030 #ifdef DEBUG
4031 xfs_alert(mp, "%s: inode %llu missing fork %d",
4032 __func__, ip->i_ino, whichfork);
4033 #endif /* DEBUG */
4034 return -EFSCORRUPTED;
4037 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
4038 error = xfs_iread_extents(NULL, ip, whichfork);
4039 if (error)
4040 return error;
4043 if (!xfs_iext_lookup_extent(ip, ifp, bno, &idx, &got))
4044 eof = true;
4045 end = bno + len;
4046 obno = bno;
4048 while (bno < end && n < *nmap) {
4049 /* Reading past eof, act as though there's a hole up to end. */
4050 if (eof)
4051 got.br_startoff = end;
4052 if (got.br_startoff > bno) {
4053 /* Reading in a hole. */
4054 mval->br_startoff = bno;
4055 mval->br_startblock = HOLESTARTBLOCK;
4056 mval->br_blockcount =
4057 XFS_FILBLKS_MIN(len, got.br_startoff - bno);
4058 mval->br_state = XFS_EXT_NORM;
4059 bno += mval->br_blockcount;
4060 len -= mval->br_blockcount;
4061 mval++;
4062 n++;
4063 continue;
4066 /* set up the extent map to return. */
4067 xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags);
4068 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
4070 /* If we're done, stop now. */
4071 if (bno >= end || n >= *nmap)
4072 break;
4074 /* Else go on to the next record. */
4075 if (!xfs_iext_get_extent(ifp, ++idx, &got))
4076 eof = true;
4078 *nmap = n;
4079 return 0;
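/*
 * A minimal usage sketch of the interface above; the local names are
 * illustrative only, and the caller must hold the ilock as asserted in
 * the function:
 *
 *	struct xfs_bmbt_irec	imap;
 *	int			nimaps = 1;
 *
 *	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
 *			       &imap, &nimaps, 0);
 *
 * On return nimaps says how many mappings were filled in, and holes are
 * reported as HOLESTARTBLOCK mappings rather than as errors.
 */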
4083 * Add a delayed allocation extent to an inode. Blocks are reserved from the
4084 * global pool and the extent inserted into the inode in-core extent tree.
4086 * On entry, got refers to the first extent beyond the offset of the extent to
4087 * allocate or eof is specified if no such extent exists. On return, got refers
4088 * to the extent record that was inserted to the inode fork.
4090 * Note that the allocated extent may have been merged with contiguous extents
4091 * during insertion into the inode fork. Thus, got does not reflect the current
4092 * state of the inode fork on return. If necessary, the caller can use lastx to
4093 * look up the updated record in the inode fork.
4096 xfs_bmapi_reserve_delalloc(
4097 struct xfs_inode *ip,
4098 int whichfork,
4099 xfs_fileoff_t off,
4100 xfs_filblks_t len,
4101 xfs_filblks_t prealloc,
4102 struct xfs_bmbt_irec *got,
4103 xfs_extnum_t *lastx,
4104 int eof)
4106 struct xfs_mount *mp = ip->i_mount;
4107 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
4108 xfs_extlen_t alen;
4109 xfs_extlen_t indlen;
4110 char rt = XFS_IS_REALTIME_INODE(ip);
4111 xfs_extlen_t extsz;
4112 int error;
4113 xfs_fileoff_t aoff = off;
4116 * Cap the alloc length. Keep track of prealloc so we know whether to
4117 * tag the inode before we return.
4119 alen = XFS_FILBLKS_MIN(len + prealloc, MAXEXTLEN);
4120 if (!eof)
4121 alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff);
4122 if (prealloc && alen >= len)
4123 prealloc = alen - len;
4125 /* Figure out the extent size, adjust alen */
4126 if (whichfork == XFS_COW_FORK)
4127 extsz = xfs_get_cowextsz_hint(ip);
4128 else
4129 extsz = xfs_get_extsz_hint(ip);
4130 if (extsz) {
4131 struct xfs_bmbt_irec prev;
4133 if (!xfs_iext_get_extent(ifp, *lastx - 1, &prev))
4134 prev.br_startoff = NULLFILEOFF;
4136 error = xfs_bmap_extsize_align(mp, got, &prev, extsz, rt, eof,
4137 1, 0, &aoff, &alen);
4138 ASSERT(!error);
4141 if (rt)
4142 extsz = alen / mp->m_sb.sb_rextsize;
4145 * Make a transaction-less quota reservation for delayed allocation
4146 * blocks. This number gets adjusted later. If the reservation fails
4147 * we can return straight away, as no blocks have been reserved yet.
4149 error = xfs_trans_reserve_quota_nblks(NULL, ip, (long)alen, 0,
4150 rt ? XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
4151 if (error)
4152 return error;
4155 * Adjust the superblock counters separately for alen and indlen,
4156 * since they may be drawn from different pools.
4158 indlen = (xfs_extlen_t)xfs_bmap_worst_indlen(ip, alen);
4159 ASSERT(indlen > 0);
4161 if (rt) {
4162 error = xfs_mod_frextents(mp, -((int64_t)extsz));
4163 } else {
4164 error = xfs_mod_fdblocks(mp, -((int64_t)alen), false);
4167 if (error)
4168 goto out_unreserve_quota;
4170 error = xfs_mod_fdblocks(mp, -((int64_t)indlen), false);
4171 if (error)
4172 goto out_unreserve_blocks;
4175 ip->i_delayed_blks += alen;
4177 got->br_startoff = aoff;
4178 got->br_startblock = nullstartblock(indlen);
4179 got->br_blockcount = alen;
4180 got->br_state = XFS_EXT_NORM;
4182 xfs_bmap_add_extent_hole_delay(ip, whichfork, lastx, got);
4185 * Tag the inode if blocks were preallocated. Note that COW fork
4186 * preallocation can occur at the start or end of the extent, even when
4187 * prealloc == 0, so we must also check the aligned offset and length.
4189 if (whichfork == XFS_DATA_FORK && prealloc)
4190 xfs_inode_set_eofblocks_tag(ip);
4191 if (whichfork == XFS_COW_FORK && (prealloc || aoff < off || alen > len))
4192 xfs_inode_set_cowblocks_tag(ip);
4194 return 0;
4196 out_unreserve_blocks:
4197 if (rt)
4198 xfs_mod_frextents(mp, extsz);
4199 else
4200 xfs_mod_fdblocks(mp, alen, false);
4201 out_unreserve_quota:
4202 if (XFS_IS_QUOTA_ON(mp))
4203 xfs_trans_unreserve_quota_nblks(NULL, ip, (long)alen, 0, rt ?
4204 XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
4205 return error;
4208 static int
4209 xfs_bmapi_allocate(
4210 struct xfs_bmalloca *bma)
4212 struct xfs_mount *mp = bma->ip->i_mount;
4213 int whichfork = xfs_bmapi_whichfork(bma->flags);
4214 struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork);
4215 int tmp_logflags = 0;
4216 int error;
4218 ASSERT(bma->length > 0);
4221 * For the wasdelay case, we could also just allocate the stuff asked
4222 * for in this bmap call but that wouldn't be as good.
4224 if (bma->wasdel) {
4225 bma->length = (xfs_extlen_t)bma->got.br_blockcount;
4226 bma->offset = bma->got.br_startoff;
4227 if (bma->idx) {
4228 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1),
4229 &bma->prev);
4231 } else {
4232 bma->length = XFS_FILBLKS_MIN(bma->length, MAXEXTLEN);
4233 if (!bma->eof)
4234 bma->length = XFS_FILBLKS_MIN(bma->length,
4235 bma->got.br_startoff - bma->offset);
4239 * Set the data type being allocated. For the data fork, the first data
4240 * in the file is treated differently to all other allocations. For the
4241 * attribute fork, we only need to ensure the allocated range is not on
4242 * the busy list.
4244 if (!(bma->flags & XFS_BMAPI_METADATA)) {
4245 bma->datatype = XFS_ALLOC_NOBUSY;
4246 if (whichfork == XFS_DATA_FORK) {
4247 if (bma->offset == 0)
4248 bma->datatype |= XFS_ALLOC_INITIAL_USER_DATA;
4249 else
4250 bma->datatype |= XFS_ALLOC_USERDATA;
4252 if (bma->flags & XFS_BMAPI_ZERO)
4253 bma->datatype |= XFS_ALLOC_USERDATA_ZERO;
4256 bma->minlen = (bma->flags & XFS_BMAPI_CONTIG) ? bma->length : 1;
4259 * Only want to do the alignment at the eof if it is userdata and
4260 * allocation length is larger than a stripe unit.
4262 if (mp->m_dalign && bma->length >= mp->m_dalign &&
4263 !(bma->flags & XFS_BMAPI_METADATA) && whichfork == XFS_DATA_FORK) {
4264 error = xfs_bmap_isaeof(bma, whichfork);
4265 if (error)
4266 return error;
4269 error = xfs_bmap_alloc(bma);
4270 if (error)
4271 return error;
4273 if (bma->cur)
4274 bma->cur->bc_private.b.firstblock = *bma->firstblock;
4275 if (bma->blkno == NULLFSBLOCK)
4276 return 0;
4277 if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) {
4278 bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip, whichfork);
4279 bma->cur->bc_private.b.firstblock = *bma->firstblock;
4280 bma->cur->bc_private.b.dfops = bma->dfops;
4283 * Bump the number of extents we've allocated
4284 * in this call.
4286 bma->nallocs++;
4288 if (bma->cur)
4289 bma->cur->bc_private.b.flags =
4290 bma->wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
4292 bma->got.br_startoff = bma->offset;
4293 bma->got.br_startblock = bma->blkno;
4294 bma->got.br_blockcount = bma->length;
4295 bma->got.br_state = XFS_EXT_NORM;
4298 * In the data fork, a wasdelay extent has been initialized, so
4299 * it shouldn't be flagged as unwritten.
4301 * For the cow fork, however, we convert delalloc reservations
4302 * (extents allocated for speculative preallocation) to
4303 * allocated unwritten extents, and only convert the unwritten
4304 * extents to real extents when we're about to write the data.
4306 if ((!bma->wasdel || (bma->flags & XFS_BMAPI_COWFORK)) &&
4307 (bma->flags & XFS_BMAPI_PREALLOC) &&
4308 xfs_sb_version_hasextflgbit(&mp->m_sb))
4309 bma->got.br_state = XFS_EXT_UNWRITTEN;
4311 if (bma->wasdel)
4312 error = xfs_bmap_add_extent_delay_real(bma, whichfork);
4313 else
4314 error = xfs_bmap_add_extent_hole_real(bma->tp, bma->ip,
4315 whichfork, &bma->idx, &bma->cur, &bma->got,
4316 bma->firstblock, bma->dfops, &bma->logflags);
4318 bma->logflags |= tmp_logflags;
4319 if (error)
4320 return error;
4323 * Update our extent pointer, given that xfs_bmap_add_extent_delay_real
4324 * or xfs_bmap_add_extent_hole_real might have merged it into one of
4325 * the neighbouring ones.
4327 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &bma->got);
4329 ASSERT(bma->got.br_startoff <= bma->offset);
4330 ASSERT(bma->got.br_startoff + bma->got.br_blockcount >=
4331 bma->offset + bma->length);
4332 ASSERT(bma->got.br_state == XFS_EXT_NORM ||
4333 bma->got.br_state == XFS_EXT_UNWRITTEN);
4334 return 0;
4337 STATIC int
4338 xfs_bmapi_convert_unwritten(
4339 struct xfs_bmalloca *bma,
4340 struct xfs_bmbt_irec *mval,
4341 xfs_filblks_t len,
4342 int flags)
4344 int whichfork = xfs_bmapi_whichfork(flags);
4345 struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork);
4346 int tmp_logflags = 0;
4347 int error;
4349 /* check if we need to do unwritten->real conversion */
4350 if (mval->br_state == XFS_EXT_UNWRITTEN &&
4351 (flags & XFS_BMAPI_PREALLOC))
4352 return 0;
4354 /* check if we need to do real->unwritten conversion */
4355 if (mval->br_state == XFS_EXT_NORM &&
4356 (flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) !=
4357 (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT))
4358 return 0;
4361 * Modify (by adding) the state flag, if writing.
4363 ASSERT(mval->br_blockcount <= len);
4364 if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) {
4365 bma->cur = xfs_bmbt_init_cursor(bma->ip->i_mount, bma->tp,
4366 bma->ip, whichfork);
4367 bma->cur->bc_private.b.firstblock = *bma->firstblock;
4368 bma->cur->bc_private.b.dfops = bma->dfops;
4370 mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN)
4371 ? XFS_EXT_NORM : XFS_EXT_UNWRITTEN;
4374 * Before insertion into the bmbt, zero the range being converted
4375 * if required.
4377 if (flags & XFS_BMAPI_ZERO) {
4378 error = xfs_zero_extent(bma->ip, mval->br_startblock,
4379 mval->br_blockcount);
4380 if (error)
4381 return error;
4384 error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, whichfork,
4385 &bma->idx, &bma->cur, mval, bma->firstblock, bma->dfops,
4386 &tmp_logflags);
4388 * Log the inode core unconditionally in the unwritten extent conversion
4389 * path because the conversion might not have done so (e.g., if the
4390 * extent count hasn't changed). We need to make sure the inode is dirty
4391 * in the transaction for the sake of fsync(), even if nothing has
4392 * changed, because fsync() will not force the log for this transaction
4393 * unless it sees the inode pinned.
4395 * Note: If we're only converting cow fork extents, there aren't
4396 * any on-disk updates to make, so we don't need to log anything.
4398 if (whichfork != XFS_COW_FORK)
4399 bma->logflags |= tmp_logflags | XFS_ILOG_CORE;
4400 if (error)
4401 return error;
4404 * Update our extent pointer, given that
4405 * xfs_bmap_add_extent_unwritten_real might have merged it into one
4406 * of the neighbouring ones.
4408 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &bma->got);
4411 * We may have combined previously unwritten space with written space,
4412 * so generate another request.
4414 if (mval->br_blockcount < len)
4415 return -EAGAIN;
4416 return 0;
4420 * Map file blocks to filesystem blocks, and allocate blocks or convert the
4421 * extent state if necessary. Detailed behaviour is controlled by the flags
4422 * parameter. Only allocates blocks from a single allocation group, to avoid
4423 * locking problems.
4425 * The returned value in "firstblock" from the first call in a transaction
4426 * must be remembered and presented to subsequent calls in "firstblock".
4427 * An upper bound for the number of blocks to be allocated is supplied to
4428 * the first call in "total"; if no allocation group has that many free
4429 * blocks then the call will fail (return NULLFSBLOCK in "firstblock").
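*
* A hedged sketch of that calling convention (the local names are
* illustrative, and the xfs_defer_init() setup is assumed from the
* pattern used by callers elsewhere, not defined here):
*
*	xfs_fsblock_t		firstfsb = NULLFSBLOCK;
*	struct xfs_defer_ops	dfops;
*	struct xfs_bmbt_irec	imap;
*	int			nimaps = 1;
*
*	xfs_defer_init(&dfops, &firstfsb);
*	error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb, 0,
*			&firstfsb, resblks, &imap, &nimaps, &dfops);
*
* The same firstfsb and dfops must then be passed to any further
* xfs_bmapi_write() calls made in the same transaction.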
4432 xfs_bmapi_write(
4433 struct xfs_trans *tp, /* transaction pointer */
4434 struct xfs_inode *ip, /* incore inode */
4435 xfs_fileoff_t bno, /* starting file offs. mapped */
4436 xfs_filblks_t len, /* length to map in file */
4437 int flags, /* XFS_BMAPI_... */
4438 xfs_fsblock_t *firstblock, /* first allocated block
4439 controls a.g. for allocs */
4440 xfs_extlen_t total, /* total blocks needed */
4441 struct xfs_bmbt_irec *mval, /* output: map values */
4442 int *nmap, /* i/o: mval size/count */
4443 struct xfs_defer_ops *dfops) /* i/o: list extents to free */
4445 struct xfs_mount *mp = ip->i_mount;
4446 struct xfs_ifork *ifp;
4447 struct xfs_bmalloca bma = { NULL }; /* args for xfs_bmap_alloc */
4448 xfs_fileoff_t end; /* end of mapped file region */
4449 bool eof = false; /* after the end of extents */
4450 int error; /* error return */
4451 int n; /* current extent index */
4452 xfs_fileoff_t obno; /* old block number (offset) */
4453 int whichfork; /* data or attr fork */
4455 #ifdef DEBUG
4456 xfs_fileoff_t orig_bno; /* original block number value */
4457 int orig_flags; /* original flags arg value */
4458 xfs_filblks_t orig_len; /* original value of len arg */
4459 struct xfs_bmbt_irec *orig_mval; /* original value of mval */
4460 int orig_nmap; /* original value of *nmap */
4462 orig_bno = bno;
4463 orig_len = len;
4464 orig_flags = flags;
4465 orig_mval = mval;
4466 orig_nmap = *nmap;
4467 #endif
4468 whichfork = xfs_bmapi_whichfork(flags);
4470 ASSERT(*nmap >= 1);
4471 ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
4472 ASSERT(!(flags & XFS_BMAPI_IGSTATE));
4473 ASSERT(tp != NULL ||
4474 (flags & (XFS_BMAPI_CONVERT | XFS_BMAPI_COWFORK)) ==
4475 (XFS_BMAPI_CONVERT | XFS_BMAPI_COWFORK));
4476 ASSERT(len > 0);
4477 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL);
4478 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
4479 ASSERT(!(flags & XFS_BMAPI_REMAP));
4481 /* zeroing is currently only for data extents, not metadata */
4482 ASSERT((flags & (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO)) !=
4483 (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO));
4485 * we can allocate unwritten extents or pre-zero allocated blocks,
4486 * but it makes no sense to do both at once. This would result in
4487 * zeroing the unwritten extent twice while it still remained an
4488 * unwritten extent.
4490 ASSERT((flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO)) !=
4491 (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO));
4493 if (unlikely(XFS_TEST_ERROR(
4494 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
4495 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
4496 mp, XFS_ERRTAG_BMAPIFORMAT))) {
4497 XFS_ERROR_REPORT("xfs_bmapi_write", XFS_ERRLEVEL_LOW, mp);
4498 return -EFSCORRUPTED;
4501 if (XFS_FORCED_SHUTDOWN(mp))
4502 return -EIO;
4504 ifp = XFS_IFORK_PTR(ip, whichfork);
4506 XFS_STATS_INC(mp, xs_blk_mapw);
4508 if (*firstblock == NULLFSBLOCK) {
4509 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE)
4510 bma.minleft = be16_to_cpu(ifp->if_broot->bb_level) + 1;
4511 else
4512 bma.minleft = 1;
4513 } else {
4514 bma.minleft = 0;
4517 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
4518 error = xfs_iread_extents(tp, ip, whichfork);
4519 if (error)
4520 goto error0;
4523 n = 0;
4524 end = bno + len;
4525 obno = bno;
4527 if (!xfs_iext_lookup_extent(ip, ifp, bno, &bma.idx, &bma.got))
4528 eof = true;
4529 if (!xfs_iext_get_extent(ifp, bma.idx - 1, &bma.prev))
4530 bma.prev.br_startoff = NULLFILEOFF;
4531 bma.tp = tp;
4532 bma.ip = ip;
4533 bma.total = total;
4534 bma.datatype = 0;
4535 bma.dfops = dfops;
4536 bma.firstblock = firstblock;
4538 while (bno < end && n < *nmap) {
4539 bool need_alloc = false, wasdelay = false;
4541 /* in hole or beyond EOF? */
4542 if (eof || bma.got.br_startoff > bno) {
4543 if (flags & XFS_BMAPI_DELALLOC) {
4545 * For the COW fork we can reasonably get a
4546 * request for converting an extent that races
4547 * with other threads already having converted
4548 * part of it, because converting COW blocks
4549 * to regular blocks is not protected by the
4550 * IOLOCK.
4552 ASSERT(flags & XFS_BMAPI_COWFORK);
4553 if (!(flags & XFS_BMAPI_COWFORK)) {
4554 error = -EIO;
4555 goto error0;
4558 if (eof || bno >= end)
4559 break;
4560 } else {
4561 need_alloc = true;
4563 } else if (isnullstartblock(bma.got.br_startblock)) {
4564 wasdelay = true;
4568 * First, deal with the hole before the allocated space
4569 * that we found, if any.
4571 if (need_alloc || wasdelay) {
4572 bma.eof = eof;
4573 bma.conv = !!(flags & XFS_BMAPI_CONVERT);
4574 bma.wasdel = wasdelay;
4575 bma.offset = bno;
4576 bma.flags = flags;
4579 * There's a 32/64 bit type mismatch between the
4580 * allocation length request (which can be 64 bits in
4581 * length) and the bma length request, which is
4582 * xfs_extlen_t and therefore 32 bits. Hence we have to
4583 * check for 32-bit overflows and handle them here.
4585 if (len > (xfs_filblks_t)MAXEXTLEN)
4586 bma.length = MAXEXTLEN;
4587 else
4588 bma.length = len;
4590 ASSERT(len > 0);
4591 ASSERT(bma.length > 0);
4592 error = xfs_bmapi_allocate(&bma);
4593 if (error)
4594 goto error0;
4595 if (bma.blkno == NULLFSBLOCK)
4596 break;
4599 * If this is a CoW allocation, record the data in
4600 * the refcount btree for orphan recovery.
4602 if (whichfork == XFS_COW_FORK) {
4603 error = xfs_refcount_alloc_cow_extent(mp, dfops,
4604 bma.blkno, bma.length);
4605 if (error)
4606 goto error0;
4610 /* Deal with the allocated space we found. */
4611 xfs_bmapi_trim_map(mval, &bma.got, &bno, len, obno,
4612 end, n, flags);
4614 /* Execute unwritten extent conversion if necessary */
4615 error = xfs_bmapi_convert_unwritten(&bma, mval, len, flags);
4616 if (error == -EAGAIN)
4617 continue;
4618 if (error)
4619 goto error0;
4621 /* update the extent map to return */
4622 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
4625 * If we're done, stop now. Stop when we've allocated
4626 * XFS_BMAP_MAX_NMAP extents no matter what. Otherwise
4627 * the transaction may get too big.
4629 if (bno >= end || n >= *nmap || bma.nallocs >= *nmap)
4630 break;
4632 /* Else go on to the next record. */
4633 bma.prev = bma.got;
4634 if (!xfs_iext_get_extent(ifp, ++bma.idx, &bma.got))
4635 eof = true;
4637 *nmap = n;
4640 * Transform from btree to extents, give it cur.
4642 if (xfs_bmap_wants_extents(ip, whichfork)) {
4643 int tmp_logflags = 0;
4645 ASSERT(bma.cur);
4646 error = xfs_bmap_btree_to_extents(tp, ip, bma.cur,
4647 &tmp_logflags, whichfork);
4648 bma.logflags |= tmp_logflags;
4649 if (error)
4650 goto error0;
4653 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE ||
4654 XFS_IFORK_NEXTENTS(ip, whichfork) >
4655 XFS_IFORK_MAXEXT(ip, whichfork));
4656 error = 0;
4657 error0:
4659 * Log everything. Do this after conversion; there's no point in
4660 * logging the extent records if we've converted to btree format.
4662 if ((bma.logflags & xfs_ilog_fext(whichfork)) &&
4663 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
4664 bma.logflags &= ~xfs_ilog_fext(whichfork);
4665 else if ((bma.logflags & xfs_ilog_fbroot(whichfork)) &&
4666 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
4667 bma.logflags &= ~xfs_ilog_fbroot(whichfork);
4669 * Log whatever the flags say, even on error. Otherwise we might miss
4670 * detecting a case where the data is changed, there's an error,
4671 * and it's not logged, so we don't shut down when we should.
4673 if (bma.logflags)
4674 xfs_trans_log_inode(tp, ip, bma.logflags);
4676 if (bma.cur) {
4677 if (!error) {
4678 ASSERT(*firstblock == NULLFSBLOCK ||
4679 XFS_FSB_TO_AGNO(mp, *firstblock) <=
4680 XFS_FSB_TO_AGNO(mp,
4681 bma.cur->bc_private.b.firstblock));
4682 *firstblock = bma.cur->bc_private.b.firstblock;
4684 xfs_btree_del_cursor(bma.cur,
4685 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
4687 if (!error)
4688 xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval,
4689 orig_nmap, *nmap);
4690 return error;
4693 static int
4694 xfs_bmapi_remap(
4695 struct xfs_trans *tp,
4696 struct xfs_inode *ip,
4697 xfs_fileoff_t bno,
4698 xfs_filblks_t len,
4699 xfs_fsblock_t startblock,
4700 struct xfs_defer_ops *dfops)
4702 struct xfs_mount *mp = ip->i_mount;
4703 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
4704 struct xfs_btree_cur *cur = NULL;
4705 xfs_fsblock_t firstblock = NULLFSBLOCK;
4706 struct xfs_bmbt_irec got;
4707 xfs_extnum_t idx;
4708 int logflags = 0, error;
4710 ASSERT(len > 0);
4711 ASSERT(len <= (xfs_filblks_t)MAXEXTLEN);
4712 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
4714 if (unlikely(XFS_TEST_ERROR(
4715 (XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_EXTENTS &&
4716 XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_BTREE),
4717 mp, XFS_ERRTAG_BMAPIFORMAT))) {
4718 XFS_ERROR_REPORT("xfs_bmapi_remap", XFS_ERRLEVEL_LOW, mp);
4719 return -EFSCORRUPTED;
4722 if (XFS_FORCED_SHUTDOWN(mp))
4723 return -EIO;
4725 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
4726 error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
4727 if (error)
4728 return error;
4731 if (xfs_iext_lookup_extent(ip, ifp, bno, &idx, &got)) {
4732 /* make sure we only reflink into a hole. */
4733 ASSERT(got.br_startoff > bno);
4734 ASSERT(got.br_startoff - bno >= len);
4737 ip->i_d.di_nblocks += len;
4738 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
4740 if (ifp->if_flags & XFS_IFBROOT) {
4741 cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
4742 cur->bc_private.b.firstblock = firstblock;
4743 cur->bc_private.b.dfops = dfops;
4744 cur->bc_private.b.flags = 0;
4747 got.br_startoff = bno;
4748 got.br_startblock = startblock;
4749 got.br_blockcount = len;
4750 got.br_state = XFS_EXT_NORM;
4752 error = xfs_bmap_add_extent_hole_real(tp, ip, XFS_DATA_FORK, &idx, &cur,
4753 &got, &firstblock, dfops, &logflags);
4754 if (error)
4755 goto error0;
4757 if (xfs_bmap_wants_extents(ip, XFS_DATA_FORK)) {
4758 int tmp_logflags = 0;
4760 error = xfs_bmap_btree_to_extents(tp, ip, cur,
4761 &tmp_logflags, XFS_DATA_FORK);
4762 logflags |= tmp_logflags;
4765 error0:
4766 if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS)
4767 logflags &= ~XFS_ILOG_DEXT;
4768 else if (ip->i_d.di_format != XFS_DINODE_FMT_BTREE)
4769 logflags &= ~XFS_ILOG_DBROOT;
4771 if (logflags)
4772 xfs_trans_log_inode(tp, ip, logflags);
4773 if (cur) {
4774 xfs_btree_del_cursor(cur,
4775 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
4777 return error;
4781 * When a delalloc extent is split (e.g., due to a hole punch), the original
4782 * indlen reservation must be shared across the two new extents that are left
4783 * behind.
4785 * Given the original reservation and the worst case indlen for the two new
4786 * extents (as calculated by xfs_bmap_worst_indlen()), split the original
4787 * reservation fairly across the two new extents. If necessary, steal available
4788 * blocks from a deleted extent to make up a reservation deficiency (e.g., if
4789 * ores == 1). The number of stolen blocks is returned. The availability and
4790 * subsequent accounting of stolen blocks is the responsibility of the caller.
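*
* A worked example with made-up numbers: ores = 10, *indlen1 = 8,
* *indlen2 = 6 and avail = 0 gives nres = 14 and resfactor = 71, so
* len1 becomes 5 and len2 becomes 4; the single leftover block then
* goes to len1, and the function returns *indlen1 = 6, *indlen2 = 4
* with nothing stolen.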
4792 static xfs_filblks_t
4793 xfs_bmap_split_indlen(
4794 xfs_filblks_t ores, /* original res. */
4795 xfs_filblks_t *indlen1, /* ext1 worst indlen */
4796 xfs_filblks_t *indlen2, /* ext2 worst indlen */
4797 xfs_filblks_t avail) /* stealable blocks */
4799 xfs_filblks_t len1 = *indlen1;
4800 xfs_filblks_t len2 = *indlen2;
4801 xfs_filblks_t nres = len1 + len2; /* new total res. */
4802 xfs_filblks_t stolen = 0;
4803 xfs_filblks_t resfactor;
4806 * Steal as many blocks as we can to try and satisfy the worst case
4807 * indlen for both new extents.
4809 if (ores < nres && avail)
4810 stolen = XFS_FILBLKS_MIN(nres - ores, avail);
4811 ores += stolen;
4813 /* nothing else to do if we've satisfied the new reservation */
4814 if (ores >= nres)
4815 return stolen;
4818 * We can't meet the total required reservation for the two extents.
4819 * Calculate what percentage of the required reservation we can
4820 * actually cover and scale each requested indlen value by it.
4821 * This distributes the shortage fairly and reduces the chances that one
4822 * of the two extents is left with nothing when extents are repeatedly
4823 * split.
4825 resfactor = (ores * 100);
4826 do_div(resfactor, nres);
4827 len1 *= resfactor;
4828 do_div(len1, 100);
4829 len2 *= resfactor;
4830 do_div(len2, 100);
4831 ASSERT(len1 + len2 <= ores);
4832 ASSERT(len1 < *indlen1 && len2 < *indlen2);
4835 * Hand out the remainder to each extent. If one of the two reservations
4836 * is zero, we want to make sure that one gets a block first. The loop
4837 * below starts with len1, so hand len2 a block right off the bat if it
4838 * is zero.
4840 ores -= (len1 + len2);
4841 ASSERT((*indlen1 - len1) + (*indlen2 - len2) >= ores);
4842 if (ores && !len2 && *indlen2) {
4843 len2++;
4844 ores--;
4846 while (ores) {
4847 if (len1 < *indlen1) {
4848 len1++;
4849 ores--;
4851 if (!ores)
4852 break;
4853 if (len2 < *indlen2) {
4854 len2++;
4855 ores--;
4859 *indlen1 = len1;
4860 *indlen2 = len2;
4862 return stolen;
4866 xfs_bmap_del_extent_delay(
4867 struct xfs_inode *ip,
4868 int whichfork,
4869 xfs_extnum_t *idx,
4870 struct xfs_bmbt_irec *got,
4871 struct xfs_bmbt_irec *del)
4873 struct xfs_mount *mp = ip->i_mount;
4874 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
4875 struct xfs_bmbt_irec new;
4876 int64_t da_old, da_new, da_diff = 0;
4877 xfs_fileoff_t del_endoff, got_endoff;
4878 xfs_filblks_t got_indlen, new_indlen, stolen;
4879 int error = 0, state = 0;
4880 bool isrt;
4882 XFS_STATS_INC(mp, xs_del_exlist);
4884 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
4885 del_endoff = del->br_startoff + del->br_blockcount;
4886 got_endoff = got->br_startoff + got->br_blockcount;
4887 da_old = startblockval(got->br_startblock);
4888 da_new = 0;
4890 ASSERT(*idx >= 0);
4891 ASSERT(*idx <= xfs_iext_count(ifp));
4892 ASSERT(del->br_blockcount > 0);
4893 ASSERT(got->br_startoff <= del->br_startoff);
4894 ASSERT(got_endoff >= del_endoff);
4896 if (isrt) {
4897 uint64_t rtexts = XFS_FSB_TO_B(mp, del->br_blockcount);
4899 do_div(rtexts, mp->m_sb.sb_rextsize);
4900 xfs_mod_frextents(mp, rtexts);
4904 * Update the inode delalloc counter now and wait to update the
4905 * sb counters as we might have to borrow some blocks for the
4906 * indirect block accounting.
4908 error = xfs_trans_reserve_quota_nblks(NULL, ip,
4909 -((long)del->br_blockcount), 0,
4910 isrt ? XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
4911 if (error)
4912 return error;
4913 ip->i_delayed_blks -= del->br_blockcount;
4915 if (whichfork == XFS_COW_FORK)
4916 state |= BMAP_COWFORK;
4918 if (got->br_startoff == del->br_startoff)
4919 state |= BMAP_LEFT_CONTIG;
4920 if (got_endoff == del_endoff)
4921 state |= BMAP_RIGHT_CONTIG;
4923 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
4924 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
4926 * Matches the whole extent. Delete the entry.
4928 xfs_iext_remove(ip, *idx, 1, state);
4929 --*idx;
4930 break;
4931 case BMAP_LEFT_CONTIG:
4933 * Deleting the first part of the extent.
4935 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
4936 got->br_startoff = del_endoff;
4937 got->br_blockcount -= del->br_blockcount;
4938 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
4939 got->br_blockcount), da_old);
4940 got->br_startblock = nullstartblock((int)da_new);
4941 xfs_iext_update_extent(ifp, *idx, got);
4942 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
4943 break;
4944 case BMAP_RIGHT_CONTIG:
4946 * Deleting the last part of the extent.
4948 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
4949 got->br_blockcount = got->br_blockcount - del->br_blockcount;
4950 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
4951 got->br_blockcount), da_old);
4952 got->br_startblock = nullstartblock((int)da_new);
4953 xfs_iext_update_extent(ifp, *idx, got);
4954 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
4955 break;
4956 case 0:
4958 * Deleting the middle of the extent.
4960 * Distribute the original indlen reservation across the two new
4961 * extents. Steal blocks from the deleted extent if necessary.
4962 * Stealing blocks simply fudges the fdblocks accounting below.
4963 * Warn if either of the new indlen reservations is zero as this
4964 * can lead to delalloc problems.
4966 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
4968 got->br_blockcount = del->br_startoff - got->br_startoff;
4969 got_indlen = xfs_bmap_worst_indlen(ip, got->br_blockcount);
4971 new.br_blockcount = got_endoff - del_endoff;
4972 new_indlen = xfs_bmap_worst_indlen(ip, new.br_blockcount);
4974 WARN_ON_ONCE(!got_indlen || !new_indlen);
4975 stolen = xfs_bmap_split_indlen(da_old, &got_indlen, &new_indlen,
4976 del->br_blockcount);
4978 got->br_startblock = nullstartblock((int)got_indlen);
4979 xfs_iext_update_extent(ifp, *idx, got);
4980 trace_xfs_bmap_post_update(ip, *idx, 0, _THIS_IP_);
4982 new.br_startoff = del_endoff;
4983 new.br_state = got->br_state;
4984 new.br_startblock = nullstartblock((int)new_indlen);
4986 ++*idx;
4987 xfs_iext_insert(ip, *idx, 1, &new, state);
4989 da_new = got_indlen + new_indlen - stolen;
4990 del->br_blockcount -= stolen;
4991 break;
4994 ASSERT(da_old >= da_new);
4995 da_diff = da_old - da_new;
4996 if (!isrt)
4997 da_diff += del->br_blockcount;
4998 if (da_diff)
4999 xfs_mod_fdblocks(mp, da_diff, false);
5000 return error;
5003 void
5004 xfs_bmap_del_extent_cow(
5005 struct xfs_inode *ip,
5006 xfs_extnum_t *idx,
5007 struct xfs_bmbt_irec *got,
5008 struct xfs_bmbt_irec *del)
5010 struct xfs_mount *mp = ip->i_mount;
5011 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
5012 struct xfs_bmbt_irec new;
5013 xfs_fileoff_t del_endoff, got_endoff;
5014 int state = BMAP_COWFORK;
5016 XFS_STATS_INC(mp, xs_del_exlist);
5018 del_endoff = del->br_startoff + del->br_blockcount;
5019 got_endoff = got->br_startoff + got->br_blockcount;
5021 ASSERT(*idx >= 0);
5022 ASSERT(*idx <= xfs_iext_count(ifp));
5023 ASSERT(del->br_blockcount > 0);
5024 ASSERT(got->br_startoff <= del->br_startoff);
5025 ASSERT(got_endoff >= del_endoff);
5026 ASSERT(!isnullstartblock(got->br_startblock));
5028 if (got->br_startoff == del->br_startoff)
5029 state |= BMAP_LEFT_CONTIG;
5030 if (got_endoff == del_endoff)
5031 state |= BMAP_RIGHT_CONTIG;
5033 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
5034 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
5036 * Matches the whole extent. Delete the entry.
5038 xfs_iext_remove(ip, *idx, 1, state);
5039 --*idx;
5040 break;
5041 case BMAP_LEFT_CONTIG:
5043 * Deleting the first part of the extent.
5045 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
5046 got->br_startoff = del_endoff;
5047 got->br_blockcount -= del->br_blockcount;
5048 got->br_startblock = del->br_startblock + del->br_blockcount;
5049 xfs_iext_update_extent(ifp, *idx, got);
5050 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
5051 break;
5052 case BMAP_RIGHT_CONTIG:
5054 * Deleting the last part of the extent.
5056 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
5057 got->br_blockcount -= del->br_blockcount;
5058 xfs_iext_update_extent(ifp, *idx, got);
5059 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
5060 break;
5061 case 0:
5063 * Deleting the middle of the extent.
5065 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
5066 got->br_blockcount = del->br_startoff - got->br_startoff;
5067 xfs_iext_update_extent(ifp, *idx, got);
5068 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
5070 new.br_startoff = del_endoff;
5071 new.br_blockcount = got_endoff - del_endoff;
5072 new.br_state = got->br_state;
5073 new.br_startblock = del->br_startblock + del->br_blockcount;
5075 ++*idx;
5076 xfs_iext_insert(ip, *idx, 1, &new, state);
5077 break;
5082 * Called by xfs_bmapi to update file extent records and the btree
5083 * after removing space (or undoing a delayed allocation).
5085 STATIC int /* error */
5086 xfs_bmap_del_extent(
5087 xfs_inode_t *ip, /* incore inode pointer */
5088 xfs_trans_t *tp, /* current transaction pointer */
5089 xfs_extnum_t *idx, /* extent number to update/delete */
5090 struct xfs_defer_ops *dfops, /* list of extents to be freed */
5091 xfs_btree_cur_t *cur, /* if null, not a btree */
5092 xfs_bmbt_irec_t *del, /* data to remove from extents */
5093 int *logflagsp, /* inode logging flags */
5094 int whichfork, /* data or attr fork */
5095 int bflags) /* bmapi flags */
5097 xfs_filblks_t da_new; /* new delay-alloc indirect blocks */
5098 xfs_filblks_t da_old; /* old delay-alloc indirect blocks */
5099 xfs_fsblock_t del_endblock=0; /* first block past del */
5100 xfs_fileoff_t del_endoff; /* first offset past del */
5101 int delay; /* current block is delayed allocated */
5102 int do_fx; /* free extent at end of routine */
5103 xfs_bmbt_rec_host_t *ep; /* current extent entry pointer */
5104 int error; /* error return value */
5105 int flags; /* inode logging flags */
5106 xfs_bmbt_irec_t got; /* current extent entry */
5107 xfs_fileoff_t got_endoff; /* first offset past got */
5108 int i; /* temp state */
5109 xfs_ifork_t *ifp; /* inode fork pointer */
5110 xfs_mount_t *mp; /* mount structure */
5111 xfs_filblks_t nblks; /* quota/sb block count */
5112 xfs_bmbt_irec_t new; /* new record to be inserted */
5113 /* REFERENCED */
5114 uint qfield; /* quota field to update */
5115 xfs_filblks_t temp; /* for indirect length calculations */
5116 xfs_filblks_t temp2; /* for indirect length calculations */
5117 int state = 0;
5119 mp = ip->i_mount;
5120 XFS_STATS_INC(mp, xs_del_exlist);
5122 if (whichfork == XFS_ATTR_FORK)
5123 state |= BMAP_ATTRFORK;
5124 else if (whichfork == XFS_COW_FORK)
5125 state |= BMAP_COWFORK;
5127 ifp = XFS_IFORK_PTR(ip, whichfork);
5128 ASSERT((*idx >= 0) && (*idx < xfs_iext_count(ifp)));
5129 ASSERT(del->br_blockcount > 0);
5130 ep = xfs_iext_get_ext(ifp, *idx);
5131 xfs_bmbt_get_all(ep, &got);
5132 ASSERT(got.br_startoff <= del->br_startoff);
5133 del_endoff = del->br_startoff + del->br_blockcount;
5134 got_endoff = got.br_startoff + got.br_blockcount;
5135 ASSERT(got_endoff >= del_endoff);
5136 delay = isnullstartblock(got.br_startblock);
5137 ASSERT(isnullstartblock(del->br_startblock) == delay);
5138 flags = 0;
5139 qfield = 0;
5140 error = 0;
5142 * If deleting a real allocation, must free up the disk space.
5144 if (!delay) {
5145 flags = XFS_ILOG_CORE;
5147 * Realtime allocation. Free it and record di_nblocks update.
5149 if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) {
5150 xfs_fsblock_t bno;
5151 xfs_filblks_t len;
5153 ASSERT(do_mod(del->br_blockcount,
5154 mp->m_sb.sb_rextsize) == 0);
5155 ASSERT(do_mod(del->br_startblock,
5156 mp->m_sb.sb_rextsize) == 0);
5157 bno = del->br_startblock;
5158 len = del->br_blockcount;
5159 do_div(bno, mp->m_sb.sb_rextsize);
5160 do_div(len, mp->m_sb.sb_rextsize);
5161 error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len);
5162 if (error)
5163 goto done;
5164 do_fx = 0;
5165 nblks = len * mp->m_sb.sb_rextsize;
5166 qfield = XFS_TRANS_DQ_RTBCOUNT;
5169 * Ordinary allocation.
5171 else {
5172 do_fx = 1;
5173 nblks = del->br_blockcount;
5174 qfield = XFS_TRANS_DQ_BCOUNT;
5177 * Set up del_endblock and cur for later.
5179 del_endblock = del->br_startblock + del->br_blockcount;
5180 if (cur) {
5181 if ((error = xfs_bmbt_lookup_eq(cur, got.br_startoff,
5182 got.br_startblock, got.br_blockcount,
5183 &i)))
5184 goto done;
5185 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
5187 da_old = da_new = 0;
5188 } else {
5189 da_old = startblockval(got.br_startblock);
5190 da_new = 0;
5191 nblks = 0;
5192 do_fx = 0;
5196 * Set flag value to use in switch statement.
5197 * Left-contig is 2, right-contig is 1.
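* So case 3 below removes the whole extent, case 2 trims the front,
* case 1 trims the back, and case 0 punches out the middle and splits
* the record in two.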
5199 switch (((got.br_startoff == del->br_startoff) << 1) |
5200 (got_endoff == del_endoff)) {
5201 case 3:
5203 * Matches the whole extent. Delete the entry.
5205 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
5206 xfs_iext_remove(ip, *idx, 1,
5207 whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0);
5208 --*idx;
5209 if (delay)
5210 break;
5212 XFS_IFORK_NEXT_SET(ip, whichfork,
5213 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
5214 flags |= XFS_ILOG_CORE;
5215 if (!cur) {
5216 flags |= xfs_ilog_fext(whichfork);
5217 break;
5219 if ((error = xfs_btree_delete(cur, &i)))
5220 goto done;
5221 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
5222 break;
5224 case 2:
5226 * Deleting the first part of the extent.
5228 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
5229 xfs_bmbt_set_startoff(ep, del_endoff);
5230 temp = got.br_blockcount - del->br_blockcount;
5231 xfs_bmbt_set_blockcount(ep, temp);
5232 if (delay) {
5233 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
5234 da_old);
5235 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
5236 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
5237 da_new = temp;
5238 break;
5240 xfs_bmbt_set_startblock(ep, del_endblock);
5241 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
5242 if (!cur) {
5243 flags |= xfs_ilog_fext(whichfork);
5244 break;
5246 if ((error = xfs_bmbt_update(cur, del_endoff, del_endblock,
5247 got.br_blockcount - del->br_blockcount,
5248 got.br_state)))
5249 goto done;
5250 break;
5252 case 1:
5254 * Deleting the last part of the extent.
5256 temp = got.br_blockcount - del->br_blockcount;
5257 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
5258 xfs_bmbt_set_blockcount(ep, temp);
5259 if (delay) {
5260 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
5261 da_old);
5262 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
5263 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
5264 da_new = temp;
5265 break;
5267 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
5268 if (!cur) {
5269 flags |= xfs_ilog_fext(whichfork);
5270 break;
5272 if ((error = xfs_bmbt_update(cur, got.br_startoff,
5273 got.br_startblock,
5274 got.br_blockcount - del->br_blockcount,
5275 got.br_state)))
5276 goto done;
5277 break;
5279 case 0:
5281 * Deleting the middle of the extent.
5283 temp = del->br_startoff - got.br_startoff;
5284 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
5285 xfs_bmbt_set_blockcount(ep, temp);
5286 new.br_startoff = del_endoff;
5287 temp2 = got_endoff - del_endoff;
5288 new.br_blockcount = temp2;
5289 new.br_state = got.br_state;
5290 if (!delay) {
5291 new.br_startblock = del_endblock;
5292 flags |= XFS_ILOG_CORE;
5293 if (cur) {
5294 if ((error = xfs_bmbt_update(cur,
5295 got.br_startoff,
5296 got.br_startblock, temp,
5297 got.br_state)))
5298 goto done;
5299 if ((error = xfs_btree_increment(cur, 0, &i)))
5300 goto done;
5301 cur->bc_rec.b = new;
5302 error = xfs_btree_insert(cur, &i);
5303 if (error && error != -ENOSPC)
5304 goto done;
5306 * If we get no-space back from the btree
5307 * insert, it tried a split and we have a
5308 * zero block reservation.
5309 * Fix up our state and return the error.
5311 if (error == -ENOSPC) {
5313 * Reset the cursor, don't trust
5314 * it after any insert operation.
5316 if ((error = xfs_bmbt_lookup_eq(cur,
5317 got.br_startoff,
5318 got.br_startblock,
5319 temp, &i)))
5320 goto done;
5321 XFS_WANT_CORRUPTED_GOTO(mp,
5322 i == 1, done);
5324 * Update the btree record back
5325 * to the original value.
5327 if ((error = xfs_bmbt_update(cur,
5328 got.br_startoff,
5329 got.br_startblock,
5330 got.br_blockcount,
5331 got.br_state)))
5332 goto done;
5334 * Reset the extent record back
5335 * to the original value.
5337 xfs_bmbt_set_blockcount(ep,
5338 got.br_blockcount);
5339 flags = 0;
5340 error = -ENOSPC;
5341 goto done;
5343 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
5344 } else
5345 flags |= xfs_ilog_fext(whichfork);
5346 XFS_IFORK_NEXT_SET(ip, whichfork,
5347 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
5348 } else {
5349 xfs_filblks_t stolen;
5350 ASSERT(whichfork == XFS_DATA_FORK);
5353 * Distribute the original indlen reservation across the
5354 * two new extents. Steal blocks from the deleted extent
5355 * if necessary. Stealing blocks simply fudges the
5356 * fdblocks accounting in xfs_bunmapi().
5358 temp = xfs_bmap_worst_indlen(ip, got.br_blockcount);
5359 temp2 = xfs_bmap_worst_indlen(ip, new.br_blockcount);
5360 stolen = xfs_bmap_split_indlen(da_old, &temp, &temp2,
5361 del->br_blockcount);
5362 da_new = temp + temp2 - stolen;
5363 del->br_blockcount -= stolen;
5366 * Set the reservation for each extent. Warn if either
5367 * is zero as this can lead to delalloc problems.
5369 WARN_ON_ONCE(!temp || !temp2);
5370 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
5371 new.br_startblock = nullstartblock((int)temp2);
5373 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
5374 xfs_iext_insert(ip, *idx + 1, 1, &new, state);
5375 ++*idx;
5376 break;
5379 /* remove reverse mapping */
5380 if (!delay) {
5381 error = xfs_rmap_unmap_extent(mp, dfops, ip, whichfork, del);
5382 if (error)
5383 goto done;
5387 * If we need to, add to list of extents to delete.
5389 if (do_fx && !(bflags & XFS_BMAPI_REMAP)) {
5390 if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) {
5391 error = xfs_refcount_decrease_extent(mp, dfops, del);
5392 if (error)
5393 goto done;
5394 } else
5395 xfs_bmap_add_free(mp, dfops, del->br_startblock,
5396 del->br_blockcount, NULL);
5400 * Adjust inode # blocks in the file.
5402 if (nblks)
5403 ip->i_d.di_nblocks -= nblks;
5405 * Adjust quota data.
5407 if (qfield && !(bflags & XFS_BMAPI_REMAP))
5408 xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks);
5411 * Account for change in delayed indirect blocks.
5412 * Nothing to do for disk quota accounting here.
5414 ASSERT(da_old >= da_new);
5415 if (da_old > da_new)
5416 xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new), false);
5417 done:
5418 *logflagsp = flags;
5419 return error;
5423 * Unmap (remove) blocks from a file.
5424 * If nexts is nonzero then the number of extents to remove is limited to
5425 * that value. If not all extents in the block range can be removed then
5426 * *done is set.
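*
* A hedged example of a direct call, matching the signature below (the
* local names are illustrative only):
*
*	xfs_filblks_t	unmap_len = count_fsb;
*
*	error = __xfs_bunmapi(tp, ip, start_fsb, &unmap_len, 0, 1,
*			      &firstfsb, &dfops);
*
* On return unmap_len holds the length still left to unmap, so a caller
* typically loops until it reaches zero.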
5428 int /* error */
5429 __xfs_bunmapi(
5430 xfs_trans_t *tp, /* transaction pointer */
5431 struct xfs_inode *ip, /* incore inode */
5432 xfs_fileoff_t bno, /* starting offset to unmap */
5433 xfs_filblks_t *rlen, /* i/o: amount remaining */
5434 int flags, /* misc flags */
5435 xfs_extnum_t nexts, /* number of extents max */
5436 xfs_fsblock_t *firstblock, /* first allocated block
5437 controls a.g. for allocs */
5438 struct xfs_defer_ops *dfops) /* i/o: deferred updates */
5440 xfs_btree_cur_t *cur; /* bmap btree cursor */
5441 xfs_bmbt_irec_t del; /* extent being deleted */
5442 int error; /* error return value */
5443 xfs_extnum_t extno; /* extent number in list */
5444 xfs_bmbt_irec_t got; /* current extent record */
5445 xfs_ifork_t *ifp; /* inode fork pointer */
5446 int isrt; /* freeing in rt area */
5447 xfs_extnum_t lastx; /* last extent index used */
5448 int logflags; /* transaction logging flags */
5449 xfs_extlen_t mod; /* rt extent offset */
5450 xfs_mount_t *mp; /* mount structure */
5451 xfs_fileoff_t start; /* first file offset deleted */
5452 int tmp_logflags; /* partial logging flags */
5453 int wasdel; /* was a delayed alloc extent */
5454 int whichfork; /* data or attribute fork */
5455 xfs_fsblock_t sum;
5456 xfs_filblks_t len = *rlen; /* length to unmap in file */
5457 xfs_fileoff_t max_len;
5458 xfs_agnumber_t prev_agno = NULLAGNUMBER, agno;
5460 trace_xfs_bunmap(ip, bno, len, flags, _RET_IP_);
5462 whichfork = xfs_bmapi_whichfork(flags);
5463 ASSERT(whichfork != XFS_COW_FORK);
5464 ifp = XFS_IFORK_PTR(ip, whichfork);
5465 if (unlikely(
5466 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
5467 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
5468 XFS_ERROR_REPORT("xfs_bunmapi", XFS_ERRLEVEL_LOW,
5469 ip->i_mount);
5470 return -EFSCORRUPTED;
5472 mp = ip->i_mount;
5473 if (XFS_FORCED_SHUTDOWN(mp))
5474 return -EIO;
5476 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
5477 ASSERT(len > 0);
5478 ASSERT(nexts >= 0);
5481 * Guesstimate how many blocks we can unmap without running the risk of
5482 * blowing out the transaction with a mix of EFIs and reflink
5483 * adjustments.
5485 if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK)
5486 max_len = min(len, xfs_refcount_max_unmap(tp->t_log_res));
5487 else
5488 max_len = len;
5490 if (!(ifp->if_flags & XFS_IFEXTENTS) &&
5491 (error = xfs_iread_extents(tp, ip, whichfork)))
5492 return error;
5493 if (xfs_iext_count(ifp) == 0) {
5494 *rlen = 0;
5495 return 0;
5497 XFS_STATS_INC(mp, xs_blk_unmap);
5498 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
5499 start = bno;
5500 bno = start + len - 1;
5503 * Check to see if the given block number is past the end of the
5504 * file, back up to the last block if so...
5506 if (!xfs_iext_lookup_extent(ip, ifp, bno, &lastx, &got)) {
5507 ASSERT(lastx > 0);
5508 xfs_iext_get_extent(ifp, --lastx, &got);
5509 bno = got.br_startoff + got.br_blockcount - 1;
5512 logflags = 0;
5513 if (ifp->if_flags & XFS_IFBROOT) {
5514 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
5515 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5516 cur->bc_private.b.firstblock = *firstblock;
5517 cur->bc_private.b.dfops = dfops;
5518 cur->bc_private.b.flags = 0;
5519 } else
5520 cur = NULL;
5522 if (isrt) {
5524 * Synchronize by locking the bitmap inode.
5526 xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
5527 xfs_trans_ijoin(tp, mp->m_rbmip, XFS_ILOCK_EXCL);
5528 xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
5529 xfs_trans_ijoin(tp, mp->m_rsumip, XFS_ILOCK_EXCL);
5532 extno = 0;
5533 while (bno != (xfs_fileoff_t)-1 && bno >= start && lastx >= 0 &&
5534 (nexts == 0 || extno < nexts) && max_len > 0) {
5536 * Is the found extent after a hole in which bno lives?
5537 * Just back up to the previous extent, if so.
5539 if (got.br_startoff > bno) {
5540 if (--lastx < 0)
5541 break;
5542 xfs_iext_get_extent(ifp, lastx, &got);
5545 * Is the last block of this extent before the range
5546 * we're supposed to delete? If so, we're done.
5548 bno = XFS_FILEOFF_MIN(bno,
5549 got.br_startoff + got.br_blockcount - 1);
5550 if (bno < start)
5551 break;
5553 * Then deal with the (possibly delayed) allocated space
5554 * we found.
5556 del = got;
5557 wasdel = isnullstartblock(del.br_startblock);
5560 * Make sure we don't touch multiple AGF headers out of order
5561 * in a single transaction, as that could cause AB-BA deadlocks.
5563 if (!wasdel && !isrt) {
5564 agno = XFS_FSB_TO_AGNO(mp, del.br_startblock);
5565 if (prev_agno != NULLAGNUMBER && prev_agno > agno)
5566 break;
5567 prev_agno = agno;
5569 if (got.br_startoff < start) {
5570 del.br_startoff = start;
5571 del.br_blockcount -= start - got.br_startoff;
5572 if (!wasdel)
5573 del.br_startblock += start - got.br_startoff;
5575 if (del.br_startoff + del.br_blockcount > bno + 1)
5576 del.br_blockcount = bno + 1 - del.br_startoff;
5578 /* How much can we safely unmap? */
5579 if (max_len < del.br_blockcount) {
5580 del.br_startoff += del.br_blockcount - max_len;
5581 if (!wasdel)
5582 del.br_startblock += del.br_blockcount - max_len;
5583 del.br_blockcount = max_len;
5586 sum = del.br_startblock + del.br_blockcount;
5587 if (isrt &&
5588 (mod = do_mod(sum, mp->m_sb.sb_rextsize))) {
5590 * Realtime extent not lined up at the end.
5591 * The extent could have been split into written
5592 * and unwritten pieces, or we could just be
5593 * unmapping part of it. But we can't really
5594 * get rid of part of a realtime extent.
5596 if (del.br_state == XFS_EXT_UNWRITTEN ||
5597 !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
5599 * This piece is unwritten, or we're not
5600 * using unwritten extents. Skip over it.
5602 ASSERT(bno >= mod);
5603 bno -= mod > del.br_blockcount ?
5604 del.br_blockcount : mod;
5605 if (bno < got.br_startoff) {
5606 if (--lastx >= 0)
5607 xfs_bmbt_get_all(xfs_iext_get_ext(
5608 ifp, lastx), &got);
5610 continue;
5613 * It's written; turn it unwritten.
5614 * This is better than zeroing it.
5616 ASSERT(del.br_state == XFS_EXT_NORM);
5617 ASSERT(tp->t_blk_res > 0);
5619 * If this spans a realtime extent boundary,
5620 * chop it back to the start of the one we end at.
5622 if (del.br_blockcount > mod) {
5623 del.br_startoff += del.br_blockcount - mod;
5624 del.br_startblock += del.br_blockcount - mod;
5625 del.br_blockcount = mod;
5627 del.br_state = XFS_EXT_UNWRITTEN;
5628 error = xfs_bmap_add_extent_unwritten_real(tp, ip,
5629 whichfork, &lastx, &cur, &del,
5630 firstblock, dfops, &logflags);
5631 if (error)
5632 goto error0;
5633 goto nodelete;
5635 if (isrt && (mod = do_mod(del.br_startblock, mp->m_sb.sb_rextsize))) {
5637 * Realtime extent is lined up at the end but not
5638 * at the front. We'll get rid of full extents if
5639 * we can.
5641 mod = mp->m_sb.sb_rextsize - mod;
5642 if (del.br_blockcount > mod) {
5643 del.br_blockcount -= mod;
5644 del.br_startoff += mod;
5645 del.br_startblock += mod;
5646 } else if ((del.br_startoff == start &&
5647 (del.br_state == XFS_EXT_UNWRITTEN ||
5648 tp->t_blk_res == 0)) ||
5649 !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
5651 * Can't make it unwritten. There isn't
5652 * a full extent here so just skip it.
5654 ASSERT(bno >= del.br_blockcount);
5655 bno -= del.br_blockcount;
5656 if (got.br_startoff > bno && --lastx >= 0)
5657 xfs_iext_get_extent(ifp, lastx, &got);
5658 continue;
5659 } else if (del.br_state == XFS_EXT_UNWRITTEN) {
5660 struct xfs_bmbt_irec prev;
5663 * This one is already unwritten.
5664 * It must have a written left neighbor.
5665 * Unwrite the killed part of that one and
5666 * try again.
5668 ASSERT(lastx > 0);
5669 xfs_iext_get_extent(ifp, lastx - 1, &prev);
5670 ASSERT(prev.br_state == XFS_EXT_NORM);
5671 ASSERT(!isnullstartblock(prev.br_startblock));
5672 ASSERT(del.br_startblock ==
5673 prev.br_startblock + prev.br_blockcount);
5674 if (prev.br_startoff < start) {
5675 mod = start - prev.br_startoff;
5676 prev.br_blockcount -= mod;
5677 prev.br_startblock += mod;
5678 prev.br_startoff = start;
5680 prev.br_state = XFS_EXT_UNWRITTEN;
5681 lastx--;
5682 error = xfs_bmap_add_extent_unwritten_real(tp,
5683 ip, whichfork, &lastx, &cur,
5684 &prev, firstblock, dfops,
5685 &logflags);
5686 if (error)
5687 goto error0;
5688 goto nodelete;
5689 } else {
5690 ASSERT(del.br_state == XFS_EXT_NORM);
5691 del.br_state = XFS_EXT_UNWRITTEN;
5692 error = xfs_bmap_add_extent_unwritten_real(tp,
5693 ip, whichfork, &lastx, &cur,
5694 &del, firstblock, dfops,
5695 &logflags);
5696 if (error)
5697 goto error0;
5698 goto nodelete;
5703 * If the directory code is running with no block
5704 * reservation, the deleted block is in the middle of
5705 * its extent, and the resulting insert of an extent
5706 * would cause a transformation to btree format, then
5707 * reject it. The calling code will then swap blocks
5708 * around instead.
5709 * We have to do this now, rather than waiting for the
5710 * conversion to btree format, since the transaction
5711 * will be dirty by then.
5713 if (!wasdel && tp->t_blk_res == 0 &&
5714 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
5715 XFS_IFORK_NEXTENTS(ip, whichfork) >= /* Note the >= */
5716 XFS_IFORK_MAXEXT(ip, whichfork) &&
5717 del.br_startoff > got.br_startoff &&
5718 del.br_startoff + del.br_blockcount <
5719 got.br_startoff + got.br_blockcount) {
5720 error = -ENOSPC;
5721 goto error0;
5725 * Unreserve quota and update realtime free space, if
5726 * appropriate. If this was a delayed allocation, update the
5727 * inode delalloc counter now and wait to update the sb counters,
5728 * as xfs_bmap_del_extent() might need to borrow some blocks.
5730 if (wasdel) {
5731 ASSERT(startblockval(del.br_startblock) > 0);
5732 if (isrt) {
5733 xfs_filblks_t rtexts;
5735 rtexts = XFS_FSB_TO_B(mp, del.br_blockcount);
5736 do_div(rtexts, mp->m_sb.sb_rextsize);
5737 xfs_mod_frextents(mp, (int64_t)rtexts);
5738 (void)xfs_trans_reserve_quota_nblks(NULL,
5739 ip, -((long)del.br_blockcount), 0,
5740 XFS_QMOPT_RES_RTBLKS);
5741 } else {
5742 (void)xfs_trans_reserve_quota_nblks(NULL,
5743 ip, -((long)del.br_blockcount), 0,
5744 XFS_QMOPT_RES_REGBLKS);
5746 ip->i_delayed_blks -= del.br_blockcount;
5747 if (cur)
5748 cur->bc_private.b.flags |=
5749 XFS_BTCUR_BPRV_WASDEL;
5750 } else if (cur)
5751 cur->bc_private.b.flags &= ~XFS_BTCUR_BPRV_WASDEL;
5753 error = xfs_bmap_del_extent(ip, tp, &lastx, dfops, cur, &del,
5754 &tmp_logflags, whichfork, flags);
5755 logflags |= tmp_logflags;
5756 if (error)
5757 goto error0;
5759 if (!isrt && wasdel)
5760 xfs_mod_fdblocks(mp, (int64_t)del.br_blockcount, false);
5762 max_len -= del.br_blockcount;
5763 bno = del.br_startoff - 1;
5764 nodelete:
5766 * If not done, go on to the next (previous) record.
5768 if (bno != (xfs_fileoff_t)-1 && bno >= start) {
5769 if (lastx >= 0) {
5770 xfs_iext_get_extent(ifp, lastx, &got);
5771 if (got.br_startoff > bno && --lastx >= 0)
5772 xfs_iext_get_extent(ifp, lastx, &got);
5774 extno++;
5777 if (bno == (xfs_fileoff_t)-1 || bno < start || lastx < 0)
5778 *rlen = 0;
5779 else
5780 *rlen = bno - start + 1;
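/*
 * Note on *rlen (a reading of the loop above, hedged): because the walk runs
 * backwards from the end of the range, an early stop (extent count limit,
 * max_len exhausted, or the AG ordering break) means everything above bno
 * has already been unmapped while [start, bno] is still mapped, so
 * bno - start + 1 is exactly the length the caller has to come back for.
 */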
5783 * Convert to a btree if necessary.
5785 if (xfs_bmap_needs_btree(ip, whichfork)) {
5786 ASSERT(cur == NULL);
5787 error = xfs_bmap_extents_to_btree(tp, ip, firstblock, dfops,
5788 &cur, 0, &tmp_logflags, whichfork);
5789 logflags |= tmp_logflags;
5790 if (error)
5791 goto error0;
5794 * transform from btree to extents, give it cur
5796 else if (xfs_bmap_wants_extents(ip, whichfork)) {
5797 ASSERT(cur != NULL);
5798 error = xfs_bmap_btree_to_extents(tp, ip, cur, &tmp_logflags,
5799 whichfork);
5800 logflags |= tmp_logflags;
5801 if (error)
5802 goto error0;
5805 * transform from extents to local?
5807 error = 0;
5808 error0:
5810 * Log everything. Do this after the conversion; there's no point in
5811 * logging the extent records if we've converted to btree format.
5813 if ((logflags & xfs_ilog_fext(whichfork)) &&
5814 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
5815 logflags &= ~xfs_ilog_fext(whichfork);
5816 else if ((logflags & xfs_ilog_fbroot(whichfork)) &&
5817 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
5818 logflags &= ~xfs_ilog_fbroot(whichfork);
5820 * Log the inode even in the error case: if the transaction
5821 * is dirty we'll need to shut down the filesystem.
5823 if (logflags)
5824 xfs_trans_log_inode(tp, ip, logflags);
5825 if (cur) {
5826 if (!error) {
5827 *firstblock = cur->bc_private.b.firstblock;
5828 cur->bc_private.b.allocated = 0;
5830 xfs_btree_del_cursor(cur,
5831 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
5833 return error;
5836 /* Unmap a range of a file. */
5838 xfs_bunmapi(
5839 xfs_trans_t *tp,
5840 struct xfs_inode *ip,
5841 xfs_fileoff_t bno,
5842 xfs_filblks_t len,
5843 int flags,
5844 xfs_extnum_t nexts,
5845 xfs_fsblock_t *firstblock,
5846 struct xfs_defer_ops *dfops,
5847 int *done)
5849 int error;
5851 error = __xfs_bunmapi(tp, ip, bno, &len, flags, nexts, firstblock,
5852 dfops);
5853 *done = (len == 0);
5854 return error;
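/*
 * A minimal sketch of how a caller might drive xfs_bunmapi() until the whole
 * range is gone. The function name and the fixed per-call extent count are
 * made up for illustration; real callers also have to finish the deferred
 * ops and roll the transaction between calls, which is omitted here.
 */
static int
xfs_bunmapi_whole_range_sketch(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		bno,
	xfs_filblks_t		len,
	xfs_fsblock_t		*firstblock,
	struct xfs_defer_ops	*dfops)
{
	int			done = 0;
	int			error = 0;

	while (!done) {
		/* unmap at most two extents per call, purely as an example */
		error = xfs_bunmapi(tp, ip, bno, len, 0, 2, firstblock,
				dfops, &done);
		if (error)
			break;
	}
	return error;
}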
5858 * Determine whether an extent shift can be accomplished by a merge with the
5859 * extent that precedes the target hole of the shift.
5861 STATIC bool
5862 xfs_bmse_can_merge(
5863 struct xfs_bmbt_irec *left, /* preceding extent */
5864 struct xfs_bmbt_irec *got, /* current extent to shift */
5865 xfs_fileoff_t shift) /* shift fsb */
5867 xfs_fileoff_t startoff;
5869 startoff = got->br_startoff - shift;
5872 * The extent, once shifted, must be adjacent in-file and on-disk with
5873 * the preceding extent.
5875 if ((left->br_startoff + left->br_blockcount != startoff) ||
5876 (left->br_startblock + left->br_blockcount != got->br_startblock) ||
5877 (left->br_state != got->br_state) ||
5878 (left->br_blockcount + got->br_blockcount > MAXEXTLEN))
5879 return false;
5881 return true;
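/*
 * Worked example for xfs_bmse_can_merge() (numbers are illustrative only):
 * with left = {br_startoff 100, br_startblock 500, br_blockcount 10} and
 * got = {br_startoff 118, br_startblock 510, br_blockcount 4}, a shift of 8
 * gives startoff = 110 = 100 + 10, and 500 + 10 == 510, so the shifted
 * extent would be contiguous with left both in the file and on disk; if the
 * states match and the combined length stays under MAXEXTLEN, the shift can
 * be done as a merge instead.
 */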
5885 * A bmap extent shift adjusts the file offset of an extent to fill a preceding
5886 * hole in the file. If an extent shift would result in the extent being fully
5887 * adjacent to the extent that currently precedes the hole, we can merge with
5888 * the preceding extent rather than do the shift.
5890 * This function assumes the caller has verified a shift-by-merge is possible
5891 * with the provided extents via xfs_bmse_can_merge().
5893 STATIC int
5894 xfs_bmse_merge(
5895 struct xfs_inode *ip,
5896 int whichfork,
5897 xfs_fileoff_t shift, /* shift fsb */
5898 int current_ext, /* idx of gotp */
5899 struct xfs_bmbt_irec *got, /* extent to shift */
5900 struct xfs_bmbt_irec *left, /* preceding extent */
5901 struct xfs_btree_cur *cur,
5902 int *logflags, /* output */
5903 struct xfs_defer_ops *dfops)
5905 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
5906 struct xfs_bmbt_irec new;
5907 xfs_filblks_t blockcount;
5908 int error, i;
5909 struct xfs_mount *mp = ip->i_mount;
5911 blockcount = left->br_blockcount + got->br_blockcount;
5913 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
5914 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
5915 ASSERT(xfs_bmse_can_merge(left, got, shift));
5917 new = *left;
5918 new.br_blockcount = blockcount;
5921 * Update the on-disk extent count, the btree if necessary and log the
5922 * inode.
5924 XFS_IFORK_NEXT_SET(ip, whichfork,
5925 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
5926 *logflags |= XFS_ILOG_CORE;
5927 if (!cur) {
5928 *logflags |= XFS_ILOG_DEXT;
5929 goto done;
5932 /* lookup and remove the extent to merge */
5933 error = xfs_bmbt_lookup_eq(cur, got->br_startoff, got->br_startblock,
5934 got->br_blockcount, &i);
5935 if (error)
5936 return error;
5937 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
5939 error = xfs_btree_delete(cur, &i);
5940 if (error)
5941 return error;
5942 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
5944 /* lookup and update size of the previous extent */
5945 error = xfs_bmbt_lookup_eq(cur, left->br_startoff, left->br_startblock,
5946 left->br_blockcount, &i);
5947 if (error)
5948 return error;
5949 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
5951 error = xfs_bmbt_update(cur, new.br_startoff, new.br_startblock,
5952 new.br_blockcount, new.br_state);
5953 if (error)
5954 return error;
5956 done:
5957 xfs_iext_update_extent(ifp, current_ext - 1, &new);
5958 xfs_iext_remove(ip, current_ext, 1, 0);
5960 /* update reverse mapping. rmap functions merge the rmaps for us */
5961 error = xfs_rmap_unmap_extent(mp, dfops, ip, whichfork, got);
5962 if (error)
5963 return error;
5964 memcpy(&new, got, sizeof(new));
5965 new.br_startoff = left->br_startoff + left->br_blockcount;
5966 return xfs_rmap_map_extent(mp, dfops, ip, whichfork, &new);
5970 * Shift a single extent.
5972 STATIC int
5973 xfs_bmse_shift_one(
5974 struct xfs_inode *ip,
5975 int whichfork,
5976 xfs_fileoff_t offset_shift_fsb,
5977 int *current_ext,
5978 struct xfs_bmbt_irec *got,
5979 struct xfs_btree_cur *cur,
5980 int *logflags,
5981 enum shift_direction direction,
5982 struct xfs_defer_ops *dfops)
5984 struct xfs_ifork *ifp;
5985 struct xfs_mount *mp;
5986 xfs_fileoff_t startoff;
5987 struct xfs_bmbt_irec adj_irec, new;
5988 int error;
5989 int i;
5990 int total_extents;
5992 mp = ip->i_mount;
5993 ifp = XFS_IFORK_PTR(ip, whichfork);
5994 total_extents = xfs_iext_count(ifp);
5996 /* delalloc extents should be prevented by caller */
5997 XFS_WANT_CORRUPTED_RETURN(mp, !isnullstartblock(got->br_startblock));
5999 if (direction == SHIFT_LEFT) {
6000 startoff = got->br_startoff - offset_shift_fsb;
6003 * Check for merge if we've got an extent to the left,
6004 * otherwise make sure there's enough room at the start
6005 * of the file for the shift.
6007 if (!*current_ext) {
6008 if (got->br_startoff < offset_shift_fsb)
6009 return -EINVAL;
6010 goto update_current_ext;
6014 * grab the left extent and check for a large enough hole.
6016 xfs_iext_get_extent(ifp, *current_ext - 1, &adj_irec);
6017 if (startoff < adj_irec.br_startoff + adj_irec.br_blockcount)
6018 return -EINVAL;
6020 /* check whether to merge the extent or shift it down */
6021 if (xfs_bmse_can_merge(&adj_irec, got, offset_shift_fsb)) {
6022 return xfs_bmse_merge(ip, whichfork, offset_shift_fsb,
6023 *current_ext, got, &adj_irec,
6024 cur, logflags, dfops);
6026 } else {
6027 startoff = got->br_startoff + offset_shift_fsb;
6028 /* nothing to move if this is the last extent */
6029 if (*current_ext >= (total_extents - 1))
6030 goto update_current_ext;
6033 * If this is not the last extent in the file, make sure there
6034 * is enough room between the current extent and the next one
6035 * to accommodate the shift.
6037 xfs_iext_get_extent(ifp, *current_ext + 1, &adj_irec);
6038 if (startoff + got->br_blockcount > adj_irec.br_startoff)
6039 return -EINVAL;
6042 * Unlike a left shift (which involves a hole punch),
6043 * a right shift does not modify extent neighbors
6044 * in any way. We should never find mergeable extents
6045 * in this scenario. Check anyway and warn if we
6046 * encounter two extents that could be one.
6048 if (xfs_bmse_can_merge(got, &adj_irec, offset_shift_fsb))
6049 WARN_ON_ONCE(1);
6053 * Increment the extent index for the next iteration, update the start
6054 * offset of the in-core extent and update the btree if applicable.
6056 update_current_ext:
6057 *logflags |= XFS_ILOG_CORE;
6059 new = *got;
6060 new.br_startoff = startoff;
6062 if (cur) {
6063 error = xfs_bmbt_lookup_eq(cur, got->br_startoff,
6064 got->br_startblock, got->br_blockcount, &i);
6065 if (error)
6066 return error;
6067 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
6069 error = xfs_bmbt_update(cur, new.br_startoff,
6070 new.br_startblock, new.br_blockcount,
6071 new.br_state);
6072 if (error)
6073 return error;
6074 } else {
6075 *logflags |= XFS_ILOG_DEXT;
6078 xfs_iext_update_extent(ifp, *current_ext, &new);
6080 if (direction == SHIFT_LEFT)
6081 (*current_ext)++;
6082 else
6083 (*current_ext)--;
6085 /* update reverse mapping */
6086 error = xfs_rmap_unmap_extent(mp, dfops, ip, whichfork, got);
6087 if (error)
6088 return error;
6089 return xfs_rmap_map_extent(mp, dfops, ip, whichfork, &new);
6093 * Shift extent records to the left/right to cover/create a hole.
6095 * The maximum number of extents to be shifted in a single operation is
6096 * @num_exts. @stop_fsb specifies the file offset at which to stop the shift,
6097 * and the file offset where we've left off is returned in @next_fsb.
6098 * @offset_shift_fsb is the length by which each extent is shifted. If there is
6099 * no hole to shift the extents into, this is considered an invalid operation
6100 * and we abort immediately.
6103 xfs_bmap_shift_extents(
6104 struct xfs_trans *tp,
6105 struct xfs_inode *ip,
6106 xfs_fileoff_t *next_fsb,
6107 xfs_fileoff_t offset_shift_fsb,
6108 int *done,
6109 xfs_fileoff_t stop_fsb,
6110 xfs_fsblock_t *firstblock,
6111 struct xfs_defer_ops *dfops,
6112 enum shift_direction direction,
6113 int num_exts)
6115 struct xfs_btree_cur *cur = NULL;
6116 struct xfs_bmbt_irec got;
6117 struct xfs_mount *mp = ip->i_mount;
6118 struct xfs_ifork *ifp;
6119 xfs_extnum_t nexts = 0;
6120 xfs_extnum_t current_ext;
6121 xfs_extnum_t total_extents;
6122 xfs_extnum_t stop_extent;
6123 int error = 0;
6124 int whichfork = XFS_DATA_FORK;
6125 int logflags = 0;
6127 if (unlikely(XFS_TEST_ERROR(
6128 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
6129 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
6130 mp, XFS_ERRTAG_BMAPIFORMAT))) {
6131 XFS_ERROR_REPORT("xfs_bmap_shift_extents",
6132 XFS_ERRLEVEL_LOW, mp);
6133 return -EFSCORRUPTED;
6136 if (XFS_FORCED_SHUTDOWN(mp))
6137 return -EIO;
6139 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
6140 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
6141 ASSERT(direction == SHIFT_LEFT || direction == SHIFT_RIGHT);
6143 ifp = XFS_IFORK_PTR(ip, whichfork);
6144 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
6145 /* Read in all the extents */
6146 error = xfs_iread_extents(tp, ip, whichfork);
6147 if (error)
6148 return error;
6151 if (ifp->if_flags & XFS_IFBROOT) {
6152 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
6153 cur->bc_private.b.firstblock = *firstblock;
6154 cur->bc_private.b.dfops = dfops;
6155 cur->bc_private.b.flags = 0;
6159 * There may be delalloc extents in the data fork before the range we
6160 * are collapsing out, so we cannot use the count of real extents here.
6161 * Instead we have to calculate it from the incore fork.
6163 total_extents = xfs_iext_count(ifp);
6164 if (total_extents == 0) {
6165 *done = 1;
6166 goto del_cursor;
6170 * In case of the first right shift, we need to initialize next_fsb.
6172 if (*next_fsb == NULLFSBLOCK) {
6173 ASSERT(direction == SHIFT_RIGHT);
6175 current_ext = total_extents - 1;
6176 xfs_iext_get_extent(ifp, current_ext, &got);
6177 if (stop_fsb > got.br_startoff) {
6178 *done = 1;
6179 goto del_cursor;
6181 *next_fsb = got.br_startoff;
6182 } else {
6184 * Look up the extent index for the fsb where we start shifting. We can
6185 * henceforth iterate with current_ext as extent list changes are locked
6186 * out via ilock.
6188 * If next_fsb lies in a hole beyond which there are no extents, we are
6189 * done.
6191 if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &current_ext,
6192 &got)) {
6193 *done = 1;
6194 goto del_cursor;
6198 /* Lookup the extent index at which we have to stop */
6199 if (direction == SHIFT_RIGHT) {
6200 struct xfs_bmbt_irec s;
6202 xfs_iext_lookup_extent(ip, ifp, stop_fsb, &stop_extent, &s);
6203 /* Make stop_extent exclusive of shift range */
6204 stop_extent--;
6205 if (current_ext <= stop_extent) {
6206 error = -EIO;
6207 goto del_cursor;
6209 } else {
6210 stop_extent = total_extents;
6211 if (current_ext >= stop_extent) {
6212 error = -EIO;
6213 goto del_cursor;
6217 while (nexts++ < num_exts) {
6218 error = xfs_bmse_shift_one(ip, whichfork, offset_shift_fsb,
6219 &current_ext, &got, cur, &logflags,
6220 direction, dfops);
6221 if (error)
6222 goto del_cursor;
6224 * If there was an extent merge during the shift, the extent
6225 * count can change. Update the total and grab the next record.
6227 if (direction == SHIFT_LEFT) {
6228 total_extents = xfs_iext_count(ifp);
6229 stop_extent = total_extents;
6232 if (current_ext == stop_extent) {
6233 *done = 1;
6234 *next_fsb = NULLFSBLOCK;
6235 break;
6237 xfs_iext_get_extent(ifp, current_ext, &got);
6240 if (!*done)
6241 *next_fsb = got.br_startoff;
6243 del_cursor:
6244 if (cur)
6245 xfs_btree_del_cursor(cur,
6246 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
6248 if (logflags)
6249 xfs_trans_log_inode(tp, ip, logflags);
6251 return error;
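/*
 * A minimal sketch of driving xfs_bmap_shift_extents() for a left shift
 * (collapse-style). The function name, the one-extent batch size and the
 * reuse of a single transaction are assumptions for illustration; the real
 * collapse path commits and re-allocates a transaction between batches.
 */
static int
xfs_shift_left_sketch(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		start_fsb,	/* first offset to shift */
	xfs_fileoff_t		shift_fsb,	/* distance of the shift */
	xfs_fileoff_t		stop_fsb,
	xfs_fsblock_t		*firstblock,
	struct xfs_defer_ops	*dfops)
{
	xfs_fileoff_t		next_fsb = start_fsb;
	int			done = 0;
	int			error = 0;

	while (!done) {
		/* shift one extent per call, purely as an example */
		error = xfs_bmap_shift_extents(tp, ip, &next_fsb, shift_fsb,
				&done, stop_fsb, firstblock, dfops,
				SHIFT_LEFT, 1);
		if (error)
			break;
	}
	return error;
}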
6255 * Splits an extent into two at the split_fsb block, so that split_fsb
6256 * becomes the first block of the new extent. @split_fsb is the block at
6257 * which the extent is split.
6258 * If split_fsb lies in a hole or at the first block of an extent, just return 0.
6260 STATIC int
6261 xfs_bmap_split_extent_at(
6262 struct xfs_trans *tp,
6263 struct xfs_inode *ip,
6264 xfs_fileoff_t split_fsb,
6265 xfs_fsblock_t *firstfsb,
6266 struct xfs_defer_ops *dfops)
6268 int whichfork = XFS_DATA_FORK;
6269 struct xfs_btree_cur *cur = NULL;
6270 struct xfs_bmbt_irec got;
6271 struct xfs_bmbt_irec new; /* split extent */
6272 struct xfs_mount *mp = ip->i_mount;
6273 struct xfs_ifork *ifp;
6274 xfs_fsblock_t gotblkcnt; /* new block count for got */
6275 xfs_extnum_t current_ext;
6276 int error = 0;
6277 int logflags = 0;
6278 int i = 0;
6280 if (unlikely(XFS_TEST_ERROR(
6281 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
6282 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
6283 mp, XFS_ERRTAG_BMAPIFORMAT))) {
6284 XFS_ERROR_REPORT("xfs_bmap_split_extent_at",
6285 XFS_ERRLEVEL_LOW, mp);
6286 return -EFSCORRUPTED;
6289 if (XFS_FORCED_SHUTDOWN(mp))
6290 return -EIO;
6292 ifp = XFS_IFORK_PTR(ip, whichfork);
6293 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
6294 /* Read in all the extents */
6295 error = xfs_iread_extents(tp, ip, whichfork);
6296 if (error)
6297 return error;
6301 * If there are no extents, or split_fsb lies in a hole, we are done.
6303 if (!xfs_iext_lookup_extent(ip, ifp, split_fsb, &current_ext, &got) ||
6304 got.br_startoff >= split_fsb)
6305 return 0;
6307 gotblkcnt = split_fsb - got.br_startoff;
6308 new.br_startoff = split_fsb;
6309 new.br_startblock = got.br_startblock + gotblkcnt;
6310 new.br_blockcount = got.br_blockcount - gotblkcnt;
6311 new.br_state = got.br_state;
6313 if (ifp->if_flags & XFS_IFBROOT) {
6314 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
6315 cur->bc_private.b.firstblock = *firstfsb;
6316 cur->bc_private.b.dfops = dfops;
6317 cur->bc_private.b.flags = 0;
6318 error = xfs_bmbt_lookup_eq(cur, got.br_startoff,
6319 got.br_startblock,
6320 got.br_blockcount,
6321 &i);
6322 if (error)
6323 goto del_cursor;
6324 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor);
6327 got.br_blockcount = gotblkcnt;
6328 xfs_iext_update_extent(ifp, current_ext, &got);
6330 logflags = XFS_ILOG_CORE;
6331 if (cur) {
6332 error = xfs_bmbt_update(cur, got.br_startoff,
6333 got.br_startblock,
6334 got.br_blockcount,
6335 got.br_state);
6336 if (error)
6337 goto del_cursor;
6338 } else
6339 logflags |= XFS_ILOG_DEXT;
6341 /* Add new extent */
6342 current_ext++;
6343 xfs_iext_insert(ip, current_ext, 1, &new, 0);
6344 XFS_IFORK_NEXT_SET(ip, whichfork,
6345 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
6347 if (cur) {
6348 error = xfs_bmbt_lookup_eq(cur, new.br_startoff,
6349 new.br_startblock, new.br_blockcount,
6350 &i);
6351 if (error)
6352 goto del_cursor;
6353 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, del_cursor);
6354 cur->bc_rec.b.br_state = new.br_state;
6356 error = xfs_btree_insert(cur, &i);
6357 if (error)
6358 goto del_cursor;
6359 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor);
6363 * Convert to a btree if necessary.
6365 if (xfs_bmap_needs_btree(ip, whichfork)) {
6366 int tmp_logflags; /* partial log flag return val */
6368 ASSERT(cur == NULL);
6369 error = xfs_bmap_extents_to_btree(tp, ip, firstfsb, dfops,
6370 &cur, 0, &tmp_logflags, whichfork);
6371 logflags |= tmp_logflags;
6374 del_cursor:
6375 if (cur) {
6376 cur->bc_private.b.allocated = 0;
6377 xfs_btree_del_cursor(cur,
6378 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
6381 if (logflags)
6382 xfs_trans_log_inode(tp, ip, logflags);
6383 return error;
6387 xfs_bmap_split_extent(
6388 struct xfs_inode *ip,
6389 xfs_fileoff_t split_fsb)
6391 struct xfs_mount *mp = ip->i_mount;
6392 struct xfs_trans *tp;
6393 struct xfs_defer_ops dfops;
6394 xfs_fsblock_t firstfsb;
6395 int error;
6397 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
6398 XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp);
6399 if (error)
6400 return error;
6402 xfs_ilock(ip, XFS_ILOCK_EXCL);
6403 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
6405 xfs_defer_init(&dfops, &firstfsb);
6407 error = xfs_bmap_split_extent_at(tp, ip, split_fsb,
6408 &firstfsb, &dfops);
6409 if (error)
6410 goto out;
6412 error = xfs_defer_finish(&tp, &dfops);
6413 if (error)
6414 goto out;
6416 return xfs_trans_commit(tp);
6418 out:
6419 xfs_defer_cancel(&dfops);
6420 xfs_trans_cancel(tp);
6421 return error;
6424 /* Deferred mapping is only for real extents in the data fork. */
6425 static bool
6426 xfs_bmap_is_update_needed(
6427 struct xfs_bmbt_irec *bmap)
6429 return bmap->br_startblock != HOLESTARTBLOCK &&
6430 bmap->br_startblock != DELAYSTARTBLOCK;
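/*
 * Reading note (hedged): HOLESTARTBLOCK and DELAYSTARTBLOCK are sentinel
 * startblock values, so the test above amounts to "only queue deferred bmap
 * work for mappings backed by real allocated blocks"; holes and delalloc
 * reservations have nothing on disk to defer work against.
 */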
6433 /* Record a bmap intent. */
6434 static int
6435 __xfs_bmap_add(
6436 struct xfs_mount *mp,
6437 struct xfs_defer_ops *dfops,
6438 enum xfs_bmap_intent_type type,
6439 struct xfs_inode *ip,
6440 int whichfork,
6441 struct xfs_bmbt_irec *bmap)
6443 int error;
6444 struct xfs_bmap_intent *bi;
6446 trace_xfs_bmap_defer(mp,
6447 XFS_FSB_TO_AGNO(mp, bmap->br_startblock),
6448 type,
6449 XFS_FSB_TO_AGBNO(mp, bmap->br_startblock),
6450 ip->i_ino, whichfork,
6451 bmap->br_startoff,
6452 bmap->br_blockcount,
6453 bmap->br_state);
6455 bi = kmem_alloc(sizeof(struct xfs_bmap_intent), KM_SLEEP | KM_NOFS);
6456 INIT_LIST_HEAD(&bi->bi_list);
6457 bi->bi_type = type;
6458 bi->bi_owner = ip;
6459 bi->bi_whichfork = whichfork;
6460 bi->bi_bmap = *bmap;
6462 error = xfs_defer_ijoin(dfops, bi->bi_owner);
6463 if (error) {
6464 kmem_free(bi);
6465 return error;
6468 xfs_defer_add(dfops, XFS_DEFER_OPS_TYPE_BMAP, &bi->bi_list);
6469 return 0;
6472 /* Map an extent into a file. */
6474 xfs_bmap_map_extent(
6475 struct xfs_mount *mp,
6476 struct xfs_defer_ops *dfops,
6477 struct xfs_inode *ip,
6478 struct xfs_bmbt_irec *PREV)
6480 if (!xfs_bmap_is_update_needed(PREV))
6481 return 0;
6483 return __xfs_bmap_add(mp, dfops, XFS_BMAP_MAP, ip,
6484 XFS_DATA_FORK, PREV);
6487 /* Unmap an extent out of a file. */
6489 xfs_bmap_unmap_extent(
6490 struct xfs_mount *mp,
6491 struct xfs_defer_ops *dfops,
6492 struct xfs_inode *ip,
6493 struct xfs_bmbt_irec *PREV)
6495 if (!xfs_bmap_is_update_needed(PREV))
6496 return 0;
6498 return __xfs_bmap_add(mp, dfops, XFS_BMAP_UNMAP, ip,
6499 XFS_DATA_FORK, PREV);
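/*
 * A minimal sketch of pairing the two helpers above to move one mapping's
 * worth of deferred work between files. The two-inode usage and the function
 * name are assumptions for illustration, loosely modeled on a remap-style
 * operation; the real callers live outside this file.
 */
static int
xfs_bmap_defer_remap_sketch(
	struct xfs_mount	*mp,
	struct xfs_defer_ops	*dfops,
	struct xfs_inode	*src_ip,
	struct xfs_inode	*dst_ip,
	struct xfs_bmbt_irec	*irec)
{
	int			error;

	/* queue an intent to drop the mapping from the source file... */
	error = xfs_bmap_unmap_extent(mp, dfops, src_ip, irec);
	if (error)
		return error;

	/* ...and an intent to map the same blocks into the destination */
	return xfs_bmap_map_extent(mp, dfops, dst_ip, irec);
}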
6503 * Process one of the deferred bmap operations. On return, *blockcount
6504 * holds the amount of the mapping that still remains to be processed.
6507 xfs_bmap_finish_one(
6508 struct xfs_trans *tp,
6509 struct xfs_defer_ops *dfops,
6510 struct xfs_inode *ip,
6511 enum xfs_bmap_intent_type type,
6512 int whichfork,
6513 xfs_fileoff_t startoff,
6514 xfs_fsblock_t startblock,
6515 xfs_filblks_t *blockcount,
6516 xfs_exntst_t state)
6518 xfs_fsblock_t firstfsb;
6519 int error = 0;
6522 * firstfsb is tied to the transaction lifetime and is used to
6523 * ensure correct AG locking order and schedule work item
6524 * continuations. XFS_BUI_MAX_FAST_EXTENTS (== 1) restricts us
6525 * to only making one bmap call per transaction, so it should
6526 * be safe to have it as a local variable here.
6528 firstfsb = NULLFSBLOCK;
6530 trace_xfs_bmap_deferred(tp->t_mountp,
6531 XFS_FSB_TO_AGNO(tp->t_mountp, startblock), type,
6532 XFS_FSB_TO_AGBNO(tp->t_mountp, startblock),
6533 ip->i_ino, whichfork, startoff, *blockcount, state);
6535 if (WARN_ON_ONCE(whichfork != XFS_DATA_FORK))
6536 return -EFSCORRUPTED;
6538 if (XFS_TEST_ERROR(false, tp->t_mountp,
6539 XFS_ERRTAG_BMAP_FINISH_ONE))
6540 return -EIO;
6542 switch (type) {
6543 case XFS_BMAP_MAP:
6544 error = xfs_bmapi_remap(tp, ip, startoff, *blockcount,
6545 startblock, dfops);
6546 *blockcount = 0;
6547 break;
6548 case XFS_BMAP_UNMAP:
6549 error = __xfs_bunmapi(tp, ip, startoff, blockcount,
6550 XFS_BMAPI_REMAP, 1, &firstfsb, dfops);
6551 break;
6552 default:
6553 ASSERT(0);
6554 error = -EFSCORRUPTED;
6557 return error;