/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2012 Red Hat, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_extfree_item.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_log.h"
#include "xfs_rmap_btree.h"
#include "xfs_iomap.h"
#include "xfs_reflink.h"
#include "xfs_refcount.h"
/* Kernel only BMAP related definitions and functions */

/*
 * Convert the given file system block to a disk block.  We have to treat it
 * differently based on whether the file is a real time file or not, because
 * the bmap code does.
 */
xfs_daddr_t
xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
{
	return (XFS_IS_REALTIME_INODE(ip) ?
		 (xfs_daddr_t)XFS_FSB_TO_BB((ip)->i_mount, (fsb)) :
		 XFS_FSB_TO_DADDR((ip)->i_mount, (fsb)));
}
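/*
 * Illustrative note, not part of the kernel API (the block size below is an
 * assumption for the example): on a filesystem with 4096-byte blocks, a
 * realtime fsb is a linear block number, so XFS_FSB_TO_BB() maps fsb 10 to
 * daddr 10 * (4096 / 512) == 80, while a non-realtime fsb encodes an AG
 * number and AG block that XFS_FSB_TO_DADDR() must decode first.
 */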
/*
 * Routine to zero an extent on disk allocated to the specific inode.
 *
 * The VFS functions take a linearised filesystem block offset, so we have to
 * convert the sparse xfs fsb to the right format first.
 * VFS types are real funky, too.
 */
int
xfs_zero_extent(
	struct xfs_inode *ip,
	xfs_fsblock_t	start_fsb,
	xfs_off_t	count_fsb)
{
	struct xfs_mount *mp = ip->i_mount;
	xfs_daddr_t	sector = xfs_fsb_to_db(ip, start_fsb);
	sector_t	block = XFS_BB_TO_FSBT(mp, sector);

	return blkdev_issue_zeroout(xfs_find_bdev_for_inode(VFS_I(ip)),
		block << (mp->m_super->s_blocksize_bits - 9),
		count_fsb << (mp->m_super->s_blocksize_bits - 9),
		GFP_NOFS, 0);
}
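/*
 * Sketch of the unit conversion above, with an assumed 4096-byte block size
 * (s_blocksize_bits == 12): the shift by (12 - 9) == 3 converts filesystem
 * blocks into 512-byte sectors, so fs block 5 becomes sector 40.  The values
 * are illustrative only.
 */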
int
xfs_bmap_rtalloc(
	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
{
	int		error;		/* error return value */
	xfs_mount_t	*mp;		/* mount point structure */
	xfs_extlen_t	prod = 0;	/* product factor for allocators */
	xfs_extlen_t	ralen = 0;	/* realtime allocation length */
	xfs_extlen_t	align;		/* minimum allocation alignment */
	xfs_rtblock_t	rtb;

	mp = ap->ip->i_mount;
	align = xfs_get_extsz_hint(ap->ip);
	prod = align / mp->m_sb.sb_rextsize;
	error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
					align, 1, ap->eof, 0,
					ap->conv, &ap->offset, &ap->length);
	if (error)
		return error;
	ASSERT(ap->length);
	ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);

	/*
	 * If the offset & length are not perfectly aligned
	 * then kill prod, it will just get us in trouble.
	 */
	if (do_mod(ap->offset, align) || ap->length % align)
		prod = 1;
	/*
	 * Set ralen to be the actual requested length in rtextents.
	 */
	ralen = ap->length / mp->m_sb.sb_rextsize;
	/*
	 * If the old value was close enough to MAXEXTLEN that
	 * we rounded up to it, cut it back so it's valid again.
	 * Note that if it's a really large request (bigger than
	 * MAXEXTLEN), we don't hear about that number, and can't
	 * adjust the starting point to match it.
	 */
	if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
		ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;

	/*
	 * Lock out modifications to both the RT bitmap and summary inodes
	 */
	xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
	xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);
	xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
	xfs_trans_ijoin(ap->tp, mp->m_rsumip, XFS_ILOCK_EXCL);

	/*
	 * If it's an allocation to an empty file at offset 0,
	 * pick an extent that will space things out in the rt area.
	 */
	if (ap->eof && ap->offset == 0) {
		xfs_rtblock_t uninitialized_var(rtx);	/* realtime extent no */

		error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
		if (error)
			return error;
		ap->blkno = rtx * mp->m_sb.sb_rextsize;
	} else {
		ap->blkno = 0;
	}

	xfs_bmap_adjacent(ap);

	/*
	 * Realtime allocation, done through xfs_rtallocate_extent.
	 */
	do_div(ap->blkno, mp->m_sb.sb_rextsize);
	rtb = ap->blkno;
	ap->length = ralen;
	error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1, ap->length,
				&ralen, ap->wasdel, prod, &rtb);
	if (error)
		return error;

	ap->blkno = rtb;
	if (ap->blkno != NULLFSBLOCK) {
		ap->blkno *= mp->m_sb.sb_rextsize;
		ralen *= mp->m_sb.sb_rextsize;
		ap->length = ralen;
		ap->ip->i_d.di_nblocks += ralen;
		xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
		if (ap->wasdel)
			ap->ip->i_delayed_blks -= ralen;
		/*
		 * Adjust the disk quota also.  This was reserved
		 * earlier.
		 */
		xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
			ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
					XFS_TRANS_DQ_RTBCOUNT, (long) ralen);

		/* Zero the extent if we were asked to do so */
		if (ap->datatype & XFS_ALLOC_USERDATA_ZERO) {
			error = xfs_zero_extent(ap->ip, ap->blkno, ap->length);
			if (error)
				return error;
		}
	} else {
		ap->length = 0;
	}
	return 0;
}
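/*
 * Worked example for the realtime sizing above (all values assumed, not
 * taken from the code): with sb_rextsize == 4 fsbs and a 17-fsb request,
 * xfs_bmap_extsize_align() rounds the request up to 20 fsbs, so ralen
 * becomes 20 / 4 == 5 rtextents; the MAXEXTLEN clamp only fires when the
 * rounded length reaches the 21-bit on-disk extent length limit.
 */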
/*
 * Check if the endoff is outside the last extent. If so the caller will grow
 * the allocation to a stripe unit boundary.  All offsets are considered outside
 * the end of file for an empty fork, so 1 is returned in *eof in that case.
 */
int
xfs_bmap_eof(
	struct xfs_inode	*ip,
	xfs_fileoff_t		endoff,
	int			whichfork,
	int			*eof)
{
	struct xfs_bmbt_irec	rec;
	int			error;

	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, eof);
	if (error || *eof)
		return error;

	*eof = endoff >= rec.br_startoff + rec.br_blockcount;
	return 0;
}
/*
 * Extent tree block counting routines.
 */

/*
 * Count leaf blocks given a range of extent records.  Delayed allocation
 * extents are not counted towards the totals.
 */
STATIC void
xfs_bmap_count_leaves(
	struct xfs_ifork	*ifp,
	xfs_extnum_t		*numrecs,
	xfs_filblks_t		*count)
{
	xfs_extnum_t		i;
	xfs_extnum_t		nr_exts = xfs_iext_count(ifp);

	for (i = 0; i < nr_exts; i++) {
		xfs_bmbt_rec_host_t *frp = xfs_iext_get_ext(ifp, i);
		if (!isnullstartblock(xfs_bmbt_get_startblock(frp))) {
			(*numrecs)++;
			*count += xfs_bmbt_get_blockcount(frp);
		}
	}
}
/*
 * Count leaf blocks given a range of extent records originally
 * in btree format.
 */
STATIC void
xfs_bmap_disk_count_leaves(
	struct xfs_mount	*mp,
	struct xfs_btree_block	*block,
	int			numrecs,
	xfs_filblks_t		*count)
{
	int			b;
	xfs_bmbt_rec_t		*frp;

	for (b = 1; b <= numrecs; b++) {
		frp = XFS_BMBT_REC_ADDR(mp, block, b);
		*count += xfs_bmbt_disk_get_blockcount(frp);
	}
}
/*
 * Recursively walks each level of a btree
 * to count total fsblocks in use.
 */
STATIC int
xfs_bmap_count_tree(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_ifork	*ifp,
	xfs_fsblock_t		blockno,
	int			levelin,
	xfs_extnum_t		*nextents,
	xfs_filblks_t		*count)
{
	int			error;
	struct xfs_buf		*bp, *nbp;
	int			level = levelin;
	__be64			*pp;
	xfs_fsblock_t		bno = blockno;
	xfs_fsblock_t		nextbno;
	struct xfs_btree_block	*block, *nextblock;
	int			numrecs;

	error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
	if (error)
		return error;
	*count += 1;
	block = XFS_BUF_TO_BLOCK(bp);

	if (--level) {
		/* Not at node above leaves, count this level of nodes */
		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
		while (nextbno != NULLFSBLOCK) {
			error = xfs_btree_read_bufl(mp, tp, nextbno, 0, &nbp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				return error;
			*count += 1;
			nextblock = XFS_BUF_TO_BLOCK(nbp);
			nextbno = be64_to_cpu(nextblock->bb_u.l.bb_rightsib);
			xfs_trans_brelse(tp, nbp);
		}

		/* Dive to the next level */
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		error = xfs_bmap_count_tree(mp, tp, ifp, bno, level, nextents,
				count);
		if (error) {
			xfs_trans_brelse(tp, bp);
			XFS_ERROR_REPORT("xfs_bmap_count_tree(1)",
					 XFS_ERRLEVEL_LOW, mp);
			return -EFSCORRUPTED;
		}
		xfs_trans_brelse(tp, bp);
	} else {
		/* count all level 1 nodes and their leaves */
		for (;;) {
			nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
			numrecs = be16_to_cpu(block->bb_numrecs);
			(*nextents) += numrecs;
			xfs_bmap_disk_count_leaves(mp, block, numrecs, count);
			xfs_trans_brelse(tp, bp);
			if (nextbno == NULLFSBLOCK)
				break;
			bno = nextbno;
			error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				return error;
			*count += 1;
			block = XFS_BUF_TO_BLOCK(bp);
		}
	}
	return 0;
}
/*
 * Count fsblocks of the given fork.  Delayed allocation extents are
 * not counted towards the totals.
 */
int
xfs_bmap_count_blocks(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_extnum_t		*nextents,
	xfs_filblks_t		*count)
{
	struct xfs_mount	*mp;	/* file system mount structure */
	__be64			*pp;	/* pointer to block address */
	struct xfs_btree_block	*block;	/* current btree block */
	struct xfs_ifork	*ifp;	/* fork structure */
	xfs_fsblock_t		bno;	/* block # of "block" */
	int			level;	/* btree level, for checking */
	int			error;

	bno = NULLFSBLOCK;
	mp = ip->i_mount;
	*nextents = 0;
	*count = 0;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	if (!ifp)
		return 0;

	switch (XFS_IFORK_FORMAT(ip, whichfork)) {
	case XFS_DINODE_FMT_EXTENTS:
		xfs_bmap_count_leaves(ifp, nextents, count);
		return 0;
	case XFS_DINODE_FMT_BTREE:
		if (!(ifp->if_flags & XFS_IFEXTENTS)) {
			error = xfs_iread_extents(tp, ip, whichfork);
			if (error)
				return error;
		}

		/*
		 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
		 */
		block = ifp->if_broot;
		level = be16_to_cpu(block->bb_level);
		ASSERT(level > 0);
		pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
		bno = be64_to_cpu(*pp);
		ASSERT(bno != NULLFSBLOCK);
		ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
		ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);

		error = xfs_bmap_count_tree(mp, tp, ifp, bno, level,
				nextents, count);
		if (error) {
			XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)",
					XFS_ERRLEVEL_LOW, mp);
			return -EFSCORRUPTED;
		}
		return 0;
	}

	return 0;
}
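/*
 * Hedged usage sketch for the counter above (caller context, lock state and
 * error handling are assumed, not prescribed):
 *
 *	xfs_extnum_t	nextents;
 *	xfs_filblks_t	count;
 *
 *	error = xfs_bmap_count_blocks(tp, ip, XFS_DATA_FORK,
 *				      &nextents, &count);
 *
 * On success, count holds the fsblocks backing the fork (bmbt blocks
 * included for btree format, delalloc excluded) and nextents the number of
 * real extent records.
 */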
/*
 * returns 1 for success, 0 if we failed to map the extent.
 */
STATIC int
xfs_getbmapx_fix_eof_hole(
	xfs_inode_t		*ip,		/* xfs incore inode pointer */
	int			whichfork,
	struct getbmapx		*out,		/* output structure */
	int			prealloced,	/* this is a file with
						 * preallocated data space */
	int64_t			end,		/* last block requested */
	xfs_fsblock_t		startblock,
	bool			moretocome)
{
	int64_t			fixlen;
	xfs_mount_t		*mp;		/* file system mount point */
	xfs_ifork_t		*ifp;		/* inode fork pointer */
	xfs_extnum_t		lastx;		/* last extent pointer */
	xfs_fileoff_t		fileblock;

	if (startblock == HOLESTARTBLOCK) {
		mp = ip->i_mount;
		out->bmv_block = -1;
		fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
		fixlen -= out->bmv_offset;
		if (prealloced && out->bmv_offset + out->bmv_length == end) {
			/* Came to hole at EOF. Trim it. */
			if (fixlen <= 0)
				return 0;
			out->bmv_length = fixlen;
		}
	} else {
		if (startblock == DELAYSTARTBLOCK)
			out->bmv_block = -2;
		else
			out->bmv_block = xfs_fsb_to_db(ip, startblock);
		fileblock = XFS_BB_TO_FSB(ip->i_mount, out->bmv_offset);
		ifp = XFS_IFORK_PTR(ip, whichfork);
		if (!moretocome &&
		    xfs_iext_bno_to_ext(ifp, fileblock, &lastx) &&
		   (lastx == xfs_iext_count(ifp) - 1))
			out->bmv_oflags |= BMV_OF_LAST;
	}

	return 1;
}
/* Adjust the reported bmap around shared/unshared extent transitions. */
static int
xfs_getbmap_adjust_shared(
	struct xfs_inode		*ip,
	int				whichfork,
	struct xfs_bmbt_irec		*map,
	struct getbmapx			*out,
	struct xfs_bmbt_irec		*next_map)
{
	struct xfs_mount		*mp = ip->i_mount;
	xfs_agnumber_t			agno;
	xfs_agblock_t			agbno;
	xfs_agblock_t			ebno;
	xfs_extlen_t			elen;
	xfs_extlen_t			nlen;
	int				error;

	next_map->br_startblock = NULLFSBLOCK;
	next_map->br_startoff = NULLFILEOFF;
	next_map->br_blockcount = 0;

	/* Only written data blocks can be shared. */
	if (!xfs_is_reflink_inode(ip) ||
	    whichfork != XFS_DATA_FORK ||
	    !xfs_bmap_is_real_extent(map))
		return 0;

	agno = XFS_FSB_TO_AGNO(mp, map->br_startblock);
	agbno = XFS_FSB_TO_AGBNO(mp, map->br_startblock);
	error = xfs_reflink_find_shared(mp, NULL, agno, agbno,
			map->br_blockcount, &ebno, &elen, true);
	if (error)
		return error;

	if (ebno == NULLAGBLOCK) {
		/* No shared blocks at all. */
		return 0;
	} else if (agbno == ebno) {
		/*
		 * Shared extent at (agbno, elen).  Shrink the reported
		 * extent length and prepare to move the start of map[i]
		 * to agbno+elen, with the aim of (re)formatting the new
		 * map[i] the next time through the inner loop.
		 */
		out->bmv_length = XFS_FSB_TO_BB(mp, elen);
		out->bmv_oflags |= BMV_OF_SHARED;
		if (elen != map->br_blockcount) {
			*next_map = *map;
			next_map->br_startblock += elen;
			next_map->br_startoff += elen;
			next_map->br_blockcount -= elen;
		}
		map->br_blockcount -= elen;
	} else {
		/*
		 * There's an unshared extent (agbno, ebno - agbno)
		 * followed by shared extent at (ebno, elen).  Shrink
		 * the reported extent length to cover only the unshared
		 * extent and prepare to move up the start of map[i] to
		 * ebno, with the aim of (re)formatting the new map[i]
		 * the next time through the inner loop.
		 */
		*next_map = *map;
		nlen = ebno - agbno;
		out->bmv_length = XFS_FSB_TO_BB(mp, nlen);
		next_map->br_startblock += nlen;
		next_map->br_startoff += nlen;
		next_map->br_blockcount -= nlen;
		map->br_blockcount -= nlen;
	}

	return 0;
}
/*
 * Get inode's extents as described in bmv, and format for output.
 * Calls formatter to fill the user's buffer until all extents
 * are mapped, until the passed-in bmv->bmv_count slots have
 * been filled, or until the formatter short-circuits the loop,
 * if it is tracking filled-in extents on its own.
 */
int						/* error code */
xfs_getbmap(
	xfs_inode_t		*ip,
	struct getbmapx		*bmv,		/* user bmap structure */
	xfs_bmap_format_t	formatter,	/* format to user */
	void			*arg)		/* formatter arg */
{
	int64_t			bmvend;		/* last block requested */
	int			error = 0;	/* return value */
	int64_t			fixlen;		/* length for -1 case */
	int			i;		/* extent number */
	int			lock;		/* lock state */
	xfs_bmbt_irec_t		*map;		/* buffer for user's data */
	xfs_mount_t		*mp;		/* file system mount point */
	int			nex;		/* # of user extents can do */
	int			subnex;		/* # of bmapi's can do */
	int			nmap;		/* number of map entries */
	struct getbmapx		*out;		/* output structure */
	int			whichfork;	/* data or attr fork */
	int			prealloced;	/* this is a file with
						 * preallocated data space */
	int			iflags;		/* interface flags */
	int			bmapi_flags;	/* flags for xfs_bmapi */
	int			cur_ext = 0;
	struct xfs_bmbt_irec	inject_map;

	mp = ip->i_mount;
	iflags = bmv->bmv_iflags;

#ifndef DEBUG
	/* Only allow CoW fork queries if we're debugging. */
	if (iflags & BMV_IF_COWFORK)
		return -EINVAL;
#endif
	if ((iflags & BMV_IF_ATTRFORK) && (iflags & BMV_IF_COWFORK))
		return -EINVAL;

	if (iflags & BMV_IF_ATTRFORK)
		whichfork = XFS_ATTR_FORK;
	else if (iflags & BMV_IF_COWFORK)
		whichfork = XFS_COW_FORK;
	else
		whichfork = XFS_DATA_FORK;

	switch (whichfork) {
	case XFS_ATTR_FORK:
		if (XFS_IFORK_Q(ip)) {
			if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS &&
			    ip->i_d.di_aformat != XFS_DINODE_FMT_BTREE &&
			    ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)
				return -EINVAL;
		} else if (unlikely(
			   ip->i_d.di_aformat != 0 &&
			   ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS)) {
			XFS_ERROR_REPORT("xfs_getbmap", XFS_ERRLEVEL_LOW,
					 ip->i_mount);
			return -EFSCORRUPTED;
		}

		prealloced = 0;
		fixlen = 1LL << 32;
		break;
	case XFS_COW_FORK:
		if (ip->i_cformat != XFS_DINODE_FMT_EXTENTS)
			return -EINVAL;

		if (xfs_get_cowextsz_hint(ip)) {
			prealloced = 1;
			fixlen = mp->m_super->s_maxbytes;
		} else {
			prealloced = 0;
			fixlen = XFS_ISIZE(ip);
		}
		break;
	default:
		/* Local format data forks report no extents. */
		if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL) {
			bmv->bmv_entries = 0;
			return 0;
		}
		if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
		    ip->i_d.di_format != XFS_DINODE_FMT_BTREE)
			return -EINVAL;

		if (xfs_get_extsz_hint(ip) ||
		    ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)){
			prealloced = 1;
			fixlen = mp->m_super->s_maxbytes;
		} else {
			prealloced = 0;
			fixlen = XFS_ISIZE(ip);
		}
		break;
	}

	if (bmv->bmv_length == -1) {
		fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, fixlen));
		bmv->bmv_length =
			max_t(int64_t, fixlen - bmv->bmv_offset, 0);
	} else if (bmv->bmv_length == 0) {
		bmv->bmv_entries = 0;
		return 0;
	} else if (bmv->bmv_length < 0) {
		return -EINVAL;
	}

	nex = bmv->bmv_count - 1;
	if (nex <= 0)
		return -EINVAL;
	bmvend = bmv->bmv_offset + bmv->bmv_length;

	if (bmv->bmv_count > ULONG_MAX / sizeof(struct getbmapx))
		return -ENOMEM;
	out = kmem_zalloc_large(bmv->bmv_count * sizeof(struct getbmapx), 0);
	if (!out)
		return -ENOMEM;

	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	switch (whichfork) {
	case XFS_DATA_FORK:
		if (!(iflags & BMV_IF_DELALLOC) &&
		    (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size)) {
			error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
			if (error)
				goto out_unlock_iolock;

			/*
			 * Even after flushing the inode, there can still be
			 * delalloc blocks on the inode beyond EOF due to
			 * speculative preallocation.  These are not removed
			 * until the release function is called or the inode
			 * is inactivated.  Hence we cannot assert here that
			 * ip->i_delayed_blks == 0.
			 */
		}

		lock = xfs_ilock_data_map_shared(ip);
		break;
	case XFS_COW_FORK:
		lock = XFS_ILOCK_SHARED;
		xfs_ilock(ip, lock);
		break;
	case XFS_ATTR_FORK:
		lock = xfs_ilock_attr_map_shared(ip);
		break;
	}

	/*
	 * Don't let nex be bigger than the number of extents
	 * we can have assuming alternating holes and real extents.
	 */
	if (nex > XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1)
		nex = XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1;

	bmapi_flags = xfs_bmapi_aflag(whichfork);
	if (!(iflags & BMV_IF_PREALLOC))
		bmapi_flags |= XFS_BMAPI_IGSTATE;

	/*
	 * Allocate enough space to handle "subnex" maps at a time.
	 */
	error = -ENOMEM;
	subnex = 16;
	map = kmem_alloc(subnex * sizeof(*map), KM_MAYFAIL | KM_NOFS);
	if (!map)
		goto out_unlock_ilock;

	bmv->bmv_entries = 0;

	if (XFS_IFORK_NEXTENTS(ip, whichfork) == 0 &&
	    (whichfork == XFS_ATTR_FORK || !(iflags & BMV_IF_DELALLOC))) {
		error = 0;
		goto out_free_map;
	}

	do {
		nmap = (nex > subnex) ? subnex : nex;
		error = xfs_bmapi_read(ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset),
				       XFS_BB_TO_FSB(mp, bmv->bmv_length),
				       map, &nmap, bmapi_flags);
		if (error)
			goto out_free_map;
		ASSERT(nmap <= subnex);

		for (i = 0; i < nmap && bmv->bmv_length &&
				cur_ext < bmv->bmv_count - 1; i++) {
			out[cur_ext].bmv_oflags = 0;
			if (map[i].br_state == XFS_EXT_UNWRITTEN)
				out[cur_ext].bmv_oflags |= BMV_OF_PREALLOC;
			else if (map[i].br_startblock == DELAYSTARTBLOCK)
				out[cur_ext].bmv_oflags |= BMV_OF_DELALLOC;
			out[cur_ext].bmv_offset =
				XFS_FSB_TO_BB(mp, map[i].br_startoff);
			out[cur_ext].bmv_length =
				XFS_FSB_TO_BB(mp, map[i].br_blockcount);
			out[cur_ext].bmv_unused1 = 0;
			out[cur_ext].bmv_unused2 = 0;

			/*
			 * delayed allocation extents that start beyond EOF can
			 * occur due to speculative EOF allocation when the
			 * delalloc extent is larger than the largest freespace
			 * extent at conversion time.  These extents cannot be
			 * converted by data writeback, so can exist here even
			 * if we are not supposed to be finding delalloc
			 * extents.
			 */
			if (map[i].br_startblock == DELAYSTARTBLOCK &&
			    map[i].br_startoff < XFS_B_TO_FSB(mp, XFS_ISIZE(ip)))
				ASSERT((iflags & BMV_IF_DELALLOC) != 0);

			if (map[i].br_startblock == HOLESTARTBLOCK &&
			    whichfork == XFS_ATTR_FORK) {
				/* came to the end of attribute fork */
				out[cur_ext].bmv_oflags |= BMV_OF_LAST;
				goto out_free_map;
			}

			/* Is this a shared block? */
			error = xfs_getbmap_adjust_shared(ip, whichfork,
					&map[i], &out[cur_ext], &inject_map);
			if (error)
				goto out_free_map;

			if (!xfs_getbmapx_fix_eof_hole(ip, whichfork,
					&out[cur_ext], prealloced, bmvend,
					map[i].br_startblock,
					inject_map.br_startblock != NULLFSBLOCK))
				goto out_free_map;

			bmv->bmv_offset =
				out[cur_ext].bmv_offset +
				out[cur_ext].bmv_length;
			bmv->bmv_length =
				max_t(int64_t, 0, bmvend - bmv->bmv_offset);

			/*
			 * In case we don't want to return the hole,
			 * don't increase cur_ext so that we can reuse
			 * it in the next loop.
			 */
			if ((iflags & BMV_IF_NO_HOLES) &&
			    map[i].br_startblock == HOLESTARTBLOCK) {
				memset(&out[cur_ext], 0, sizeof(out[cur_ext]));
				continue;
			}

			/*
			 * In order to report shared extents accurately,
			 * we report each distinct shared/unshared part
			 * of a single bmbt record using multiple bmap
			 * extents.  To make that happen, we iterate the
			 * same map array item multiple times, each
			 * time trimming out the subextent that we just
			 * reported.
			 *
			 * Because of this, we must check the out array
			 * index (cur_ext) directly against bmv_count-1
			 * to avoid overflows.
			 */
			if (inject_map.br_startblock != NULLFSBLOCK) {
				map[i] = inject_map;
				i--;
			}
			bmv->bmv_entries++;
			cur_ext++;
		}
	} while (nmap && bmv->bmv_length && cur_ext < bmv->bmv_count - 1);

 out_free_map:
	kmem_free(map);
 out_unlock_ilock:
	xfs_iunlock(ip, lock);
 out_unlock_iolock:
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	for (i = 0; i < cur_ext; i++) {
		/* format results & advance arg */
		error = formatter(&arg, &out[i]);
		if (error)
			break;
	}

	kmem_free(out);
	return error;
}
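/*
 * Minimal sketch of a formatter as consumed above.  example_format is a
 * hypothetical helper (not an existing kernel function); it only shows the
 * calling convention: xfs_getbmap() invokes formatter(&arg, &out[i]) once
 * per mapped extent and stops at the first non-zero return.
 *
 *	static int example_format(void **ap, struct getbmapx *bmv)
 *	{
 *		struct getbmapx __user **up = (struct getbmapx __user **)ap;
 *
 *		if (copy_to_user(*up, bmv, sizeof(*bmv)))
 *			return -EFAULT;
 *		(*up)++;	// advance arg to the next user slot
 *		return 0;
 *	}
 */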
/*
 * dead simple method of punching delayed allocation blocks from a range in
 * the inode. Walks a block at a time so will be slow, but is only executed in
 * rare error cases so the overhead is not critical. This will always punch out
 * both the start and end blocks, even if the ranges only partially overlap
 * them, so it is up to the caller to ensure that partial blocks are not
 * passed in.
 */
int
xfs_bmap_punch_delalloc_range(
	struct xfs_inode	*ip,
	xfs_fileoff_t		start_fsb,
	xfs_fileoff_t		length)
{
	xfs_fileoff_t		remaining = length;
	int			error = 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	do {
		int		done;
		xfs_bmbt_irec_t	imap;
		int		nimaps = 1;
		xfs_fsblock_t	firstblock;
		struct xfs_defer_ops dfops;

		/*
		 * Map the range first and check that it is a delalloc extent
		 * before trying to unmap the range. Otherwise we will be
		 * trying to remove a real extent (which requires a
		 * transaction) or a hole, which is probably a bad idea...
		 */
		error = xfs_bmapi_read(ip, start_fsb, 1, &imap, &nimaps,
				       XFS_BMAPI_ENTIRE);

		if (error) {
			/* something screwed, just bail */
			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
				xfs_alert(ip->i_mount,
			"Failed delalloc mapping lookup ino %lld fsb %lld.",
						ip->i_ino, start_fsb);
			}
			break;
		}
		if (!nimaps) {
			/* nothing there */
			goto next_block;
		}
		if (imap.br_startblock != DELAYSTARTBLOCK) {
			/* been converted, ignore */
			goto next_block;
		}
		WARN_ON(imap.br_blockcount == 0);

		/*
		 * Note: while we initialise the firstblock/dfops pair, they
		 * should never be used because blocks should never be
		 * allocated or freed for a delalloc extent and hence we don't
		 * need to cancel or finish them after the xfs_bunmapi() call.
		 */
		xfs_defer_init(&dfops, &firstblock);
		error = xfs_bunmapi(NULL, ip, start_fsb, 1, 0, 1, &firstblock,
					&dfops, &done);
		if (error)
			break;

		ASSERT(!xfs_defer_has_unfinished_work(&dfops));
next_block:
		start_fsb++;
		remaining--;
	} while(remaining > 0);

	return error;
}
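/*
 * Illustrative call pattern (the locking shown is what the ASSERT above
 * requires; start_fsb/count_fsb are placeholders for the caller's range):
 *
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	error = xfs_bmap_punch_delalloc_range(ip, start_fsb, count_fsb);
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 *
 * Typical callers are writeback error paths that must throw away delalloc
 * reservations covering pages that could not be written back.
 */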
/*
 * Test whether it is appropriate to check an inode for and free post EOF
 * blocks. The 'force' parameter determines whether we should also consider
 * regular files that are marked preallocated or append-only.
 */
bool
xfs_can_free_eofblocks(struct xfs_inode *ip, bool force)
{
	/* prealloc/delalloc exists only on regular files */
	if (!S_ISREG(VFS_I(ip)->i_mode))
		return false;

	/*
	 * Zero sized files with no cached pages and delalloc blocks will not
	 * have speculative prealloc/delalloc blocks to remove.
	 */
	if (VFS_I(ip)->i_size == 0 &&
	    VFS_I(ip)->i_mapping->nrpages == 0 &&
	    ip->i_delayed_blks == 0)
		return false;

	/* If we haven't read in the extent list, then don't do it now. */
	if (!(ip->i_df.if_flags & XFS_IFEXTENTS))
		return false;

	/*
	 * Do not free real preallocated or append-only files unless the file
	 * has delalloc blocks and we are forced to remove them.
	 */
	if (ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
		if (!force || ip->i_delayed_blks == 0)
			return false;

	return true;
}
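/*
 * Typical pairing, as used later in xfs_shift_file_space():
 *
 *	if (xfs_can_free_eofblocks(ip, true)) {
 *		error = xfs_free_eofblocks(ip);
 *		...
 *	}
 *
 * The check is advisory; xfs_free_eofblocks() re-checks the post-EOF
 * mapping under the inode lock before truncating anything.
 */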
/*
 * This is called to free any blocks beyond eof. The caller must hold
 * IOLOCK_EXCL unless we are in the inode reclaim path and have the only
 * reference to the inode.
 */
int
xfs_free_eofblocks(
	struct xfs_inode	*ip)
{
	struct xfs_trans	*tp;
	int			error;
	xfs_fileoff_t		end_fsb;
	xfs_fileoff_t		last_fsb;
	xfs_filblks_t		map_len;
	int			nimaps;
	struct xfs_bmbt_irec	imap;
	struct xfs_mount	*mp = ip->i_mount;

	/*
	 * Figure out if there are any blocks beyond the end
	 * of the file.  If not, then there is nothing to do.
	 */
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
	last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	if (last_fsb <= end_fsb)
		return 0;
	map_len = last_fsb - end_fsb;

	nimaps = 1;
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	error = xfs_bmapi_read(ip, end_fsb, map_len, &imap, &nimaps, 0);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	/*
	 * If there are blocks after the end of file, truncate the file to its
	 * current size to free them up.
	 */
	if (!error && (nimaps != 0) &&
	    (imap.br_startblock != HOLESTARTBLOCK ||
	     ip->i_delayed_blks)) {
		/*
		 * Attach the dquots to the inode up front.
		 */
		error = xfs_qm_dqattach(ip, 0);
		if (error)
			return error;

		/* wait on dio to ensure i_size has settled */
		inode_dio_wait(VFS_I(ip));

		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0,
				&tp);
		if (error) {
			ASSERT(XFS_FORCED_SHUTDOWN(mp));
			return error;
		}

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip, 0);

		/*
		 * Do not update the on-disk file size.  If we update the
		 * on-disk file size and then the system crashes before the
		 * contents of the file are flushed to disk then the files
		 * may be full of holes (ie NULL files bug).
		 */
		error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK,
					XFS_ISIZE(ip));
		if (error) {
			/*
			 * If we get an error at this point we simply don't
			 * bother truncating the file.
			 */
			xfs_trans_cancel(tp);
		} else {
			error = xfs_trans_commit(tp);
			if (!error)
				xfs_inode_clear_eofblocks_tag(ip);
		}

		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	}
	return error;
}
int
xfs_alloc_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len,
	int			alloc_type)
{
	xfs_mount_t		*mp = ip->i_mount;
	xfs_off_t		count;
	xfs_filblks_t		allocated_fsb;
	xfs_filblks_t		allocatesize_fsb;
	xfs_extlen_t		extsz, temp;
	xfs_fileoff_t		startoffset_fsb;
	xfs_fsblock_t		firstfsb;
	int			nimaps;
	int			quota_flag;
	int			rt;
	xfs_trans_t		*tp;
	xfs_bmbt_irec_t		imaps[1], *imapp;
	struct xfs_defer_ops	dfops;
	uint			qblocks, resblks, resrtextents;
	int			error;

	trace_xfs_alloc_file_space(ip);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return error;

	if (len <= 0)
		return -EINVAL;

	rt = XFS_IS_REALTIME_INODE(ip);
	extsz = xfs_get_extsz_hint(ip);

	count = len;
	imapp = &imaps[0];
	nimaps = 1;
	startoffset_fsb = XFS_B_TO_FSBT(mp, offset);
	allocatesize_fsb = XFS_B_TO_FSB(mp, count);

	/*
	 * Allocate file space until done or until there is an error
	 */
	while (allocatesize_fsb && !error) {
		xfs_fileoff_t	s, e;

		/*
		 * Determine space reservations for data/realtime.
		 */
		if (unlikely(extsz)) {
			s = startoffset_fsb;
			do_div(s, extsz);
			s *= extsz;
			e = startoffset_fsb + allocatesize_fsb;
			if ((temp = do_mod(startoffset_fsb, extsz)))
				e += temp;
			if ((temp = do_mod(e, extsz)))
				e += extsz - temp;
		} else {
			s = 0;
			e = allocatesize_fsb;
		}

		/*
		 * The transaction reservation is limited to a 32-bit block
		 * count, hence we need to limit the number of blocks we are
		 * trying to reserve to avoid an overflow. We can't allocate
		 * more than @nimaps extents, and an extent is limited on disk
		 * to MAXEXTLEN (21 bits), so use that to enforce the limit.
		 */
		resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps));
		if (unlikely(rt)) {
			resrtextents = qblocks = resblks;
			resrtextents /= mp->m_sb.sb_rextsize;
			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
			quota_flag = XFS_QMOPT_RES_RTBLKS;
		} else {
			resrtextents = 0;
			resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
			quota_flag = XFS_QMOPT_RES_REGBLKS;
		}

		/*
		 * Allocate and setup the transaction.
		 */
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks,
				resrtextents, 0, &tp);

		/*
		 * Check for running out of space
		 */
		if (error) {
			/*
			 * Free the transaction structure.
			 */
			ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
			break;
		}
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks,
						      0, quota_flag);
		if (error)
			goto error1;

		xfs_trans_ijoin(tp, ip, 0);

		xfs_defer_init(&dfops, &firstfsb);
		error = xfs_bmapi_write(tp, ip, startoffset_fsb,
					allocatesize_fsb, alloc_type, &firstfsb,
					resblks, imapp, &nimaps, &dfops);
		if (error)
			goto error0;

		/*
		 * Complete the transaction
		 */
		error = xfs_defer_finish(&tp, &dfops, NULL);
		if (error)
			goto error0;

		error = xfs_trans_commit(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			break;

		allocated_fsb = imapp->br_blockcount;

		if (nimaps == 0) {
			error = -ENOSPC;
			break;
		}

		startoffset_fsb += allocated_fsb;
		allocatesize_fsb -= allocated_fsb;
	}

	return error;

error0:	/* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
	xfs_defer_cancel(&dfops);
	xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);

error1:	/* Just cancel transaction */
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
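/*
 * Worked example for the extent size hint rounding above (values assumed):
 * with extsz == 8 fsbs, startoffset_fsb == 10 and allocatesize_fsb == 20,
 * s rounds down to 8 and e rounds up to 32, so the reservation covers
 * e - s == 24 fsbs even though only 20 were requested, keeping the
 * allocation aligned to the hint.
 */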
static int
xfs_unmap_extent(
	struct xfs_inode	*ip,
	xfs_fileoff_t		startoffset_fsb,
	xfs_filblks_t		len_fsb,
	int			*done)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	struct xfs_defer_ops	dfops;
	xfs_fsblock_t		firstfsb;
	uint			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
	if (error) {
		ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
		return error;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot, ip->i_gdquot,
			ip->i_pdquot, resblks, 0, XFS_QMOPT_RES_REGBLKS);
	if (error)
		goto out_trans_cancel;

	xfs_trans_ijoin(tp, ip, 0);

	xfs_defer_init(&dfops, &firstfsb);
	error = xfs_bunmapi(tp, ip, startoffset_fsb, len_fsb, 0, 2, &firstfsb,
			&dfops, done);
	if (error)
		goto out_bmap_cancel;

	error = xfs_defer_finish(&tp, &dfops, ip);
	if (error)
		goto out_bmap_cancel;

	error = xfs_trans_commit(tp);
out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

out_bmap_cancel:
	xfs_defer_cancel(&dfops);
out_trans_cancel:
	xfs_trans_cancel(tp);
	goto out_unlock;
}
static int
xfs_adjust_extent_unmap_boundaries(
	struct xfs_inode	*ip,
	xfs_fileoff_t		*startoffset_fsb,
	xfs_fileoff_t		*endoffset_fsb)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_bmbt_irec	imap;
	int			nimap, error;
	xfs_extlen_t		mod = 0;

	nimap = 1;
	error = xfs_bmapi_read(ip, *startoffset_fsb, 1, &imap, &nimap, 0);
	if (error)
		return error;

	if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
		ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
		mod = do_mod(imap.br_startblock, mp->m_sb.sb_rextsize);
		if (mod)
			*startoffset_fsb += mp->m_sb.sb_rextsize - mod;
	}

	nimap = 1;
	error = xfs_bmapi_read(ip, *endoffset_fsb - 1, 1, &imap, &nimap, 0);
	if (error)
		return error;

	if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
		ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
		mod++;
		if (mod && mod != mp->m_sb.sb_rextsize)
			*endoffset_fsb -= mod;
	}

	return 0;
}
static int
xfs_flush_unmap_range(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct inode		*inode = VFS_I(ip);
	xfs_off_t		rounding, start, end;
	int			error;

	/* wait for the completion of any pending DIOs */
	inode_dio_wait(inode);

	rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_SIZE);
	start = round_down(offset, rounding);
	end = round_up(offset + len, rounding) - 1;

	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (error)
		return error;
	truncate_pagecache_range(inode, start, end);
	return 0;
}
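/*
 * Example of the rounding above (assuming 1k filesystem blocks and 4k
 * pages): rounding == max(1024, 4096) == 4096, so an unmap of bytes
 * [6000, 7000) flushes and invalidates the page cache over [4096, 8191],
 * keeping whole pages consistent with the extent manipulation that follows.
 */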
int
xfs_free_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		startoffset_fsb;
	xfs_fileoff_t		endoffset_fsb;
	int			done = 0, error;

	trace_xfs_free_file_space(ip);

	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return error;

	if (len <= 0)	/* if nothing being freed */
		return 0;

	error = xfs_flush_unmap_range(ip, offset, len);
	if (error)
		return error;

	startoffset_fsb = XFS_B_TO_FSB(mp, offset);
	endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);

	/*
	 * Need to zero the stuff we're not freeing, on disk.  If it's a RT file
	 * and we can't use unwritten extents then we actually need to ensure
	 * to zero the whole extent, otherwise we just need to take care of
	 * block boundaries, and xfs_bunmapi will handle the rest.
	 */
	if (XFS_IS_REALTIME_INODE(ip) &&
	    !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
		error = xfs_adjust_extent_unmap_boundaries(ip, &startoffset_fsb,
				&endoffset_fsb);
		if (error)
			return error;
	}

	if (endoffset_fsb > startoffset_fsb) {
		while (!done) {
			error = xfs_unmap_extent(ip, startoffset_fsb,
					endoffset_fsb - startoffset_fsb, &done);
			if (error)
				return error;
		}
	}

	/*
	 * Now that we've unmapped all full blocks we'll have to zero out any
	 * partial block at the beginning and/or end.  xfs_zero_range is
	 * smart enough to skip any holes, including those we just created,
	 * but we must take care not to zero beyond EOF and enlarge i_size.
	 */
	if (offset >= XFS_ISIZE(ip))
		return 0;

	if (offset + len > XFS_ISIZE(ip))
		len = XFS_ISIZE(ip) - offset;
	return xfs_zero_range(ip, offset, len, NULL);
}
/*
 * Preallocate and zero a range of a file. This mechanism has the allocation
 * semantics of fallocate and in addition converts data in the range to zeroes.
 */
int
xfs_zero_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	uint			blksize;
	int			error;

	trace_xfs_zero_file_space(ip);

	blksize = 1 << mp->m_sb.sb_blocklog;

	/*
	 * Punch a hole and prealloc the range. We use hole punch rather than
	 * unwritten extent conversion for two reasons:
	 *
	 * 1.) Hole punch handles partial block zeroing for us.
	 *
	 * 2.) If prealloc returns ENOSPC, the file range is still zero-valued
	 * by virtue of the hole punch.
	 */
	error = xfs_free_file_space(ip, offset, len);
	if (error)
		goto out;

	error = xfs_alloc_file_space(ip, round_down(offset, blksize),
				     round_up(offset + len, blksize) -
				     round_down(offset, blksize),
				     XFS_BMAPI_PREALLOC);
out:
	return error;
}
/*
 * @next_fsb will keep track of the extent currently undergoing shift.
 * @stop_fsb will keep track of the extent at which we have to stop.
 * If we are shifting left, we will start with block (offset + len) and
 * shift each extent till last extent.
 * If we are shifting right, we will start with last extent inside file space
 * and continue until we reach the block corresponding to offset.
 */
static int
xfs_shift_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len,
	enum shift_direction	direction)
{
	int			done = 0;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;
	struct xfs_defer_ops	dfops;
	xfs_fsblock_t		first_block;
	xfs_fileoff_t		stop_fsb;
	xfs_fileoff_t		next_fsb;
	xfs_fileoff_t		shift_fsb;
	uint			resblks;

	ASSERT(direction == SHIFT_LEFT || direction == SHIFT_RIGHT);

	if (direction == SHIFT_LEFT) {
		/*
		 * Reserve blocks to cover potential extent merges after left
		 * shift operations.
		 */
		resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
		next_fsb = XFS_B_TO_FSB(mp, offset + len);
		stop_fsb = XFS_B_TO_FSB(mp, VFS_I(ip)->i_size);
	} else {
		/*
		 * If right shift, delegate the work of initialization of
		 * next_fsb to xfs_bmap_shift_extent as it has ilock held.
		 */
		resblks = 0;
		next_fsb = NULLFSBLOCK;
		stop_fsb = XFS_B_TO_FSB(mp, offset);
	}

	shift_fsb = XFS_B_TO_FSB(mp, len);

	/*
	 * Trim eofblocks to avoid shifting uninitialized post-eof preallocation
	 * into the accessible region of the file.
	 */
	if (xfs_can_free_eofblocks(ip, true)) {
		error = xfs_free_eofblocks(ip);
		if (error)
			return error;
	}

	/*
	 * Writeback and invalidate cache for the remainder of the file as we're
	 * about to shift down every extent from offset to EOF.
	 */
	error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
					     offset, -1);
	if (error)
		return error;
	error = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
					offset >> PAGE_SHIFT, -1);
	if (error)
		return error;

	/*
	 * The extent shifting code works on extent granularity. So, if
	 * stop_fsb is not the starting block of extent, we need to split
	 * the extent at stop_fsb.
	 */
	if (direction == SHIFT_RIGHT) {
		error = xfs_bmap_split_extent(ip, stop_fsb);
		if (error)
			return error;
	}

	while (!error && !done) {
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0,
					&tp);
		if (error)
			break;

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot,
				ip->i_gdquot, ip->i_pdquot, resblks, 0,
				XFS_QMOPT_RES_REGBLKS);
		if (error)
			goto out_trans_cancel;
		xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

		xfs_defer_init(&dfops, &first_block);

		/*
		 * We are using the write transaction in which max 2 bmbt
		 * updates are allowed
		 */
		error = xfs_bmap_shift_extents(tp, ip, &next_fsb, shift_fsb,
				&done, stop_fsb, &first_block, &dfops,
				direction, XFS_BMAP_MAX_SHIFT_EXTENTS);
		if (error)
			goto out_bmap_cancel;

		error = xfs_defer_finish(&tp, &dfops, NULL);
		if (error)
			goto out_bmap_cancel;

		error = xfs_trans_commit(tp);
	}

	return error;

out_bmap_cancel:
	xfs_defer_cancel(&dfops);
out_trans_cancel:
	xfs_trans_cancel(tp);
	return error;
}
/*
 * xfs_collapse_file_space()
 *	This routine frees disk space and shifts extents for the given file.
 *	The first thing we do is to free data blocks in the specified range
 *	by calling xfs_free_file_space(). It would also sync dirty data
 *	and invalidate page cache over the region on which collapse range
 *	is working. Then it shifts extent records to the left to cover the hole.
 */
int
xfs_collapse_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	int error;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	trace_xfs_collapse_file_space(ip);

	error = xfs_free_file_space(ip, offset, len);
	if (error)
		return error;

	return xfs_shift_file_space(ip, offset, len, SHIFT_LEFT);
}
/*
 * xfs_insert_file_space()
 *	This routine creates hole space by shifting extents for the given file.
 *	The first thing we do is to sync dirty data and invalidate page cache
 *	over the region on which insert range is working. Then we split an
 *	extent in two at the given offset by calling xfs_bmap_split_extent,
 *	and shift all extent records lying between [offset, last allocated
 *	extent] to the right to make room for the hole.
 */
int
xfs_insert_file_space(
	struct xfs_inode	*ip,
	loff_t			offset,
	loff_t			len)
{
	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	trace_xfs_insert_file_space(ip);

	return xfs_shift_file_space(ip, offset, len, SHIFT_RIGHT);
}
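/*
 * Semantics sketch for the two wrappers above (byte offsets are purely
 * illustrative): given file contents |AAAA|BBBB|CCCC| in 4096-byte blocks,
 * xfs_collapse_file_space(ip, 4096, 4096) yields |AAAA|CCCC| and a shorter
 * file, while xfs_insert_file_space(ip, 4096, 4096) yields
 * |AAAA|hole|BBBB|CCCC| by shifting extents right before the caller writes
 * into the newly created hole.
 */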
/*
 * We need to check that the format of the data fork in the temporary inode is
 * valid for the target inode before doing the swap. This is not a problem with
 * attr1 because of the fixed fork offset, but attr2 has a dynamically sized
 * data fork depending on the space the attribute fork is taking so we can get
 * invalid formats on the target inode.
 *
 * E.g. target has space for 7 extents in extent format, temp inode only has
 * space for 6.  If we defragment down to 7 extents, then the tmp format is a
 * btree, but when swapped it needs to be in extent format. Hence we can't just
 * blindly swap data forks on attr2 filesystems.
 *
 * Note that we check the swap in both directions so that we don't end up with
 * a corrupt temporary inode, either.
 *
 * Note that fixing the way xfs_fsr sets up the attribute fork in the source
 * inode will prevent this situation from occurring, so all we do here is
 * reject and log the attempt. Basically we are putting the responsibility on
 * userspace to get this right.
 */
static int
xfs_swap_extents_check_format(
	struct xfs_inode	*ip,	/* target inode */
	struct xfs_inode	*tip)	/* tmp inode */
{

	/* Should never get a local format */
	if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL ||
	    tip->i_d.di_format == XFS_DINODE_FMT_LOCAL)
		return -EINVAL;

	/*
	 * if the target inode has fewer extents than the temporary inode then
	 * why did userspace call us?
	 */
	if (ip->i_d.di_nextents < tip->i_d.di_nextents)
		return -EINVAL;

	/*
	 * If we have to use the (expensive) rmap swap method, we can
	 * handle any number of extents and any format.
	 */
	if (xfs_sb_version_hasrmapbt(&ip->i_mount->m_sb))
		return 0;

	/*
	 * if the target inode is in extent form and the temp inode is in btree
	 * form then we will end up with the target inode in the wrong format
	 * as we already know there are fewer extents in the temp inode.
	 */
	if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
	    tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
		return -EINVAL;

	/* Check temp in extent form to max in target */
	if (tip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
	    XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) >
			XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
		return -EINVAL;

	/* Check target in extent form to max in temp */
	if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
	    XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) >
			XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
		return -EINVAL;

	/*
	 * If we are in a btree format, check that the temp root block will fit
	 * in the target and that it has enough extents to be in btree format
	 * in the target.
	 *
	 * Note that we have to be careful to allow btree->extent conversions
	 * (a common defrag case) which will occur when the temp inode is in
	 * extent format...
	 */
	if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
		if (XFS_IFORK_Q(ip) &&
		    XFS_BMAP_BMDR_SPACE(tip->i_df.if_broot) > XFS_IFORK_BOFF(ip))
			return -EINVAL;
		if (XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) <=
		    XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
			return -EINVAL;
	}

	/* Reciprocal target->temp btree format checks */
	if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
		if (XFS_IFORK_Q(tip) &&
		    XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > XFS_IFORK_BOFF(tip))
			return -EINVAL;
		if (XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) <=
		    XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
			return -EINVAL;
	}

	return 0;
}
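/*
 * Concrete instance of the failure mode described above (the extent counts
 * come from the comment at the top of the function): if the target inode can
 * hold 7 inline extent records but the donor's attr fork leaves room for only
 * 6, defragmenting a 7-extent file produces a btree-format donor fork that
 * cannot legally be swapped into the extent-format target, so the request is
 * rejected with -EINVAL rather than corrupting either inode.
 */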
static int
xfs_swap_extent_flush(
	struct xfs_inode	*ip)
{
	int	error;

	error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
	if (error)
		return error;
	truncate_pagecache_range(VFS_I(ip), 0, -1);

	/* Verify O_DIRECT for ftmp */
	if (VFS_I(ip)->i_mapping->nrpages)
		return -EINVAL;
	return 0;
}
/*
 * Move extents from one file to another, when rmap is enabled.
 */
STATIC int
xfs_swap_extent_rmap(
	struct xfs_trans	**tpp,
	struct xfs_inode	*ip,
	struct xfs_inode	*tip)
{
	struct xfs_bmbt_irec	irec;
	struct xfs_bmbt_irec	uirec;
	struct xfs_bmbt_irec	tirec;
	xfs_fileoff_t		offset_fsb;
	xfs_fileoff_t		end_fsb;
	xfs_filblks_t		count_fsb;
	xfs_fsblock_t		firstfsb;
	struct xfs_defer_ops	dfops;
	int			error;
	xfs_filblks_t		ilen;
	xfs_filblks_t		rlen;
	int			nimaps;
	uint64_t		tip_flags2;

	/*
	 * If the source file has shared blocks, we must flag the donor
	 * file as having shared blocks so that we get the shared-block
	 * rmap functions when we go to fix up the rmaps.  The flags
	 * will be switched for real later.
	 */
	tip_flags2 = tip->i_d.di_flags2;
	if (ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK)
		tip->i_d.di_flags2 |= XFS_DIFLAG2_REFLINK;

	offset_fsb = 0;
	end_fsb = XFS_B_TO_FSB(ip->i_mount, i_size_read(VFS_I(ip)));
	count_fsb = (xfs_filblks_t)(end_fsb - offset_fsb);

	while (count_fsb) {
		/* Read extent from the donor file */
		nimaps = 1;
		error = xfs_bmapi_read(tip, offset_fsb, count_fsb, &tirec,
				&nimaps, 0);
		if (error)
			goto out;
		ASSERT(nimaps == 1);
		ASSERT(tirec.br_startblock != DELAYSTARTBLOCK);

		trace_xfs_swap_extent_rmap_remap(tip, &tirec);
		ilen = tirec.br_blockcount;

		/* Unmap the old blocks in the source file. */
		while (tirec.br_blockcount) {
			xfs_defer_init(&dfops, &firstfsb);
			trace_xfs_swap_extent_rmap_remap_piece(tip, &tirec);

			/* Read extent from the source file */
			nimaps = 1;
			error = xfs_bmapi_read(ip, tirec.br_startoff,
					tirec.br_blockcount, &irec,
					&nimaps, 0);
			if (error)
				goto out_defer;
			ASSERT(nimaps == 1);
			ASSERT(tirec.br_startoff == irec.br_startoff);
			trace_xfs_swap_extent_rmap_remap_piece(ip, &irec);

			/* Trim the extent. */
			uirec = tirec;
			uirec.br_blockcount = rlen = min_t(xfs_filblks_t,
					tirec.br_blockcount,
					irec.br_blockcount);
			trace_xfs_swap_extent_rmap_remap_piece(tip, &uirec);

			/* Remove the mapping from the donor file. */
			error = xfs_bmap_unmap_extent((*tpp)->t_mountp, &dfops,
					tip, &uirec);
			if (error)
				goto out_defer;

			/* Remove the mapping from the source file. */
			error = xfs_bmap_unmap_extent((*tpp)->t_mountp, &dfops,
					ip, &irec);
			if (error)
				goto out_defer;

			/* Map the donor file's blocks into the source file. */
			error = xfs_bmap_map_extent((*tpp)->t_mountp, &dfops,
					ip, &uirec);
			if (error)
				goto out_defer;

			/* Map the source file's blocks into the donor file. */
			error = xfs_bmap_map_extent((*tpp)->t_mountp, &dfops,
					tip, &irec);
			if (error)
				goto out_defer;

			error = xfs_defer_finish(tpp, &dfops, ip);
			if (error)
				goto out_defer;

			tirec.br_startoff += rlen;
			if (tirec.br_startblock != HOLESTARTBLOCK &&
			    tirec.br_startblock != DELAYSTARTBLOCK)
				tirec.br_startblock += rlen;
			tirec.br_blockcount -= rlen;
		}

		/* Roll on... */
		count_fsb -= ilen;
		offset_fsb += ilen;
	}

	tip->i_d.di_flags2 = tip_flags2;
	return 0;

out_defer:
	xfs_defer_cancel(&dfops);
out:
	trace_xfs_swap_extent_rmap_error(ip, error, _RET_IP_);
	tip->i_d.di_flags2 = tip_flags2;
	return error;
}
/* Swap the extents of two files by swapping data forks. */
STATIC int
xfs_swap_extent_forks(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_inode	*tip,
	int			*src_log_flags,
	int			*target_log_flags)
{
	struct xfs_ifork	tempifp, *ifp, *tifp;
	xfs_filblks_t		aforkblks = 0;
	xfs_filblks_t		taforkblks = 0;
	xfs_extnum_t		junk;
	xfs_extnum_t		nextents;
	uint64_t		tmp;
	int			error;

	/*
	 * Count the number of extended attribute blocks
	 */
	if ( ((XFS_IFORK_Q(ip) != 0) && (ip->i_d.di_anextents > 0)) &&
	     (ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
		error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &junk,
				&aforkblks);
		if (error)
			return error;
	}
	if ( ((XFS_IFORK_Q(tip) != 0) && (tip->i_d.di_anextents > 0)) &&
	     (tip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
		error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK, &junk,
				&taforkblks);
		if (error)
			return error;
	}

	/*
	 * Before we've swapped the forks, let's set the owners of the forks
	 * appropriately. We have to do this as we are demand paging the btree
	 * buffers, and so the validation done on read will expect the owner
	 * field to be correctly set. Once we change the owners, we can swap
	 * the inode forks.
	 */
	if (ip->i_d.di_version == 3 &&
	    ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
		(*target_log_flags) |= XFS_ILOG_DOWNER;
		error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK,
					      tip->i_ino, NULL);
		if (error)
			return error;
	}

	if (tip->i_d.di_version == 3 &&
	    tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
		(*src_log_flags) |= XFS_ILOG_DOWNER;
		error = xfs_bmbt_change_owner(tp, tip, XFS_DATA_FORK,
					      ip->i_ino, NULL);
		if (error)
			return error;
	}

	/*
	 * Swap the data forks of the inodes
	 */
	ifp = &ip->i_df;
	tifp = &tip->i_df;
	tempifp = *ifp;		/* struct copy */
	*ifp = *tifp;		/* struct copy */
	*tifp = tempifp;	/* struct copy */

	/*
	 * Fix the on-disk inode values
	 */
	tmp = (uint64_t)ip->i_d.di_nblocks;
	ip->i_d.di_nblocks = tip->i_d.di_nblocks - taforkblks + aforkblks;
	tip->i_d.di_nblocks = tmp + taforkblks - aforkblks;

	tmp = (uint64_t) ip->i_d.di_nextents;
	ip->i_d.di_nextents = tip->i_d.di_nextents;
	tip->i_d.di_nextents = tmp;

	tmp = (uint64_t) ip->i_d.di_format;
	ip->i_d.di_format = tip->i_d.di_format;
	tip->i_d.di_format = tmp;

	/*
	 * The extents in the source inode could still contain speculative
	 * preallocation beyond EOF (e.g. the file is open but not modified
	 * while defrag is in progress). In that case, we need to copy over the
	 * number of delalloc blocks the data fork in the source inode is
	 * tracking beyond EOF so that when the fork is truncated away when the
	 * temporary inode is unlinked we don't underrun the i_delayed_blks
	 * counter on that inode.
	 */
	ASSERT(tip->i_delayed_blks == 0);
	tip->i_delayed_blks = ip->i_delayed_blks;
	ip->i_delayed_blks = 0;

	switch (ip->i_d.di_format) {
	case XFS_DINODE_FMT_EXTENTS:
		/*
		 * If the extents fit in the inode, fix the pointer.  Otherwise
		 * it's already NULL or pointing to the extent.
		 */
		nextents = xfs_iext_count(&ip->i_df);
		if (nextents <= XFS_INLINE_EXTS)
			ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
		(*src_log_flags) |= XFS_ILOG_DEXT;
		break;
	case XFS_DINODE_FMT_BTREE:
		ASSERT(ip->i_d.di_version < 3 ||
		       (*src_log_flags & XFS_ILOG_DOWNER));
		(*src_log_flags) |= XFS_ILOG_DBROOT;
		break;
	}

	switch (tip->i_d.di_format) {
	case XFS_DINODE_FMT_EXTENTS:
		/*
		 * If the extents fit in the inode, fix the pointer.  Otherwise
		 * it's already NULL or pointing to the extent.
		 */
		nextents = xfs_iext_count(&tip->i_df);
		if (nextents <= XFS_INLINE_EXTS)
			tifp->if_u1.if_extents = tifp->if_u2.if_inline_ext;
		(*target_log_flags) |= XFS_ILOG_DEXT;
		break;
	case XFS_DINODE_FMT_BTREE:
		(*target_log_flags) |= XFS_ILOG_DBROOT;
		ASSERT(tip->i_d.di_version < 3 ||
		       (*target_log_flags & XFS_ILOG_DOWNER));
		break;
	}

	return 0;
}
/* Swap the extents of two files. */
int
xfs_swap_extents(
	struct xfs_inode	*ip,	/* target inode */
	struct xfs_inode	*tip,	/* tmp inode */
	struct xfs_swapext	*sxp)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	struct xfs_bstat	*sbp = &sxp->sx_stat;
	int			src_log_flags, target_log_flags;
	int			error = 0;
	int			lock_flags;
	struct xfs_ifork	*cowfp;
	uint64_t		f;
	int			resblks;

	/*
	 * Lock the inodes against other IO, page faults and truncate to
	 * begin with.  Then we can ensure the inodes are flushed and have no
	 * page cache safely. Once we have done this we can take the ilocks and
	 * do the rest of the checks.
	 */
	lock_two_nondirectories(VFS_I(ip), VFS_I(tip));
	lock_flags = XFS_MMAPLOCK_EXCL;
	xfs_lock_two_inodes(ip, tip, XFS_MMAPLOCK_EXCL);

	/* Verify that both files have the same format */
	if ((VFS_I(ip)->i_mode & S_IFMT) != (VFS_I(tip)->i_mode & S_IFMT)) {
		error = -EINVAL;
		goto out_unlock;
	}

	/* Verify both files are either real-time or non-realtime */
	if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
		error = -EINVAL;
		goto out_unlock;
	}

	error = xfs_swap_extent_flush(ip);
	if (error)
		goto out_unlock;
	error = xfs_swap_extent_flush(tip);
	if (error)
		goto out_unlock;

	/*
	 * Extent "swapping" with rmap requires a permanent reservation and
	 * a block reservation because it's really just a remap operation
	 * performed with log redo items!
	 */
	if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
		/*
		 * Conceptually this shouldn't affect the shape of either
		 * bmbt, but since we atomically move extents one by one,
		 * we reserve enough space to rebuild both trees.
		 */
		resblks = XFS_SWAP_RMAP_SPACE_RES(mp,
				XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK),
				XFS_DATA_FORK) +
			  XFS_SWAP_RMAP_SPACE_RES(mp,
				XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK),
				XFS_DATA_FORK);
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks,
				0, 0, &tp);
	} else
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0,
				0, 0, &tp);
	if (error)
		goto out_unlock;

	/*
	 * Lock and join the inodes to the transaction so that transaction
	 * commit or cancel will unlock the inodes from this point onwards.
	 */
	xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL);
	lock_flags |= XFS_ILOCK_EXCL;
	xfs_trans_ijoin(tp, ip, 0);
	xfs_trans_ijoin(tp, tip, 0);

	/* Verify all data are being swapped */
	if (sxp->sx_offset != 0 ||
	    sxp->sx_length != ip->i_d.di_size ||
	    sxp->sx_length != tip->i_d.di_size) {
		error = -EFAULT;
		goto out_trans_cancel;
	}

	trace_xfs_swap_extent_before(ip, 0);
	trace_xfs_swap_extent_before(tip, 1);

	/* check inode formats now that data is flushed */
	error = xfs_swap_extents_check_format(ip, tip);
	if (error) {
		xfs_notice(mp,
		    "%s: inode 0x%llx format is incompatible for exchanging.",
				__func__, ip->i_ino);
		goto out_trans_cancel;
	}

	/*
	 * Compare the current change & modify times with that
	 * passed in.  If they differ, we abort this swap.
	 * This is the mechanism used to ensure the calling
	 * process that the file was not changed out from
	 * under it.
	 */
	if ((sbp->bs_ctime.tv_sec != VFS_I(ip)->i_ctime.tv_sec) ||
	    (sbp->bs_ctime.tv_nsec != VFS_I(ip)->i_ctime.tv_nsec) ||
	    (sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) ||
	    (sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) {
		error = -EBUSY;
		goto out_trans_cancel;
	}

	/*
	 * Note the trickiness in setting the log flags - we set the owner log
	 * flag on the opposite inode (i.e. the inode we are setting the new
	 * owner to be) because once we swap the forks and log that, log
	 * recovery is going to see the fork as owned by the swapped inode,
	 * not the pre-swapped inodes.
	 */
	src_log_flags = XFS_ILOG_CORE;
	target_log_flags = XFS_ILOG_CORE;

	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
		error = xfs_swap_extent_rmap(&tp, ip, tip);
	else
		error = xfs_swap_extent_forks(tp, ip, tip, &src_log_flags,
				&target_log_flags);
	if (error)
		goto out_trans_cancel;

	/* Do we have to swap reflink flags? */
	if ((ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK) ^
	    (tip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK)) {
		f = ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK;
		ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
		ip->i_d.di_flags2 |= tip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK;
		tip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
		tip->i_d.di_flags2 |= f & XFS_DIFLAG2_REFLINK;
		cowfp = ip->i_cowfp;
		ip->i_cowfp = tip->i_cowfp;
		tip->i_cowfp = cowfp;
		xfs_inode_set_cowblocks_tag(ip);
		xfs_inode_set_cowblocks_tag(tip);
	}

	xfs_trans_log_inode(tp, ip, src_log_flags);
	xfs_trans_log_inode(tp, tip, target_log_flags);

	/*
	 * If this is a synchronous mount, make sure that the
	 * transaction goes to disk before returning to the user.
	 */
	if (mp->m_flags & XFS_MOUNT_WSYNC)
		xfs_trans_set_sync(tp);

	error = xfs_trans_commit(tp);

	trace_xfs_swap_extent_after(ip, 0);
	trace_xfs_swap_extent_after(tip, 1);

out_unlock:
	xfs_iunlock(ip, lock_flags);
	xfs_iunlock(tip, lock_flags);
	unlock_two_nondirectories(VFS_I(ip), VFS_I(tip));
	return error;

out_trans_cancel:
	xfs_trans_cancel(tp);
	goto out_unlock;
}