/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2012 Red Hat, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_extfree_item.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_log.h"
#include "xfs_rmap_btree.h"
/* Kernel only BMAP related definitions and functions */

/*
 * Convert the given file system block to a disk block.  We have to treat it
 * differently based on whether the file is a real time file or not, because
 * the bmap code does.
 */
xfs_daddr_t
xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
{
        return (XFS_IS_REALTIME_INODE(ip) ? \
                 (xfs_daddr_t)XFS_FSB_TO_BB((ip)->i_mount, (fsb)) : \
                 XFS_FSB_TO_DADDR((ip)->i_mount, (fsb)));
}
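
/*
 * Illustrative example (not part of the original source): on a filesystem
 * with 4096-byte blocks (sb_blocklog == 12), XFS_FSB_TO_BB() converts a
 * realtime fsb to 512-byte basic blocks with a shift of (12 - 9), so fsb 10
 * maps to daddr 80.  Data device fsbs are sparse (the AG number lives in
 * the high bits), so they must go through XFS_FSB_TO_DADDR() instead, which
 * linearises the AG number and the in-AG block offset before converting.
 */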

/*
 * Routine to zero an extent on disk allocated to the specific inode.
 *
 * The VFS functions take a linearised filesystem block offset, so we have to
 * convert the sparse xfs fsb to the right format first.
 * VFS types are real funky, too.
 */
int
xfs_zero_extent(
        struct xfs_inode *ip,
        xfs_fsblock_t   start_fsb,
        xfs_off_t       count_fsb)
{
        struct xfs_mount *mp = ip->i_mount;
        xfs_daddr_t     sector = xfs_fsb_to_db(ip, start_fsb);
        sector_t        block = XFS_BB_TO_FSBT(mp, sector);

        return blkdev_issue_zeroout(xfs_find_bdev_for_inode(VFS_I(ip)),
                block << (mp->m_super->s_blocksize_bits - 9),
                count_fsb << (mp->m_super->s_blocksize_bits - 9),
                GFP_NOFS, true);
}
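
/*
 * Note on the unit conversion above: blkdev_issue_zeroout() takes 512-byte
 * sectors, so both the start block and the length are shifted up by
 * (s_blocksize_bits - 9).  E.g. with 4096-byte blocks that is a shift of 3:
 * one fsb becomes eight 512-byte sectors.
 */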

int
xfs_bmap_rtalloc(
        struct xfs_bmalloca     *ap)    /* bmap alloc argument struct */
{
        xfs_alloctype_t atype = 0;      /* type for allocation routines */
        int             error;          /* error return value */
        xfs_mount_t     *mp;            /* mount point structure */
        xfs_extlen_t    prod = 0;       /* product factor for allocators */
        xfs_extlen_t    ralen = 0;      /* realtime allocation length */
        xfs_extlen_t    align;          /* minimum allocation alignment */
        xfs_rtblock_t   rtb;

        mp = ap->ip->i_mount;
        align = xfs_get_extsz_hint(ap->ip);
        prod = align / mp->m_sb.sb_rextsize;
        error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
                                        align, 1, ap->eof, 0,
                                        ap->conv, &ap->offset, &ap->length);
        if (error)
                return error;
        ASSERT(ap->length);
        ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);

        /*
         * If the offset & length are not perfectly aligned
         * then kill prod, it will just get us in trouble.
         */
        if (do_mod(ap->offset, align) || ap->length % align)
                prod = 1;
        /*
         * Set ralen to be the actual requested length in rtextents.
         */
        ralen = ap->length / mp->m_sb.sb_rextsize;
        /*
         * If the old value was close enough to MAXEXTLEN that
         * we rounded up to it, cut it back so it's valid again.
         * Note that if it's a really large request (bigger than
         * MAXEXTLEN), we don't hear about that number, and can't
         * adjust the starting point to match it.
         */
        if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
                ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;

        /*
         * Lock out modifications to both the RT bitmap and summary inodes
         */
        xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
        xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);
        xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
        xfs_trans_ijoin(ap->tp, mp->m_rsumip, XFS_ILOCK_EXCL);

        /*
         * If it's an allocation to an empty file at offset 0,
         * pick an extent that will space things out in the rt area.
         */
        if (ap->eof && ap->offset == 0) {
                xfs_rtblock_t uninitialized_var(rtx);   /* realtime extent no */

                error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
                if (error)
                        return error;
                ap->blkno = rtx * mp->m_sb.sb_rextsize;
        } else {
                ap->blkno = 0;
        }

        xfs_bmap_adjacent(ap);

        /*
         * Realtime allocation, done through xfs_rtallocate_extent.
         */
        atype = ap->blkno == 0 ?  XFS_ALLOCTYPE_ANY_AG : XFS_ALLOCTYPE_NEAR_BNO;
        do_div(ap->blkno, mp->m_sb.sb_rextsize);
        rtb = ap->blkno;
        ap->length = ralen;
        if ((error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1, ap->length,
                                &ralen, atype, ap->wasdel, prod, &rtb)))
                return error;
        if (rtb == NULLFSBLOCK && prod > 1 &&
            (error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1,
                                           ap->length, &ralen, atype,
                                           ap->wasdel, 1, &rtb)))
                return error;
        ap->blkno = rtb;
        if (ap->blkno != NULLFSBLOCK) {
                ap->blkno *= mp->m_sb.sb_rextsize;
                ralen *= mp->m_sb.sb_rextsize;
                ap->length = ralen;
                ap->ip->i_d.di_nblocks += ralen;
                xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
                if (ap->wasdel)
                        ap->ip->i_delayed_blks -= ralen;
                /*
                 * Adjust the disk quota also. This was reserved
                 * earlier.
                 */
                xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
                        ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
                                        XFS_TRANS_DQ_RTBCOUNT, (long) ralen);

                /* Zero the extent if we were asked to do so */
                if (ap->userdata & XFS_ALLOC_USERDATA_ZERO) {
                        error = xfs_zero_extent(ap->ip, ap->blkno, ap->length);
                        if (error)
                                return error;
                }
        } else {
                ap->length = 0;
        }
        return 0;
}
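
/*
 * Worked example for the realtime unit handling above (illustrative): with
 * sb_rextsize == 4 fsbs and an aligned 64-fsb request, ralen is 16 realtime
 * extents going into xfs_rtallocate_extent(), and the result is scaled back
 * up by sb_rextsize before being stored in ap->blkno/ap->length, which are
 * in fsb units.
 */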

/*
 * Check if the endoff is outside the last extent. If so the caller will grow
 * the allocation to a stripe unit boundary.  All offsets are considered
 * outside the end of file for an empty fork, so 1 is returned in *eof in
 * that case.
 */
int
xfs_bmap_eof(
        struct xfs_inode        *ip,
        xfs_fileoff_t           endoff,
        int                     whichfork,
        int                     *eof)
{
        struct xfs_bmbt_irec    rec;
        int                     error;

        error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, eof);
        if (error || *eof)
                return error;

        *eof = endoff >= rec.br_startoff + rec.br_blockcount;
        return 0;
}

/*
 * Extent tree block counting routines.
 */

/*
 * Count leaf blocks given a range of extent records.
 */
STATIC void
xfs_bmap_count_leaves(
        xfs_ifork_t             *ifp,
        xfs_extnum_t            idx,
        int                     numrecs,
        int                     *count)
{
        int             b;

        for (b = 0; b < numrecs; b++) {
                xfs_bmbt_rec_host_t *frp = xfs_iext_get_ext(ifp, idx + b);
                *count += xfs_bmbt_get_blockcount(frp);
        }
}

/*
 * Count leaf blocks given a range of extent records originally
 * in btree format.
 */
STATIC void
xfs_bmap_disk_count_leaves(
        struct xfs_mount        *mp,
        struct xfs_btree_block  *block,
        int                     numrecs,
        int                     *count)
{
        int             b;
        xfs_bmbt_rec_t  *frp;

        for (b = 1; b <= numrecs; b++) {
                frp = XFS_BMBT_REC_ADDR(mp, block, b);
                *count += xfs_bmbt_disk_get_blockcount(frp);
        }
}
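
/*
 * Note the indexing difference between the two leaf counters above: incore
 * extent records (xfs_bmap_count_leaves) are indexed from 0, while on-disk
 * btree records (xfs_bmap_disk_count_leaves) are indexed from 1, which is
 * why the two loops start at different values.
 */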

/*
 * Recursively walks each level of a btree
 * to count total fsblocks in use.
 */
STATIC int                                      /* error */
xfs_bmap_count_tree(
        xfs_mount_t     *mp,            /* file system mount point */
        xfs_trans_t     *tp,            /* transaction pointer */
        xfs_ifork_t     *ifp,           /* inode fork pointer */
        xfs_fsblock_t   blockno,        /* file system block number */
        int             levelin,        /* level in btree */
        int             *count)         /* Count of blocks */
{
        int                     error;
        xfs_buf_t               *bp, *nbp;
        int                     level = levelin;
        __be64                  *pp;
        xfs_fsblock_t           bno = blockno;
        xfs_fsblock_t           nextbno;
        struct xfs_btree_block  *block, *nextblock;
        int                     numrecs;

        error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, XFS_BMAP_BTREE_REF,
                                                &xfs_bmbt_buf_ops);
        if (error)
                return error;
        *count += 1;
        block = XFS_BUF_TO_BLOCK(bp);

        if (--level) {
                /* Not at node above leaves, count this level of nodes */
                nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
                while (nextbno != NULLFSBLOCK) {
                        error = xfs_btree_read_bufl(mp, tp, nextbno, 0, &nbp,
                                                XFS_BMAP_BTREE_REF,
                                                &xfs_bmbt_buf_ops);
                        if (error)
                                return error;
                        *count += 1;
                        nextblock = XFS_BUF_TO_BLOCK(nbp);
                        nextbno = be64_to_cpu(nextblock->bb_u.l.bb_rightsib);
                        xfs_trans_brelse(tp, nbp);
                }

                /* Dive to the next level */
                pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
                bno = be64_to_cpu(*pp);
                if (unlikely((error =
                     xfs_bmap_count_tree(mp, tp, ifp, bno, level, count)) < 0)) {
                        xfs_trans_brelse(tp, bp);
                        XFS_ERROR_REPORT("xfs_bmap_count_tree(1)",
                                         XFS_ERRLEVEL_LOW, mp);
                        return -EFSCORRUPTED;
                }
                xfs_trans_brelse(tp, bp);
        } else {
                /* count all level 1 nodes and their leaves */
                for (;;) {
                        nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
                        numrecs = be16_to_cpu(block->bb_numrecs);
                        xfs_bmap_disk_count_leaves(mp, block, numrecs, count);
                        xfs_trans_brelse(tp, bp);
                        if (nextbno == NULLFSBLOCK)
                                break;
                        bno = nextbno;
                        error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
                                                XFS_BMAP_BTREE_REF,
                                                &xfs_bmbt_buf_ops);
                        if (error)
                                return error;
                        *count += 1;
                        block = XFS_BUF_TO_BLOCK(bp);
                }
        }
        return 0;
}

/*
 * Count fsblocks of the given fork.
 */
static int                                      /* error */
xfs_bmap_count_blocks(
        xfs_trans_t             *tp,            /* transaction pointer */
        xfs_inode_t             *ip,            /* incore inode */
        int                     whichfork,      /* data or attr fork */
        int                     *count)         /* out: count of blocks */
{
        struct xfs_btree_block  *block; /* current btree block */
        xfs_fsblock_t           bno;    /* block # of "block" */
        xfs_ifork_t             *ifp;   /* fork structure */
        int                     level;  /* btree level, for checking */
        xfs_mount_t             *mp;    /* file system mount structure */
        __be64                  *pp;    /* pointer to block address */

        bno = NULLFSBLOCK;
        mp = ip->i_mount;
        ifp = XFS_IFORK_PTR(ip, whichfork);
        if ( XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ) {
                xfs_bmap_count_leaves(ifp, 0,
                        ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t),
                        count);
                return 0;
        }

        /*
         * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
         */
        block = ifp->if_broot;
        level = be16_to_cpu(block->bb_level);
        ASSERT(level > 0);
        pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
        bno = be64_to_cpu(*pp);
        ASSERT(bno != NULLFSBLOCK);
        ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
        ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);

        if (unlikely(xfs_bmap_count_tree(mp, tp, ifp, bno, level, count) < 0)) {
                XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)", XFS_ERRLEVEL_LOW,
                                 mp);
                return -EFSCORRUPTED;
        }

        return 0;
}

/*
 * returns 1 for success, 0 if we failed to map the extent.
 */
STATIC int
xfs_getbmapx_fix_eof_hole(
        xfs_inode_t             *ip,            /* xfs incore inode pointer */
        struct getbmapx         *out,           /* output structure */
        int                     prealloced,     /* this is a file with
                                                 * preallocated data space */
        __int64_t               end,            /* last block requested */
        xfs_fsblock_t           startblock)
{
        __int64_t               fixlen;
        xfs_mount_t             *mp;            /* file system mount point */
        xfs_ifork_t             *ifp;           /* inode fork pointer */
        xfs_extnum_t            lastx;          /* last extent pointer */
        xfs_fileoff_t           fileblock;

        if (startblock == HOLESTARTBLOCK) {
                mp = ip->i_mount;
                out->bmv_block = -1;
                fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
                fixlen -= out->bmv_offset;
                if (prealloced && out->bmv_offset + out->bmv_length == end) {
                        /* Came to hole at EOF. Trim it. */
                        if (fixlen <= 0)
                                return 0;
                        out->bmv_length = fixlen;
                }
        } else {
                if (startblock == DELAYSTARTBLOCK)
                        out->bmv_block = -2;
                else
                        out->bmv_block = xfs_fsb_to_db(ip, startblock);
                fileblock = XFS_BB_TO_FSB(ip->i_mount, out->bmv_offset);
                ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
                if (xfs_iext_bno_to_ext(ifp, fileblock, &lastx) &&
                   (lastx == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))-1))
                        out->bmv_oflags |= BMV_OF_LAST;
        }

        return 1;
}
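
/*
 * bmv_block sentinel values used above, for reference: -1 marks a hole and
 * -2 marks a delalloc extent, matching the getbmapx ABI; anything else is a
 * real disk address in 512-byte basic blocks.
 */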

/*
 * Get inode's extents as described in bmv, and format for output.
 * Calls formatter to fill the user's buffer until all extents
 * are mapped, until the passed-in bmv->bmv_count slots have
 * been filled, or until the formatter short-circuits the loop,
 * if it is tracking filled-in extents on its own.
 */
int                                             /* error code */
xfs_getbmap(
        xfs_inode_t             *ip,
        struct getbmapx         *bmv,           /* user bmap structure */
        xfs_bmap_format_t       formatter,      /* format to user */
        void                    *arg)           /* formatter arg */
{
        __int64_t               bmvend;         /* last block requested */
        int                     error = 0;      /* return value */
        __int64_t               fixlen;         /* length for -1 case */
        int                     i;              /* extent number */
        int                     lock;           /* lock state */
        xfs_bmbt_irec_t         *map;           /* buffer for user's data */
        xfs_mount_t             *mp;            /* file system mount point */
        int                     nex;            /* # of user extents can do */
        int                     nexleft;        /* # of user extents left */
        int                     subnex;         /* # of bmapi's can do */
        int                     nmap;           /* number of map entries */
        struct getbmapx         *out;           /* output structure */
        int                     whichfork;      /* data or attr fork */
        int                     prealloced;     /* this is a file with
                                                 * preallocated data space */
        int                     iflags;         /* interface flags */
        int                     bmapi_flags;    /* flags for xfs_bmapi */
        int                     cur_ext = 0;

        mp = ip->i_mount;
        iflags = bmv->bmv_iflags;
        whichfork = iflags & BMV_IF_ATTRFORK ? XFS_ATTR_FORK : XFS_DATA_FORK;

        if (whichfork == XFS_ATTR_FORK) {
                if (XFS_IFORK_Q(ip)) {
                        if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS &&
                            ip->i_d.di_aformat != XFS_DINODE_FMT_BTREE &&
                            ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)
                                return -EINVAL;
                } else if (unlikely(
                           ip->i_d.di_aformat != 0 &&
                           ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS)) {
                        XFS_ERROR_REPORT("xfs_getbmap", XFS_ERRLEVEL_LOW,
                                         ip->i_mount);
                        return -EFSCORRUPTED;
                }

                prealloced = 0;
                fixlen = 1LL << 32;
        } else {
                if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
                    ip->i_d.di_format != XFS_DINODE_FMT_BTREE &&
                    ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
                        return -EINVAL;

                if (xfs_get_extsz_hint(ip) ||
                    ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)){
                        prealloced = 1;
                        fixlen = mp->m_super->s_maxbytes;
                } else {
                        prealloced = 0;
                        fixlen = XFS_ISIZE(ip);
                }
        }

        if (bmv->bmv_length == -1) {
                fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, fixlen));
                bmv->bmv_length =
                        max_t(__int64_t, fixlen - bmv->bmv_offset, 0);
        } else if (bmv->bmv_length == 0) {
                bmv->bmv_entries = 0;
                return 0;
        } else if (bmv->bmv_length < 0) {
                return -EINVAL;
        }

        nex = bmv->bmv_count - 1;
        if (nex <= 0)
                return -EINVAL;
        bmvend = bmv->bmv_offset + bmv->bmv_length;

        if (bmv->bmv_count > ULONG_MAX / sizeof(struct getbmapx))
                return -ENOMEM;
        out = kmem_zalloc_large(bmv->bmv_count * sizeof(struct getbmapx), 0);
        if (!out)
                return -ENOMEM;

        xfs_ilock(ip, XFS_IOLOCK_SHARED);
        if (whichfork == XFS_DATA_FORK) {
                if (!(iflags & BMV_IF_DELALLOC) &&
                    (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size)) {
                        error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
                        if (error)
                                goto out_unlock_iolock;

                        /*
                         * Even after flushing the inode, there can still be
                         * delalloc blocks on the inode beyond EOF due to
                         * speculative preallocation.  These are not removed
                         * until the release function is called or the inode
                         * is inactivated.  Hence we cannot assert here that
                         * ip->i_delayed_blks == 0.
                         */
                }

                lock = xfs_ilock_data_map_shared(ip);
        } else {
                lock = xfs_ilock_attr_map_shared(ip);
        }

        /*
         * Don't let nex be bigger than the number of extents
         * we can have assuming alternating holes and real extents.
         */
        if (nex > XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1)
                nex = XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1;

        bmapi_flags = xfs_bmapi_aflag(whichfork);
        if (!(iflags & BMV_IF_PREALLOC))
                bmapi_flags |= XFS_BMAPI_IGSTATE;

        /*
         * Allocate enough space to handle "subnex" maps at a time.
         */
        error = -ENOMEM;
        subnex = 16;
        map = kmem_alloc(subnex * sizeof(*map), KM_MAYFAIL | KM_NOFS);
        if (!map)
                goto out_unlock_ilock;

        bmv->bmv_entries = 0;

        if (XFS_IFORK_NEXTENTS(ip, whichfork) == 0 &&
            (whichfork == XFS_ATTR_FORK || !(iflags & BMV_IF_DELALLOC))) {
                error = 0;
                goto out_free_map;
        }

        nexleft = nex;

        do {
                nmap = (nexleft > subnex) ? subnex : nexleft;
                error = xfs_bmapi_read(ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset),
                                       XFS_BB_TO_FSB(mp, bmv->bmv_length),
                                       map, &nmap, bmapi_flags);
                if (error)
                        goto out_free_map;
                ASSERT(nmap <= subnex);

                for (i = 0; i < nmap && nexleft && bmv->bmv_length; i++) {
                        out[cur_ext].bmv_oflags = 0;
                        if (map[i].br_state == XFS_EXT_UNWRITTEN)
                                out[cur_ext].bmv_oflags |= BMV_OF_PREALLOC;
                        else if (map[i].br_startblock == DELAYSTARTBLOCK)
                                out[cur_ext].bmv_oflags |= BMV_OF_DELALLOC;
                        out[cur_ext].bmv_offset =
                                XFS_FSB_TO_BB(mp, map[i].br_startoff);
                        out[cur_ext].bmv_length =
                                XFS_FSB_TO_BB(mp, map[i].br_blockcount);
                        out[cur_ext].bmv_unused1 = 0;
                        out[cur_ext].bmv_unused2 = 0;

                        /*
                         * delayed allocation extents that start beyond EOF can
                         * occur due to speculative EOF allocation when the
                         * delalloc extent is larger than the largest freespace
                         * extent at conversion time. These extents cannot be
                         * converted by data writeback, so can exist here even
                         * if we are not supposed to be finding delalloc
                         * extents.
                         */
                        if (map[i].br_startblock == DELAYSTARTBLOCK &&
                            map[i].br_startoff <= XFS_B_TO_FSB(mp, XFS_ISIZE(ip)))
                                ASSERT((iflags & BMV_IF_DELALLOC) != 0);

                        if (map[i].br_startblock == HOLESTARTBLOCK &&
                            whichfork == XFS_ATTR_FORK) {
                                /* came to the end of attribute fork */
                                out[cur_ext].bmv_oflags |= BMV_OF_LAST;
                                goto out_free_map;
                        }

                        if (!xfs_getbmapx_fix_eof_hole(ip, &out[cur_ext],
                                        prealloced, bmvend,
                                        map[i].br_startblock))
                                goto out_free_map;

                        bmv->bmv_offset =
                                out[cur_ext].bmv_offset +
                                out[cur_ext].bmv_length;
                        bmv->bmv_length =
                                max_t(__int64_t, 0, bmvend - bmv->bmv_offset);

                        /*
                         * In case we don't want to return the hole,
                         * don't increase cur_ext so that we can reuse
                         * it in the next loop.
                         */
                        if ((iflags & BMV_IF_NO_HOLES) &&
                            map[i].br_startblock == HOLESTARTBLOCK) {
                                memset(&out[cur_ext], 0, sizeof(out[cur_ext]));
                                continue;
                        }

                        nexleft--;
                        bmv->bmv_entries++;
                        cur_ext++;
                }
        } while (nmap && nexleft && bmv->bmv_length);

 out_free_map:
        kmem_free(map);
 out_unlock_ilock:
        xfs_iunlock(ip, lock);
 out_unlock_iolock:
        xfs_iunlock(ip, XFS_IOLOCK_SHARED);

        for (i = 0; i < cur_ext; i++) {
                int full = 0;   /* user array is full */

                /* format results & advance arg */
                error = formatter(&arg, &out[i], &full);
                if (error || full)
                        break;
        }

        kmem_free(out);
        return error;
}
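
/*
 * Units reminder for the function above: the getbmapx ABI works in 512-byte
 * basic blocks (hence the XFS_FSB_TO_BB()/XFS_BB_TO_FSB() conversions at
 * every boundary), while xfs_bmapi_read() works in filesystem blocks.
 */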

/*
 * dead simple method of punching delayed allocation blocks from a range in
 * the inode. Walks a block at a time so will be slow, but is only executed in
 * rare error cases so the overhead is not critical. This will always punch out
 * both the start and end blocks, even if the ranges only partially overlap
 * them, so it is up to the caller to ensure that partial blocks are not
 * passed in.
 */
int
xfs_bmap_punch_delalloc_range(
        struct xfs_inode        *ip,
        xfs_fileoff_t           start_fsb,
        xfs_fileoff_t           length)
{
        xfs_fileoff_t           remaining = length;
        int                     error = 0;

        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

        do {
                int             done;
                xfs_bmbt_irec_t imap;
                int             nimaps = 1;
                xfs_fsblock_t   firstblock;
                struct xfs_defer_ops dfops;

                /*
                 * Map the range first and check that it is a delalloc extent
                 * before trying to unmap the range. Otherwise we will be
                 * trying to remove a real extent (which requires a
                 * transaction) or a hole, which is probably a bad idea...
                 */
                error = xfs_bmapi_read(ip, start_fsb, 1, &imap, &nimaps,
                                       XFS_BMAPI_ENTIRE);

                if (error) {
                        /* something screwed, just bail */
                        if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
                                xfs_alert(ip->i_mount,
                        "Failed delalloc mapping lookup ino %lld fsb %lld.",
                                                ip->i_ino, start_fsb);
                        }
                        break;
                }
                if (!nimaps) {
                        /* nothing there */
                        goto next_block;
                }
                if (imap.br_startblock != DELAYSTARTBLOCK) {
                        /* been converted, ignore */
                        goto next_block;
                }
                WARN_ON(imap.br_blockcount == 0);

                /*
                 * Note: while we initialise the firstblock/dfops pair, they
                 * should never be used because blocks should never be
                 * allocated or freed for a delalloc extent and hence we don't
                 * need to cancel or finish them after the xfs_bunmapi() call.
                 */
                xfs_defer_init(&dfops, &firstblock);
                error = xfs_bunmapi(NULL, ip, start_fsb, 1, 0, 1, &firstblock,
                                        &dfops, &done);
                if (error)
                        break;

                ASSERT(!xfs_defer_has_unfinished_work(&dfops));
next_block:
                start_fsb++;
                remaining--;
        } while(remaining > 0);

        return error;
}
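
/*
 * Note: the block-at-a-time loop above issues one xfs_bmapi_read() and at
 * most one xfs_bunmapi() per fsb in the range; that linear cost is
 * acceptable because, per the comment above, this path only runs in rare
 * error cases.
 */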

/*
 * Test whether it is appropriate to check an inode for and free post EOF
 * blocks. The 'force' parameter determines whether we should also consider
 * regular files that are marked preallocated or append-only.
 */
bool
xfs_can_free_eofblocks(struct xfs_inode *ip, bool force)
{
        /* prealloc/delalloc exists only on regular files */
        if (!S_ISREG(VFS_I(ip)->i_mode))
                return false;

        /*
         * Zero sized files with no cached pages and delalloc blocks will not
         * have speculative prealloc/delalloc blocks to remove.
         */
        if (VFS_I(ip)->i_size == 0 &&
            VFS_I(ip)->i_mapping->nrpages == 0 &&
            ip->i_delayed_blks == 0)
                return false;

        /* If we haven't read in the extent list, then don't do it now. */
        if (!(ip->i_df.if_flags & XFS_IFEXTENTS))
                return false;

        /*
         * Do not free real preallocated or append-only files unless the file
         * has delalloc blocks and we are forced to remove them.
         */
        if (ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
                if (!force || ip->i_delayed_blks == 0)
                        return false;

        return true;
}

/*
 * This is called by xfs_inactive to free any blocks beyond eof
 * when the link count isn't zero and by xfs_dm_punch_hole() when
 * punching a hole to EOF.
 */
int
xfs_free_eofblocks(
        xfs_mount_t     *mp,
        xfs_inode_t     *ip,
        bool            need_iolock)
{
        xfs_trans_t     *tp;
        int             error;
        xfs_fileoff_t   end_fsb;
        xfs_fileoff_t   last_fsb;
        xfs_filblks_t   map_len;
        int             nimaps;
        xfs_bmbt_irec_t imap;

        /*
         * Figure out if there are any blocks beyond the end
         * of the file.  If not, then there is nothing to do.
         */
        end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
        last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
        if (last_fsb <= end_fsb)
                return 0;
        map_len = last_fsb - end_fsb;

        nimaps = 1;
        xfs_ilock(ip, XFS_ILOCK_SHARED);
        error = xfs_bmapi_read(ip, end_fsb, map_len, &imap, &nimaps, 0);
        xfs_iunlock(ip, XFS_ILOCK_SHARED);

        if (!error && (nimaps != 0) &&
            (imap.br_startblock != HOLESTARTBLOCK ||
             ip->i_delayed_blks)) {
                /*
                 * Attach the dquots to the inode up front.
                 */
                error = xfs_qm_dqattach(ip, 0);
                if (error)
                        return error;

                /*
                 * There are blocks after the end of file.
                 * Free them up now by truncating the file to
                 * its current size.
                 */
                if (need_iolock) {
                        if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL))
                                return -EAGAIN;
                }

                error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0,
                                &tp);
                if (error) {
                        ASSERT(XFS_FORCED_SHUTDOWN(mp));
                        if (need_iolock)
                                xfs_iunlock(ip, XFS_IOLOCK_EXCL);
                        return error;
                }

                xfs_ilock(ip, XFS_ILOCK_EXCL);
                xfs_trans_ijoin(tp, ip, 0);

                /*
                 * Do not update the on-disk file size.  If we update the
                 * on-disk file size and then the system crashes before the
                 * contents of the file are flushed to disk then the files
                 * may be full of holes (ie NULL files bug).
                 */
                error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK,
                                              XFS_ISIZE(ip));
                if (error) {
                        /*
                         * If we get an error at this point we simply don't
                         * bother truncating the file.
                         */
                        xfs_trans_cancel(tp);
                } else {
                        error = xfs_trans_commit(tp);
                        if (!error)
                                xfs_inode_clear_eofblocks_tag(ip);
                }

                xfs_iunlock(ip, XFS_ILOCK_EXCL);
                if (need_iolock)
                        xfs_iunlock(ip, XFS_IOLOCK_EXCL);
        }
        return error;
}

int
xfs_alloc_file_space(
        struct xfs_inode        *ip,
        xfs_off_t               offset,
        xfs_off_t               len,
        int                     alloc_type)
{
        xfs_mount_t             *mp = ip->i_mount;
        xfs_off_t               count;
        xfs_filblks_t           allocated_fsb;
        xfs_filblks_t           allocatesize_fsb;
        xfs_extlen_t            extsz, temp;
        xfs_fileoff_t           startoffset_fsb;
        xfs_fsblock_t           firstfsb;
        int                     nimaps;
        int                     quota_flag;
        int                     rt;
        xfs_trans_t             *tp;
        xfs_bmbt_irec_t         imaps[1], *imapp;
        struct xfs_defer_ops    dfops;
        uint                    qblocks, resblks, resrtextents;
        int                     error;

        trace_xfs_alloc_file_space(ip);

        if (XFS_FORCED_SHUTDOWN(mp))
                return -EIO;

        error = xfs_qm_dqattach(ip, 0);
        if (error)
                return error;

        if (len <= 0)
                return -EINVAL;

        rt = XFS_IS_REALTIME_INODE(ip);
        extsz = xfs_get_extsz_hint(ip);

        count = len;
        imapp = &imaps[0];
        nimaps = 1;
        startoffset_fsb = XFS_B_TO_FSBT(mp, offset);
        allocatesize_fsb = XFS_B_TO_FSB(mp, count);

        /*
         * Allocate file space until done or until there is an error
         */
        while (allocatesize_fsb && !error) {
                xfs_fileoff_t   s, e;

                /*
                 * Determine space reservations for data/realtime.
                 */
                if (unlikely(extsz)) {
                        s = startoffset_fsb;
                        do_div(s, extsz);
                        s *= extsz;
                        e = startoffset_fsb + allocatesize_fsb;
                        if ((temp = do_mod(startoffset_fsb, extsz)))
                                e += temp;
                        if ((temp = do_mod(e, extsz)))
                                e += extsz - temp;
                } else {
                        s = 0;
                        e = allocatesize_fsb;
                }

                /*
                 * The transaction reservation is limited to a 32-bit block
                 * count, hence we need to limit the number of blocks we are
                 * trying to reserve to avoid an overflow. We can't allocate
                 * more than @nimaps extents, and an extent is limited on disk
                 * to MAXEXTLEN (21 bits), so use that to enforce the limit.
                 */
                resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps));
                if (unlikely(rt)) {
                        resrtextents = qblocks = resblks;
                        resrtextents /= mp->m_sb.sb_rextsize;
                        resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
                        quota_flag = XFS_QMOPT_RES_RTBLKS;
                } else {
                        resrtextents = 0;
                        resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
                        quota_flag = XFS_QMOPT_RES_REGBLKS;
                }

                /*
                 * Allocate and setup the transaction.
                 */
                error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks,
                                resrtextents, 0, &tp);

                /*
                 * Check for running out of space
                 */
                if (error) {
                        /*
                         * Free the transaction structure.
                         */
                        ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
                        break;
                }
                xfs_ilock(ip, XFS_ILOCK_EXCL);
                error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks,
                                                      0, quota_flag);
                if (error)
                        goto error1;

                xfs_trans_ijoin(tp, ip, 0);

                xfs_defer_init(&dfops, &firstfsb);
                error = xfs_bmapi_write(tp, ip, startoffset_fsb,
                                        allocatesize_fsb, alloc_type, &firstfsb,
                                        resblks, imapp, &nimaps, &dfops);
                if (error)
                        goto error0;

                /*
                 * Complete the transaction
                 */
                error = xfs_defer_finish(&tp, &dfops, NULL);
                if (error)
                        goto error0;

                error = xfs_trans_commit(tp);
                xfs_iunlock(ip, XFS_ILOCK_EXCL);
                if (error)
                        break;

                allocated_fsb = imapp->br_blockcount;

                if (nimaps == 0) {
                        error = -ENOSPC;
                        break;
                }

                startoffset_fsb += allocated_fsb;
                allocatesize_fsb -= allocated_fsb;
        }

        return error;

error0: /* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
        xfs_defer_cancel(&dfops);
        xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);

error1: /* Just cancel transaction */
        xfs_trans_cancel(tp);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        return error;
}

static int
xfs_unmap_extent(
        struct xfs_inode        *ip,
        xfs_fileoff_t           startoffset_fsb,
        xfs_filblks_t           len_fsb,
        int                     *done)
{
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_trans        *tp;
        struct xfs_defer_ops    dfops;
        xfs_fsblock_t           firstfsb;
        uint                    resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
        int                     error;

        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
        if (error) {
                ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
                return error;
        }

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot, ip->i_gdquot,
                        ip->i_pdquot, resblks, 0, XFS_QMOPT_RES_REGBLKS);
        if (error)
                goto out_trans_cancel;

        xfs_trans_ijoin(tp, ip, 0);

        xfs_defer_init(&dfops, &firstfsb);
        error = xfs_bunmapi(tp, ip, startoffset_fsb, len_fsb, 0, 2, &firstfsb,
                        &dfops, done);
        if (error)
                goto out_bmap_cancel;

        error = xfs_defer_finish(&tp, &dfops, ip);
        if (error)
                goto out_bmap_cancel;

        error = xfs_trans_commit(tp);
out_unlock:
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        return error;

out_bmap_cancel:
        xfs_defer_cancel(&dfops);
out_trans_cancel:
        xfs_trans_cancel(tp);
        goto out_unlock;
}

static int
xfs_adjust_extent_unmap_boundaries(
        struct xfs_inode        *ip,
        xfs_fileoff_t           *startoffset_fsb,
        xfs_fileoff_t           *endoffset_fsb)
{
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_bmbt_irec    imap;
        int                     nimap, error;
        xfs_extlen_t            mod = 0;

        nimap = 1;
        error = xfs_bmapi_read(ip, *startoffset_fsb, 1, &imap, &nimap, 0);
        if (error)
                return error;

        if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
                xfs_daddr_t     block;

                ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
                block = imap.br_startblock;
                mod = do_div(block, mp->m_sb.sb_rextsize);
                if (mod)
                        *startoffset_fsb += mp->m_sb.sb_rextsize - mod;
        }

        nimap = 1;
        error = xfs_bmapi_read(ip, *endoffset_fsb - 1, 1, &imap, &nimap, 0);
        if (error)
                return error;

        if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
                ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
                mod++;
                if (mod && mod != mp->m_sb.sb_rextsize)
                        *endoffset_fsb -= mod;
        }

        return 0;
}
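
/*
 * Illustrative example of the start adjustment above: with sb_rextsize == 4,
 * a punch whose first mapped block lands two fsbs into a realtime extent has
 * its start rounded up by (4 - 2), so xfs_bunmapi() is never asked to split
 * a realtime extent that cannot be split.
 */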

static int
xfs_flush_unmap_range(
        struct xfs_inode        *ip,
        xfs_off_t               offset,
        xfs_off_t               len)
{
        struct xfs_mount        *mp = ip->i_mount;
        struct inode            *inode = VFS_I(ip);
        xfs_off_t               rounding, start, end;
        int                     error;

        /* wait for the completion of any pending DIOs */
        inode_dio_wait(inode);

        rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_SIZE);
        start = round_down(offset, rounding);
        end = round_up(offset + len, rounding) - 1;

        error = filemap_write_and_wait_range(inode->i_mapping, start, end);
        if (error)
                return error;
        truncate_pagecache_range(inode, start, end);
        return 0;
}
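
/*
 * The rounding above widens the flush/invalidate window to the larger of
 * the filesystem block size and PAGE_SIZE, e.g. a 1k-block filesystem on a
 * 4k-page machine still writes back and truncates whole pages.
 */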

int
xfs_free_file_space(
        struct xfs_inode        *ip,
        xfs_off_t               offset,
        xfs_off_t               len)
{
        struct xfs_mount        *mp = ip->i_mount;
        xfs_fileoff_t           startoffset_fsb;
        xfs_fileoff_t           endoffset_fsb;
        int                     done = 0, error;

        trace_xfs_free_file_space(ip);

        error = xfs_qm_dqattach(ip, 0);
        if (error)
                return error;

        if (len <= 0)   /* if nothing being freed */
                return 0;

        error = xfs_flush_unmap_range(ip, offset, len);
        if (error)
                return error;

        startoffset_fsb = XFS_B_TO_FSB(mp, offset);
        endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);

        /*
         * Need to zero the stuff we're not freeing, on disk.  If it's a RT
         * file and we can't use unwritten extents then we actually need to
         * ensure we zero the whole extent, otherwise we just need to take
         * care of the block boundaries, and xfs_bunmapi will handle the rest.
         */
        if (XFS_IS_REALTIME_INODE(ip) &&
            !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
                error = xfs_adjust_extent_unmap_boundaries(ip, &startoffset_fsb,
                                &endoffset_fsb);
                if (error)
                        return error;
        }

        if (endoffset_fsb > startoffset_fsb) {
                while (!done) {
                        error = xfs_unmap_extent(ip, startoffset_fsb,
                                        endoffset_fsb - startoffset_fsb, &done);
                        if (error)
                                return error;
                }
        }

        /*
         * Now that we've unmapped all full blocks we'll have to zero out any
         * partial block at the beginning and/or end.  xfs_zero_range is smart
         * enough to skip any holes, including those we just created.
         */
        return xfs_zero_range(ip, offset, len, NULL);
}
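
/*
 * Note the asymmetric rounding when converting the byte range above:
 * XFS_B_TO_FSB() rounds the start up and XFS_B_TO_FSBT() rounds the end
 * down, so only whole blocks are unmapped; the partial edges are handled
 * by the xfs_zero_range() call instead.
 */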

/*
 * Preallocate and zero a range of a file. This mechanism has the allocation
 * semantics of fallocate and in addition converts data in the range to zeroes.
 */
int
xfs_zero_file_space(
        struct xfs_inode        *ip,
        xfs_off_t               offset,
        xfs_off_t               len)
{
        struct xfs_mount        *mp = ip->i_mount;
        uint                    blksize;
        int                     error;

        trace_xfs_zero_file_space(ip);

        blksize = 1 << mp->m_sb.sb_blocklog;

        /*
         * Punch a hole and prealloc the range. We use hole punch rather than
         * unwritten extent conversion for two reasons:
         *
         * 1.) Hole punch handles partial block zeroing for us.
         *
         * 2.) If prealloc returns ENOSPC, the file range is still zero-valued
         * by virtue of the hole punch.
         */
        error = xfs_free_file_space(ip, offset, len);
        if (error)
                goto out;

        error = xfs_alloc_file_space(ip, round_down(offset, blksize),
                                     round_up(offset + len, blksize) -
                                     round_down(offset, blksize),
                                     XFS_BMAPI_PREALLOC);
out:
        return error;
}

/*
 * @next_fsb will keep track of the extent currently undergoing shift.
 * @stop_fsb will keep track of the extent at which we have to stop.
 * If we are shifting left, we will start with block (offset + len) and
 * shift each extent till the last extent.
 * If we are shifting right, we will start with the last extent inside file
 * space and continue until we reach the block corresponding to offset.
 */
static int
xfs_shift_file_space(
        struct xfs_inode        *ip,
        xfs_off_t               offset,
        xfs_off_t               len,
        enum shift_direction    direction)
{
        int                     done = 0;
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_trans        *tp;
        int                     error;
        struct xfs_defer_ops    dfops;
        xfs_fsblock_t           first_block;
        xfs_fileoff_t           stop_fsb;
        xfs_fileoff_t           next_fsb;
        xfs_fileoff_t           shift_fsb;

        ASSERT(direction == SHIFT_LEFT || direction == SHIFT_RIGHT);

        if (direction == SHIFT_LEFT) {
                next_fsb = XFS_B_TO_FSB(mp, offset + len);
                stop_fsb = XFS_B_TO_FSB(mp, VFS_I(ip)->i_size);
        } else {
                /*
                 * If right shift, delegate the work of initialization of
                 * next_fsb to xfs_bmap_shift_extent as it has ilock held.
                 */
                next_fsb = NULLFSBLOCK;
                stop_fsb = XFS_B_TO_FSB(mp, offset);
        }

        shift_fsb = XFS_B_TO_FSB(mp, len);

        /*
         * Trim eofblocks to avoid shifting uninitialized post-eof
         * preallocation into the accessible region of the file.
         */
        if (xfs_can_free_eofblocks(ip, true)) {
                error = xfs_free_eofblocks(mp, ip, false);
                if (error)
                        return error;
        }

        /*
         * Writeback and invalidate cache for the remainder of the file as
         * we're about to shift down every extent from offset to EOF.
         */
        error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
                                             offset, -1);
        if (error)
                return error;
        error = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
                                        offset >> PAGE_SHIFT, -1);
        if (error)
                return error;

        /*
         * The extent shifting code works on extent granularity. So, if
         * stop_fsb is not the starting block of an extent, we need to split
         * the extent at stop_fsb.
         */
        if (direction == SHIFT_RIGHT) {
                error = xfs_bmap_split_extent(ip, stop_fsb);
                if (error)
                        return error;
        }

        while (!error && !done) {
                /*
                 * We need to reserve a permanent block for the transaction.
                 * This comes into play when, after shifting an extent into a
                 * hole, adjacent extents can be merged, which may free a
                 * block during the record update.
                 */
                error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
                                XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp);
                if (error)
                        break;

                xfs_ilock(ip, XFS_ILOCK_EXCL);
                error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot,
                                ip->i_gdquot, ip->i_pdquot,
                                XFS_DIOSTRAT_SPACE_RES(mp, 0), 0,
                                XFS_QMOPT_RES_REGBLKS);
                if (error)
                        goto out_trans_cancel;

                xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

                xfs_defer_init(&dfops, &first_block);

                /*
                 * We are using the write transaction in which max 2 bmbt
                 * updates are allowed
                 */
                error = xfs_bmap_shift_extents(tp, ip, &next_fsb, shift_fsb,
                                &done, stop_fsb, &first_block, &dfops,
                                direction, XFS_BMAP_MAX_SHIFT_EXTENTS);
                if (error)
                        goto out_bmap_cancel;

                error = xfs_defer_finish(&tp, &dfops, NULL);
                if (error)
                        goto out_bmap_cancel;

                error = xfs_trans_commit(tp);
        }

        return error;

out_bmap_cancel:
        xfs_defer_cancel(&dfops);
out_trans_cancel:
        xfs_trans_cancel(tp);
        return error;
}

/*
 * xfs_collapse_file_space()
 *      This routine frees disk space and shifts extents for the given file.
 *      The first thing we do is to free data blocks in the specified range
 *      by calling xfs_free_file_space(). It would also sync dirty data
 *      and invalidate page cache over the region on which collapse range
 *      is working. Then we shift extent records to the left to cover the
 *      hole.
 * RETURNS:
 *      0 on success
 *      errno on error
 */
int
xfs_collapse_file_space(
        struct xfs_inode        *ip,
        xfs_off_t               offset,
        xfs_off_t               len)
{
        int error;

        ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
        trace_xfs_collapse_file_space(ip);

        error = xfs_free_file_space(ip, offset, len);
        if (error)
                return error;

        return xfs_shift_file_space(ip, offset, len, SHIFT_LEFT);
}
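
/*
 * Example of the resulting semantics (illustrative): collapsing 4k at
 * offset 8k of a 16k file leaves a 12k file in which the data formerly at
 * 12k-16k now starts at 8k, matching FALLOC_FL_COLLAPSE_RANGE.
 */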

/*
 * xfs_insert_file_space()
 *      This routine creates hole space by shifting extents for the given
 *      file. The first thing we do is to sync dirty data and invalidate page
 *      cache over the region on which insert range is working. Then we split
 *      an extent in two at the given offset by calling
 *      xfs_bmap_split_extent, and shift all extent records lying between
 *      [offset, last allocated extent] to the right to make room for the
 *      hole range.
 * RETURNS:
 *      0 on success
 *      errno on error
 */
int
xfs_insert_file_space(
        struct xfs_inode        *ip,
        loff_t                  offset,
        loff_t                  len)
{
        ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
        trace_xfs_insert_file_space(ip);

        return xfs_shift_file_space(ip, offset, len, SHIFT_RIGHT);
}

/*
 * We need to check that the format of the data fork in the temporary inode is
 * valid for the target inode before doing the swap. This is not a problem with
 * attr1 because of the fixed fork offset, but attr2 has a dynamically sized
 * data fork depending on the space the attribute fork is taking so we can get
 * invalid formats on the target inode.
 *
 * E.g. target has space for 7 extents in extent format, temp inode only has
 * space for 6.  If we defragment down to 7 extents, then the tmp format is a
 * btree, but when swapped it needs to be in extent format. Hence we can't just
 * blindly swap data forks on attr2 filesystems.
 *
 * Note that we check the swap in both directions so that we don't end up with
 * a corrupt temporary inode, either.
 *
 * Note that fixing the way xfs_fsr sets up the attribute fork in the source
 * inode will prevent this situation from occurring, so all we do here is
 * reject and log the attempt. Basically we are putting the responsibility on
 * userspace to get this right.
 */
static int
xfs_swap_extents_check_format(
        xfs_inode_t     *ip,    /* target inode */
        xfs_inode_t     *tip)   /* tmp inode */
{

        /* Should never get a local format */
        if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL ||
            tip->i_d.di_format == XFS_DINODE_FMT_LOCAL)
                return -EINVAL;

        /*
         * if the target inode has fewer extents than the temporary inode then
         * why did userspace call us?
         */
        if (ip->i_d.di_nextents < tip->i_d.di_nextents)
                return -EINVAL;

        /*
         * if the target inode is in extent form and the temp inode is in btree
         * form then we will end up with the target inode in the wrong format
         * as we already know there are fewer extents in the temp inode.
         */
        if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
            tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
                return -EINVAL;

        /* Check temp in extent form to max in target */
        if (tip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
            XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) >
                        XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
                return -EINVAL;

        /* Check target in extent form to max in temp */
        if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
            XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) >
                        XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
                return -EINVAL;

        /*
         * If we are in a btree format, check that the temp root block will fit
         * in the target and that it has enough extents to be in btree format
         * in the target.
         *
         * Note that we have to be careful to allow btree->extent conversions
         * (a common defrag case) which will occur when the temp inode is in
         * extent format...
         */
        if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
                if (XFS_IFORK_BOFF(ip) &&
                    XFS_BMAP_BMDR_SPACE(tip->i_df.if_broot) > XFS_IFORK_BOFF(ip))
                        return -EINVAL;
                if (XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) <=
                    XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
                        return -EINVAL;
        }

        /* Reciprocal target->temp btree format checks */
        if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
                if (XFS_IFORK_BOFF(tip) &&
                    XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > XFS_IFORK_BOFF(tip))
                        return -EINVAL;
                if (XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) <=
                    XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
                        return -EINVAL;
        }

        return 0;
}

static int
xfs_swap_extent_flush(
        struct xfs_inode        *ip)
{
        int     error;

        error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
        if (error)
                return error;
        truncate_pagecache_range(VFS_I(ip), 0, -1);

        /* Verify O_DIRECT for ftmp */
        if (VFS_I(ip)->i_mapping->nrpages)
                return -EINVAL;
        return 0;
}

int
xfs_swap_extents(
        xfs_inode_t     *ip,    /* target inode */
        xfs_inode_t     *tip,   /* tmp inode */
        xfs_swapext_t   *sxp)
{
        xfs_mount_t     *mp = ip->i_mount;
        xfs_trans_t     *tp;
        xfs_bstat_t     *sbp = &sxp->sx_stat;
        xfs_ifork_t     *tempifp, *ifp, *tifp;
        int             src_log_flags, target_log_flags;
        int             error = 0;
        int             aforkblks = 0;
        int             taforkblks = 0;
        __uint64_t      tmp;
        int             lock_flags;

        /* XXX: we can't do this with rmap, will fix later */
        if (xfs_sb_version_hasrmapbt(&mp->m_sb))
                return -EOPNOTSUPP;

        tempifp = kmem_alloc(sizeof(xfs_ifork_t), KM_MAYFAIL);
        if (!tempifp) {
                error = -ENOMEM;
                goto out;
        }

        /*
         * Lock the inodes against other IO, page faults and truncate to
         * begin with.  Then we can ensure the inodes are flushed and have no
         * page cache safely. Once we have done this we can take the ilocks
         * and do the rest of the checks.
         */
        lock_flags = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
        xfs_lock_two_inodes(ip, tip, XFS_IOLOCK_EXCL);
        xfs_lock_two_inodes(ip, tip, XFS_MMAPLOCK_EXCL);

        /* Verify that both files have the same format */
        if ((VFS_I(ip)->i_mode & S_IFMT) != (VFS_I(tip)->i_mode & S_IFMT)) {
                error = -EINVAL;
                goto out_unlock;
        }

        /* Verify both files are either real-time or non-realtime */
        if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
                error = -EINVAL;
                goto out_unlock;
        }

        error = xfs_swap_extent_flush(ip);
        if (error)
                goto out_unlock;
        error = xfs_swap_extent_flush(tip);
        if (error)
                goto out_unlock;

        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
        if (error)
                goto out_unlock;

        /*
         * Lock and join the inodes to the transaction so that transaction
         * commit or cancel will unlock the inodes from this point onwards.
         */
        xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL);
        lock_flags |= XFS_ILOCK_EXCL;
        xfs_trans_ijoin(tp, ip, lock_flags);
        xfs_trans_ijoin(tp, tip, lock_flags);

        /* Verify all data are being swapped */
        if (sxp->sx_offset != 0 ||
            sxp->sx_length != ip->i_d.di_size ||
            sxp->sx_length != tip->i_d.di_size) {
                error = -EFAULT;
                goto out_trans_cancel;
        }

        trace_xfs_swap_extent_before(ip, 0);
        trace_xfs_swap_extent_before(tip, 1);

        /* check inode formats now that data is flushed */
        error = xfs_swap_extents_check_format(ip, tip);
        if (error) {
                xfs_notice(mp,
                    "%s: inode 0x%llx format is incompatible for exchanging.",
                                __func__, ip->i_ino);
                goto out_trans_cancel;
        }

        /*
         * Compare the current change & modify times with that
         * passed in.  If they differ, we abort this swap.
         * This is the mechanism used to ensure the calling
         * process that the file was not changed out from
         * under it.
         */
        if ((sbp->bs_ctime.tv_sec != VFS_I(ip)->i_ctime.tv_sec) ||
            (sbp->bs_ctime.tv_nsec != VFS_I(ip)->i_ctime.tv_nsec) ||
            (sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) ||
            (sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) {
                error = -EBUSY;
                goto out_trans_cancel;
        }
        /*
         * Count the number of extended attribute blocks
         */
        if ( ((XFS_IFORK_Q(ip) != 0) && (ip->i_d.di_anextents > 0)) &&
             (ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
                error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &aforkblks);
                if (error)
                        goto out_trans_cancel;
        }
        if ( ((XFS_IFORK_Q(tip) != 0) && (tip->i_d.di_anextents > 0)) &&
             (tip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
                error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK,
                        &taforkblks);
                if (error)
                        goto out_trans_cancel;
        }

        /*
         * Before we've swapped the forks, lets set the owners of the forks
         * appropriately. We have to do this as we are demand paging the btree
         * buffers, and so the validation done on read will expect the owner
         * field to be correctly set. Once we change the owners, we can swap
         * the inode forks.
         *
         * Note the trickiness in setting the log flags - we set the owner log
         * flag on the opposite inode (i.e. the inode we are setting the new
         * owner to be) because once we swap the forks and log that, log
         * recovery is going to see the fork as owned by the swapped inode,
         * not the pre-swapped inodes.
         */
        src_log_flags = XFS_ILOG_CORE;
        target_log_flags = XFS_ILOG_CORE;
        if (ip->i_d.di_version == 3 &&
            ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
                target_log_flags |= XFS_ILOG_DOWNER;
                error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK,
                                              tip->i_ino, NULL);
                if (error)
                        goto out_trans_cancel;
        }

        if (tip->i_d.di_version == 3 &&
            tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
                src_log_flags |= XFS_ILOG_DOWNER;
                error = xfs_bmbt_change_owner(tp, tip, XFS_DATA_FORK,
                                              ip->i_ino, NULL);
                if (error)
                        goto out_trans_cancel;
        }

        /*
         * Swap the data forks of the inodes
         */
        ifp = &ip->i_df;
        tifp = &tip->i_df;
        *tempifp = *ifp;        /* struct copy */
        *ifp = *tifp;           /* struct copy */
        *tifp = *tempifp;       /* struct copy */

        /*
         * Fix the on-disk inode values
         */
        tmp = (__uint64_t)ip->i_d.di_nblocks;
        ip->i_d.di_nblocks = tip->i_d.di_nblocks - taforkblks + aforkblks;
        tip->i_d.di_nblocks = tmp + taforkblks - aforkblks;

        tmp = (__uint64_t) ip->i_d.di_nextents;
        ip->i_d.di_nextents = tip->i_d.di_nextents;
        tip->i_d.di_nextents = tmp;

        tmp = (__uint64_t) ip->i_d.di_format;
        ip->i_d.di_format = tip->i_d.di_format;
        tip->i_d.di_format = tmp;

        /*
         * The extents in the source inode could still contain speculative
         * preallocation beyond EOF (e.g. the file is open but not modified
         * while defrag is in progress). In that case, we need to copy over the
         * number of delalloc blocks the data fork in the source inode is
         * tracking beyond EOF so that when the fork is truncated away when the
         * temporary inode is unlinked we don't underrun the i_delayed_blks
         * counter on that inode.
         */
        ASSERT(tip->i_delayed_blks == 0);
        tip->i_delayed_blks = ip->i_delayed_blks;
        ip->i_delayed_blks = 0;

        switch (ip->i_d.di_format) {
        case XFS_DINODE_FMT_EXTENTS:
                /* If the extents fit in the inode, fix the
                 * pointer.  Otherwise it's already NULL or
                 * pointing to the extent.
                 */
                if (ip->i_d.di_nextents <= XFS_INLINE_EXTS) {
                        ifp->if_u1.if_extents =
                                ifp->if_u2.if_inline_ext;
                }
                src_log_flags |= XFS_ILOG_DEXT;
                break;
        case XFS_DINODE_FMT_BTREE:
                ASSERT(ip->i_d.di_version < 3 ||
                       (src_log_flags & XFS_ILOG_DOWNER));
                src_log_flags |= XFS_ILOG_DBROOT;
                break;
        }

        switch (tip->i_d.di_format) {
        case XFS_DINODE_FMT_EXTENTS:
                /* If the extents fit in the inode, fix the
                 * pointer.  Otherwise it's already NULL or
                 * pointing to the extent.
                 */
                if (tip->i_d.di_nextents <= XFS_INLINE_EXTS) {
                        tifp->if_u1.if_extents =
                                tifp->if_u2.if_inline_ext;
                }
                target_log_flags |= XFS_ILOG_DEXT;
                break;
        case XFS_DINODE_FMT_BTREE:
                target_log_flags |= XFS_ILOG_DBROOT;
                ASSERT(tip->i_d.di_version < 3 ||
                       (target_log_flags & XFS_ILOG_DOWNER));
                break;
        }

        xfs_trans_log_inode(tp, ip, src_log_flags);
        xfs_trans_log_inode(tp, tip, target_log_flags);

        /*
         * If this is a synchronous mount, make sure that the
         * transaction goes to disk before returning to the user.
         */
        if (mp->m_flags & XFS_MOUNT_WSYNC)
                xfs_trans_set_sync(tp);

        error = xfs_trans_commit(tp);

        trace_xfs_swap_extent_after(ip, 0);
        trace_xfs_swap_extent_after(tip, 1);
out:
        kmem_free(tempifp);
        return error;

out_unlock:
        xfs_iunlock(ip, lock_flags);
        xfs_iunlock(tip, lock_flags);
        goto out;

out_trans_cancel:
        xfs_trans_cancel(tp);
        goto out;
}
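
/*
 * Recap of the owner-change dance in xfs_swap_extents() (v3 inodes only):
 * each btree-format fork has its owner rewritten to the *other* inode's
 * number before the fork pointers are swapped, and the XFS_ILOG_DOWNER flag
 * is logged against the inode that will receive the fork, so log recovery
 * sees consistent ownership.
 */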