/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2012 Red Hat, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_extfree_item.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
/* Kernel only BMAP related definitions and functions */
/*
 * Convert the given file system block to a disk block.  We have to treat it
 * differently based on whether the file is a real time file or not, because
 * the bmap code does.
 */
xfs_daddr_t
xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
{
	return (XFS_IS_REALTIME_INODE(ip) ? \
		 (xfs_daddr_t)XFS_FSB_TO_BB((ip)->i_mount, (fsb)) : \
		 XFS_FSB_TO_DADDR((ip)->i_mount, (fsb)));
}
/*
 * Routine to be called at transaction's end by xfs_bmapi, xfs_bunmapi
 * caller.  Frees all the extents that need freeing, which must be done
 * last due to locking considerations.  We never free any extents in
 * the first transaction.
 *
 * Return 1 if the given transaction was committed and a new one
 * started, and 0 otherwise in the committed parameter.
 */
int						/* error */
xfs_bmap_finish(
	xfs_trans_t		**tp,		/* transaction pointer addr */
	xfs_bmap_free_t		*flist,		/* i/o: list extents to free */
	int			*committed)	/* xact committed or not */
{
	xfs_efd_log_item_t	*efd;		/* extent free data */
	xfs_efi_log_item_t	*efi;		/* extent free intention */
	int			error;		/* error return value */
	xfs_bmap_free_item_t	*free;		/* free extent item */
	xfs_mount_t		*mp;		/* filesystem mount structure */
	xfs_bmap_free_item_t	*next;		/* next item on free list */
	ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
	if (flist->xbf_count == 0) {
		*committed = 0;
		return 0;
	}

	efi = xfs_trans_get_efi(*tp, flist->xbf_count);
	for (free = flist->xbf_first; free; free = free->xbfi_next)
		xfs_trans_log_efi_extent(*tp, efi, free->xbfi_startblock,
			free->xbfi_blockcount);

	error = xfs_trans_roll(tp, NULL);
	*committed = 1;
	/*
	 * We have a new transaction, so we should return committed=1,
	 * even though we're returning an error.
	 */
	if (error)
		return error;

	efd = xfs_trans_get_efd(*tp, efi, flist->xbf_count);
	for (free = flist->xbf_first; free != NULL; free = next) {
		next = free->xbfi_next;
		if ((error = xfs_free_extent(*tp, free->xbfi_startblock,
				free->xbfi_blockcount))) {
			/*
			 * The bmap free list will be cleaned up at a
			 * higher level.  The EFI will be canceled when
			 * this transaction is aborted.
			 * Need to force shutdown here to make sure it
			 * happens, since this transaction may not be
			 * dirty yet.
			 */
			mp = (*tp)->t_mountp;
			if (!XFS_FORCED_SHUTDOWN(mp))
				xfs_force_shutdown(mp,
						   (error == -EFSCORRUPTED) ?
						   SHUTDOWN_CORRUPT_INCORE :
						   SHUTDOWN_META_IO_ERROR);
			return error;
		}
		xfs_trans_log_efd_extent(*tp, efd, free->xbfi_startblock,
			free->xbfi_blockcount);
		xfs_bmap_del_free(flist, NULL, free);
	}

	return 0;
}
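
/*
 * Illustrative caller pattern (a sketch only; see xfs_alloc_file_space()
 * later in this file for the real thing): the caller collects the extents
 * to free while the transaction is still running, then lets
 * xfs_bmap_finish() roll the transaction and process the free list before
 * the final commit.
 *
 *	xfs_bmap_init(&free_list, &firstfsb);
 *	error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb, flags,
 *				&firstfsb, 0, imapp, &nimaps, &free_list);
 *	if (!error)
 *		error = xfs_bmap_finish(&tp, &free_list, &committed);
 *	if (!error)
 *		error = xfs_trans_commit(tp);
 */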
int
xfs_bmap_rtalloc(
	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
{
	xfs_alloctype_t	atype = 0;	/* type for allocation routines */
	int		error;		/* error return value */
	xfs_mount_t	*mp;		/* mount point structure */
	xfs_extlen_t	prod = 0;	/* product factor for allocators */
	xfs_extlen_t	ralen = 0;	/* realtime allocation length */
	xfs_extlen_t	align;		/* minimum allocation alignment */
	xfs_rtblock_t	rtb;

	mp = ap->ip->i_mount;
	align = xfs_get_extsz_hint(ap->ip);
	prod = align / mp->m_sb.sb_rextsize;
	error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
					align, 1, ap->eof, 0,
					ap->conv, &ap->offset, &ap->length);
	if (error)
		return error;
	ASSERT(ap->length);
	ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);
	/*
	 * If the offset & length are not perfectly aligned
	 * then kill prod, it will just get us in trouble.
	 */
	if (do_mod(ap->offset, align) || ap->length % align)
		prod = 1;
	/*
	 * Set ralen to be the actual requested length in rtextents.
	 */
	ralen = ap->length / mp->m_sb.sb_rextsize;
	/*
	 * If the old value was close enough to MAXEXTLEN that
	 * we rounded up to it, cut it back so it's valid again.
	 * Note that if it's a really large request (bigger than
	 * MAXEXTLEN), we don't hear about that number, and can't
	 * adjust the starting point to match it.
	 */
	if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
		ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;
	/*
	 * Lock out other modifications to the RT bitmap inode.
	 */
	xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);

	/*
	 * If it's an allocation to an empty file at offset 0,
	 * pick an extent that will space things out in the rt area.
	 */
	if (ap->eof && ap->offset == 0) {
		xfs_rtblock_t uninitialized_var(rtx);	/* realtime extent no */

		error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
		if (error)
			return error;
		ap->blkno = rtx * mp->m_sb.sb_rextsize;
	} else {
		ap->blkno = 0;
	}

	xfs_bmap_adjacent(ap);
	/*
	 * Realtime allocation, done through xfs_rtallocate_extent.
	 */
	atype = ap->blkno == 0 ?  XFS_ALLOCTYPE_ANY_AG : XFS_ALLOCTYPE_NEAR_BNO;
	do_div(ap->blkno, mp->m_sb.sb_rextsize);
	rtb = ap->blkno;
	ap->length = ralen;
	if ((error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1, ap->length,
				&ralen, atype, ap->wasdel, prod, &rtb)))
		return error;
	if (rtb == NULLFSBLOCK && prod > 1 &&
	    (error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1,
					   ap->length, &ralen, atype,
					   ap->wasdel, 1, &rtb)))
		return error;
	ap->blkno = rtb;
	if (ap->blkno != NULLFSBLOCK) {
		ap->blkno *= mp->m_sb.sb_rextsize;
		ralen *= mp->m_sb.sb_rextsize;
		ap->length = ralen;
		ap->ip->i_d.di_nblocks += ralen;
		xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
		if (ap->wasdel)
			ap->ip->i_delayed_blks -= ralen;
		/*
		 * Adjust the disk quota also. This was reserved
		 * earlier.
		 */
		xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
			ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
					XFS_TRANS_DQ_RTBCOUNT, (long) ralen);
	} else {
		ap->length = 0;
	}
	return 0;
}
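
/*
 * Note on units in xfs_bmap_rtalloc() above: the realtime allocator works
 * in realtime extents (sb_rextsize filesystem blocks each), so the request
 * is converted with do_div() before calling xfs_rtallocate_extent() and the
 * result is scaled back up to filesystem blocks before being stored in
 * ap->blkno and accounted in di_nblocks and the quota counters.
 */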
/*
 * Check if the endoff is outside the last extent. If so the caller will grow
 * the allocation to a stripe unit boundary.  All offsets are considered
 * outside the end of file for an empty fork, so 1 is returned in *eof in that
 * case.
 */
int
xfs_bmap_eof(
	struct xfs_inode	*ip,
	xfs_fileoff_t		endoff,
	int			whichfork,
	int			*eof)
{
	struct xfs_bmbt_irec	rec;
	int			error;

	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, eof);
	if (error || *eof)
		return error;

	*eof = endoff >= rec.br_startoff + rec.br_blockcount;
	return 0;
}
/*
 * Extent tree block counting routines.
 */

/*
 * Count leaf blocks given a range of extent records.
 */
STATIC void
xfs_bmap_count_leaves(
	xfs_ifork_t		*ifp,
	xfs_extnum_t		idx,
	int			numrecs,
	int			*count)
{
	int			b;

	for (b = 0; b < numrecs; b++) {
		xfs_bmbt_rec_host_t *frp = xfs_iext_get_ext(ifp, idx + b);
		*count += xfs_bmbt_get_blockcount(frp);
	}
}
/*
 * Count leaf blocks given a range of extent records originally
 * in btree format.
 */
STATIC void
xfs_bmap_disk_count_leaves(
	struct xfs_mount	*mp,
	struct xfs_btree_block	*block,
	int			numrecs,
	int			*count)
{
	int			b;
	xfs_bmbt_rec_t		*frp;

	for (b = 1; b <= numrecs; b++) {
		frp = XFS_BMBT_REC_ADDR(mp, block, b);
		*count += xfs_bmbt_disk_get_blockcount(frp);
	}
}
/*
 * Recursively walks each level of a btree
 * to count total fsblocks in use.
 */
STATIC int					/* error */
xfs_bmap_count_tree(
	xfs_mount_t		*mp,		/* file system mount point */
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_ifork_t		*ifp,		/* inode fork pointer */
	xfs_fsblock_t		blockno,	/* file system block number */
	int			levelin,	/* level in btree */
	int			*count)		/* Count of blocks */
{
	int			error;
	xfs_buf_t		*bp, *nbp;
	int			level = levelin;
	__be64			*pp;
	xfs_fsblock_t		bno = blockno;
	xfs_fsblock_t		nextbno;
	struct xfs_btree_block	*block, *nextblock;
	int			numrecs;

	error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
	if (error)
		return error;
	*count += 1;
	block = XFS_BUF_TO_BLOCK(bp);

	if (--level) {
		/* Not at node above leaves, count this level of nodes */
		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
		while (nextbno != NULLFSBLOCK) {
			error = xfs_btree_read_bufl(mp, tp, nextbno, 0, &nbp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				return error;
			*count += 1;
			nextblock = XFS_BUF_TO_BLOCK(nbp);
			nextbno = be64_to_cpu(nextblock->bb_u.l.bb_rightsib);
			xfs_trans_brelse(tp, nbp);
		}

		/* Dive to the next level */
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		if (unlikely((error =
		     xfs_bmap_count_tree(mp, tp, ifp, bno, level, count)) < 0)) {
			xfs_trans_brelse(tp, bp);
			XFS_ERROR_REPORT("xfs_bmap_count_tree(1)",
					 XFS_ERRLEVEL_LOW, mp);
			return -EFSCORRUPTED;
		}
		xfs_trans_brelse(tp, bp);
	} else {
		/* count all level 1 nodes and their leaves */
		for (;;) {
			nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
			numrecs = be16_to_cpu(block->bb_numrecs);
			xfs_bmap_disk_count_leaves(mp, block, numrecs, count);
			xfs_trans_brelse(tp, bp);
			if (nextbno == NULLFSBLOCK)
				break;
			bno = nextbno;
			error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				return error;
			*count += 1;
			block = XFS_BUF_TO_BLOCK(bp);
		}
	}
	return 0;
}
/*
 * Count fsblocks of the given fork.
 */
int						/* error */
xfs_bmap_count_blocks(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode */
	int			whichfork,	/* data or attr fork */
	int			*count)		/* out: count of blocks */
{
	struct xfs_btree_block	*block;	/* current btree block */
	xfs_fsblock_t		bno;	/* block # of "block" */
	xfs_ifork_t		*ifp;	/* fork structure */
	int			level;	/* btree level, for checking */
	xfs_mount_t		*mp;	/* file system mount structure */
	__be64			*pp;	/* pointer to block address */

	bno = NULLFSBLOCK;
	mp = ip->i_mount;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	if ( XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ) {
		xfs_bmap_count_leaves(ifp, 0,
			ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t),
			count);
		return 0;
	}

	/*
	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
	block = ifp->if_broot;
	level = be16_to_cpu(block->bb_level);
	ASSERT(level > 0);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
	bno = be64_to_cpu(*pp);
	ASSERT(bno != NULLFSBLOCK);
	ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
	ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);

	if (unlikely(xfs_bmap_count_tree(mp, tp, ifp, bno, level, count) < 0)) {
		XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)", XFS_ERRLEVEL_LOW,
				 mp);
		return -EFSCORRUPTED;
	}

	return 0;
}
/*
 * returns 1 for success, 0 if we failed to map the extent.
 */
STATIC int
xfs_getbmapx_fix_eof_hole(
	xfs_inode_t		*ip,		/* xfs incore inode pointer */
	struct getbmapx		*out,		/* output structure */
	int			prealloced,	/* this is a file with
						 * preallocated data space */
	__int64_t		end,		/* last block requested */
	xfs_fsblock_t		startblock)
{
	__int64_t		fixlen;
	xfs_mount_t		*mp;		/* file system mount point */
	xfs_ifork_t		*ifp;		/* inode fork pointer */
	xfs_extnum_t		lastx;		/* last extent pointer */
	xfs_fileoff_t		fileblock;

	if (startblock == HOLESTARTBLOCK) {
		mp = ip->i_mount;
		out->bmv_block = -1;
		fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
		fixlen -= out->bmv_offset;
		if (prealloced && out->bmv_offset + out->bmv_length == end) {
			/* Came to hole at EOF. Trim it. */
			if (fixlen <= 0)
				return 0;
			out->bmv_length = fixlen;
		}
	} else {
		if (startblock == DELAYSTARTBLOCK)
			out->bmv_block = -2;
		else
			out->bmv_block = xfs_fsb_to_db(ip, startblock);
		fileblock = XFS_BB_TO_FSB(ip->i_mount, out->bmv_offset);
		ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
		if (xfs_iext_bno_to_ext(ifp, fileblock, &lastx) &&
		   (lastx == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))-1))
			out->bmv_oflags |= BMV_OF_LAST;
	}

	return 1;
}
/*
 * Get inode's extents as described in bmv, and format for output.
 * Calls formatter to fill the user's buffer until all extents
 * are mapped, until the passed-in bmv->bmv_count slots have
 * been filled, or until the formatter short-circuits the loop,
 * if it is tracking filled-in extents on its own.
 */
int						/* error code */
xfs_getbmap(
	xfs_inode_t		*ip,
	struct getbmapx		*bmv,		/* user bmap structure */
	xfs_bmap_format_t	formatter,	/* format to user */
	void			*arg)		/* formatter arg */
{
	__int64_t		bmvend;		/* last block requested */
	int			error = 0;	/* return value */
	__int64_t		fixlen;		/* length for -1 case */
	int			i;		/* extent number */
	int			lock;		/* lock state */
	xfs_bmbt_irec_t		*map;		/* buffer for user's data */
	xfs_mount_t		*mp;		/* file system mount point */
	int			nex;		/* # of user extents can do */
	int			nexleft;	/* # of user extents left */
	int			subnex;		/* # of bmapi's can do */
	int			nmap;		/* number of map entries */
	struct getbmapx		*out;		/* output structure */
	int			whichfork;	/* data or attr fork */
	int			prealloced;	/* this is a file with
						 * preallocated data space */
	int			iflags;		/* interface flags */
	int			bmapi_flags;	/* flags for xfs_bmapi */
	int			cur_ext = 0;

	mp = ip->i_mount;
	iflags = bmv->bmv_iflags;
	whichfork = iflags & BMV_IF_ATTRFORK ? XFS_ATTR_FORK : XFS_DATA_FORK;
	if (whichfork == XFS_ATTR_FORK) {
		if (XFS_IFORK_Q(ip)) {
			if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS &&
			    ip->i_d.di_aformat != XFS_DINODE_FMT_BTREE &&
			    ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)
				return -EINVAL;
		} else if (unlikely(
			   ip->i_d.di_aformat != 0 &&
			   ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS)) {
			XFS_ERROR_REPORT("xfs_getbmap", XFS_ERRLEVEL_LOW,
					 ip->i_mount);
			return -EFSCORRUPTED;
		}

		prealloced = 0;
		fixlen = 1LL << 32;
	} else {
		if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
		    ip->i_d.di_format != XFS_DINODE_FMT_BTREE &&
		    ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
			return -EINVAL;

		if (xfs_get_extsz_hint(ip) ||
		    ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)){
			prealloced = 1;
			fixlen = mp->m_super->s_maxbytes;
		} else {
			prealloced = 0;
			fixlen = XFS_ISIZE(ip);
		}
	}
	if (bmv->bmv_length == -1) {
		fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, fixlen));
		bmv->bmv_length =
			max_t(__int64_t, fixlen - bmv->bmv_offset, 0);
	} else if (bmv->bmv_length == 0) {
		bmv->bmv_entries = 0;
		return 0;
	} else if (bmv->bmv_length < 0) {
		return -EINVAL;
	}

	nex = bmv->bmv_count - 1;
	if (nex <= 0)
		return -EINVAL;
	bmvend = bmv->bmv_offset + bmv->bmv_length;

	if (bmv->bmv_count > ULONG_MAX / sizeof(struct getbmapx))
		return -ENOMEM;
	out = kmem_zalloc_large(bmv->bmv_count * sizeof(struct getbmapx), 0);
	if (!out)
		return -ENOMEM;
	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	if (whichfork == XFS_DATA_FORK) {
		if (!(iflags & BMV_IF_DELALLOC) &&
		    (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size)) {
			error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
			if (error)
				goto out_unlock_iolock;

			/*
			 * Even after flushing the inode, there can still be
			 * delalloc blocks on the inode beyond EOF due to
			 * speculative preallocation.  These are not removed
			 * until the release function is called or the inode
			 * is inactivated.  Hence we cannot assert here that
			 * ip->i_delayed_blks == 0.
			 */
		}

		lock = xfs_ilock_data_map_shared(ip);
	} else {
		lock = xfs_ilock_attr_map_shared(ip);
	}
	/*
	 * Don't let nex be bigger than the number of extents
	 * we can have assuming alternating holes and real extents.
	 */
	if (nex > XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1)
		nex = XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1;

	bmapi_flags = xfs_bmapi_aflag(whichfork);
	if (!(iflags & BMV_IF_PREALLOC))
		bmapi_flags |= XFS_BMAPI_IGSTATE;

	/*
	 * Allocate enough space to handle "subnex" maps at a time.
	 */
	error = -ENOMEM;
	subnex = 16;
	map = kmem_alloc(subnex * sizeof(*map), KM_MAYFAIL | KM_NOFS);
	if (!map)
		goto out_unlock_ilock;

	bmv->bmv_entries = 0;
	if (XFS_IFORK_NEXTENTS(ip, whichfork) == 0 &&
	    (whichfork == XFS_ATTR_FORK || !(iflags & BMV_IF_DELALLOC))) {
		error = 0;
		goto out_free_map;
	}

	nexleft = nex;

	do {
		nmap = (nexleft > subnex) ? subnex : nexleft;
		error = xfs_bmapi_read(ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset),
				       XFS_BB_TO_FSB(mp, bmv->bmv_length),
				       map, &nmap, bmapi_flags);
		if (error)
			goto out_free_map;
		ASSERT(nmap <= subnex);
		for (i = 0; i < nmap && nexleft && bmv->bmv_length; i++) {
			out[cur_ext].bmv_oflags = 0;
			if (map[i].br_state == XFS_EXT_UNWRITTEN)
				out[cur_ext].bmv_oflags |= BMV_OF_PREALLOC;
			else if (map[i].br_startblock == DELAYSTARTBLOCK)
				out[cur_ext].bmv_oflags |= BMV_OF_DELALLOC;
			out[cur_ext].bmv_offset =
				XFS_FSB_TO_BB(mp, map[i].br_startoff);
			out[cur_ext].bmv_length =
				XFS_FSB_TO_BB(mp, map[i].br_blockcount);
			out[cur_ext].bmv_unused1 = 0;
			out[cur_ext].bmv_unused2 = 0;
			/*
			 * delayed allocation extents that start beyond EOF can
			 * occur due to speculative EOF allocation when the
			 * delalloc extent is larger than the largest freespace
			 * extent at conversion time.  These extents cannot be
			 * converted by data writeback, so can exist here even
			 * if we are not supposed to be finding delalloc
			 * extents.
			 */
			if (map[i].br_startblock == DELAYSTARTBLOCK &&
			    map[i].br_startoff <= XFS_B_TO_FSB(mp, XFS_ISIZE(ip)))
				ASSERT((iflags & BMV_IF_DELALLOC) != 0);

			if (map[i].br_startblock == HOLESTARTBLOCK &&
			    whichfork == XFS_ATTR_FORK) {
				/* came to the end of attribute fork */
				out[cur_ext].bmv_oflags |= BMV_OF_LAST;
				goto out_free_map;
			}
			if (!xfs_getbmapx_fix_eof_hole(ip, &out[cur_ext],
					prealloced, bmvend,
					map[i].br_startblock))
				goto out_free_map;

			bmv->bmv_offset =
				out[cur_ext].bmv_offset +
				out[cur_ext].bmv_length;
			bmv->bmv_length =
				max_t(__int64_t, 0, bmvend - bmv->bmv_offset);

			/*
			 * In case we don't want to return the hole,
			 * don't increase cur_ext so that we can reuse
			 * it in the next loop.
			 */
			if ((iflags & BMV_IF_NO_HOLES) &&
			    map[i].br_startblock == HOLESTARTBLOCK) {
				memset(&out[cur_ext], 0, sizeof(out[cur_ext]));
				continue;
			}

			nexleft--;
			bmv->bmv_entries++;
			cur_ext++;
		}
	} while (nmap && nexleft && bmv->bmv_length);
 out_free_map:
	kmem_free(map);
 out_unlock_ilock:
	xfs_iunlock(ip, lock);
 out_unlock_iolock:
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	for (i = 0; i < cur_ext; i++) {
		int full = 0;	/* user array is full */

		/* format results & advance arg */
		error = formatter(&arg, &out[i], &full);
		if (error)
			break;
	}

	kmem_free(out);
	return error;
}
/*
 * dead simple method of punching delayed allocation blocks from a range in
 * the inode. Walks a block at a time so will be slow, but is only executed in
 * rare error cases so the overhead is not critical. This will always punch out
 * both the start and end blocks, even if the ranges only partially overlap
 * them, so it is up to the caller to ensure that partial blocks are not
 * passed in.
 */
int
xfs_bmap_punch_delalloc_range(
	struct xfs_inode	*ip,
	xfs_fileoff_t		start_fsb,
	xfs_fileoff_t		length)
{
	xfs_fileoff_t		remaining = length;
	int			error = 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	do {
		int		done;
		xfs_bmbt_irec_t	imap;
		int		nimaps = 1;
		xfs_fsblock_t	firstblock;
		xfs_bmap_free_t	flist;

		/*
		 * Map the range first and check that it is a delalloc extent
		 * before trying to unmap the range. Otherwise we will be
		 * trying to remove a real extent (which requires a
		 * transaction) or a hole, which is probably a bad idea...
		 */
		error = xfs_bmapi_read(ip, start_fsb, 1, &imap, &nimaps,
				       XFS_BMAPI_ENTIRE);

		if (error) {
			/* something screwed, just bail */
			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
				xfs_alert(ip->i_mount,
			"Failed delalloc mapping lookup ino %lld fsb %lld.",
						ip->i_ino, start_fsb);
			}
			break;
		}
		if (!nimaps) {
			/* nothing there */
			goto next_block;
		}
		if (imap.br_startblock != DELAYSTARTBLOCK) {
			/* been converted, ignore */
			goto next_block;
		}
		WARN_ON(imap.br_blockcount == 0);

		/*
		 * Note: while we initialise the firstblock/flist pair, they
		 * should never be used because blocks should never be
		 * allocated or freed for a delalloc extent and hence we need
		 * don't cancel or finish them after the xfs_bunmapi() call.
		 */
		xfs_bmap_init(&flist, &firstblock);
		error = xfs_bunmapi(NULL, ip, start_fsb, 1, 0, 1, &firstblock,
					&flist, &done);
		if (error)
			break;

		ASSERT(!flist.xbf_count && !flist.xbf_first);
next_block:
		start_fsb++;
		remaining--;
	} while(remaining > 0);

	return error;
}
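
/*
 * Usage note (an assumption based on the "rare error cases" comment above,
 * not something shown in this file): this helper is intended for error
 * paths such as writeback failing after delayed allocation has been set up,
 * where the delalloc blocks covering the failed range must be thrown away
 * again. The caller must already hold the ILOCK exclusively, as the ASSERT
 * above enforces.
 */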
/*
 * Test whether it is appropriate to check an inode for and free post EOF
 * blocks. The 'force' parameter determines whether we should also consider
 * regular files that are marked preallocated or append-only.
 */
bool
xfs_can_free_eofblocks(struct xfs_inode *ip, bool force)
{
	/* prealloc/delalloc exists only on regular files */
	if (!S_ISREG(ip->i_d.di_mode))
		return false;

	/*
	 * Zero sized files with no cached pages and delalloc blocks will not
	 * have speculative prealloc/delalloc blocks to remove.
	 */
	if (VFS_I(ip)->i_size == 0 &&
	    VFS_I(ip)->i_mapping->nrpages == 0 &&
	    ip->i_delayed_blks == 0)
		return false;

	/* If we haven't read in the extent list, then don't do it now. */
	if (!(ip->i_df.if_flags & XFS_IFEXTENTS))
		return false;

	/*
	 * Do not free real preallocated or append-only files unless the file
	 * has delalloc blocks and we are forced to remove them.
	 */
	if (ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
		if (!force || ip->i_delayed_blks == 0)
			return false;

	return true;
}
/*
 * This is called by xfs_inactive to free any blocks beyond eof
 * when the link count isn't zero and by xfs_dm_punch_hole() when
 * punching a hole to EOF.
 */
int
xfs_free_eofblocks(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,
	bool		need_iolock)
{
	xfs_trans_t	*tp;
	int		error;
	xfs_fileoff_t	end_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_filblks_t	map_len;
	int		nimaps;
	xfs_bmbt_irec_t	imap;

	/*
	 * Figure out if there are any blocks beyond the end
	 * of the file.  If not, then there is nothing to do.
	 */
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
	last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	if (last_fsb <= end_fsb)
		return 0;
	map_len = last_fsb - end_fsb;

	nimaps = 1;
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	error = xfs_bmapi_read(ip, end_fsb, map_len, &imap, &nimaps, 0);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (!error && (nimaps != 0) &&
	    (imap.br_startblock != HOLESTARTBLOCK ||
	     ip->i_delayed_blks)) {
		/*
		 * Attach the dquots to the inode up front.
		 */
		error = xfs_qm_dqattach(ip, 0);
		if (error)
			return error;

		/*
		 * There are blocks after the end of file.
		 * Free them up now by truncating the file to
		 * its current size.
		 */
		tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);

		if (need_iolock) {
			if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
				xfs_trans_cancel(tp);
				return -EAGAIN;
			}
		}

		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
		if (error) {
			ASSERT(XFS_FORCED_SHUTDOWN(mp));
			xfs_trans_cancel(tp);
			if (need_iolock)
				xfs_iunlock(ip, XFS_IOLOCK_EXCL);
			return error;
		}

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip, 0);

		/*
		 * Do not update the on-disk file size.  If we update the
		 * on-disk file size and then the system crashes before the
		 * contents of the file are flushed to disk then the files
		 * may be full of holes (ie NULL files bug).
		 */
		error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK,
					      XFS_ISIZE(ip));
		if (error) {
			/*
			 * If we get an error at this point we simply don't
			 * bother truncating the file.
			 */
			xfs_trans_cancel(tp);
		} else {
			error = xfs_trans_commit(tp);
			if (!error)
				xfs_inode_clear_eofblocks_tag(ip);
		}

		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (need_iolock)
			xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	}
	return error;
}
int
xfs_alloc_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len,
	int			alloc_type)
{
	xfs_mount_t		*mp = ip->i_mount;
	xfs_off_t		count;
	xfs_filblks_t		allocated_fsb;
	xfs_filblks_t		allocatesize_fsb;
	xfs_extlen_t		extsz, temp;
	xfs_fileoff_t		startoffset_fsb;
	xfs_fsblock_t		firstfsb;
	int			nimaps;
	int			quota_flag;
	int			rt;
	xfs_trans_t		*tp;
	xfs_bmbt_irec_t		imaps[1], *imapp;
	xfs_bmap_free_t		free_list;
	uint			qblocks, resblks, resrtextents;
	int			committed;
	int			error;

	trace_xfs_alloc_file_space(ip);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return error;

	if (len <= 0)
		return -EINVAL;

	rt = XFS_IS_REALTIME_INODE(ip);
	extsz = xfs_get_extsz_hint(ip);

	count = len;
	imapp = &imaps[0];
	nimaps = 1;
	startoffset_fsb	= XFS_B_TO_FSBT(mp, offset);
	allocatesize_fsb = XFS_B_TO_FSB(mp, count);
	/*
	 * Allocate file space until done or until there is an error
	 */
	while (allocatesize_fsb && !error) {
		xfs_fileoff_t	s, e;

		/*
		 * Determine space reservations for data/realtime.
		 */
		if (unlikely(extsz)) {
			s = startoffset_fsb;
			do_div(s, extsz);
			s *= extsz;
			e = startoffset_fsb + allocatesize_fsb;
			if ((temp = do_mod(startoffset_fsb, extsz)))
				e += temp;
			if ((temp = do_mod(e, extsz)))
				e += extsz - temp;
		} else {
			s = 0;
			e = allocatesize_fsb;
		}

		/*
		 * The transaction reservation is limited to a 32-bit block
		 * count, hence we need to limit the number of blocks we are
		 * trying to reserve to avoid an overflow. We can't allocate
		 * more than @nimaps extents, and an extent is limited on disk
		 * to MAXEXTLEN (21 bits), so use that to enforce the limit.
		 */
		resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps));
		if (unlikely(rt)) {
			resrtextents = qblocks = resblks;
			resrtextents /= mp->m_sb.sb_rextsize;
			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
			quota_flag = XFS_QMOPT_RES_RTBLKS;
		} else {
			resrtextents = 0;
			resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
			quota_flag = XFS_QMOPT_RES_REGBLKS;
		}
		/*
		 * Allocate and setup the transaction.
		 */
		tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
					  resblks, resrtextents);
		/*
		 * Check for running out of space
		 */
		if (error) {
			/*
			 * Free the transaction structure.
			 */
			ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
			xfs_trans_cancel(tp);
			break;
		}
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks,
						      0, quota_flag);
		if (error)
			goto error1;

		xfs_trans_ijoin(tp, ip, 0);

		xfs_bmap_init(&free_list, &firstfsb);
		error = xfs_bmapi_write(tp, ip, startoffset_fsb,
					allocatesize_fsb, alloc_type, &firstfsb,
					0, imapp, &nimaps, &free_list);
		if (error)
			goto error0;

		/*
		 * Complete the transaction
		 */
		error = xfs_bmap_finish(&tp, &free_list, &committed);
		if (error)
			goto error0;

		error = xfs_trans_commit(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			break;

		allocated_fsb = imapp->br_blockcount;

		if (nimaps == 0) {
			error = -ENOSPC;
			break;
		}

		startoffset_fsb += allocated_fsb;
		allocatesize_fsb -= allocated_fsb;
	}

	return error;
: /* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
1056 xfs_bmap_cancel(&free_list
);
1057 xfs_trans_unreserve_quota_nblks(tp
, ip
, (long)qblocks
, 0, quota_flag
);
1059 error1
: /* Just cancel transaction */
1060 xfs_trans_cancel(tp
);
1061 xfs_iunlock(ip
, XFS_ILOCK_EXCL
);
/*
 * Zero file bytes between startoff and endoff inclusive.
 * The iolock is held exclusive and no blocks are buffered.
 *
 * This function is used by xfs_free_file_space() to zero
 * partial blocks when the range to free is not block aligned.
 * When unreserving space with boundaries that are not block
 * aligned we round up the start and round down the end
 * boundaries and then use this function to zero the parts of
 * the blocks that got dropped during the rounding.
 */
STATIC int
xfs_zero_remaining_bytes(
	xfs_inode_t		*ip,
	xfs_off_t		startoff,
	xfs_off_t		endoff)
{
	xfs_bmbt_irec_t		imap;
	xfs_fileoff_t		offset_fsb;
	xfs_off_t		lastoffset;
	xfs_off_t		offset;
	xfs_buf_t		*bp;
	int			nimap;
	int			error = 0;
	xfs_mount_t		*mp = ip->i_mount;

	/*
	 * Avoid doing I/O beyond eof - it's not necessary
	 * since nothing can read beyond eof.  The space will
	 * be zeroed when the file is extended anyway.
	 */
	if (startoff >= XFS_ISIZE(ip))
		return 0;

	if (endoff > XFS_ISIZE(ip))
		endoff = XFS_ISIZE(ip);

	for (offset = startoff; offset <= endoff; offset = lastoffset + 1) {
		uint lock_mode;

		offset_fsb = XFS_B_TO_FSBT(mp, offset);
		nimap = 1;

		lock_mode = xfs_ilock_data_map_shared(ip);
		error = xfs_bmapi_read(ip, offset_fsb, 1, &imap, &nimap, 0);
		xfs_iunlock(ip, lock_mode);

		if (error || nimap < 1)
			break;
		ASSERT(imap.br_blockcount >= 1);
		ASSERT(imap.br_startoff == offset_fsb);
		ASSERT(imap.br_startblock != DELAYSTARTBLOCK);

		if (imap.br_startblock == HOLESTARTBLOCK ||
		    imap.br_state == XFS_EXT_UNWRITTEN) {
			/* skip the entire extent */
			lastoffset = XFS_FSB_TO_B(mp, imap.br_startoff +
						      imap.br_blockcount) - 1;
			continue;
		}

		lastoffset = XFS_FSB_TO_B(mp, imap.br_startoff + 1) - 1;
		if (lastoffset > endoff)
			lastoffset = endoff;

		/* DAX can just zero the backing device directly */
		if (IS_DAX(VFS_I(ip))) {
			error = dax_zero_page_range(VFS_I(ip), offset,
						    lastoffset - offset + 1,
						    xfs_get_blocks_direct);
			if (error)
				return error;
			continue;
		}

		error = xfs_buf_read_uncached(XFS_IS_REALTIME_INODE(ip) ?
				mp->m_rtdev_targp : mp->m_ddev_targp,
				xfs_fsb_to_db(ip, imap.br_startblock),
				BTOBB(mp->m_sb.sb_blocksize),
				0, &bp, NULL);
		if (error)
			return error;

		memset(bp->b_addr +
				(offset - XFS_FSB_TO_B(mp, imap.br_startoff)),
		       0, lastoffset - offset + 1);

		error = xfs_bwrite(bp);
		xfs_buf_relse(bp);
		if (error)
			return error;
	}
	return error;
}
int
xfs_free_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	int			committed;
	int			done;
	xfs_fileoff_t		endoffset_fsb;
	int			error;
	xfs_fsblock_t		firstfsb;
	xfs_bmap_free_t		free_list;
	xfs_bmbt_irec_t		imap;
	xfs_off_t		ioffset;
	xfs_off_t		iendoffset;
	xfs_extlen_t		mod = 0;
	xfs_mount_t		*mp;
	int			nimap;
	uint			resblks;
	xfs_off_t		rounding;
	int			rt;
	xfs_fileoff_t		startoffset_fsb;
	xfs_trans_t		*tp;

	mp = ip->i_mount;

	trace_xfs_free_file_space(ip);

	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return error;

	error = 0;
	if (len <= 0)	/* if nothing being freed */
		return error;
	rt = XFS_IS_REALTIME_INODE(ip);
	startoffset_fsb	= XFS_B_TO_FSB(mp, offset);
	endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);

	/* wait for the completion of any pending DIOs */
	inode_dio_wait(VFS_I(ip));
	rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE);
	ioffset = round_down(offset, rounding);
	iendoffset = round_up(offset + len, rounding) - 1;
	error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping, ioffset,
					     iendoffset);
	if (error)
		goto out;
	truncate_pagecache_range(VFS_I(ip), ioffset, iendoffset);

	/*
	 * Need to zero the stuff we're not freeing, on disk.
	 * If it's a realtime file & can't use unwritten extents then we
	 * actually need to zero the extent edges.  Otherwise xfs_bunmapi
	 * will take care of it for us.
	 */
	if (rt && !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
		nimap = 1;
		error = xfs_bmapi_read(ip, startoffset_fsb, 1,
					&imap, &nimap, 0);
		if (error)
			goto out;
		ASSERT(nimap == 0 || nimap == 1);
		if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
			xfs_daddr_t	block;

			ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
			block = imap.br_startblock;
			mod = do_div(block, mp->m_sb.sb_rextsize);
			if (mod)
				startoffset_fsb += mp->m_sb.sb_rextsize - mod;
		}
		nimap = 1;
		error = xfs_bmapi_read(ip, endoffset_fsb - 1, 1,
					&imap, &nimap, 0);
		if (error)
			goto out;
		ASSERT(nimap == 0 || nimap == 1);
		if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
			ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
			mod++;
			if (mod && (mod != mp->m_sb.sb_rextsize))
				endoffset_fsb -= mod;
		}
	}
	if ((done = (endoffset_fsb <= startoffset_fsb)))
		/*
		 * One contiguous piece to clear
		 */
		error = xfs_zero_remaining_bytes(ip, offset, offset + len - 1);
	else {
		/*
		 * Some full blocks, possibly two pieces to clear
		 */
		if (offset < XFS_FSB_TO_B(mp, startoffset_fsb))
			error = xfs_zero_remaining_bytes(ip, offset,
				XFS_FSB_TO_B(mp, startoffset_fsb) - 1);
		if (!error &&
		    XFS_FSB_TO_B(mp, endoffset_fsb) < offset + len)
			error = xfs_zero_remaining_bytes(ip,
				XFS_FSB_TO_B(mp, endoffset_fsb),
				offset + len - 1);
	}
	/*
	 * free file space until done or until there is an error
	 */
	resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
	while (!error && !done) {

		/*
		 * allocate and setup the transaction. Allow this
		 * transaction to dip into the reserve blocks to ensure
		 * the freeing of the space succeeds at ENOSPC.
		 */
		tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write, resblks, 0);

		/*
		 * check for running out of space
		 */
		if (error) {
			/*
			 * Free the transaction structure.
			 */
			ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
			xfs_trans_cancel(tp);
			break;
		}
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_trans_reserve_quota(tp, mp,
				ip->i_udquot, ip->i_gdquot, ip->i_pdquot,
				resblks, 0, XFS_QMOPT_RES_REGBLKS);
		if (error)
			goto error1;

		xfs_trans_ijoin(tp, ip, 0);

		/*
		 * issue the bunmapi() call to free the blocks
		 */
		xfs_bmap_init(&free_list, &firstfsb);
		error = xfs_bunmapi(tp, ip, startoffset_fsb,
				  endoffset_fsb - startoffset_fsb,
				  0, 2, &firstfsb, &free_list, &done);
		if (error)
			goto error0;

		/*
		 * complete the transaction
		 */
		error = xfs_bmap_finish(&tp, &free_list, &committed);
		if (error)
			goto error0;

		error = xfs_trans_commit(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	}

 out:
	return error;

 error0:
	xfs_bmap_cancel(&free_list);
 error1:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	goto out;
}
/*
 * Preallocate and zero a range of a file. This mechanism has the allocation
 * semantics of fallocate and in addition converts data in the range to zeroes.
 */
int
xfs_zero_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	uint			blksize;
	int			error;

	trace_xfs_zero_file_space(ip);

	blksize = 1 << mp->m_sb.sb_blocklog;

	/*
	 * Punch a hole and prealloc the range. We use hole punch rather than
	 * unwritten extent conversion for two reasons:
	 *
	 * 1.) Hole punch handles partial block zeroing for us.
	 *
	 * 2.) If prealloc returns ENOSPC, the file range is still zero-valued
	 * by virtue of the hole punch.
	 */
	error = xfs_free_file_space(ip, offset, len);
	if (error)
		goto out;

	error = xfs_alloc_file_space(ip, round_down(offset, blksize),
				     round_up(offset + len, blksize) -
				     round_down(offset, blksize),
				     XFS_BMAPI_PREALLOC);
out:
	return error;
}
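
/*
 * Caller sketch (an assumption about the surrounding kernel code, not shown
 * in this file): xfs_zero_file_space() backs fallocate(FALLOC_FL_ZERO_RANGE)
 * style requests, so a typical invocation looks like
 *
 *	error = xfs_zero_file_space(ip, offset, len);
 *
 * with the IOLOCK held exclusively by the fallocate path.
 */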
/*
 * @next_fsb will keep track of the extent currently undergoing shift.
 * @stop_fsb will keep track of the extent at which we have to stop.
 * If we are shifting left, we will start with block (offset + len) and
 * shift each extent till last extent.
 * If we are shifting right, we will start with last extent inside file space
 * and continue until we reach the block corresponding to offset.
 */
static int
xfs_shift_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len,
	enum shift_direction	direction)
{
	int			done = 0;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;
	struct xfs_bmap_free	free_list;
	xfs_fsblock_t		first_block;
	int			committed;
	xfs_fileoff_t		stop_fsb;
	xfs_fileoff_t		next_fsb;
	xfs_fileoff_t		shift_fsb;

	ASSERT(direction == SHIFT_LEFT || direction == SHIFT_RIGHT);

	if (direction == SHIFT_LEFT) {
		next_fsb = XFS_B_TO_FSB(mp, offset + len);
		stop_fsb = XFS_B_TO_FSB(mp, VFS_I(ip)->i_size);
	} else {
		/*
		 * If right shift, delegate the work of initialization of
		 * next_fsb to xfs_bmap_shift_extent as it has ilock held.
		 */
		next_fsb = NULLFSBLOCK;
		stop_fsb = XFS_B_TO_FSB(mp, offset);
	}

	shift_fsb = XFS_B_TO_FSB(mp, len);
	/*
	 * Trim eofblocks to avoid shifting uninitialized post-eof preallocation
	 * into the accessible region of the file.
	 */
	if (xfs_can_free_eofblocks(ip, true)) {
		error = xfs_free_eofblocks(mp, ip, false);
		if (error)
			return error;
	}

	/*
	 * Writeback and invalidate cache for the remainder of the file as we're
	 * about to shift down every extent from offset to EOF.
	 */
	error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
					     offset, -1);
	if (error)
		return error;
	error = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
					offset >> PAGE_CACHE_SHIFT, -1);
	if (error)
		return error;

	/*
	 * The extent shifting code works on extent granularity. So, if
	 * stop_fsb is not the starting block of extent, we need to split
	 * the extent at stop_fsb.
	 */
	if (direction == SHIFT_RIGHT) {
		error = xfs_bmap_split_extent(ip, stop_fsb);
		if (error)
			return error;
	}
	while (!error && !done) {
		tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
		/*
		 * We would need to reserve permanent block for transaction.
		 * This will come into picture when after shifting extent into
		 * hole we found that adjacent extents can be merged which
		 * may lead to freeing of a block during record update.
		 */
		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
				XFS_DIOSTRAT_SPACE_RES(mp, 0), 0);
		if (error) {
			xfs_trans_cancel(tp);
			break;
		}

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot,
				ip->i_gdquot, ip->i_pdquot,
				XFS_DIOSTRAT_SPACE_RES(mp, 0), 0,
				XFS_QMOPT_RES_REGBLKS);
		if (error)
			goto out_trans_cancel;

		xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

		xfs_bmap_init(&free_list, &first_block);

		/*
		 * We are using the write transaction in which max 2 bmbt
		 * updates are allowed
		 */
		error = xfs_bmap_shift_extents(tp, ip, &next_fsb, shift_fsb,
				&done, stop_fsb, &first_block, &free_list,
				direction, XFS_BMAP_MAX_SHIFT_EXTENTS);
		if (error)
			goto out_bmap_cancel;

		error = xfs_bmap_finish(&tp, &free_list, &committed);
		if (error)
			goto out_bmap_cancel;

		error = xfs_trans_commit(tp);
	}

	return error;

out_bmap_cancel:
	xfs_bmap_cancel(&free_list);
out_trans_cancel:
	xfs_trans_cancel(tp);
	return error;
}
/*
 * xfs_collapse_file_space()
 *	This routine frees disk space and shifts extents for the given file.
 *	The first thing we do is to free data blocks in the specified range
 *	by calling xfs_free_file_space(). It would also sync dirty data
 *	and invalidate page cache over the region on which collapse range
 *	is working, and shift extent records to the left to cover the hole.
 * RETURNS:
 *	0 on success
 *	errno on error
 */
int
xfs_collapse_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	int error;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	trace_xfs_collapse_file_space(ip);

	error = xfs_free_file_space(ip, offset, len);
	if (error)
		return error;

	return xfs_shift_file_space(ip, offset, len, SHIFT_LEFT);
}
/*
 * xfs_insert_file_space()
 *	This routine creates hole space by shifting extents for the given file.
 *	The first thing we do is to sync dirty data and invalidate page cache
 *	over the region on which insert range is working. Then we split an
 *	extent into two extents at the given offset by calling
 *	xfs_bmap_split_extent, and shift all extent records which lie between
 *	[offset, last allocated extent] to the right to reserve the hole range.
 * RETURNS:
 *	0 on success
 *	errno on error
 */
int
xfs_insert_file_space(
	struct xfs_inode	*ip,
	loff_t			offset,
	loff_t			len)
{
	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	trace_xfs_insert_file_space(ip);

	return xfs_shift_file_space(ip, offset, len, SHIFT_RIGHT);
}
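
/*
 * Usage note (an assumption about the callers, which live outside this
 * file): xfs_collapse_file_space() and xfs_insert_file_space() are the
 * backends for the fallocate() FALLOC_FL_COLLAPSE_RANGE and
 * FALLOC_FL_INSERT_RANGE modes respectively; both expect the caller to hold
 * the IOLOCK exclusively, as the ASSERTs above require.
 */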
/*
 * We need to check that the format of the data fork in the temporary inode is
 * valid for the target inode before doing the swap. This is not a problem with
 * attr1 because of the fixed fork offset, but attr2 has a dynamically sized
 * data fork depending on the space the attribute fork is taking so we can get
 * invalid formats on the target inode.
 *
 * E.g. target has space for 7 extents in extent format, temp inode only has
 * space for 6.  If we defragment down to 7 extents, then the tmp format is a
 * btree, but when swapped it needs to be in extent format. Hence we can't just
 * blindly swap data forks on attr2 filesystems.
 *
 * Note that we check the swap in both directions so that we don't end up with
 * a corrupt temporary inode, either.
 *
 * Note that fixing the way xfs_fsr sets up the attribute fork in the source
 * inode will prevent this situation from occurring, so all we do here is
 * reject and log the attempt. Basically we are putting the responsibility on
 * userspace to get this right.
 */
static int
xfs_swap_extents_check_format(
	xfs_inode_t	*ip,	/* target inode */
	xfs_inode_t	*tip)	/* tmp inode */
{
	/* Should never get a local format */
	if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL ||
	    tip->i_d.di_format == XFS_DINODE_FMT_LOCAL)
		return -EINVAL;

	/*
	 * if the target inode has fewer extents than the temporary inode then
	 * why did userspace call us?
	 */
	if (ip->i_d.di_nextents < tip->i_d.di_nextents)
		return -EINVAL;

	/*
	 * if the target inode is in extent form and the temp inode is in btree
	 * form then we will end up with the target inode in the wrong format
	 * as we already know there are less extents in the temp inode.
	 */
	if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
	    tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
		return -EINVAL;

	/* Check temp in extent form to max in target */
	if (tip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
	    XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) >
			XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
		return -EINVAL;

	/* Check target in extent form to max in temp */
	if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
	    XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) >
			XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
		return -EINVAL;

	/*
	 * If we are in a btree format, check that the temp root block will fit
	 * in the target and that it has enough extents to be in btree format
	 * in the target.
	 *
	 * Note that we have to be careful to allow btree->extent conversions
	 * (a common defrag case) which will occur when the temp inode is in
	 * extent format...
	 */
	if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
		if (XFS_IFORK_BOFF(ip) &&
		    XFS_BMAP_BMDR_SPACE(tip->i_df.if_broot) > XFS_IFORK_BOFF(ip))
			return -EINVAL;
		if (XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) <=
		    XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
			return -EINVAL;
	}

	/* Reciprocal target->temp btree format checks */
	if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
		if (XFS_IFORK_BOFF(tip) &&
		    XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > XFS_IFORK_BOFF(tip))
			return -EINVAL;
		if (XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) <=
		    XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
			return -EINVAL;
	}

	return 0;
}
static int
xfs_swap_extent_flush(
	struct xfs_inode	*ip)
{
	int	error;

	error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
	if (error)
		return error;
	truncate_pagecache_range(VFS_I(ip), 0, -1);

	/* Verify O_DIRECT for ftmp */
	if (VFS_I(ip)->i_mapping->nrpages)
		return -EINVAL;
	return 0;
}
int
xfs_swap_extents(
	xfs_inode_t	*ip,	/* target inode */
	xfs_inode_t	*tip,	/* tmp inode */
	xfs_swapext_t	*sxp)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_trans_t	*tp;
	xfs_bstat_t	*sbp = &sxp->sx_stat;
	xfs_ifork_t	*tempifp, *ifp, *tifp;
	int		src_log_flags, target_log_flags;
	int		error = 0;
	int		aforkblks = 0;
	int		taforkblks = 0;
	__uint64_t	tmp;
	int		lock_flags;

	tempifp = kmem_alloc(sizeof(xfs_ifork_t), KM_MAYFAIL);
	if (!tempifp) {
		error = -ENOMEM;
		goto out;
	}

	/*
	 * Lock the inodes against other IO, page faults and truncate to
	 * begin with.  Then we can ensure the inodes are flushed and have no
	 * page cache safely. Once we have done this we can take the ilocks and
	 * do the rest of the checks.
	 */
	lock_flags = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
	xfs_lock_two_inodes(ip, tip, XFS_IOLOCK_EXCL);
	xfs_lock_two_inodes(ip, tip, XFS_MMAPLOCK_EXCL);
	/* Verify that both files have the same format */
	if ((ip->i_d.di_mode & S_IFMT) != (tip->i_d.di_mode & S_IFMT)) {
		error = -EINVAL;
		goto out_unlock;
	}

	/* Verify both files are either real-time or non-realtime */
	if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
		error = -EINVAL;
		goto out_unlock;
	}

	error = xfs_swap_extent_flush(ip);
	if (error)
		goto out_unlock;
	error = xfs_swap_extent_flush(tip);
	if (error)
		goto out_unlock;

	tp = xfs_trans_alloc(mp, XFS_TRANS_SWAPEXT);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ichange, 0, 0);
	if (error) {
		xfs_trans_cancel(tp);
		goto out_unlock;
	}

	/*
	 * Lock and join the inodes to the transaction so that transaction
	 * commit or cancel will unlock the inodes from this point onwards.
	 */
	xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL);
	lock_flags |= XFS_ILOCK_EXCL;
	xfs_trans_ijoin(tp, ip, lock_flags);
	xfs_trans_ijoin(tp, tip, lock_flags);
	/* Verify all data are being swapped */
	if (sxp->sx_offset != 0 ||
	    sxp->sx_length != ip->i_d.di_size ||
	    sxp->sx_length != tip->i_d.di_size) {
		error = -EFAULT;
		goto out_trans_cancel;
	}

	trace_xfs_swap_extent_before(ip, 0);
	trace_xfs_swap_extent_before(tip, 1);

	/* check inode formats now that data is flushed */
	error = xfs_swap_extents_check_format(ip, tip);
	if (error) {
		xfs_notice(mp,
		    "%s: inode 0x%llx format is incompatible for exchanging.",
				__func__, ip->i_ino);
		goto out_trans_cancel;
	}

	/*
	 * Compare the current change & modify times with that
	 * passed in.  If they differ, we abort this swap.
	 * This is the mechanism used to ensure the calling
	 * process that the file was not changed out from
	 * under it.
	 */
	if ((sbp->bs_ctime.tv_sec != VFS_I(ip)->i_ctime.tv_sec) ||
	    (sbp->bs_ctime.tv_nsec != VFS_I(ip)->i_ctime.tv_nsec) ||
	    (sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) ||
	    (sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) {
		error = -EBUSY;
		goto out_trans_cancel;
	}

	/*
	 * Count the number of extended attribute blocks
	 */
	if ( ((XFS_IFORK_Q(ip) != 0) && (ip->i_d.di_anextents > 0)) &&
	     (ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
		error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &aforkblks);
		if (error)
			goto out_trans_cancel;
	}
	if ( ((XFS_IFORK_Q(tip) != 0) && (tip->i_d.di_anextents > 0)) &&
	     (tip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
		error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK,
			&taforkblks);
		if (error)
			goto out_trans_cancel;
	}

	/*
	 * Before we've swapped the forks, lets set the owners of the forks
	 * appropriately. We have to do this as we are demand paging the btree
	 * buffers, and so the validation done on read will expect the owner
	 * field to be correctly set. Once we change the owners, we can swap the
	 * inode forks.
	 *
	 * Note the trickiness in setting the log flags - we set the owner log
	 * flag on the opposite inode (i.e. the inode we are setting the new
	 * owner to be) because once we swap the forks and log that, log
	 * recovery is going to see the fork as owned by the swapped inode,
	 * not the pre-swapped inodes.
	 */
	src_log_flags = XFS_ILOG_CORE;
	target_log_flags = XFS_ILOG_CORE;
	if (ip->i_d.di_version == 3 &&
	    ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
		target_log_flags |= XFS_ILOG_DOWNER;
		error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK,
					      tip->i_ino, NULL);
		if (error)
			goto out_trans_cancel;
	}

	if (tip->i_d.di_version == 3 &&
	    tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
		src_log_flags |= XFS_ILOG_DOWNER;
		error = xfs_bmbt_change_owner(tp, tip, XFS_DATA_FORK,
					      ip->i_ino, NULL);
		if (error)
			goto out_trans_cancel;
	}
	/*
	 * Swap the data forks of the inodes
	 */
	ifp = &ip->i_df;
	tifp = &tip->i_df;
	*tempifp = *ifp;	/* struct copy */
	*ifp = *tifp;		/* struct copy */
	*tifp = *tempifp;	/* struct copy */

	/*
	 * Fix the on-disk inode values
	 */
	tmp = (__uint64_t)ip->i_d.di_nblocks;
	ip->i_d.di_nblocks = tip->i_d.di_nblocks - taforkblks + aforkblks;
	tip->i_d.di_nblocks = tmp + taforkblks - aforkblks;

	tmp = (__uint64_t) ip->i_d.di_nextents;
	ip->i_d.di_nextents = tip->i_d.di_nextents;
	tip->i_d.di_nextents = tmp;

	tmp = (__uint64_t) ip->i_d.di_format;
	ip->i_d.di_format = tip->i_d.di_format;
	tip->i_d.di_format = tmp;

	/*
	 * The extents in the source inode could still contain speculative
	 * preallocation beyond EOF (e.g. the file is open but not modified
	 * while defrag is in progress). In that case, we need to copy over the
	 * number of delalloc blocks the data fork in the source inode is
	 * tracking beyond EOF so that when the fork is truncated away when the
	 * temporary inode is unlinked we don't underrun the i_delayed_blks
	 * counter on that inode.
	 */
	ASSERT(tip->i_delayed_blks == 0);
	tip->i_delayed_blks = ip->i_delayed_blks;
	ip->i_delayed_blks = 0;
	switch (ip->i_d.di_format) {
	case XFS_DINODE_FMT_EXTENTS:
		/* If the extents fit in the inode, fix the
		 * pointer.  Otherwise it's already NULL or
		 * pointing to the extent.
		 */
		if (ip->i_d.di_nextents <= XFS_INLINE_EXTS) {
			ifp->if_u1.if_extents =
				ifp->if_u2.if_inline_ext;
		}
		src_log_flags |= XFS_ILOG_DEXT;
		break;
	case XFS_DINODE_FMT_BTREE:
		ASSERT(ip->i_d.di_version < 3 ||
		       (src_log_flags & XFS_ILOG_DOWNER));
		src_log_flags |= XFS_ILOG_DBROOT;
		break;
	}

	switch (tip->i_d.di_format) {
	case XFS_DINODE_FMT_EXTENTS:
		/* If the extents fit in the inode, fix the
		 * pointer.  Otherwise it's already NULL or
		 * pointing to the extent.
		 */
		if (tip->i_d.di_nextents <= XFS_INLINE_EXTS) {
			tifp->if_u1.if_extents =
				tifp->if_u2.if_inline_ext;
		}
		target_log_flags |= XFS_ILOG_DEXT;
		break;
	case XFS_DINODE_FMT_BTREE:
		target_log_flags |= XFS_ILOG_DBROOT;
		ASSERT(tip->i_d.di_version < 3 ||
		       (target_log_flags & XFS_ILOG_DOWNER));
		break;
	}
, ip
, src_log_flags
);
1890 xfs_trans_log_inode(tp
, tip
, target_log_flags
);
1893 * If this is a synchronous mount, make sure that the
1894 * transaction goes to disk before returning to the user.
1896 if (mp
->m_flags
& XFS_MOUNT_WSYNC
)
1897 xfs_trans_set_sync(tp
);
1899 error
= xfs_trans_commit(tp
);
1901 trace_xfs_swap_extent_after(ip
, 0);
1902 trace_xfs_swap_extent_after(tip
, 1);
1908 xfs_iunlock(ip
, lock_flags
);
1909 xfs_iunlock(tip
, lock_flags
);
1913 xfs_trans_cancel(tp
);