/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2016 Christoph Hellwig.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/iomap.h>
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_trans_space.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_quota.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_reflink.h"

#define XFS_WRITEIO_ALIGN(mp,off)	(((off) >> mp->m_writeio_log) \
						<< mp->m_writeio_log)
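
/*
 * Illustrative example (editor's note, not in the original source): with a
 * hypothetical m_writeio_log of 16 (a 64k write I/O size),
 * XFS_WRITEIO_ALIGN(mp, 0x12345) shifts right and then left by 16 bits,
 * yielding 0x10000, i.e. the offset rounded down to a 64k boundary.
 */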

void
xfs_bmbt_to_iomap(
	struct xfs_inode	*ip,
	struct iomap		*iomap,
	struct xfs_bmbt_irec	*imap)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (imap->br_startblock == HOLESTARTBLOCK) {
		iomap->blkno = IOMAP_NULL_BLOCK;
		iomap->type = IOMAP_HOLE;
	} else if (imap->br_startblock == DELAYSTARTBLOCK) {
		iomap->blkno = IOMAP_NULL_BLOCK;
		iomap->type = IOMAP_DELALLOC;
	} else {
		iomap->blkno = xfs_fsb_to_db(ip, imap->br_startblock);
		if (imap->br_state == XFS_EXT_UNWRITTEN)
			iomap->type = IOMAP_UNWRITTEN;
		else
			iomap->type = IOMAP_MAPPED;
	}
	iomap->offset = XFS_FSB_TO_B(mp, imap->br_startoff);
	iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount);
	iomap->bdev = xfs_find_bdev_for_inode(VFS_I(ip));
}
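
/*
 * Illustrative example (editor's note, not in the original source): a
 * delalloc extent (br_startblock == DELAYSTARTBLOCK) is reported as
 * { blkno = IOMAP_NULL_BLOCK, type = IOMAP_DELALLOC }, while a written
 * real extent gets its disk address from xfs_fsb_to_db() and type
 * IOMAP_MAPPED; offset and length are converted from FSBs to bytes in
 * every case.
 */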

STATIC xfs_extlen_t
xfs_eof_alignment(
	struct xfs_inode	*ip,
	xfs_extlen_t		extsize)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_extlen_t		align = 0;

	if (!XFS_IS_REALTIME_INODE(ip)) {
		/*
		 * Round up the allocation request to a stripe unit
		 * (m_dalign) boundary if the file size is >= stripe unit
		 * size, and we are allocating past the allocation eof.
		 *
		 * If mounted with the "-o swalloc" option the alignment is
		 * increased from the stripe unit size to the stripe width.
		 */
		if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
			align = mp->m_swidth;
		else if (mp->m_dalign)
			align = mp->m_dalign;

		if (align && XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, align))
			align = 0;
	}

	/*
	 * Always round up the allocation request to an extent boundary
	 * (when the file is on a real-time subvolume or has a di_extsize
	 * hint).
	 */
	if (extsize) {
		if (align)
			align = roundup_64(align, extsize);
		else
			align = extsize;
	}

	return align;
}
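
/*
 * Worked example (editor's note with hypothetical values, not in the
 * original source): with m_dalign = 16 FSBs, no swalloc, and a file
 * already at least 16 FSBs long, xfs_eof_alignment(ip, 0) returns 16;
 * adding an extent size hint of 10 FSBs rounds that up to
 * roundup_64(16, 10) = 20 FSBs.
 */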

STATIC int
xfs_iomap_eof_align_last_fsb(
	struct xfs_inode	*ip,
	xfs_extlen_t		extsize,
	xfs_fileoff_t		*last_fsb)
{
	xfs_extlen_t		align = xfs_eof_alignment(ip, extsize);

	if (align) {
		xfs_fileoff_t	new_last_fsb = roundup_64(*last_fsb, align);
		int		eof, error;

		error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof);
		if (error)
			return error;
		if (eof)
			*last_fsb = new_last_fsb;
	}
	return 0;
}

STATIC int
xfs_alert_fsblock_zero(
	xfs_inode_t	*ip,
	xfs_bmbt_irec_t	*imap)
{
	xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
			"Access to block zero in inode %llu "
			"start_block: %llx start_off: %llx "
			"blkcnt: %llx extent-state: %x",
		(unsigned long long)ip->i_ino,
		(unsigned long long)imap->br_startblock,
		(unsigned long long)imap->br_startoff,
		(unsigned long long)imap->br_blockcount,
		imap->br_state);
	return -EFSCORRUPTED;
}

int
xfs_iomap_write_direct(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count,
	xfs_bmbt_irec_t	*imap,
	int		nmaps)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_filblks_t	count_fsb, resaligned;
	xfs_fsblock_t	firstfsb;
	xfs_extlen_t	extsz, temp;
	int		nimaps;
	int		quota_flag;
	int		rt;
	xfs_trans_t	*tp;
	struct xfs_defer_ops dfops;
	uint		qblocks, resblks, resrtextents;
	int		error;
	int		lockmode;
	int		bmapi_flags = XFS_BMAPI_PREALLOC;
	uint		tflags = 0;

	rt = XFS_IS_REALTIME_INODE(ip);
	extsz = xfs_get_extsz_hint(ip);
	lockmode = XFS_ILOCK_SHARED;	/* locked by caller */

	ASSERT(xfs_isilocked(ip, lockmode));

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
	if ((offset + count) > XFS_ISIZE(ip)) {
		/*
		 * Assert that the in-core extent list is present since this can
		 * call xfs_iread_extents() and we only have the ilock shared.
		 * This should be safe because the lock was held around a bmapi
		 * call in the caller and we only need it to access the in-core
		 * list.
		 */
		ASSERT(XFS_IFORK_PTR(ip, XFS_DATA_FORK)->if_flags &
								XFS_IFEXTENTS);
		error = xfs_iomap_eof_align_last_fsb(ip, extsz, &last_fsb);
		if (error)
			goto out_unlock;
	} else {
		if (nmaps && (imap->br_startblock == HOLESTARTBLOCK))
			last_fsb = MIN(last_fsb, (xfs_fileoff_t)
					imap->br_blockcount +
					imap->br_startoff);
	}
	count_fsb = last_fsb - offset_fsb;
	ASSERT(count_fsb > 0);

	resaligned = count_fsb;
	if (unlikely(extsz)) {
		if ((temp = do_mod(offset_fsb, extsz)))
			resaligned += temp;
		if ((temp = do_mod(resaligned, extsz)))
			resaligned += extsz - temp;
	}

	if (unlikely(rt)) {
		resrtextents = qblocks = resaligned;
		resrtextents /= mp->m_sb.sb_rextsize;
		resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
		quota_flag = XFS_QMOPT_RES_RTBLKS;
	} else {
		resrtextents = 0;
		resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
		quota_flag = XFS_QMOPT_RES_REGBLKS;
	}

	/*
	 * Drop the shared lock acquired by the caller, attach the dquot if
	 * necessary and move on to transaction setup.
	 */
	xfs_iunlock(ip, lockmode);
	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return error;

	/*
	 * For DAX, we do not allocate unwritten extents, but instead we zero
	 * the block before we commit the transaction.  Ideally we'd like to do
	 * this outside the transaction context, but if we commit and then crash
	 * we may not have zeroed the blocks and this will be exposed on
	 * recovery of the allocation.  Hence we must zero before commit.
	 *
	 * Further, if we are mapping unwritten extents here, we need to zero
	 * and convert them to written so that we don't need an unwritten extent
	 * callback for DAX.  This also means that we need to be able to dip into
	 * the reserve block pool for bmbt block allocation if there is no space
	 * left but we need to do unwritten extent conversion.
	 */
	if (IS_DAX(VFS_I(ip))) {
		bmapi_flags = XFS_BMAPI_CONVERT | XFS_BMAPI_ZERO;
		if (ISUNWRITTEN(imap)) {
			tflags |= XFS_TRANS_RESERVE;
			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;
		}
	}
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, resrtextents,
			tflags, &tp);
	if (error)
		return error;

	lockmode = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lockmode);

	error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks, 0, quota_flag);
	if (error)
		goto out_trans_cancel;

	xfs_trans_ijoin(tp, ip, 0);

	/*
	 * From this point onwards we overwrite the imap pointer that the
	 * caller gave to us.
	 */
	xfs_defer_init(&dfops, &firstfsb);
	nimaps = 1;
	error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
				bmapi_flags, &firstfsb, resblks, imap,
				&nimaps, &dfops);
	if (error)
		goto out_bmap_cancel;

	/*
	 * Complete the transaction.
	 */
	error = xfs_defer_finish(&tp, &dfops, NULL);
	if (error)
		goto out_bmap_cancel;

	error = xfs_trans_commit(tp);
	if (error)
		goto out_unlock;

	/*
	 * Copy any maps to caller's array and return any error.
	 */
	if (nimaps == 0) {
		error = -ENOSPC;
		goto out_unlock;
	}

	if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip)))
		error = xfs_alert_fsblock_zero(ip, imap);

out_unlock:
	xfs_iunlock(ip, lockmode);
	return error;

out_bmap_cancel:
	xfs_defer_cancel(&dfops);
	xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
out_trans_cancel:
	xfs_trans_cancel(tp);
	goto out_unlock;
}

STATIC bool
xfs_quota_need_throttle(
	struct xfs_inode *ip,
	int type,
	xfs_fsblock_t alloc_blocks)
{
	struct xfs_dquot *dq = xfs_inode_dquot(ip, type);

	if (!dq || !xfs_this_quota_on(ip->i_mount, type))
		return false;

	/* no hi watermark, no throttle */
	if (!dq->q_prealloc_hi_wmark)
		return false;

	/* under the lo watermark, no throttle */
	if (dq->q_res_bcount + alloc_blocks < dq->q_prealloc_lo_wmark)
		return false;

	return true;
}

STATIC void
xfs_quota_calc_throttle(
	struct xfs_inode *ip,
	int type,
	xfs_fsblock_t *qblocks,
	int *qshift,
	int64_t *qfreesp)
{
	int64_t freesp;
	int shift = 0;
	struct xfs_dquot *dq = xfs_inode_dquot(ip, type);

	/* no dq, or over hi wmark, squash the prealloc completely */
	if (!dq || dq->q_res_bcount >= dq->q_prealloc_hi_wmark) {
		*qblocks = 0;
		*qfreesp = 0;
		return;
	}

	freesp = dq->q_prealloc_hi_wmark - dq->q_res_bcount;
	if (freesp < dq->q_low_space[XFS_QLOWSP_5_PCNT]) {
		shift = 2;
		if (freesp < dq->q_low_space[XFS_QLOWSP_3_PCNT])
			shift += 2;
		if (freesp < dq->q_low_space[XFS_QLOWSP_1_PCNT])
			shift += 2;
	}

	if (freesp < *qfreesp)
		*qfreesp = freesp;

	/* only overwrite the throttle values if we are more aggressive */
	if ((freesp >> shift) < (*qblocks >> *qshift)) {
		*qblocks = freesp;
		*qshift = shift;
	}
}
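
/*
 * Worked example (editor's note with hypothetical values, not in the
 * original source): if the hi watermark leaves freesp = 1000 blocks and
 * that is below the 5% and 3% low-space thresholds but not the 1% one,
 * shift becomes 4 and this quota offers at most 1000 >> 4 = 62 blocks of
 * preallocation; it only replaces the caller's values when that is more
 * aggressive than the current (*qblocks >> *qshift).
 */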

/*
 * If we are doing a write at the end of the file and there are no allocations
 * past this one, then extend the allocation out to the file system's write
 * iosize.
 *
 * If we don't have a user specified preallocation size, dynamically increase
 * the preallocation size as the size of the file grows.  Cap the maximum size
 * at a single extent or less if the filesystem is near full.  The closer the
 * filesystem is to full, the smaller the maximum preallocation.
 *
 * As an exception we don't do any preallocation at all if the file is smaller
 * than the minimum preallocation and we are using the default dynamic
 * preallocation scheme, as it is likely this is the only write to the file
 * that is going to be done.
 *
 * We clean up any extra space left over when the file is closed in
 * xfs_inactive().
 */
STATIC xfs_fsblock_t
xfs_iomap_prealloc_size(
	struct xfs_inode	*ip,
	loff_t			offset,
	loff_t			count,
	xfs_extnum_t		idx)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	struct xfs_bmbt_irec	prev;
	int			shift = 0;
	int64_t			freesp;
	xfs_fsblock_t		qblocks;
	int			qshift = 0;
	xfs_fsblock_t		alloc_blocks = 0;

	if (offset + count <= XFS_ISIZE(ip))
		return 0;

	if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) &&
	    (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_writeio_blocks)))
		return 0;

	/*
	 * If an explicit allocsize is set, the file is small, or we
	 * are writing behind a hole, then use the minimum prealloc:
	 */
	if ((mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) ||
	    XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_dalign) ||
	    !xfs_iext_get_extent(ifp, idx - 1, &prev) ||
	    prev.br_startoff + prev.br_blockcount < offset_fsb)
		return mp->m_writeio_blocks;

	/*
	 * Determine the initial size of the preallocation.  We are beyond the
	 * current EOF here, but we need to take into account whether this is
	 * a sparse write or an extending write when determining the
	 * preallocation size.  Hence we need to look up the extent that ends
	 * at the current write offset and use the result to determine the
	 * preallocation size.
	 *
	 * If the extent is a hole, then preallocation is essentially disabled.
	 * Otherwise we take the size of the preceding data extent as the basis
	 * for the preallocation size.  If the size of the extent is greater
	 * than half the maximum extent length, then use the current offset as
	 * the basis.  This ensures that for large files the preallocation size
	 * always extends to MAXEXTLEN rather than falling short due to things
	 * like stripe unit/width alignment of real extents.
	 */
	if (prev.br_blockcount <= (MAXEXTLEN >> 1))
		alloc_blocks = prev.br_blockcount << 1;
	else
		alloc_blocks = XFS_B_TO_FSB(mp, offset);
	if (!alloc_blocks)
		goto check_writeio;
	qblocks = alloc_blocks;

	/*
	 * MAXEXTLEN is not a power of two value but we round the prealloc down
	 * to the nearest power of two value after throttling.  To prevent the
	 * round down from unconditionally reducing the maximum supported
	 * prealloc size, we round up first, apply appropriate throttling,
	 * round down and cap the value to MAXEXTLEN.
	 */
	alloc_blocks = XFS_FILEOFF_MIN(roundup_pow_of_two(MAXEXTLEN),
				       alloc_blocks);

	freesp = percpu_counter_read_positive(&mp->m_fdblocks);
	if (freesp < mp->m_low_space[XFS_LOWSP_5_PCNT]) {
		shift = 2;
		if (freesp < mp->m_low_space[XFS_LOWSP_4_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_3_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_2_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_1_PCNT])
			shift++;
	}

	/*
	 * Check each quota to cap the prealloc size, provide a shift value to
	 * throttle with and adjust amount of available space.
	 */
	if (xfs_quota_need_throttle(ip, XFS_DQ_USER, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_USER, &qblocks, &qshift,
					&freesp);
	if (xfs_quota_need_throttle(ip, XFS_DQ_GROUP, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_GROUP, &qblocks, &qshift,
					&freesp);
	if (xfs_quota_need_throttle(ip, XFS_DQ_PROJ, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_PROJ, &qblocks, &qshift,
					&freesp);

	/*
	 * The final prealloc size is set to the minimum of free space available
	 * in each of the quotas and the overall filesystem.
	 *
	 * The shift throttle value is set to the maximum value as determined by
	 * the global low free space values and per-quota low free space values.
	 */
	alloc_blocks = MIN(alloc_blocks, qblocks);
	shift = MAX(shift, qshift);

	if (shift)
		alloc_blocks >>= shift;
	/*
	 * rounddown_pow_of_two() returns an undefined result if we pass in
	 * alloc_blocks = 0.
	 */
	if (alloc_blocks)
		alloc_blocks = rounddown_pow_of_two(alloc_blocks);
	if (alloc_blocks > MAXEXTLEN)
		alloc_blocks = MAXEXTLEN;

	/*
	 * If we are still trying to allocate more space than is
	 * available, squash the prealloc hard.  This can happen if we
	 * have a large file on a small filesystem and the above
	 * lowspace thresholds are smaller than MAXEXTLEN.
	 */
	while (alloc_blocks && alloc_blocks >= freesp)
		alloc_blocks >>= 4;
check_writeio:
	if (alloc_blocks < mp->m_writeio_blocks)
		alloc_blocks = mp->m_writeio_blocks;
	trace_xfs_iomap_prealloc_size(ip, alloc_blocks, shift,
				      mp->m_writeio_blocks);
	return alloc_blocks;
}
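
/*
 * Worked example (editor's note with hypothetical values, not in the
 * original source): a preceding extent of 8192 FSBs doubles to
 * alloc_blocks = 16384.  If free space is below the 5% and 4% thresholds,
 * shift = 3, so 16384 >> 3 = 2048, and rounddown_pow_of_two(2048) leaves
 * 2048 FSBs of speculative preallocation.
 */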

static int
xfs_file_iomap_begin_delay(
	struct inode		*inode,
	loff_t			offset,
	loff_t			count,
	unsigned		flags,
	struct iomap		*iomap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		maxbytes_fsb =
		XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	xfs_fileoff_t		end_fsb;
	int			error = 0, eof = 0;
	struct xfs_bmbt_irec	got;
	xfs_extnum_t		idx;
	xfs_fsblock_t		prealloc_blocks = 0;

	ASSERT(!XFS_IS_REALTIME_INODE(ip));
	ASSERT(!xfs_get_extsz_hint(ip));

	xfs_ilock(ip, XFS_ILOCK_EXCL);

	if (unlikely(XFS_TEST_ERROR(
	    (XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_EXTENTS &&
	     XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_BTREE),
	     mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
		error = -EFSCORRUPTED;
		goto out_unlock;
	}

	XFS_STATS_INC(mp, xs_blk_mapw);

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
		if (error)
			goto out_unlock;
	}

	eof = !xfs_iext_lookup_extent(ip, ifp, offset_fsb, &idx, &got);
	if (!eof && got.br_startoff <= offset_fsb) {
		if (xfs_is_reflink_inode(ip)) {
			bool		shared;

			end_fsb = min(XFS_B_TO_FSB(mp, offset + count),
					maxbytes_fsb);
			xfs_trim_extent(&got, offset_fsb, end_fsb - offset_fsb);
			error = xfs_reflink_reserve_cow(ip, &got, &shared);
			if (error)
				goto out_unlock;
		}

		trace_xfs_iomap_found(ip, offset, count, 0, &got);
		goto done;
	}

	error = xfs_qm_dqattach_locked(ip, 0);
	if (error)
		goto out_unlock;

	/*
	 * We cap the maximum length we map here to MAX_WRITEBACK_PAGES pages
	 * to keep the chunks of work done here somewhat symmetric with the
	 * work writeback does.  This is a completely arbitrary number pulled
	 * out of thin air as a best guess for initial testing.
	 *
	 * Note that the value needs to be less than 32 bits wide until
	 * the lower level functions are updated.
	 */
	count = min_t(loff_t, count, 1024 * PAGE_SIZE);
	end_fsb = min(XFS_B_TO_FSB(mp, offset + count), maxbytes_fsb);

	if (eof) {
		prealloc_blocks = xfs_iomap_prealloc_size(ip, offset, count, idx);
		if (prealloc_blocks) {
			xfs_extlen_t	align;
			xfs_off_t	end_offset;
			xfs_fileoff_t	p_end_fsb;

			end_offset = XFS_WRITEIO_ALIGN(mp, offset + count - 1);
			p_end_fsb = XFS_B_TO_FSBT(mp, end_offset) +
					prealloc_blocks;

			align = xfs_eof_alignment(ip, 0);
			if (align)
				p_end_fsb = roundup_64(p_end_fsb, align);

			p_end_fsb = min(p_end_fsb, maxbytes_fsb);
			ASSERT(p_end_fsb > offset_fsb);
			prealloc_blocks = p_end_fsb - end_fsb;
		}
	}

retry:
	error = xfs_bmapi_reserve_delalloc(ip, XFS_DATA_FORK, offset_fsb,
			end_fsb - offset_fsb, prealloc_blocks, &got, &idx, eof);
	switch (error) {
	case 0:
		break;
	case -ENOSPC:
	case -EDQUOT:
		/* retry without any preallocation */
		trace_xfs_delalloc_enospc(ip, offset, count);
		if (prealloc_blocks) {
			prealloc_blocks = 0;
			goto retry;
		}
		/*FALLTHRU*/
	default:
		goto out_unlock;
	}

	trace_xfs_iomap_alloc(ip, offset, count, 0, &got);
done:
	if (isnullstartblock(got.br_startblock))
		got.br_startblock = DELAYSTARTBLOCK;

	if (!got.br_startblock) {
		error = xfs_alert_fsblock_zero(ip, &got);
		if (error)
			goto out_unlock;
	}

	xfs_bmbt_to_iomap(ip, iomap, &got);

out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
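
/*
 * Illustrative note (editor's addition, not in the original source): with
 * 4k pages the min_t() cap above limits a single delalloc mapping to
 * 1024 * 4096 bytes = 4 MB per ->iomap_begin call; longer buffered writes
 * simply loop through the iomap machinery again.
 */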

/*
 * Pass in a delayed allocate extent, convert it to real extents;
 * return to the caller the extent we create which maps on top of
 * the originating caller's request.
 *
 * Called without a lock on the inode.
 *
 * We no longer bother to look at the incoming map - all we have to
 * guarantee is that whatever we allocate fills the required range.
 */
int
xfs_iomap_write_allocate(
	xfs_inode_t	*ip,
	int		whichfork,
	xfs_off_t	offset,
	xfs_bmbt_irec_t	*imap)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb, last_block;
	xfs_fileoff_t	end_fsb, map_start_fsb;
	xfs_fsblock_t	first_block;
	struct xfs_defer_ops	dfops;
	xfs_filblks_t	count_fsb;
	xfs_trans_t	*tp;
	int		nimaps;
	int		error = 0;
	int		flags = 0;
	int		nres;

	if (whichfork == XFS_COW_FORK)
		flags |= XFS_BMAPI_COWFORK;

	/*
	 * Make sure that the dquots are there.
	 */
	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return error;

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = imap->br_blockcount;
	map_start_fsb = imap->br_startoff;

	XFS_STATS_ADD(mp, xs_xstrat_bytes, XFS_FSB_TO_B(mp, count_fsb));

	while (count_fsb != 0) {
		/*
		 * Set up a transaction with which to allocate the
		 * backing store for the file.  Do allocations in a
		 * loop until we get some space in the range we are
		 * interested in.  The other space that might be allocated
		 * is in the delayed allocation extent on which we sit
		 * but before our buffer starts.
		 */
		nimaps = 0;
		while (nimaps == 0) {
			nres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
			/*
			 * We have already reserved space for the extent and any
			 * indirect blocks when creating the delalloc extent,
			 * there is no need to reserve space in this transaction
			 * again.
			 */
			error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0,
					0, XFS_TRANS_RESERVE, &tp);
			if (error)
				return error;

			xfs_ilock(ip, XFS_ILOCK_EXCL);
			xfs_trans_ijoin(tp, ip, 0);

			xfs_defer_init(&dfops, &first_block);

			/*
			 * It is possible that the extents have changed since
			 * we did the read call as we dropped the ilock for a
			 * while.  We have to be careful about truncates or hole
			 * punches here - we are not allowed to allocate
			 * non-delalloc blocks here.
			 *
			 * The only protection against truncation is the pages
			 * for the range we are being asked to convert are
			 * locked and hence a truncate will block on them
			 * first.
			 *
			 * As a result, if we go beyond the range we really
			 * need and hit a delalloc extent boundary followed by
			 * a hole while we have excess blocks in the map, we
			 * will fill the hole incorrectly and overrun the
			 * transaction reservation.
			 *
			 * Using a single map prevents this as we are forced to
			 * check each map we look for overlap with the desired
			 * range and abort as soon as we find it.  Also, given
			 * that we only return a single map, having one beyond
			 * what we can return is probably a bit silly.
			 *
			 * We also need to check that we don't go beyond EOF;
			 * this is a truncate optimisation as a truncate sets
			 * the new file size before blocking on the pages we
			 * currently have locked under writeback.  Because they
			 * are about to be tossed, we don't need to write them
			 * back....
			 */
			nimaps = 1;
			end_fsb = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));
			error = xfs_bmap_last_offset(ip, &last_block,
							XFS_DATA_FORK);
			if (error)
				goto trans_cancel;

			last_block = XFS_FILEOFF_MAX(last_block, end_fsb);
			if ((map_start_fsb + count_fsb) > last_block) {
				count_fsb = last_block - map_start_fsb;
				if (count_fsb == 0) {
					error = -EAGAIN;
					goto trans_cancel;
				}
			}

			/*
			 * From this point onwards we overwrite the imap
			 * pointer that the caller gave to us.
			 */
			error = xfs_bmapi_write(tp, ip, map_start_fsb,
						count_fsb, flags, &first_block,
						nres, imap, &nimaps,
						&dfops);
			if (error)
				goto trans_cancel;

			error = xfs_defer_finish(&tp, &dfops, NULL);
			if (error)
				goto trans_cancel;

			error = xfs_trans_commit(tp);
			if (error)
				goto error0;

			xfs_iunlock(ip, XFS_ILOCK_EXCL);
		}

		/*
		 * See if we were able to allocate an extent that
		 * covers at least part of the caller's request.
		 */
		if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip)))
			return xfs_alert_fsblock_zero(ip, imap);

		if ((offset_fsb >= imap->br_startoff) &&
		    (offset_fsb < (imap->br_startoff +
				   imap->br_blockcount))) {
			XFS_STATS_INC(mp, xs_xstrat_quick);
			return 0;
		}

		/*
		 * So far we have not mapped the requested part of the
		 * file, just surrounding data, try again.
		 */
		count_fsb -= imap->br_blockcount;
		map_start_fsb = imap->br_startoff + imap->br_blockcount;
	}

trans_cancel:
	xfs_defer_cancel(&dfops);
	xfs_trans_cancel(tp);
error0:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
int
xfs_iomap_write_unwritten(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	xfs_off_t	count)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_filblks_t	count_fsb;
	xfs_filblks_t	numblks_fsb;
	xfs_fsblock_t	firstfsb;
	int		nimaps;
	xfs_trans_t	*tp;
	xfs_bmbt_irec_t	imap;
	struct xfs_defer_ops dfops;
	xfs_fsize_t	i_size;
	uint		resblks;
	int		error;

	trace_xfs_unwritten_convert(ip, offset, count);

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb);

	/*
	 * Reserve enough blocks in this transaction for two complete extent
	 * btree splits.  We may be converting the middle part of an unwritten
	 * extent and in this case we will insert two new extents in the btree
	 * each of which could cause a full split.
	 *
	 * This reservation amount will be used in the first call to
	 * xfs_bmbt_split() to select an AG with enough space to satisfy the
	 * rest of the operation.
	 */
	resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;

	do {
		/*
		 * Set up a transaction to convert the range of extents
		 * from unwritten to real.  Do allocations in a loop until
		 * we have covered the range passed in.
		 *
		 * Note that we can't risk recursing back into the filesystem
		 * here as we might be asked to write out the same inode that we
		 * complete here and might deadlock on the iolock.
		 */
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0,
				XFS_TRANS_RESERVE | XFS_TRANS_NOFS, &tp);
		if (error)
			return error;

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip, 0);

		/*
		 * Modify the unwritten extent state of the buffer.
		 */
		xfs_defer_init(&dfops, &firstfsb);
		nimaps = 1;
		error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
					XFS_BMAPI_CONVERT, &firstfsb, resblks,
					&imap, &nimaps, &dfops);
		if (error)
			goto error_on_bmapi_transaction;

		/*
		 * Log the updated inode size as we go.  We have to be careful
		 * to only log it up to the actual write offset if it is
		 * halfway into a block.
		 */
		i_size = XFS_FSB_TO_B(mp, offset_fsb + count_fsb);
		if (i_size > offset + count)
			i_size = offset + count;

		i_size = xfs_new_eof(ip, i_size);
		if (i_size) {
			ip->i_d.di_size = i_size;
			xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		}

		error = xfs_defer_finish(&tp, &dfops, NULL);
		if (error)
			goto error_on_bmapi_transaction;

		error = xfs_trans_commit(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			return error;

		if (!(imap.br_startblock || XFS_IS_REALTIME_INODE(ip)))
			return xfs_alert_fsblock_zero(ip, &imap);

		if ((numblks_fsb = imap.br_blockcount) == 0) {
			/*
			 * The numblks_fsb value should always get
			 * smaller, otherwise the loop is stuck.
			 */
			ASSERT(imap.br_blockcount);
			break;
		}
		offset_fsb += numblks_fsb;
		count_fsb -= numblks_fsb;
	} while (count_fsb > 0);

	return 0;

error_on_bmapi_transaction:
	xfs_defer_cancel(&dfops);
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
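
/*
 * Illustrative note (editor's addition, not in the original source):
 * converting the middle of one unwritten extent splits it into three
 * pieces (unwritten head, converted middle, unwritten tail), inserting
 * two new records, which is why the reservation above doubles
 * XFS_DIOSTRAT_SPACE_RES(mp, 0): each insert could force a full btree
 * split.
 */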

static inline bool imap_needs_alloc(struct inode *inode,
		struct xfs_bmbt_irec *imap, int nimaps)
{
	return !nimaps ||
		imap->br_startblock == HOLESTARTBLOCK ||
		imap->br_startblock == DELAYSTARTBLOCK ||
		(IS_DAX(inode) && ISUNWRITTEN(imap));
}

static inline bool need_excl_ilock(struct xfs_inode *ip, unsigned flags)
{
	/*
	 * COW writes will allocate delalloc space, so we need to make sure
	 * to take the lock exclusively here.
	 */
	if (xfs_is_reflink_inode(ip) && (flags & (IOMAP_WRITE | IOMAP_ZERO)))
		return true;
	if ((flags & IOMAP_DIRECT) && (flags & IOMAP_WRITE))
		return true;
	return false;
}

STATIC int
xfs_file_iomap_begin(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	unsigned		flags,
	struct iomap		*iomap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_bmbt_irec	imap;
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			nimaps = 1, error = 0;
	bool			shared = false, trimmed = false;
	unsigned		lockmode;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (((flags & (IOMAP_WRITE | IOMAP_DIRECT)) == IOMAP_WRITE) &&
			!IS_DAX(inode) && !xfs_get_extsz_hint(ip)) {
		/* Reserve delalloc blocks for regular writeback. */
		return xfs_file_iomap_begin_delay(inode, offset, length, flags,
				iomap);
	}

	if (need_excl_ilock(ip, flags)) {
		lockmode = XFS_ILOCK_EXCL;
		xfs_ilock(ip, XFS_ILOCK_EXCL);
	} else {
		lockmode = xfs_ilock_data_map_shared(ip);
	}

	ASSERT(offset <= mp->m_super->s_maxbytes);
	if ((xfs_fsize_t)offset + length > mp->m_super->s_maxbytes)
		length = mp->m_super->s_maxbytes - offset;
	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	end_fsb = XFS_B_TO_FSB(mp, offset + length);

	if (xfs_is_reflink_inode(ip) &&
	    (flags & IOMAP_WRITE) && (flags & IOMAP_DIRECT)) {
		shared = xfs_reflink_find_cow_mapping(ip, offset, &imap);
		if (shared) {
			xfs_iunlock(ip, lockmode);
			goto alloc_done;
		}
		ASSERT(!isnullstartblock(imap.br_startblock));
	}

	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
			       &nimaps, 0);
	if (error)
		goto out_unlock;

	if ((flags & IOMAP_REPORT) ||
	    (xfs_is_reflink_inode(ip) &&
	     (flags & IOMAP_WRITE) && (flags & IOMAP_DIRECT))) {
		/* Trim the mapping to the nearest shared extent boundary. */
		error = xfs_reflink_trim_around_shared(ip, &imap, &shared,
				&trimmed);
		if (error)
			goto out_unlock;

		/*
		 * We're here because we're trying to do a directio write to a
		 * region that isn't aligned to a filesystem block.  If the
		 * extent is shared, fall back to buffered mode to handle the
		 * RMW.
		 */
		if (!(flags & IOMAP_REPORT) && shared) {
			trace_xfs_reflink_bounce_dio_write(ip, &imap);
			error = -EREMCHG;
			goto out_unlock;
		}
	}

	if ((flags & (IOMAP_WRITE | IOMAP_ZERO)) && xfs_is_reflink_inode(ip)) {
		error = xfs_reflink_reserve_cow(ip, &imap, &shared);
		if (error)
			goto out_unlock;

		end_fsb = imap.br_startoff + imap.br_blockcount;
		length = XFS_FSB_TO_B(mp, end_fsb) - offset;
	}

	if ((flags & IOMAP_WRITE) && imap_needs_alloc(inode, &imap, nimaps)) {
		/*
		 * We cap the maximum length we map here to MAX_WRITEBACK_PAGES
		 * pages to keep the chunks of work done here somewhat symmetric
		 * with the work writeback does.  This is a completely arbitrary
		 * number pulled out of thin air as a best guess for initial
		 * testing.
		 *
		 * Note that the value needs to be less than 32 bits wide until
		 * the lower level functions are updated.
		 */
		length = min_t(loff_t, length, 1024 * PAGE_SIZE);
		/*
		 * xfs_iomap_write_direct() expects the shared lock.  It
		 * is unlocked on return.
		 */
		if (lockmode == XFS_ILOCK_EXCL)
			xfs_ilock_demote(ip, lockmode);
		error = xfs_iomap_write_direct(ip, offset, length, &imap,
				nimaps);
		if (error)
			return error;

alloc_done:
		iomap->flags = IOMAP_F_NEW;
		trace_xfs_iomap_alloc(ip, offset, length, 0, &imap);
	} else {
		ASSERT(nimaps);

		xfs_iunlock(ip, lockmode);
		trace_xfs_iomap_found(ip, offset, length, 0, &imap);
	}

	xfs_bmbt_to_iomap(ip, iomap, &imap);
	if (shared)
		iomap->flags |= IOMAP_F_SHARED;
	return error;

out_unlock:
	xfs_iunlock(ip, lockmode);
	return error;
}

static int
xfs_file_iomap_end_delalloc(
	struct xfs_inode	*ip,
	loff_t			offset,
	loff_t			length,
	ssize_t			written)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		start_fsb;
	xfs_fileoff_t		end_fsb;
	int			error = 0;

	start_fsb = XFS_B_TO_FSB(mp, offset + written);
	end_fsb = XFS_B_TO_FSB(mp, offset + length);

	/*
	 * Trim back delalloc blocks if we didn't manage to write the whole
	 * range reserved.
	 *
	 * We don't need to care about racing delalloc as we hold i_mutex
	 * across the reserve/allocate/unreserve calls.  If there are delalloc
	 * blocks in the range, they are ours.
	 */
	if (start_fsb < end_fsb) {
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
					       end_fsb - start_fsb);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);

		if (error && !XFS_FORCED_SHUTDOWN(mp)) {
			xfs_alert(mp, "%s: unable to clean up ino %lld",
				__func__, ip->i_ino);
			return error;
		}
	}

	return 0;
}

static int
xfs_file_iomap_end(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	ssize_t			written,
	unsigned		flags,
	struct iomap		*iomap)
{
	if ((flags & IOMAP_WRITE) && iomap->type == IOMAP_DELALLOC)
		return xfs_file_iomap_end_delalloc(XFS_I(inode), offset,
				length, written);
	return 0;
}

struct iomap_ops xfs_iomap_ops = {
	.iomap_begin		= xfs_file_iomap_begin,
	.iomap_end		= xfs_file_iomap_end,
};

static int
xfs_xattr_iomap_begin(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	unsigned		flags,
	struct iomap		*iomap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + length);
	struct xfs_bmbt_irec	imap;
	int			nimaps = 1, error = 0;
	unsigned		lockmode;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	lockmode = xfs_ilock_data_map_shared(ip);

	/* if there are no attribute fork or extents, return ENOENT */
	if (!XFS_IFORK_Q(ip) || !ip->i_d.di_anextents) {
		error = -ENOENT;
		goto out_unlock;
	}

	ASSERT(ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL);
	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
			       &nimaps, XFS_BMAPI_ENTIRE | XFS_BMAPI_ATTRFORK);
out_unlock:
	xfs_iunlock(ip, lockmode);

	if (!error) {
		ASSERT(nimaps);
		xfs_bmbt_to_iomap(ip, iomap, &imap);
	}

	return error;
}

struct iomap_ops xfs_xattr_iomap_ops = {
	.iomap_begin		= xfs_xattr_iomap_begin,
};