// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2016-2018 Christoph Hellwig.
 * All Rights Reserved.
 */
#include <linux/iomap.h>
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_trans_space.h"
#include "xfs_inode_item.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_quota.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_reflink.h"
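/*
 * Note: XFS_WRITEIO_ALIGN() rounds a byte offset down to the mount's write
 * I/O alignment (1 << mp->m_writeio_log bytes) by shifting the low bits away
 * and back.  It is used below when sizing speculative preallocation.
 */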
#define XFS_WRITEIO_ALIGN(mp,off)	(((off) >> mp->m_writeio_log) \
						<< mp->m_writeio_log)
void
xfs_bmbt_to_iomap(
	struct xfs_inode	*ip,
	struct iomap		*iomap,
	struct xfs_bmbt_irec	*imap)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (imap->br_startblock == HOLESTARTBLOCK) {
		iomap->addr = IOMAP_NULL_ADDR;
		iomap->type = IOMAP_HOLE;
	} else if (imap->br_startblock == DELAYSTARTBLOCK) {
		iomap->addr = IOMAP_NULL_ADDR;
		iomap->type = IOMAP_DELALLOC;
	} else {
		iomap->addr = BBTOB(xfs_fsb_to_db(ip, imap->br_startblock));
		if (imap->br_state == XFS_EXT_UNWRITTEN)
			iomap->type = IOMAP_UNWRITTEN;
		else
			iomap->type = IOMAP_MAPPED;
	}
	iomap->offset = XFS_FSB_TO_B(mp, imap->br_startoff);
	iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount);
	iomap->bdev = xfs_find_bdev_for_inode(VFS_I(ip));
	iomap->dax_dev = xfs_find_daxdev_for_inode(VFS_I(ip));
}
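/*
 * xfs_bmbt_to_iomap() is the bridge from an XFS bmap record to the generic
 * struct iomap consumed by the fs/iomap.c helpers; the ->iomap_begin paths
 * in this file finish by filling the iomap through it (or through
 * xfs_hole_to_iomap() below for pure holes).
 */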
static void
xfs_hole_to_iomap(
	struct xfs_inode	*ip,
	struct iomap		*iomap,
	xfs_fileoff_t		offset_fsb,
	xfs_fileoff_t		end_fsb)
{
	iomap->addr = IOMAP_NULL_ADDR;
	iomap->type = IOMAP_HOLE;
	iomap->offset = XFS_FSB_TO_B(ip->i_mount, offset_fsb);
	iomap->length = XFS_FSB_TO_B(ip->i_mount, end_fsb - offset_fsb);
	iomap->bdev = xfs_find_bdev_for_inode(VFS_I(ip));
	iomap->dax_dev = xfs_find_daxdev_for_inode(VFS_I(ip));
}
static xfs_extlen_t
xfs_eof_alignment(
	struct xfs_inode	*ip,
	xfs_extlen_t		extsize)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_extlen_t		align = 0;

	if (!XFS_IS_REALTIME_INODE(ip)) {
		/*
		 * Round up the allocation request to a stripe unit
		 * (m_dalign) boundary if the file size is >= stripe unit
		 * size, and we are allocating past the allocation eof.
		 *
		 * If mounted with the "-o swalloc" option the alignment is
		 * increased from the stripe unit size to the stripe width.
		 */
		if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
			align = mp->m_swidth;
		else if (mp->m_dalign)
			align = mp->m_dalign;

		if (align && XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, align))
			align = 0;
	}

	/*
	 * Always round up the allocation request to an extent boundary
	 * (when file on a real-time subvolume or has di_extsize hint).
	 */
	if (extsize) {
		if (align)
			align = roundup_64(align, extsize);
		else
			align = extsize;
	}

	return align;
}
STATIC int
xfs_iomap_eof_align_last_fsb(
	struct xfs_inode	*ip,
	xfs_extlen_t		extsize,
	xfs_fileoff_t		*last_fsb)
{
	xfs_extlen_t		align = xfs_eof_alignment(ip, extsize);

	if (align) {
		xfs_fileoff_t	new_last_fsb = roundup_64(*last_fsb, align);
		int		eof, error;

		error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof);
		if (error)
			return error;
		if (eof)
			*last_fsb = new_last_fsb;
	}
	return 0;
}
STATIC int
xfs_alert_fsblock_zero(
	xfs_inode_t	*ip,
	xfs_bmbt_irec_t	*imap)
{
	xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
			"Access to block zero in inode %llu "
			"start_block: %llx start_off: %llx "
			"blkcnt: %llx extent-state: %x",
		(unsigned long long)ip->i_ino,
		(unsigned long long)imap->br_startblock,
		(unsigned long long)imap->br_startoff,
		(unsigned long long)imap->br_blockcount,
		imap->br_state);
	return -EFSCORRUPTED;
}
int
xfs_iomap_write_direct(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count,
	xfs_bmbt_irec_t	*imap,
	int		nmaps)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_filblks_t	count_fsb, resaligned;
	xfs_extlen_t	extsz;
	int		nimaps;
	int		quota_flag;
	int		rt;
	xfs_trans_t	*tp;
	uint		qblocks, resblks, resrtextents;
	int		error;
	int		lockmode;
	int		bmapi_flags = XFS_BMAPI_PREALLOC;
	uint		tflags = 0;

	rt = XFS_IS_REALTIME_INODE(ip);
	extsz = xfs_get_extsz_hint(ip);
	lockmode = XFS_ILOCK_SHARED;	/* locked by caller */

	ASSERT(xfs_isilocked(ip, lockmode));

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
	if ((offset + count) > XFS_ISIZE(ip)) {
		/*
		 * Assert that the in-core extent list is present since this can
		 * call xfs_iread_extents() and we only have the ilock shared.
		 * This should be safe because the lock was held around a bmapi
		 * call in the caller and we only need it to access the in-core
		 * list.
		 */
		ASSERT(XFS_IFORK_PTR(ip, XFS_DATA_FORK)->if_flags &
								XFS_IFEXTENTS);
		error = xfs_iomap_eof_align_last_fsb(ip, extsz, &last_fsb);
		if (error)
			goto out_unlock;
	} else {
		if (nmaps && (imap->br_startblock == HOLESTARTBLOCK))
			last_fsb = min(last_fsb, (xfs_fileoff_t)
					imap->br_blockcount +
					imap->br_startoff);
	}
	count_fsb = last_fsb - offset_fsb;
	ASSERT(count_fsb > 0);
	resaligned = xfs_aligned_fsb_count(offset_fsb, count_fsb, extsz);

	if (unlikely(rt)) {
		resrtextents = qblocks = resaligned;
		resrtextents /= mp->m_sb.sb_rextsize;
		resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
		quota_flag = XFS_QMOPT_RES_RTBLKS;
	} else {
		resrtextents = 0;
		resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
		quota_flag = XFS_QMOPT_RES_REGBLKS;
	}

	/*
	 * Drop the shared lock acquired by the caller, attach the dquot if
	 * necessary and move on to transaction setup.
	 */
	xfs_iunlock(ip, lockmode);
	error = xfs_qm_dqattach(ip);
	if (error)
		return error;

	/*
	 * For DAX, we do not allocate unwritten extents, but instead we zero
	 * the block before we commit the transaction.  Ideally we'd like to do
	 * this outside the transaction context, but if we commit and then crash
	 * we may not have zeroed the blocks and this will be exposed on
	 * recovery of the allocation. Hence we must zero before commit.
	 *
	 * Further, if we are mapping unwritten extents here, we need to zero
	 * and convert them to written so that we don't need an unwritten extent
	 * callback for DAX. This also means that we need to be able to dip into
	 * the reserve block pool for bmbt block allocation if there is no space
	 * left but we need to do unwritten extent conversion.
	 */
	if (IS_DAX(VFS_I(ip))) {
		bmapi_flags = XFS_BMAPI_CONVERT | XFS_BMAPI_ZERO;
		if (imap->br_state == XFS_EXT_UNWRITTEN) {
			tflags |= XFS_TRANS_RESERVE;
			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;
		}
	}
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, resrtextents,
			tflags, &tp);
	if (error)
		return error;

	lockmode = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lockmode);

	error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks, 0, quota_flag);
	if (error)
		goto out_trans_cancel;

	xfs_trans_ijoin(tp, ip, 0);

	/*
	 * From this point onwards we overwrite the imap pointer that the
	 * caller gave to us.
	 */
	nimaps = 1;
	error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
				bmapi_flags, resblks, imap, &nimaps);
	if (error)
		goto out_res_cancel;

	/*
	 * Complete the transaction
	 */
	error = xfs_trans_commit(tp);
	if (error)
		goto out_unlock;

	/*
	 * Copy any maps to caller's array and return any error.
	 */
	if (nimaps == 0) {
		error = -ENOSPC;
		goto out_unlock;
	}

	if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip)))
		error = xfs_alert_fsblock_zero(ip, imap);

out_unlock:
	xfs_iunlock(ip, lockmode);
	return error;

out_res_cancel:
	xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
out_trans_cancel:
	xfs_trans_cancel(tp);
	goto out_unlock;
}
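/*
 * Locking note for xfs_iomap_write_direct() above: it is entered with the
 * ILOCK held shared by the caller, drops that lock to attach dquots and
 * allocate the transaction, re-takes the ILOCK exclusively for the bmap
 * work, and always exits with the inode unlocked.
 */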
STATIC bool
xfs_quota_need_throttle(
	struct xfs_inode *ip,
	int type,
	xfs_fsblock_t alloc_blocks)
{
	struct xfs_dquot *dq = xfs_inode_dquot(ip, type);

	if (!dq || !xfs_this_quota_on(ip->i_mount, type))
		return false;

	/* no hi watermark, no throttle */
	if (!dq->q_prealloc_hi_wmark)
		return false;

	/* under the lo watermark, no throttle */
	if (dq->q_res_bcount + alloc_blocks < dq->q_prealloc_lo_wmark)
		return false;

	return true;
}
STATIC void
xfs_quota_calc_throttle(
	struct xfs_inode *ip,
	int type,
	xfs_fsblock_t *qblocks,
	int *qshift,
	int64_t	*qfreesp)
{
	int64_t freesp;
	int shift = 0;
	struct xfs_dquot *dq = xfs_inode_dquot(ip, type);

	/* no dq, or over hi wmark, squash the prealloc completely */
	if (!dq || dq->q_res_bcount >= dq->q_prealloc_hi_wmark) {
		*qblocks = 0;
		*qfreesp = 0;
		return;
	}

	freesp = dq->q_prealloc_hi_wmark - dq->q_res_bcount;
	if (freesp < dq->q_low_space[XFS_QLOWSP_5_PCNT]) {
		shift = 2;
		if (freesp < dq->q_low_space[XFS_QLOWSP_3_PCNT])
			shift += 2;
		if (freesp < dq->q_low_space[XFS_QLOWSP_1_PCNT])
			shift += 2;
	}

	if (freesp < *qfreesp)
		*qfreesp = freesp;

	/* only overwrite the throttle values if we are more aggressive */
	if ((freesp >> shift) < (*qblocks >> *qshift)) {
		*qblocks = freesp;
		*qshift = shift;
	}
}
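/*
 * The quota throttle above is purely a right shift of the requested
 * preallocation: once the quota's reserved usage crosses the 5% low-space
 * threshold the prealloc is quartered (shift 2), and each further threshold
 * (3%, 1%) adds another factor of four, so at worst it is divided by 64.
 */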
/*
 * If we are doing a write at the end of the file and there are no allocations
 * past this one, then extend the allocation out to the file system's write
 * io size.
 *
 * If we don't have a user specified preallocation size, dynamically increase
 * the preallocation size as the size of the file grows.  Cap the maximum size
 * at a single extent or less if the filesystem is near full. The closer the
 * filesystem is to full, the smaller the maximum preallocation.
 *
 * As an exception we don't do any preallocation at all if the file is smaller
 * than the minimum preallocation and we are using the default dynamic
 * preallocation scheme, as it is likely this is the only write to the file
 * that is going to be done.
 *
 * We clean up any extra space left over when the file is closed in
 * xfs_inactive().
 */
static xfs_fsblock_t
xfs_iomap_prealloc_size(
	struct xfs_inode	*ip,
	loff_t			offset,
	loff_t			count,
	struct xfs_iext_cursor	*icur)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	struct xfs_bmbt_irec	prev;
	int			shift = 0;
	int64_t			freesp;
	xfs_fsblock_t		qblocks;
	int			qshift = 0;
	xfs_fsblock_t		alloc_blocks = 0;

	if (offset + count <= XFS_ISIZE(ip))
		return 0;

	if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) &&
	    (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_writeio_blocks)))
		return 0;

	/*
	 * If an explicit allocsize is set, the file is small, or we
	 * are writing behind a hole, then use the minimum prealloc:
	 */
	if ((mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) ||
	    XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_dalign) ||
	    !xfs_iext_peek_prev_extent(ifp, icur, &prev) ||
	    prev.br_startoff + prev.br_blockcount < offset_fsb)
		return mp->m_writeio_blocks;

	/*
	 * Determine the initial size of the preallocation. We are beyond the
	 * current EOF here, but we need to take into account whether this is
	 * a sparse write or an extending write when determining the
	 * preallocation size.  Hence we need to look up the extent that ends
	 * at the current write offset and use the result to determine the
	 * preallocation size.
	 *
	 * If the extent is a hole, then preallocation is essentially disabled.
	 * Otherwise we take the size of the preceding data extent as the basis
	 * for the preallocation size. If the size of the extent is greater than
	 * half the maximum extent length, then use the current offset as the
	 * basis. This ensures that for large files the preallocation size
	 * always extends to MAXEXTLEN rather than falling short due to things
	 * like stripe unit/width alignment of real extents.
	 */
	if (prev.br_blockcount <= (MAXEXTLEN >> 1))
		alloc_blocks = prev.br_blockcount << 1;
	else
		alloc_blocks = XFS_B_TO_FSB(mp, offset);
	if (!alloc_blocks)
		goto check_writeio;
	qblocks = alloc_blocks;

	/*
	 * MAXEXTLEN is not a power of two value but we round the prealloc down
	 * to the nearest power of two value after throttling. To prevent the
	 * round down from unconditionally reducing the maximum supported
	 * prealloc size, we round up first, apply appropriate throttling,
	 * round down and cap the value to MAXEXTLEN.
	 */
	alloc_blocks = XFS_FILEOFF_MIN(roundup_pow_of_two(MAXEXTLEN),
				       alloc_blocks);

	freesp = percpu_counter_read_positive(&mp->m_fdblocks);
	if (freesp < mp->m_low_space[XFS_LOWSP_5_PCNT]) {
		shift = 2;
		if (freesp < mp->m_low_space[XFS_LOWSP_4_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_3_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_2_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_1_PCNT])
			shift++;
	}

	/*
	 * Check each quota to cap the prealloc size, provide a shift value to
	 * throttle with and adjust amount of available space.
	 */
	if (xfs_quota_need_throttle(ip, XFS_DQ_USER, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_USER, &qblocks, &qshift,
					&freesp);
	if (xfs_quota_need_throttle(ip, XFS_DQ_GROUP, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_GROUP, &qblocks, &qshift,
					&freesp);
	if (xfs_quota_need_throttle(ip, XFS_DQ_PROJ, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQ_PROJ, &qblocks, &qshift,
					&freesp);

	/*
	 * The final prealloc size is set to the minimum of free space available
	 * in each of the quotas and the overall filesystem.
	 *
	 * The shift throttle value is set to the maximum value as determined by
	 * the global low free space values and per-quota low free space values.
	 */
	alloc_blocks = min(alloc_blocks, qblocks);
	shift = max(shift, qshift);

	if (shift)
		alloc_blocks >>= shift;
	/*
	 * rounddown_pow_of_two() returns an undefined result if we pass in
	 * alloc_blocks = 0.
	 */
	if (alloc_blocks)
		alloc_blocks = rounddown_pow_of_two(alloc_blocks);
	if (alloc_blocks > MAXEXTLEN)
		alloc_blocks = MAXEXTLEN;

	/*
	 * If we are still trying to allocate more space than is
	 * available, squash the prealloc hard. This can happen if we
	 * have a large file on a small filesystem and the above
	 * lowspace thresholds are smaller than MAXEXTLEN.
	 */
	while (alloc_blocks && alloc_blocks >= freesp)
		alloc_blocks >>= 4;
check_writeio:
	if (alloc_blocks < mp->m_writeio_blocks)
		alloc_blocks = mp->m_writeio_blocks;
	trace_xfs_iomap_prealloc_size(ip, alloc_blocks, shift,
				      mp->m_writeio_blocks);
	return alloc_blocks;
}
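/*
 * Example of the sizing heuristic above: with a 1024-block extent already
 * sitting at EOF, the next extending write asks for a 2048-block prealloc;
 * a filesystem below the 5% free-space threshold then shifts that down (here
 * by at least two bits, to 512 blocks) before rounding to a power of two and
 * clamping between m_writeio_blocks and MAXEXTLEN.
 */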
static int
xfs_file_iomap_begin_delay(
	struct inode		*inode,
	loff_t			offset,
	loff_t			count,
	unsigned		flags,
	struct iomap		*iomap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		maxbytes_fsb =
		XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	xfs_fileoff_t		end_fsb;
	int			error = 0, eof = 0;
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;
	xfs_fsblock_t		prealloc_blocks = 0;

	ASSERT(!XFS_IS_REALTIME_INODE(ip));
	ASSERT(!xfs_get_extsz_hint(ip));

	xfs_ilock(ip, XFS_ILOCK_EXCL);

	if (unlikely(XFS_TEST_ERROR(
	    (XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_EXTENTS &&
	     XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_BTREE),
	     mp, XFS_ERRTAG_BMAPIFORMAT))) {
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
		error = -EFSCORRUPTED;
		goto out_unlock;
	}

	XFS_STATS_INC(mp, xs_blk_mapw);

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
		if (error)
			goto out_unlock;
	}

	end_fsb = min(XFS_B_TO_FSB(mp, offset + count), maxbytes_fsb);

	eof = !xfs_iext_lookup_extent(ip, ifp, offset_fsb, &icur, &got);
	if (eof)
		got.br_startoff = end_fsb; /* fake hole until the end */

	if (got.br_startoff <= offset_fsb) {
		/*
		 * For reflink files we may need a delalloc reservation when
		 * overwriting shared extents.  This includes zeroing of
		 * existing extents that contain data.
		 */
		if (xfs_is_reflink_inode(ip) &&
		    ((flags & IOMAP_WRITE) ||
		     got.br_state != XFS_EXT_UNWRITTEN)) {
			xfs_trim_extent(&got, offset_fsb, end_fsb - offset_fsb);
			error = xfs_reflink_reserve_cow(ip, &got);
			if (error)
				goto out_unlock;
		}

		trace_xfs_iomap_found(ip, offset, count, 0, &got);
		goto done;
	}

	if (flags & IOMAP_ZERO) {
		xfs_hole_to_iomap(ip, iomap, offset_fsb, got.br_startoff);
		goto out_unlock;
	}

	error = xfs_qm_dqattach_locked(ip, false);
	if (error)
		goto out_unlock;

	/*
	 * We cap the maximum length we map here to MAX_WRITEBACK_PAGES pages
	 * to keep the chunks of work done here somewhat symmetric with the
	 * work writeback does. This is a completely arbitrary number pulled
	 * out of thin air as a best guess for initial testing.
	 *
	 * Note that the values need to be less than 32-bits wide until
	 * the lower level functions are updated.
	 */
	count = min_t(loff_t, count, 1024 * PAGE_SIZE);
	end_fsb = min(XFS_B_TO_FSB(mp, offset + count), maxbytes_fsb);

	if (eof) {
		prealloc_blocks = xfs_iomap_prealloc_size(ip, offset, count,
				&icur);
		if (prealloc_blocks) {
			xfs_extlen_t	align;
			xfs_off_t	end_offset;
			xfs_fileoff_t	p_end_fsb;

			end_offset = XFS_WRITEIO_ALIGN(mp, offset + count - 1);
			p_end_fsb = XFS_B_TO_FSBT(mp, end_offset) +
					prealloc_blocks;

			align = xfs_eof_alignment(ip, 0);
			if (align)
				p_end_fsb = roundup_64(p_end_fsb, align);

			p_end_fsb = min(p_end_fsb, maxbytes_fsb);
			ASSERT(p_end_fsb > offset_fsb);
			prealloc_blocks = p_end_fsb - end_fsb;
		}
	}

retry:
	error = xfs_bmapi_reserve_delalloc(ip, XFS_DATA_FORK, offset_fsb,
			end_fsb - offset_fsb, prealloc_blocks, &got, &icur,
			eof);
	switch (error) {
	case 0:
		break;
	case -ENOSPC:
	case -EDQUOT:
		/* retry without any preallocation */
		trace_xfs_delalloc_enospc(ip, offset, count);
		if (prealloc_blocks) {
			prealloc_blocks = 0;
			goto retry;
		}
		/*FALLTHRU*/
	default:
		goto out_unlock;
	}

	/*
	 * Flag newly allocated delalloc blocks with IOMAP_F_NEW so we punch
	 * them out if the write happens to fail.
	 */
	iomap->flags |= IOMAP_F_NEW;
	trace_xfs_iomap_alloc(ip, offset, count, 0, &got);
done:
	if (isnullstartblock(got.br_startblock))
		got.br_startblock = DELAYSTARTBLOCK;

	if (!got.br_startblock) {
		error = xfs_alert_fsblock_zero(ip, &got);
		if (error)
			goto out_unlock;
	}

	xfs_bmbt_to_iomap(ip, iomap, &got);

out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
/*
 * Pass in a delayed allocate extent, convert it to real extents;
 * return to the caller the extent we create which maps on top of
 * the originating caller's request.
 *
 * Called without a lock on the inode.
 *
 * We no longer bother to look at the incoming map - all we have to
 * guarantee is that whatever we allocate fills the required range.
 */
int
xfs_iomap_write_allocate(
	xfs_inode_t	*ip,
	int		whichfork,
	xfs_off_t	offset,
	xfs_bmbt_irec_t	*imap,
	unsigned int	*cow_seq)
{
	xfs_mount_t	*mp = ip->i_mount;
	struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
	xfs_fileoff_t	offset_fsb, last_block;
	xfs_fileoff_t	end_fsb, map_start_fsb;
	xfs_filblks_t	count_fsb;
	xfs_trans_t	*tp;
	int		nimaps;
	int		error = 0;
	int		flags = XFS_BMAPI_DELALLOC;
	int		nres;

	if (whichfork == XFS_COW_FORK)
		flags |= XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC;

	/*
	 * Make sure that the dquots are there.
	 */
	error = xfs_qm_dqattach(ip);
	if (error)
		return error;

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = imap->br_blockcount;
	map_start_fsb = imap->br_startoff;

	XFS_STATS_ADD(mp, xs_xstrat_bytes, XFS_FSB_TO_B(mp, count_fsb));

	while (count_fsb != 0) {
		/*
		 * Set up a transaction with which to allocate the
		 * backing store for the file.  Do allocations in a
		 * loop until we get some space in the range we are
		 * interested in.  The other space that might be allocated
		 * is in the delayed allocation extent on which we sit
		 * but before our buffer starts.
		 */
		nimaps = 0;
		while (nimaps == 0) {
			nres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
			/*
			 * We have already reserved space for the extent and any
			 * indirect blocks when creating the delalloc extent,
			 * there is no need to reserve space in this transaction
			 * again.
			 */
			error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0,
					0, XFS_TRANS_RESERVE, &tp);
			if (error)
				return error;

			xfs_ilock(ip, XFS_ILOCK_EXCL);
			xfs_trans_ijoin(tp, ip, 0);

			/*
			 * It is possible that the extents have changed since
			 * we did the read call as we dropped the ilock for a
			 * while. We have to be careful about truncates or hole
			 * punches here - we are not allowed to allocate
			 * non-delalloc blocks here.
			 *
			 * The only protection against truncation is the pages
			 * for the range we are being asked to convert are
			 * locked and hence a truncate will block on them
			 * first.
			 *
			 * As a result, if we go beyond the range we really
			 * need and hit a delalloc extent boundary followed by
			 * a hole while we have excess blocks in the map, we
			 * will fill the hole incorrectly and overrun the
			 * transaction reservation.
			 *
			 * Using a single map prevents this as we are forced to
			 * check each map we look for overlap with the desired
			 * range and abort as soon as we find it. Also, given
			 * that we only return a single map, having one beyond
			 * what we can return is probably a bit silly.
			 *
			 * We also need to check that we don't go beyond EOF;
			 * this is a truncate optimisation as a truncate sets
			 * the new file size before blocking on the pages we
			 * currently have locked under writeback. Because they
			 * are about to be tossed, we don't need to write them
			 * back....
			 */
			nimaps = 1;
			end_fsb = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));
			error = xfs_bmap_last_offset(ip, &last_block,
							XFS_DATA_FORK);
			if (error)
				goto trans_cancel;

			last_block = XFS_FILEOFF_MAX(last_block, end_fsb);
			if ((map_start_fsb + count_fsb) > last_block) {
				count_fsb = last_block - map_start_fsb;
				if (count_fsb == 0) {
					error = -EAGAIN;
					goto trans_cancel;
				}
			}

			/*
			 * From this point onwards we overwrite the imap
			 * pointer that the caller gave to us.
			 */
			error = xfs_bmapi_write(tp, ip, map_start_fsb,
						count_fsb, flags, nres, imap,
						&nimaps);
			if (error)
				goto trans_cancel;

			error = xfs_trans_commit(tp);
			if (error)
				goto error0;

			if (whichfork == XFS_COW_FORK)
				*cow_seq = READ_ONCE(ifp->if_seq);
			xfs_iunlock(ip, XFS_ILOCK_EXCL);
		}

		/*
		 * See if we were able to allocate an extent that
		 * covers at least part of the callers request
		 */
		if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip)))
			return xfs_alert_fsblock_zero(ip, imap);

		if ((offset_fsb >= imap->br_startoff) &&
		    (offset_fsb < (imap->br_startoff +
				   imap->br_blockcount))) {
			XFS_STATS_INC(mp, xs_xstrat_quick);
			return 0;
		}

		/*
		 * So far we have not mapped the requested part of the
		 * file, just surrounding data, try again.
		 */
		count_fsb -= imap->br_blockcount;
		map_start_fsb = imap->br_startoff + imap->br_blockcount;
	}

trans_cancel:
	xfs_trans_cancel(tp);
error0:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
int
xfs_iomap_write_unwritten(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	xfs_off_t	count,
	bool		update_isize)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_filblks_t	count_fsb;
	xfs_filblks_t	numblks_fsb;
	int		nimaps;
	xfs_trans_t	*tp;
	xfs_bmbt_irec_t	imap;
	struct inode	*inode = VFS_I(ip);
	xfs_fsize_t	i_size;
	uint		resblks;
	int		error;

	trace_xfs_unwritten_convert(ip, offset, count);

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb);

	/*
	 * Reserve enough blocks in this transaction for two complete extent
	 * btree splits.  We may be converting the middle part of an unwritten
	 * extent and in this case we will insert two new extents in the btree
	 * each of which could cause a full split.
	 *
	 * This reservation amount will be used in the first call to
	 * xfs_bmbt_split() to select an AG with enough space to satisfy the
	 * rest of the operation.
	 */
	resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;

	do {
		/*
		 * Set up a transaction to convert the range of extents
		 * from unwritten to real. Do allocations in a loop until
		 * we have covered the range passed in.
		 *
		 * Note that we can't risk recursing back into the filesystem
		 * here as we might be asked to write out the same inode that we
		 * complete here and might deadlock on the iolock.
		 */
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0,
				XFS_TRANS_RESERVE | XFS_TRANS_NOFS, &tp);
		if (error)
			return error;

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip, 0);

		/*
		 * Modify the unwritten extent state of the buffer.
		 */
		nimaps = 1;
		error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
					XFS_BMAPI_CONVERT, resblks, &imap,
					&nimaps);
		if (error)
			goto error_on_bmapi_transaction;

		/*
		 * Log the updated inode size as we go.  We have to be careful
		 * to only log it up to the actual write offset if it is
		 * halfway into a block.
		 */
		i_size = XFS_FSB_TO_B(mp, offset_fsb + count_fsb);
		if (i_size > offset + count)
			i_size = offset + count;
		if (update_isize && i_size > i_size_read(inode))
			i_size_write(inode, i_size);
		i_size = xfs_new_eof(ip, i_size);
		if (i_size) {
			ip->i_d.di_size = i_size;
			xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		}

		error = xfs_trans_commit(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			return error;

		if (!(imap.br_startblock || XFS_IS_REALTIME_INODE(ip)))
			return xfs_alert_fsblock_zero(ip, &imap);

		if ((numblks_fsb = imap.br_blockcount) == 0) {
			/*
			 * The numblks_fsb value should always get
			 * smaller, otherwise the loop is stuck.
			 */
			ASSERT(imap.br_blockcount);
			break;
		}
		offset_fsb += numblks_fsb;
		count_fsb -= numblks_fsb;
	} while (count_fsb > 0);

	return 0;

error_on_bmapi_transaction:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
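/*
 * The do/while loop in xfs_iomap_write_unwritten() above converts the range
 * one mapping at a time: each pass commits its own transaction and then
 * advances offset_fsb/count_fsb by the blocks actually converted, so partial
 * progress is preserved even if a later iteration fails.
 */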
static inline bool
imap_needs_alloc(
	struct inode		*inode,
	struct xfs_bmbt_irec	*imap,
	int			nimaps)
{
	return !nimaps ||
		imap->br_startblock == HOLESTARTBLOCK ||
		imap->br_startblock == DELAYSTARTBLOCK ||
		(IS_DAX(inode) && imap->br_state == XFS_EXT_UNWRITTEN);
}

static inline bool
needs_cow_for_zeroing(
	struct xfs_bmbt_irec	*imap,
	int			nimaps)
{
	return nimaps &&
		imap->br_startblock != HOLESTARTBLOCK &&
		imap->br_state != XFS_EXT_UNWRITTEN;
}
static int
xfs_ilock_for_iomap(
	struct xfs_inode	*ip,
	unsigned		flags,
	unsigned		*lockmode)
{
	unsigned		mode = XFS_ILOCK_SHARED;
	bool			is_write = flags & (IOMAP_WRITE | IOMAP_ZERO);

	/*
	 * COW writes may allocate delalloc space or convert unwritten COW
	 * extents, so we need to make sure to take the lock exclusively here.
	 */
	if (xfs_is_reflink_inode(ip) && is_write) {
		/*
		 * FIXME: It could still overwrite on unshared extents and not
		 * need allocation.
		 */
		if (flags & IOMAP_NOWAIT)
			return -EAGAIN;
		mode = XFS_ILOCK_EXCL;
	}

	/*
	 * Extents not yet cached require exclusive access, don't block. This
	 * is an opencoded xfs_ilock_data_map_shared() call but with
	 * non-blocking behaviour.
	 */
	if (!(ip->i_df.if_flags & XFS_IFEXTENTS)) {
		if (flags & IOMAP_NOWAIT)
			return -EAGAIN;
		mode = XFS_ILOCK_EXCL;
	}

relock:
	if (flags & IOMAP_NOWAIT) {
		if (!xfs_ilock_nowait(ip, mode))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, mode);
	}

	/*
	 * The reflink iflag could have changed since the earlier unlocked
	 * check, so if we got ILOCK_SHARED for a write but we're now a
	 * reflink inode we have to switch to ILOCK_EXCL and relock.
	 */
	if (mode == XFS_ILOCK_SHARED && is_write && xfs_is_reflink_inode(ip)) {
		xfs_iunlock(ip, mode);
		mode = XFS_ILOCK_EXCL;
		goto relock;
	}

	*lockmode = mode;
	return 0;
}
static int
xfs_file_iomap_begin(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	unsigned		flags,
	struct iomap		*iomap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_bmbt_irec	imap;
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			nimaps = 1, error = 0;
	bool			shared = false;
	unsigned		lockmode;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if ((flags & (IOMAP_WRITE | IOMAP_ZERO)) && !(flags & IOMAP_DIRECT) &&
			!IS_DAX(inode) && !xfs_get_extsz_hint(ip)) {
		/* Reserve delalloc blocks for regular writeback. */
		return xfs_file_iomap_begin_delay(inode, offset, length, flags,
				iomap);
	}

	/*
	 * Lock the inode in the manner required for the specified operation and
	 * check for as many conditions that would result in blocking as
	 * possible. This removes most of the non-blocking checks from the
	 * mapping code below.
	 */
	error = xfs_ilock_for_iomap(ip, flags, &lockmode);
	if (error)
		return error;

	ASSERT(offset <= mp->m_super->s_maxbytes);
	if (offset > mp->m_super->s_maxbytes - length)
		length = mp->m_super->s_maxbytes - offset;
	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	end_fsb = XFS_B_TO_FSB(mp, offset + length);

	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
			       &nimaps, 0);
	if (error)
		goto out_unlock;

	if (flags & IOMAP_REPORT) {
		/* Trim the mapping to the nearest shared extent boundary. */
		error = xfs_reflink_trim_around_shared(ip, &imap, &shared);
		if (error)
			goto out_unlock;
	}

	/* Non-modifying mapping requested, so we are done */
	if (!(flags & (IOMAP_WRITE | IOMAP_ZERO)))
		goto out_found;

	/*
	 * Break shared extents if necessary. Checks for non-blocking IO have
	 * been done up front, so we don't need to do them here.
	 */
	if (xfs_is_reflink_inode(ip)) {
		/* if zeroing doesn't need COW allocation, then we are done. */
		if ((flags & IOMAP_ZERO) &&
		    !needs_cow_for_zeroing(&imap, nimaps))
			goto out_found;

		if (flags & IOMAP_DIRECT) {
			/* may drop and re-acquire the ilock */
			error = xfs_reflink_allocate_cow(ip, &imap, &shared,
					&lockmode);
			if (error)
				goto out_unlock;
		} else {
			error = xfs_reflink_reserve_cow(ip, &imap);
			if (error)
				goto out_unlock;
		}

		end_fsb = imap.br_startoff + imap.br_blockcount;
		length = XFS_FSB_TO_B(mp, end_fsb) - offset;
	}

	/* Don't need to allocate over holes when doing zeroing operations. */
	if (flags & IOMAP_ZERO)
		goto out_found;

	if (!imap_needs_alloc(inode, &imap, nimaps))
		goto out_found;

	/* If nowait is set bail since we are going to make allocations. */
	if (flags & IOMAP_NOWAIT) {
		error = -EAGAIN;
		goto out_unlock;
	}

	/*
	 * We cap the maximum length we map to a sane size to keep the chunks
	 * of work done here somewhat symmetric with the work writeback does.
	 * This is a completely arbitrary number pulled out of thin air as a
	 * best guess for initial testing.
	 *
	 * Note that the values need to be less than 32-bits wide until the
	 * lower level functions are updated.
	 */
	length = min_t(loff_t, length, 1024 * PAGE_SIZE);

	/*
	 * xfs_iomap_write_direct() expects the shared lock. It is unlocked on
	 * return.
	 */
	if (lockmode == XFS_ILOCK_EXCL)
		xfs_ilock_demote(ip, lockmode);
	error = xfs_iomap_write_direct(ip, offset, length, &imap,
			nimaps);
	if (error)
		return error;

	iomap->flags |= IOMAP_F_NEW;
	trace_xfs_iomap_alloc(ip, offset, length, 0, &imap);

out_finish:
	if (xfs_ipincount(ip) && (ip->i_itemp->ili_fsync_fields
				& ~XFS_ILOG_TIMESTAMP))
		iomap->flags |= IOMAP_F_DIRTY;

	xfs_bmbt_to_iomap(ip, iomap, &imap);

	if (shared)
		iomap->flags |= IOMAP_F_SHARED;
	return 0;

out_found:
	ASSERT(nimaps);
	xfs_iunlock(ip, lockmode);
	trace_xfs_iomap_found(ip, offset, length, 0, &imap);
	goto out_finish;

out_unlock:
	xfs_iunlock(ip, lockmode);
	return error;
}
static int
xfs_file_iomap_end_delalloc(
	struct xfs_inode	*ip,
	loff_t			offset,
	loff_t			length,
	ssize_t			written,
	struct iomap		*iomap)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		start_fsb;
	xfs_fileoff_t		end_fsb;
	int			error = 0;

	/*
	 * Behave as if the write failed if drop writes is enabled. Set the NEW
	 * flag to force delalloc cleanup.
	 */
	if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_DROP_WRITES)) {
		iomap->flags |= IOMAP_F_NEW;
		written = 0;
	}

	/*
	 * start_fsb refers to the first unused block after a short write. If
	 * nothing was written, round offset down to point at the first block in
	 * the range.
	 */
	if (unlikely(!written))
		start_fsb = XFS_B_TO_FSBT(mp, offset);
	else
		start_fsb = XFS_B_TO_FSB(mp, offset + written);
	end_fsb = XFS_B_TO_FSB(mp, offset + length);

	/*
	 * Trim delalloc blocks if they were allocated by this write and we
	 * didn't manage to write the whole range.
	 *
	 * We don't need to care about racing delalloc as we hold i_mutex
	 * across the reserve/allocate/unreserve calls. If there are delalloc
	 * blocks in the range, they are ours.
	 */
	if ((iomap->flags & IOMAP_F_NEW) && start_fsb < end_fsb) {
		truncate_pagecache_range(VFS_I(ip), XFS_FSB_TO_B(mp, start_fsb),
					 XFS_FSB_TO_B(mp, end_fsb) - 1);

		error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
					       end_fsb - start_fsb);
		if (error && !XFS_FORCED_SHUTDOWN(mp)) {
			xfs_alert(mp, "%s: unable to clean up ino %lld",
				__func__, ip->i_ino);
			return error;
		}
	}

	return 0;
}
static int
xfs_file_iomap_end(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	ssize_t			written,
	unsigned		flags,
	struct iomap		*iomap)
{
	if ((flags & IOMAP_WRITE) && iomap->type == IOMAP_DELALLOC)
		return xfs_file_iomap_end_delalloc(XFS_I(inode), offset,
				length, written, iomap);
	return 0;
}

const struct iomap_ops xfs_iomap_ops = {
	.iomap_begin		= xfs_file_iomap_begin,
	.iomap_end		= xfs_file_iomap_end,
};
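/*
 * xfs_iomap_ops is what the XFS read/write paths hand to the generic iomap
 * helpers (for example iomap_file_buffered_write(), iomap_dio_rw() and
 * dax_iomap_rw()), which call back into ->iomap_begin/->iomap_end above for
 * each mapping they need.
 */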
static int
xfs_xattr_iomap_begin(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	unsigned		flags,
	struct iomap		*iomap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + length);
	struct xfs_bmbt_irec	imap;
	int			nimaps = 1, error = 0;
	unsigned		lockmode;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	lockmode = xfs_ilock_attr_map_shared(ip);

	/* if there is no attribute fork or no extents, return ENOENT */
	if (!XFS_IFORK_Q(ip) || !ip->i_d.di_anextents) {
		error = -ENOENT;
		goto out_unlock;
	}

	ASSERT(ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL);
	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
			       &nimaps, XFS_BMAPI_ATTRFORK);
out_unlock:
	xfs_iunlock(ip, lockmode);

	if (!error) {
		ASSERT(nimaps);
		xfs_bmbt_to_iomap(ip, iomap, &imap);
	}

	return error;
}

const struct iomap_ops xfs_xattr_iomap_ops = {
	.iomap_begin		= xfs_xattr_iomap_begin,
};
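/*
 * xfs_xattr_iomap_ops only reports existing attribute-fork mappings; it is
 * used by the XFS FIEMAP path for FIEMAP_FLAG_XATTR requests, so no
 * ->iomap_end hook is needed.
 */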