// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2016-2018 Christoph Hellwig.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_trans_space.h"
#include "xfs_inode_item.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_quota.h"
#include "xfs_rtgroup.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_reflink.h"
#include "xfs_health.h"
#include "xfs_rtbitmap.h"

#define XFS_ALLOC_ALIGN(mp, off) \
	(((off) >> mp->m_allocsize_log) << mp->m_allocsize_log)

static int
xfs_alert_fsblock_zero(
	xfs_inode_t	*ip,
	xfs_bmbt_irec_t	*imap)
{
	xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
			"Access to block zero in inode %llu "
			"start_block: %llx start_off: %llx "
			"blkcnt: %llx extent-state: %x",
		(unsigned long long)ip->i_ino,
		(unsigned long long)imap->br_startblock,
		(unsigned long long)imap->br_startoff,
		(unsigned long long)imap->br_blockcount,
		imap->br_state);
	xfs_bmap_mark_sick(ip, XFS_DATA_FORK);
	return -EFSCORRUPTED;
}

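/*
 * Compute the revalidation cookie for the current state of the inode forks.
 * For xattr mappings the attribute fork sequence number is returned as-is;
 * otherwise the COW fork sequence number (if the fork exists and the mapping
 * is shared) is packed into the upper 32 bits and the data fork sequence
 * number into the lower 32 bits, so a change to either fork invalidates a
 * previously issued mapping.
 */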
u64
xfs_iomap_inode_sequence(
	struct xfs_inode	*ip,
	u16			iomap_flags)
{
	u64			cookie = 0;

	if (iomap_flags & IOMAP_F_XATTR)
		return READ_ONCE(ip->i_af.if_seq);
	if ((iomap_flags & IOMAP_F_SHARED) && ip->i_cowfp)
		cookie = (u64)READ_ONCE(ip->i_cowfp->if_seq) << 32;
	return cookie | READ_ONCE(ip->i_df.if_seq);
}

/*
 * Check that the iomap passed to us is still valid for the given offset and
 * length.
 */
static bool
xfs_iomap_valid(
	struct inode		*inode,
	const struct iomap	*iomap)
{
	struct xfs_inode	*ip = XFS_I(inode);

	if (iomap->validity_cookie !=
			xfs_iomap_inode_sequence(ip, iomap->flags)) {
		trace_xfs_iomap_invalid(ip, iomap);
		return false;
	}

	XFS_ERRORTAG_DELAY(ip->i_mount, XFS_ERRTAG_WRITE_DELAY_MS);
	return true;
}

static const struct iomap_folio_ops xfs_iomap_folio_ops = {
	.iomap_valid		= xfs_iomap_valid,
};

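/*
 * Translate an on-disk extent mapping (struct xfs_bmbt_irec) into the
 * generic iomap representation handed back to the iomap layer, including
 * the validity cookie checked by xfs_iomap_valid() above.
 */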
int
xfs_bmbt_to_iomap(
	struct xfs_inode	*ip,
	struct iomap		*iomap,
	struct xfs_bmbt_irec	*imap,
	unsigned int		mapping_flags,
	u16			iomap_flags,
	u64			sequence_cookie)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_buftarg	*target = xfs_inode_buftarg(ip);

	if (unlikely(!xfs_valid_startblock(ip, imap->br_startblock))) {
		xfs_bmap_mark_sick(ip, XFS_DATA_FORK);
		return xfs_alert_fsblock_zero(ip, imap);
	}

	if (imap->br_startblock == HOLESTARTBLOCK) {
		iomap->addr = IOMAP_NULL_ADDR;
		iomap->type = IOMAP_HOLE;
	} else if (imap->br_startblock == DELAYSTARTBLOCK ||
		   isnullstartblock(imap->br_startblock)) {
		iomap->addr = IOMAP_NULL_ADDR;
		iomap->type = IOMAP_DELALLOC;
	} else {
		xfs_daddr_t	daddr = xfs_fsb_to_db(ip, imap->br_startblock);

		iomap->addr = BBTOB(daddr);
		if (mapping_flags & IOMAP_DAX)
			iomap->addr += target->bt_dax_part_off;

		if (imap->br_state == XFS_EXT_UNWRITTEN)
			iomap->type = IOMAP_UNWRITTEN;
		else
			iomap->type = IOMAP_MAPPED;

		/*
		 * Mark iomaps starting at the first sector of a RTG as merge
		 * boundary so that each I/O completion is contained to a
		 * single RTG.
		 */
		if (XFS_IS_REALTIME_INODE(ip) && xfs_has_rtgroups(mp) &&
		    xfs_rtbno_is_group_start(mp, imap->br_startblock))
			iomap->flags |= IOMAP_F_BOUNDARY;
	}

	iomap->offset = XFS_FSB_TO_B(mp, imap->br_startoff);
	iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount);
	if (mapping_flags & IOMAP_DAX)
		iomap->dax_dev = target->bt_daxdev;
	else
		iomap->bdev = target->bt_bdev;
	iomap->flags = iomap_flags;

	if (xfs_ipincount(ip) &&
	    (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
		iomap->flags |= IOMAP_F_DIRTY;

	iomap->validity_cookie = sequence_cookie;
	iomap->folio_ops = &xfs_iomap_folio_ops;
	return 0;
}

static void
xfs_hole_to_iomap(
	struct xfs_inode	*ip,
	struct iomap		*iomap,
	xfs_fileoff_t		offset_fsb,
	xfs_fileoff_t		end_fsb)
{
	struct xfs_buftarg	*target = xfs_inode_buftarg(ip);

	iomap->addr = IOMAP_NULL_ADDR;
	iomap->type = IOMAP_HOLE;
	iomap->offset = XFS_FSB_TO_B(ip->i_mount, offset_fsb);
	iomap->length = XFS_FSB_TO_B(ip->i_mount, end_fsb - offset_fsb);
	iomap->bdev = target->bt_bdev;
	iomap->dax_dev = target->bt_daxdev;
}

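/*
 * Clamp the end of the mapping to both the end of the requested range and
 * the largest file offset the superblock supports.
 */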
static inline xfs_fileoff_t
xfs_iomap_end_fsb(
	struct xfs_mount	*mp,
	loff_t			offset,
	loff_t			count)
{
	ASSERT(offset <= mp->m_super->s_maxbytes);
	return min(XFS_B_TO_FSB(mp, offset + count),
		   XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes));
}

static xfs_extlen_t
xfs_eof_alignment(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_extlen_t		align = 0;

	if (!XFS_IS_REALTIME_INODE(ip)) {
		/*
		 * Round up the allocation request to a stripe unit
		 * (m_dalign) boundary if the file size is >= stripe unit
		 * size, and we are allocating past the allocation eof.
		 *
		 * If mounted with the "-o swalloc" option the alignment is
		 * increased from the stripe unit size to the stripe width.
		 */
		if (mp->m_swidth && xfs_has_swalloc(mp))
			align = mp->m_swidth;
		else if (mp->m_dalign)
			align = mp->m_dalign;

		if (align && XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, align))
			align = 0;
	}

	return align;
}

/*
 * Check if last_fsb is outside the last extent, and if so grow it to the next
 * stripe unit boundary.
 */
static xfs_fileoff_t
xfs_iomap_eof_align_last_fsb(
	struct xfs_inode	*ip,
	xfs_fileoff_t		end_fsb)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK);
	xfs_extlen_t		extsz = xfs_get_extsz_hint(ip);
	xfs_extlen_t		align = xfs_eof_alignment(ip);
	struct xfs_bmbt_irec	irec;
	struct xfs_iext_cursor	icur;

	ASSERT(!xfs_need_iread_extents(ifp));

	/*
	 * Always round up the allocation request to the extent hint boundary.
	 */
	if (extsz) {
		if (align)
			align = roundup_64(align, extsz);
		else
			align = extsz;
	}

	if (align) {
		xfs_fileoff_t	aligned_end_fsb = roundup_64(end_fsb, align);

		xfs_iext_last(ifp, &icur);
		if (!xfs_iext_get_extent(ifp, &icur, &irec) ||
		    aligned_end_fsb >= irec.br_startoff + irec.br_blockcount)
			return aligned_end_fsb;
	}

	return end_fsb;
}

int
xfs_iomap_write_direct(
	struct xfs_inode	*ip,
	xfs_fileoff_t		offset_fsb,
	xfs_fileoff_t		count_fsb,
	unsigned int		flags,
	struct xfs_bmbt_irec	*imap,
	u64			*seq)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	xfs_filblks_t		resaligned;
	int			nimaps;
	unsigned int		dblocks, rblocks;
	bool			force = false;
	int			error;
	int			bmapi_flags = XFS_BMAPI_PREALLOC;
	int			nr_exts = XFS_IEXT_ADD_NOSPLIT_CNT;

	ASSERT(count_fsb > 0);

	resaligned = xfs_aligned_fsb_count(offset_fsb, count_fsb,
					   xfs_get_extsz_hint(ip));
	if (unlikely(XFS_IS_REALTIME_INODE(ip))) {
		dblocks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
		rblocks = resaligned;
	} else {
		dblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
		rblocks = 0;
	}

	error = xfs_qm_dqattach(ip);
	if (error)
		return error;

	/*
	 * For DAX, we do not allocate unwritten extents, but instead we zero
	 * the block before we commit the transaction.  Ideally we'd like to do
	 * this outside the transaction context, but if we commit and then crash
	 * we may not have zeroed the blocks and this will be exposed on
	 * recovery of the allocation.  Hence we must zero before commit.
	 *
	 * Further, if we are mapping unwritten extents here, we need to zero
	 * and convert them to written so that we don't need an unwritten extent
	 * callback for DAX.  This also means that we need to be able to dip into
	 * the reserve block pool for bmbt block allocation if there is no space
	 * left but we need to do unwritten extent conversion.
	 */
	if (flags & IOMAP_DAX) {
		bmapi_flags = XFS_BMAPI_CONVERT | XFS_BMAPI_ZERO;
		if (imap->br_state == XFS_EXT_UNWRITTEN) {
			force = true;
			nr_exts = XFS_IEXT_WRITE_UNWRITTEN_CNT;
			dblocks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;
		}
	}

	error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, dblocks,
			rblocks, force, &tp);
	if (error)
		return error;

	error = xfs_iext_count_extend(tp, ip, XFS_DATA_FORK, nr_exts);
	if (error)
		goto out_trans_cancel;

	/*
	 * From this point onwards we overwrite the imap pointer that the
	 * caller gave to us.
	 */
	nimaps = 1;
	error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb, bmapi_flags, 0,
			imap, &nimaps);
	if (error)
		goto out_trans_cancel;

	/*
	 * Complete the transaction
	 */
	error = xfs_trans_commit(tp);
	if (error)
		goto out_unlock;

	if (unlikely(!xfs_valid_startblock(ip, imap->br_startblock))) {
		xfs_bmap_mark_sick(ip, XFS_DATA_FORK);
		error = xfs_alert_fsblock_zero(ip, imap);
	}

out_unlock:
	*seq = xfs_iomap_inode_sequence(ip, 0);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

out_trans_cancel:
	xfs_trans_cancel(tp);
	goto out_unlock;
}

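/*
 * Check whether speculative preallocation needs throttling against the given
 * quota type: true once the current reservation plus this allocation crosses
 * the dquot's low preallocation watermark.
 */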
static bool
xfs_quota_need_throttle(
	struct xfs_inode	*ip,
	xfs_dqtype_t		type,
	xfs_fsblock_t		alloc_blocks)
{
	struct xfs_dquot	*dq = xfs_inode_dquot(ip, type);
	struct xfs_dquot_res	*res;
	struct xfs_dquot_pre	*pre;

	if (!dq || !xfs_this_quota_on(ip->i_mount, type))
		return false;

	if (XFS_IS_REALTIME_INODE(ip)) {
		res = &dq->q_rtb;
		pre = &dq->q_rtb_prealloc;
	} else {
		res = &dq->q_blk;
		pre = &dq->q_blk_prealloc;
	}

	/* no hi watermark, no throttle */
	if (!pre->q_prealloc_hi_wmark)
		return false;

	/* under the lo watermark, no throttle */
	if (res->reserved + alloc_blocks < pre->q_prealloc_lo_wmark)
		return false;

	return true;
}

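/*
 * Work out how hard to throttle preallocation against this dquot.  Each
 * low-space threshold crossed adds 2 to the shift, so the preallocation is
 * cut to 1/4, 1/16 or 1/64 of its requested size as the quota fills up.
 */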
static void
xfs_quota_calc_throttle(
	struct xfs_inode	*ip,
	xfs_dqtype_t		type,
	xfs_fsblock_t		*qblocks,
	int			*qshift,
	int64_t			*qfreesp)
{
	struct xfs_dquot	*dq = xfs_inode_dquot(ip, type);
	struct xfs_dquot_res	*res;
	struct xfs_dquot_pre	*pre;
	int64_t			freesp;
	int			shift = 0;

	if (!dq) {
		res = NULL;
		pre = NULL;
	} else if (XFS_IS_REALTIME_INODE(ip)) {
		res = &dq->q_rtb;
		pre = &dq->q_rtb_prealloc;
	} else {
		res = &dq->q_blk;
		pre = &dq->q_blk_prealloc;
	}

	/* no dq, or over hi wmark, squash the prealloc completely */
	if (!res || res->reserved >= pre->q_prealloc_hi_wmark) {
		*qblocks = 0;
		*qfreesp = 0;
		return;
	}

	freesp = pre->q_prealloc_hi_wmark - res->reserved;
	if (freesp < pre->q_low_space[XFS_QLOWSP_5_PCNT]) {
		shift = 2;
		if (freesp < pre->q_low_space[XFS_QLOWSP_3_PCNT])
			shift += 2;
		if (freesp < pre->q_low_space[XFS_QLOWSP_1_PCNT])
			shift += 2;
	}

	if (freesp < *qfreesp)
		*qfreesp = freesp;

	/* only overwrite the throttle values if we are more aggressive */
	if ((freesp >> shift) < (*qblocks >> *qshift)) {
		*qblocks = freesp;
		*qshift = shift;
	}
}

static int64_t
xfs_iomap_freesp(
	struct percpu_counter	*counter,
	uint64_t		low_space[XFS_LOWSP_MAX],
	int			*shift)
{
	int64_t			freesp;

	freesp = percpu_counter_read_positive(counter);
	if (freesp < low_space[XFS_LOWSP_5_PCNT]) {
		*shift = 2;
		if (freesp < low_space[XFS_LOWSP_4_PCNT])
			(*shift)++;
		if (freesp < low_space[XFS_LOWSP_3_PCNT])
			(*shift)++;
		if (freesp < low_space[XFS_LOWSP_2_PCNT])
			(*shift)++;
		if (freesp < low_space[XFS_LOWSP_1_PCNT])
			(*shift)++;
	}
	return freesp;
}

/*
 * If we don't have a user specified preallocation size, dynamically increase
 * the preallocation size as the size of the file grows.  Cap the maximum size
 * at a single extent or less if the filesystem is near full.  The closer the
 * filesystem is to being full, the smaller the maximum preallocation.
 */
static xfs_fsblock_t
xfs_iomap_prealloc_size(
	struct xfs_inode	*ip,
	int			whichfork,
	loff_t			offset,
	loff_t			count,
	struct xfs_iext_cursor	*icur)
{
	struct xfs_iext_cursor	ncur = *icur;
	struct xfs_bmbt_irec	prev, got;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	int64_t			freesp;
	xfs_fsblock_t		qblocks;
	xfs_fsblock_t		alloc_blocks = 0;
	xfs_extlen_t		plen;
	int			shift = 0;
	int			qshift = 0;

	/*
	 * As an exception we don't do any preallocation at all if the file is
	 * smaller than the minimum preallocation and we are using the default
	 * dynamic preallocation scheme, as it is likely this is the only write
	 * to the file that is going to be done.
	 */
	if (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_allocsize_blocks))
		return 0;

	/*
	 * Use the minimum preallocation size for small files or if we are
	 * writing right after a hole.
	 */
	if (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_dalign) ||
	    !xfs_iext_prev_extent(ifp, &ncur, &prev) ||
	    prev.br_startoff + prev.br_blockcount < offset_fsb)
		return mp->m_allocsize_blocks;

	/*
	 * Take the size of the preceding data extents as the basis for the
	 * preallocation size.  Note that we don't care if the previous extents
	 * are written or not.
	 */
	plen = prev.br_blockcount;
	while (xfs_iext_prev_extent(ifp, &ncur, &got)) {
		if (plen > XFS_MAX_BMBT_EXTLEN / 2 ||
		    isnullstartblock(got.br_startblock) ||
		    got.br_startoff + got.br_blockcount != prev.br_startoff ||
		    got.br_startblock + got.br_blockcount != prev.br_startblock)
			break;
		plen += got.br_blockcount;
		prev = got;
	}

	/*
	 * If the size of the extents is greater than half the maximum extent
	 * length, then use the current offset as the basis.  This ensures that
	 * for large files the preallocation size always extends to
	 * XFS_MAX_BMBT_EXTLEN rather than falling short due to things like
	 * stripe unit/width alignment of real extents.
	 */
	alloc_blocks = plen * 2;
	if (alloc_blocks > XFS_MAX_BMBT_EXTLEN)
		alloc_blocks = XFS_B_TO_FSB(mp, offset);
	qblocks = alloc_blocks;

	/*
	 * XFS_MAX_BMBT_EXTLEN is not a power of two value but we round the
	 * prealloc down to the nearest power of two value after throttling.
	 * To prevent the round down from unconditionally reducing the maximum
	 * supported prealloc size, we round up first, apply appropriate
	 * throttling, round down and cap the value to XFS_MAX_BMBT_EXTLEN.
	 */
	alloc_blocks = XFS_FILEOFF_MIN(roundup_pow_of_two(XFS_MAX_BMBT_EXTLEN),
				       alloc_blocks);

	if (unlikely(XFS_IS_REALTIME_INODE(ip)))
		freesp = xfs_rtbxlen_to_blen(mp,
				xfs_iomap_freesp(&mp->m_frextents,
					mp->m_low_rtexts, &shift));
	else
		freesp = xfs_iomap_freesp(&mp->m_fdblocks, mp->m_low_space,
				&shift);

	/*
	 * Check each quota to cap the prealloc size, provide a shift value to
	 * throttle with and adjust amount of available space.
	 */
	if (xfs_quota_need_throttle(ip, XFS_DQTYPE_USER, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQTYPE_USER, &qblocks, &qshift,
					&freesp);
	if (xfs_quota_need_throttle(ip, XFS_DQTYPE_GROUP, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQTYPE_GROUP, &qblocks, &qshift,
					&freesp);
	if (xfs_quota_need_throttle(ip, XFS_DQTYPE_PROJ, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQTYPE_PROJ, &qblocks, &qshift,
					&freesp);

	/*
	 * The final prealloc size is set to the minimum of free space available
	 * in each of the quotas and the overall filesystem.
	 *
	 * The shift throttle value is set to the maximum value as determined by
	 * the global low free space values and per-quota low free space values.
	 */
	alloc_blocks = min(alloc_blocks, qblocks);
	shift = max(shift, qshift);

	if (shift)
		alloc_blocks >>= shift;
	/*
	 * rounddown_pow_of_two() returns an undefined result if we pass in
	 * alloc_blocks = 0.
	 */
	if (alloc_blocks)
		alloc_blocks = rounddown_pow_of_two(alloc_blocks);
	if (alloc_blocks > XFS_MAX_BMBT_EXTLEN)
		alloc_blocks = XFS_MAX_BMBT_EXTLEN;

	/*
	 * If we are still trying to allocate more space than is
	 * available, squash the prealloc hard.  This can happen if we
	 * have a large file on a small filesystem and the above
	 * lowspace thresholds are smaller than XFS_MAX_BMBT_EXTLEN.
	 */
	while (alloc_blocks && alloc_blocks >= freesp)
		alloc_blocks >>= 4;
	if (alloc_blocks < mp->m_allocsize_blocks)
		alloc_blocks = mp->m_allocsize_blocks;
	trace_xfs_iomap_prealloc_size(ip, alloc_blocks, shift,
				      mp->m_allocsize_blocks);
	return alloc_blocks;
}

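/*
 * Convert the unwritten extents backing a byte range to written, looping one
 * mapping at a time, and log an updated on-disk inode size if the conversion
 * extends past the current one.  Typically run from I/O completion context.
 */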
int
xfs_iomap_write_unwritten(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	xfs_off_t	count,
	bool		update_isize)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_filblks_t	count_fsb;
	xfs_filblks_t	numblks_fsb;
	int		nimaps;
	xfs_trans_t	*tp;
	xfs_bmbt_irec_t	imap;
	struct inode	*inode = VFS_I(ip);
	xfs_fsize_t	i_size;
	uint		resblks;
	int		error;

	trace_xfs_unwritten_convert(ip, offset, count);

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb);

	/*
	 * Reserve enough blocks in this transaction for two complete extent
	 * btree splits.  We may be converting the middle part of an unwritten
	 * extent and in this case we will insert two new extents in the btree
	 * each of which could cause a full split.
	 *
	 * This reservation amount will be used in the first call to
	 * xfs_bmbt_split() to select an AG with enough space to satisfy the
	 * rest of the operation.
	 */
	resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;

	/* Attach dquots so that bmbt splits are accounted correctly. */
	error = xfs_qm_dqattach(ip);
	if (error)
		return error;

	do {
		/*
		 * Set up a transaction to convert the range of extents
		 * from unwritten to real.  Do allocations in a loop until
		 * we have covered the range passed in.
		 *
		 * Note that we can't risk recursing back into the filesystem
		 * here as we might be asked to write out the same inode that we
		 * complete here and might deadlock on the iolock.
		 */
		error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, resblks,
				0, true, &tp);
		if (error)
			return error;

		error = xfs_iext_count_extend(tp, ip, XFS_DATA_FORK,
				XFS_IEXT_WRITE_UNWRITTEN_CNT);
		if (error)
			goto error_on_bmapi_transaction;

		/*
		 * Modify the unwritten extent state of the buffer.
		 */
		nimaps = 1;
		error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
					XFS_BMAPI_CONVERT, resblks, &imap,
					&nimaps);
		if (error)
			goto error_on_bmapi_transaction;

		/*
		 * Log the updated inode size as we go.  We have to be careful
		 * to only log it up to the actual write offset if it is
		 * halfway into a block.
		 */
		i_size = XFS_FSB_TO_B(mp, offset_fsb + count_fsb);
		if (i_size > offset + count)
			i_size = offset + count;
		if (update_isize && i_size > i_size_read(inode))
			i_size_write(inode, i_size);
		i_size = xfs_new_eof(ip, i_size);
		if (i_size) {
			ip->i_disk_size = i_size;
			xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		}

		error = xfs_trans_commit(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			return error;

		if (unlikely(!xfs_valid_startblock(ip, imap.br_startblock))) {
			xfs_bmap_mark_sick(ip, XFS_DATA_FORK);
			return xfs_alert_fsblock_zero(ip, &imap);
		}

		if ((numblks_fsb = imap.br_blockcount) == 0) {
			/*
			 * The numblks_fsb value should always get
			 * smaller, otherwise the loop is stuck.
			 */
			ASSERT(imap.br_blockcount);
			break;
		}
		offset_fsb += numblks_fsb;
		count_fsb -= numblks_fsb;
	} while (count_fsb > 0);

	return 0;

error_on_bmapi_transaction:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

static inline bool
imap_needs_alloc(
	struct inode		*inode,
	unsigned		flags,
	struct xfs_bmbt_irec	*imap,
	int			nimaps)
{
	/* don't allocate blocks when just zeroing */
	if (flags & IOMAP_ZERO)
		return false;
	if (!nimaps ||
	    imap->br_startblock == HOLESTARTBLOCK ||
	    imap->br_startblock == DELAYSTARTBLOCK)
		return true;
	/* we convert unwritten extents before copying the data for DAX */
	if ((flags & IOMAP_DAX) && imap->br_state == XFS_EXT_UNWRITTEN)
		return true;
	return false;
}

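/*
 * Decide whether this mapping has to be copied on write: only relevant for
 * COW-enabled inodes, and zeroing or unsharing never needs to COW holes or
 * unwritten extents.
 */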
static inline bool
imap_needs_cow(
	struct xfs_inode	*ip,
	unsigned int		flags,
	struct xfs_bmbt_irec	*imap,
	int			nimaps)
{
	if (!xfs_is_cow_inode(ip))
		return false;

	/* when zeroing we don't have to COW holes or unwritten extents */
	if (flags & (IOMAP_UNSHARE | IOMAP_ZERO)) {
		if (!nimaps ||
		    imap->br_startblock == HOLESTARTBLOCK ||
		    imap->br_state == XFS_EXT_UNWRITTEN)
			return false;
	}

	return true;
}

/*
 * Extents not yet cached require exclusive access; don't block for
 * IOMAP_NOWAIT.
 *
 * This is basically an opencoded xfs_ilock_data_map_shared() call, but with
 * support for IOMAP_NOWAIT.
 */
static int
xfs_ilock_for_iomap(
	struct xfs_inode	*ip,
	unsigned		flags,
	unsigned		*lockmode)
{
	if (flags & IOMAP_NOWAIT) {
		if (xfs_need_iread_extents(&ip->i_df))
			return -EAGAIN;
		if (!xfs_ilock_nowait(ip, *lockmode))
			return -EAGAIN;
	} else {
		if (xfs_need_iread_extents(&ip->i_df))
			*lockmode = XFS_ILOCK_EXCL;
		xfs_ilock(ip, *lockmode);
	}

	return 0;
}

/*
 * Check that the imap we are going to return to the caller spans the entire
 * range that the caller requested for the IO.
 */
static bool
imap_spans_range(
	struct xfs_bmbt_irec	*imap,
	xfs_fileoff_t		offset_fsb,
	xfs_fileoff_t		end_fsb)
{
	if (imap->br_startoff > offset_fsb)
		return false;
	if (imap->br_startoff + imap->br_blockcount < end_fsb)
		return false;
	return true;
}

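/*
 * iomap_begin method for direct I/O (also reused by the DAX write path
 * below): map, and if necessary allocate, the blocks backing the requested
 * range.
 */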
static int
xfs_direct_write_iomap_begin(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	unsigned		flags,
	struct iomap		*iomap,
	struct iomap		*srcmap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_bmbt_irec	imap, cmap;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = xfs_iomap_end_fsb(mp, offset, length);
	int			nimaps = 1, error = 0;
	bool			shared = false;
	u16			iomap_flags = 0;
	unsigned int		lockmode;
	u64			seq;

	ASSERT(flags & (IOMAP_WRITE | IOMAP_ZERO));

	if (xfs_is_shutdown(mp))
		return -EIO;

	/*
	 * Writes that span EOF might trigger an IO size update on completion,
	 * so consider them to be dirty for the purposes of O_DSYNC even if
	 * no other metadata changes are pending or have been made here.
	 */
	if (offset + length > i_size_read(inode))
		iomap_flags |= IOMAP_F_DIRTY;

	/*
	 * COW writes may allocate delalloc space or convert unwritten COW
	 * extents, so we need to make sure to take the lock exclusively here.
	 */
	if (xfs_is_cow_inode(ip))
		lockmode = XFS_ILOCK_EXCL;
	else
		lockmode = XFS_ILOCK_SHARED;

relock:
	error = xfs_ilock_for_iomap(ip, flags, &lockmode);
	if (error)
		return error;

	/*
	 * The reflink iflag could have changed since the earlier unlocked
	 * check, so check it again and relock if needed.
	 */
	if (xfs_is_cow_inode(ip) && lockmode == XFS_ILOCK_SHARED) {
		xfs_iunlock(ip, lockmode);
		lockmode = XFS_ILOCK_EXCL;
		goto relock;
	}

	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
			       &nimaps, 0);
	if (error)
		goto out_unlock;

	if (imap_needs_cow(ip, flags, &imap, nimaps)) {
		error = -EAGAIN;
		if (flags & IOMAP_NOWAIT)
			goto out_unlock;

		/* may drop and re-acquire the ilock */
		error = xfs_reflink_allocate_cow(ip, &imap, &cmap, &shared,
				&lockmode,
				(flags & IOMAP_DIRECT) || IS_DAX(inode));
		if (error)
			goto out_unlock;
		if (shared)
			goto out_found_cow;
		end_fsb = imap.br_startoff + imap.br_blockcount;
		length = XFS_FSB_TO_B(mp, end_fsb) - offset;
	}

	if (imap_needs_alloc(inode, flags, &imap, nimaps))
		goto allocate_blocks;

	/*
	 * NOWAIT and OVERWRITE I/O needs to span the entire requested I/O with
	 * a single map so that we avoid partial IO failures due to the rest of
	 * the I/O range not covered by this map triggering an EAGAIN condition
	 * when it is subsequently mapped and aborting the I/O.
	 */
	if (flags & (IOMAP_NOWAIT | IOMAP_OVERWRITE_ONLY)) {
		error = -EAGAIN;
		if (!imap_spans_range(&imap, offset_fsb, end_fsb))
			goto out_unlock;
	}

	/*
	 * For overwrite only I/O, we cannot convert unwritten extents without
	 * requiring sub-block zeroing.  This can only be done under an
	 * exclusive IOLOCK, hence return -EAGAIN if this is not a written
	 * extent to tell the caller to try again.
	 */
	if (flags & IOMAP_OVERWRITE_ONLY) {
		error = -EAGAIN;
		if (imap.br_state != XFS_EXT_NORM &&
		    ((offset | length) & mp->m_blockmask))
			goto out_unlock;
	}

	seq = xfs_iomap_inode_sequence(ip, iomap_flags);
	xfs_iunlock(ip, lockmode);
	trace_xfs_iomap_found(ip, offset, length, XFS_DATA_FORK, &imap);
	return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, iomap_flags, seq);

allocate_blocks:
	error = -EAGAIN;
	if (flags & (IOMAP_NOWAIT | IOMAP_OVERWRITE_ONLY))
		goto out_unlock;

	/*
	 * We cap the maximum length we map to a sane size to keep the chunks
	 * of work done somewhat symmetric with the work writeback does.
	 * This is a completely arbitrary number pulled out of thin air as a
	 * best guess for initial testing.
	 *
	 * Note that the value needs to be less than 32 bits wide until the
	 * lower level functions are updated.
	 */
	length = min_t(loff_t, length, 1024 * PAGE_SIZE);
	end_fsb = xfs_iomap_end_fsb(mp, offset, length);

	if (offset + length > XFS_ISIZE(ip))
		end_fsb = xfs_iomap_eof_align_last_fsb(ip, end_fsb);
	else if (nimaps && imap.br_startblock == HOLESTARTBLOCK)
		end_fsb = min(end_fsb, imap.br_startoff + imap.br_blockcount);
	xfs_iunlock(ip, lockmode);

	error = xfs_iomap_write_direct(ip, offset_fsb, end_fsb - offset_fsb,
			flags, &imap, &seq);
	if (error)
		return error;

	trace_xfs_iomap_alloc(ip, offset, length, XFS_DATA_FORK, &imap);
	return xfs_bmbt_to_iomap(ip, iomap, &imap, flags,
				 iomap_flags | IOMAP_F_NEW, seq);

out_found_cow:
	length = XFS_FSB_TO_B(mp, cmap.br_startoff + cmap.br_blockcount);
	trace_xfs_iomap_found(ip, offset, length - offset, XFS_COW_FORK, &cmap);
	if (imap.br_startblock != HOLESTARTBLOCK) {
		seq = xfs_iomap_inode_sequence(ip, 0);
		error = xfs_bmbt_to_iomap(ip, srcmap, &imap, flags, 0, seq);
		if (error)
			goto out_unlock;
	}
	seq = xfs_iomap_inode_sequence(ip, IOMAP_F_SHARED);
	xfs_iunlock(ip, lockmode);
	return xfs_bmbt_to_iomap(ip, iomap, &cmap, flags, IOMAP_F_SHARED, seq);

out_unlock:
	if (lockmode)
		xfs_iunlock(ip, lockmode);
	return error;
}

const struct iomap_ops xfs_direct_write_iomap_ops = {
	.iomap_begin		= xfs_direct_write_iomap_begin,
};

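/*
 * On a short or failed DAX write to a COW-enabled inode, cancel the COW fork
 * reservations left over for the range; on success, remap the written-to COW
 * blocks back into the data fork.
 */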
static int
xfs_dax_write_iomap_end(
	struct inode		*inode,
	loff_t			pos,
	loff_t			length,
	ssize_t			written,
	unsigned		flags,
	struct iomap		*iomap)
{
	struct xfs_inode	*ip = XFS_I(inode);

	if (!xfs_is_cow_inode(ip))
		return 0;

	if (!written) {
		xfs_reflink_cancel_cow_range(ip, pos, length, true);
		return 0;
	}

	return xfs_reflink_end_cow(ip, pos, written);
}

const struct iomap_ops xfs_dax_write_iomap_ops = {
	.iomap_begin	= xfs_direct_write_iomap_begin,
	.iomap_end	= xfs_dax_write_iomap_end,
};

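/*
 * iomap_begin method for buffered writes: reserve delalloc blocks (with
 * speculative preallocation past EOF) in the data or COW fork as needed.
 */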
static int
xfs_buffered_write_iomap_begin(
	struct inode		*inode,
	loff_t			offset,
	loff_t			count,
	unsigned		flags,
	struct iomap		*iomap,
	struct iomap		*srcmap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = xfs_iomap_end_fsb(mp, offset, count);
	struct xfs_bmbt_irec	imap, cmap;
	struct xfs_iext_cursor	icur, ccur;
	xfs_fsblock_t		prealloc_blocks = 0;
	bool			eof = false, cow_eof = false, shared = false;
	int			allocfork = XFS_DATA_FORK;
	int			error = 0;
	unsigned int		lockmode = XFS_ILOCK_EXCL;
	unsigned int		iomap_flags = 0;
	u64			seq;

	if (xfs_is_shutdown(mp))
		return -EIO;

	/* we can't use delayed allocations when using extent size hints */
	if (xfs_get_extsz_hint(ip))
		return xfs_direct_write_iomap_begin(inode, offset, count,
				flags, iomap, srcmap);

	error = xfs_qm_dqattach(ip);
	if (error)
		return error;

	error = xfs_ilock_for_iomap(ip, flags, &lockmode);
	if (error)
		return error;

	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(&ip->i_df)) ||
	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
		xfs_bmap_mark_sick(ip, XFS_DATA_FORK);
		error = -EFSCORRUPTED;
		goto out_unlock;
	}

	XFS_STATS_INC(mp, xs_blk_mapw);

	error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
	if (error)
		goto out_unlock;

	/*
	 * Search the data fork first to look up our source mapping.  We
	 * always need the data fork map, as we have to return it to the
	 * iomap code so that the higher level write code can read data in to
	 * perform read-modify-write cycles for unaligned writes.
	 */
	eof = !xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap);
	if (eof)
		imap.br_startoff = end_fsb; /* fake hole until the end */

	/* We never need to allocate blocks for zeroing or unsharing a hole. */
	if ((flags & (IOMAP_UNSHARE | IOMAP_ZERO)) &&
	    imap.br_startoff > offset_fsb) {
		xfs_hole_to_iomap(ip, iomap, offset_fsb, imap.br_startoff);
		goto out_unlock;
	}

	/*
	 * For zeroing, trim a delalloc extent that extends beyond the EOF
	 * block.  If it starts beyond the EOF block, convert it to an
	 * unwritten extent.
	 */
	if ((flags & IOMAP_ZERO) && imap.br_startoff <= offset_fsb &&
	    isnullstartblock(imap.br_startblock)) {
		xfs_fileoff_t eof_fsb = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));

		if (offset_fsb >= eof_fsb)
			goto convert_delay;
		if (end_fsb > eof_fsb) {
			end_fsb = eof_fsb;
			xfs_trim_extent(&imap, offset_fsb,
					end_fsb - offset_fsb);
		}
	}

	/*
	 * Search the COW fork extent list even if we did not find a data fork
	 * extent.  This serves two purposes: first this implements the
	 * speculative preallocation using cowextsize, so that we also unshare
	 * blocks adjacent to shared blocks instead of just the shared blocks
	 * themselves.  Second the lookup in the extent list is generally faster
	 * than going out to the shared extent tree.
	 */
	if (xfs_is_cow_inode(ip)) {
		if (!ip->i_cowfp) {
			ASSERT(!xfs_is_reflink_inode(ip));
			xfs_ifork_init_cow(ip);
		}
		cow_eof = !xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb,
				&ccur, &cmap);
		if (!cow_eof && cmap.br_startoff <= offset_fsb) {
			trace_xfs_reflink_cow_found(ip, &cmap);
			goto found_cow;
		}
	}

	if (imap.br_startoff <= offset_fsb) {
		/*
		 * For reflink files we may need a delalloc reservation when
		 * overwriting shared extents.  This includes zeroing of
		 * existing extents that contain data.
		 */
		if (!xfs_is_cow_inode(ip) ||
		    ((flags & IOMAP_ZERO) && imap.br_state != XFS_EXT_NORM)) {
			trace_xfs_iomap_found(ip, offset, count, XFS_DATA_FORK,
					&imap);
			goto found_imap;
		}

		xfs_trim_extent(&imap, offset_fsb, end_fsb - offset_fsb);

		/* Trim the mapping to the nearest shared extent boundary. */
		error = xfs_bmap_trim_cow(ip, &imap, &shared);
		if (error)
			goto out_unlock;

		/* Not shared?  Just report the (potentially capped) extent. */
		if (!shared) {
			trace_xfs_iomap_found(ip, offset, count, XFS_DATA_FORK,
					&imap);
			goto found_imap;
		}

		/*
		 * Fork all the shared blocks from our write offset until the
		 * end of the extent.
		 */
		allocfork = XFS_COW_FORK;
		end_fsb = imap.br_startoff + imap.br_blockcount;
	} else {
		/*
		 * We cap the maximum length we map here to MAX_WRITEBACK_PAGES
		 * pages to keep the chunks of work done somewhat symmetric
		 * with the work writeback does.  This is a completely
		 * arbitrary number pulled out of thin air.
		 *
		 * Note that the value needs to be less than 32 bits wide until
		 * the lower level functions are updated.
		 */
		count = min_t(loff_t, count, 1024 * PAGE_SIZE);
		end_fsb = xfs_iomap_end_fsb(mp, offset, count);

		if (xfs_is_always_cow_inode(ip))
			allocfork = XFS_COW_FORK;
	}

	if (eof && offset + count > XFS_ISIZE(ip)) {
		/*
		 * Determine the initial size of the preallocation.
		 * We clean up any extra preallocation when the file is closed.
		 */
		if (xfs_has_allocsize(mp))
			prealloc_blocks = mp->m_allocsize_blocks;
		else if (allocfork == XFS_DATA_FORK)
			prealloc_blocks = xfs_iomap_prealloc_size(ip, allocfork,
						offset, count, &icur);
		else
			prealloc_blocks = xfs_iomap_prealloc_size(ip, allocfork,
						offset, count, &ccur);
		if (prealloc_blocks) {
			xfs_extlen_t	align;
			xfs_off_t	end_offset;
			xfs_fileoff_t	p_end_fsb;

			end_offset = XFS_ALLOC_ALIGN(mp, offset + count - 1);
			p_end_fsb = XFS_B_TO_FSBT(mp, end_offset) +
					prealloc_blocks;

			align = xfs_eof_alignment(ip);
			if (align)
				p_end_fsb = roundup_64(p_end_fsb, align);

			p_end_fsb = min(p_end_fsb,
				XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes));
			ASSERT(p_end_fsb > offset_fsb);
			prealloc_blocks = p_end_fsb - end_fsb;
		}
	}

	/*
	 * Flag newly allocated delalloc blocks with IOMAP_F_NEW so we punch
	 * them out if the write happens to fail.
	 */
	iomap_flags |= IOMAP_F_NEW;
	if (allocfork == XFS_COW_FORK) {
		error = xfs_bmapi_reserve_delalloc(ip, allocfork, offset_fsb,
				end_fsb - offset_fsb, prealloc_blocks, &cmap,
				&ccur, cow_eof);
		if (error)
			goto out_unlock;

		trace_xfs_iomap_alloc(ip, offset, count, allocfork, &cmap);
		goto found_cow;
	}

	error = xfs_bmapi_reserve_delalloc(ip, allocfork, offset_fsb,
			end_fsb - offset_fsb, prealloc_blocks, &imap, &icur,
			eof);
	if (error)
		goto out_unlock;

	trace_xfs_iomap_alloc(ip, offset, count, allocfork, &imap);

found_imap:
	seq = xfs_iomap_inode_sequence(ip, iomap_flags);
	xfs_iunlock(ip, lockmode);
	return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, iomap_flags, seq);

convert_delay:
	xfs_iunlock(ip, lockmode);
	truncate_pagecache(inode, offset);
	error = xfs_bmapi_convert_delalloc(ip, XFS_DATA_FORK, offset,
					   iomap, NULL);
	if (error)
		return error;

	trace_xfs_iomap_alloc(ip, offset, count, XFS_DATA_FORK, &imap);
	return 0;

found_cow:
	if (imap.br_startoff <= offset_fsb) {
		error = xfs_bmbt_to_iomap(ip, srcmap, &imap, flags, 0,
				xfs_iomap_inode_sequence(ip, 0));
		if (error)
			goto out_unlock;
	} else {
		xfs_trim_extent(&cmap, offset_fsb,
				imap.br_startoff - offset_fsb);
	}

	iomap_flags |= IOMAP_F_SHARED;
	seq = xfs_iomap_inode_sequence(ip, iomap_flags);
	xfs_iunlock(ip, lockmode);
	return xfs_bmbt_to_iomap(ip, iomap, &cmap, flags, iomap_flags, seq);

out_unlock:
	xfs_iunlock(ip, lockmode);
	return error;
}

static void
xfs_buffered_write_delalloc_punch(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	struct iomap		*iomap)
{
	xfs_bmap_punch_delalloc_range(XFS_I(inode),
			(iomap->flags & IOMAP_F_SHARED) ?
				XFS_COW_FORK : XFS_DATA_FORK,
			offset, offset + length);
}

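/*
 * Release any freshly reserved delalloc blocks that a (possibly short or
 * failed) buffered write did not end up using; ranges still backed by dirty
 * page cache are kept.
 */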
static int
xfs_buffered_write_iomap_end(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	ssize_t			written,
	unsigned		flags,
	struct iomap		*iomap)
{
	loff_t			start_byte, end_byte;

	/* If we didn't reserve the blocks, we're not allowed to punch them. */
	if (iomap->type != IOMAP_DELALLOC || !(iomap->flags & IOMAP_F_NEW))
		return 0;

	/*
	 * iomap_page_mkwrite() will never fail in a way that requires delalloc
	 * extents that it allocated to be revoked.  Hence never try to release
	 * them here.
	 */
	if (flags & IOMAP_FAULT)
		return 0;

	/* Nothing to do if we've written the entire delalloc extent */
	start_byte = iomap_last_written_block(inode, offset, written);
	end_byte = round_up(offset + length, i_blocksize(inode));
	if (start_byte >= end_byte)
		return 0;

	/* For zeroing operations the callers already hold invalidate_lock. */
	if (flags & (IOMAP_UNSHARE | IOMAP_ZERO)) {
		rwsem_assert_held_write(&inode->i_mapping->invalidate_lock);
		iomap_write_delalloc_release(inode, start_byte, end_byte, flags,
				iomap, xfs_buffered_write_delalloc_punch);
	} else {
		filemap_invalidate_lock(inode->i_mapping);
		iomap_write_delalloc_release(inode, start_byte, end_byte, flags,
				iomap, xfs_buffered_write_delalloc_punch);
		filemap_invalidate_unlock(inode->i_mapping);
	}

	return 0;
}

const struct iomap_ops xfs_buffered_write_iomap_ops = {
	.iomap_begin		= xfs_buffered_write_iomap_begin,
	.iomap_end		= xfs_buffered_write_iomap_end,
};

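/*
 * iomap_begin method for reads: a plain data fork lookup, trimming the
 * mapping around shared extents only when reporting (IOMAP_REPORT) or on
 * DAX inodes.
 */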
static int
xfs_read_iomap_begin(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	unsigned		flags,
	struct iomap		*iomap,
	struct iomap		*srcmap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_bmbt_irec	imap;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = xfs_iomap_end_fsb(mp, offset, length);
	int			nimaps = 1, error = 0;
	bool			shared = false;
	unsigned int		lockmode = XFS_ILOCK_SHARED;
	u64			seq;

	ASSERT(!(flags & (IOMAP_WRITE | IOMAP_ZERO)));

	if (xfs_is_shutdown(mp))
		return -EIO;

	error = xfs_ilock_for_iomap(ip, flags, &lockmode);
	if (error)
		return error;
	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
			       &nimaps, 0);
	if (!error && ((flags & IOMAP_REPORT) || IS_DAX(inode)))
		error = xfs_reflink_trim_around_shared(ip, &imap, &shared);
	seq = xfs_iomap_inode_sequence(ip, shared ? IOMAP_F_SHARED : 0);
	xfs_iunlock(ip, lockmode);

	if (error)
		return error;
	trace_xfs_iomap_found(ip, offset, length, XFS_DATA_FORK, &imap);
	return xfs_bmbt_to_iomap(ip, iomap, &imap, flags,
				 shared ? IOMAP_F_SHARED : 0, seq);
}

const struct iomap_ops xfs_read_iomap_ops = {
	.iomap_begin		= xfs_read_iomap_begin,
};

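/*
 * iomap_begin method backing SEEK_HOLE/SEEK_DATA: report data extents, COW
 * fork extents (as unwritten, so the page cache is probed for dirty data)
 * and otherwise a hole capped at the next data or COW extent.
 */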
static int
xfs_seek_iomap_begin(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	unsigned		flags,
	struct iomap		*iomap,
	struct iomap		*srcmap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + length);
	xfs_fileoff_t		cow_fsb = NULLFILEOFF, data_fsb = NULLFILEOFF;
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	imap, cmap;
	int			error = 0;
	unsigned		lockmode;
	u64			seq;

	if (xfs_is_shutdown(mp))
		return -EIO;

	lockmode = xfs_ilock_data_map_shared(ip);
	error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
	if (error)
		goto out_unlock;

	if (xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap)) {
		/*
		 * If we found a data extent we are done.
		 */
		if (imap.br_startoff <= offset_fsb)
			goto done;
		data_fsb = imap.br_startoff;
	} else {
		/*
		 * Fake a hole until the end of the file.
		 */
		data_fsb = xfs_iomap_end_fsb(mp, offset, length);
	}

	/*
	 * If a COW fork extent covers the hole, report it - capped to the next
	 * data fork extent:
	 */
	if (xfs_inode_has_cow_data(ip) &&
	    xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &cmap))
		cow_fsb = cmap.br_startoff;
	if (cow_fsb != NULLFILEOFF && cow_fsb <= offset_fsb) {
		if (data_fsb < cow_fsb + cmap.br_blockcount)
			end_fsb = min(end_fsb, data_fsb);
		xfs_trim_extent(&cmap, offset_fsb, end_fsb - offset_fsb);
		seq = xfs_iomap_inode_sequence(ip, IOMAP_F_SHARED);
		error = xfs_bmbt_to_iomap(ip, iomap, &cmap, flags,
				IOMAP_F_SHARED, seq);
		/*
		 * This is a COW extent, so we must probe the page cache
		 * because there could be dirty page cache being backed
		 * by this extent.
		 */
		iomap->type = IOMAP_UNWRITTEN;
		goto out_unlock;
	}

	/*
	 * Else report a hole, capped to the next found data or COW extent.
	 */
	if (cow_fsb != NULLFILEOFF && cow_fsb < data_fsb)
		imap.br_blockcount = cow_fsb - offset_fsb;
	else
		imap.br_blockcount = data_fsb - offset_fsb;
	imap.br_startoff = offset_fsb;
	imap.br_startblock = HOLESTARTBLOCK;
	imap.br_state = XFS_EXT_NORM;
done:
	seq = xfs_iomap_inode_sequence(ip, 0);
	xfs_trim_extent(&imap, offset_fsb, end_fsb - offset_fsb);
	error = xfs_bmbt_to_iomap(ip, iomap, &imap, flags, 0, seq);
out_unlock:
	xfs_iunlock(ip, lockmode);
	return error;
}

const struct iomap_ops xfs_seek_iomap_ops = {
	.iomap_begin		= xfs_seek_iomap_begin,
};

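/*
 * iomap_begin method for the attribute fork; read-only, used for mapping
 * xattr extents (e.g. by FIEMAP on the attribute fork).
 */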
static int
xfs_xattr_iomap_begin(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	unsigned		flags,
	struct iomap		*iomap,
	struct iomap		*srcmap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + length);
	struct xfs_bmbt_irec	imap;
	int			nimaps = 1, error = 0;
	unsigned		lockmode;
	u64			seq;

	if (xfs_is_shutdown(mp))
		return -EIO;

	lockmode = xfs_ilock_attr_map_shared(ip);

	/* if there is no attribute fork or no extents, return ENOENT */
	if (!xfs_inode_has_attr_fork(ip) || !ip->i_af.if_nextents) {
		error = -ENOENT;
		goto out_unlock;
	}

	ASSERT(ip->i_af.if_format != XFS_DINODE_FMT_LOCAL);
	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
			       &nimaps, XFS_BMAPI_ATTRFORK);
out_unlock:
	seq = xfs_iomap_inode_sequence(ip, IOMAP_F_XATTR);
	xfs_iunlock(ip, lockmode);

	if (error)
		return error;
	ASSERT(nimaps);
	return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, IOMAP_F_XATTR, seq);
}

const struct iomap_ops xfs_xattr_iomap_ops = {
	.iomap_begin		= xfs_xattr_iomap_begin,
};

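/*
 * Zero a byte range, picking the DAX or buffered zeroing path as
 * appropriate.  Callers must already hold the IOLOCK and MMAPLOCK
 * exclusively, as asserted below.
 */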
int
xfs_zero_range(
	struct xfs_inode	*ip,
	loff_t			pos,
	loff_t			len,
	bool			*did_zero)
{
	struct inode		*inode = VFS_I(ip);

	xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL);

	if (IS_DAX(inode))
		return dax_zero_range(inode, pos, len, did_zero,
				      &xfs_dax_write_iomap_ops);
	return iomap_zero_range(inode, pos, len, did_zero,
				&xfs_buffered_write_iomap_ops);
}

int
xfs_truncate_page(
	struct xfs_inode	*ip,
	loff_t			pos,
	bool			*did_zero)
{
	struct inode		*inode = VFS_I(ip);

	if (IS_DAX(inode))
		return dax_truncate_page(inode, pos, did_zero,
					 &xfs_dax_write_iomap_ops);
	return iomap_truncate_page(inode, pos, did_zero,
				   &xfs_buffered_write_iomap_ops);
}