// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2016-2018 Christoph Hellwig.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_reflink.h"
#include <linux/writeback.h>

/*
 * Structure owned by writepages and passed to the individual writepage calls.
 */
struct xfs_writepage_ctx {
	struct xfs_bmbt_irec	imap;
	int			fork;
	unsigned int		data_seq;
	unsigned int		cow_seq;
	struct xfs_ioend	*ioend;
};

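/*
 * Return the block device backing this inode's data: the realtime device for
 * realtime inodes, otherwise the main data device.
 */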
struct block_device *
xfs_find_bdev_for_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_bdev;
	else
		return mp->m_ddev_targp->bt_bdev;
}

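/*
 * As above, but return the DAX device for the chosen buftarg rather than the
 * block device.
 */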
struct dax_device *
xfs_find_daxdev_for_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_daxdev;
	else
		return mp->m_ddev_targp->bt_daxdev;
}

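/*
 * Finish writeback for a single page of an ioend.  For block size < page size
 * end_page_writeback() is only called once every outstanding block on the
 * page has completed, as tracked by the iomap_page write_count.
 */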
static void
xfs_finish_page_writeback(
	struct inode		*inode,
	struct bio_vec		*bvec,
	int			error)
{
	struct iomap_page	*iop = to_iomap_page(bvec->bv_page);

	if (error) {
		SetPageError(bvec->bv_page);
		mapping_set_error(inode->i_mapping, -EIO);
	}

	ASSERT(iop || i_blocksize(inode) == PAGE_SIZE);
	ASSERT(!iop || atomic_read(&iop->write_count) > 0);

	if (!iop || atomic_dec_and_test(&iop->write_count))
		end_page_writeback(bvec->bv_page);
}

/*
 * We're now finished for good with this ioend structure.  Update the page
 * state, release holds on bios, and finally free up memory.  Do not use the
 * ioend after this.
 */
STATIC void
xfs_destroy_ioend(
	struct xfs_ioend	*ioend,
	int			error)
{
	struct inode		*inode = ioend->io_inode;
	struct bio		*bio = &ioend->io_inline_bio;
	struct bio		*last = ioend->io_bio, *next;
	u64			start = bio->bi_iter.bi_sector;
	bool			quiet = bio_flagged(bio, BIO_QUIET);

	for (bio = &ioend->io_inline_bio; bio; bio = next) {
		struct bio_vec		*bvec;
		struct bvec_iter_all	iter_all;

		/*
		 * For the last bio, bi_private points to the ioend, so we
		 * need to explicitly end the iteration here.
		 */
		if (bio == last)
			next = NULL;
		else
			next = bio->bi_private;

		/* walk each page on bio, ending page IO on them */
		bio_for_each_segment_all(bvec, bio, iter_all)
			xfs_finish_page_writeback(inode, bvec, error);
		bio_put(bio);
	}

	if (unlikely(error && !quiet)) {
		xfs_err_ratelimited(XFS_I(inode)->i_mount,
			"writeback error on sector %llu", start);
	}
}

/*
 * Fast and loose check if this write could update the on-disk inode size.
 */
static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
{
	return ioend->io_offset + ioend->io_size >
		XFS_I(ioend->io_inode)->i_d.di_size;
}

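/*
 * Reserve a transaction up front for the on-disk inode size update that may
 * be needed at I/O completion, and stash it in the ioend for the completion
 * worker to use.
 */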
STATIC int
xfs_setfilesize_trans_alloc(
	struct xfs_ioend	*ioend)
{
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0,
				XFS_TRANS_NOFS, &tp);
	if (error)
		return error;

	ioend->io_append_trans = tp;

	/*
	 * We may pass freeze protection with a transaction.  So tell lockdep
	 * we released it.
	 */
	__sb_writers_release(ioend->io_inode->i_sb, SB_FREEZE_FS);
	/*
	 * We hand off the transaction to the completion thread now, so
	 * clear the flag here.
	 */
	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
	return 0;
}

/*
 * Update on-disk file size now that data has been written to disk.
 */
STATIC int
__xfs_setfilesize(
	struct xfs_inode	*ip,
	struct xfs_trans	*tp,
	xfs_off_t		offset,
	size_t			size)
{
	xfs_fsize_t		isize;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	isize = xfs_new_eof(ip, offset + size);
	if (!isize) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		xfs_trans_cancel(tp);
		return 0;
	}

	trace_xfs_setfilesize(ip, offset, size);

	ip->i_d.di_size = isize;
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	return xfs_trans_commit(tp);
}

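/*
 * As above, but for callers that do not already hold a transaction: allocate
 * one and apply the size update immediately.
 */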
int
xfs_setfilesize(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	size_t			size)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
	if (error)
		return error;

	return __xfs_setfilesize(ip, tp, offset, size);
}

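/*
 * Complete the deferred on-disk size update for an ioend using the
 * transaction reserved at submission time, or cancel it if the I/O failed.
 */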
STATIC int
xfs_setfilesize_ioend(
	struct xfs_ioend	*ioend,
	int			error)
{
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	struct xfs_trans	*tp = ioend->io_append_trans;

	/*
	 * The transaction may have been allocated in the I/O submission
	 * thread, thus we need to mark ourselves as being in a transaction
	 * manually.  Similarly for freeze protection.
	 */
	current_set_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
	__sb_writers_acquired(VFS_I(ip)->i_sb, SB_FREEZE_FS);

	/* we abort the update if there was an IO error */
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}

	return __xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size);
}

/*
 * IO write completion.
 */
STATIC void
xfs_end_io(
	struct work_struct	*work)
{
	struct xfs_ioend	*ioend =
		container_of(work, struct xfs_ioend, io_work);
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	xfs_off_t		offset = ioend->io_offset;
	size_t			size = ioend->io_size;
	int			error;

	/*
	 * Just clean up the in-memory structures if the fs has been shut down.
	 */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		error = -EIO;
		goto done;
	}

	/*
	 * Clean up any COW blocks on an I/O error.
	 */
	error = blk_status_to_errno(ioend->io_bio->bi_status);
	if (unlikely(error)) {
		if (ioend->io_fork == XFS_COW_FORK)
			xfs_reflink_cancel_cow_range(ip, offset, size, true);
		goto done;
	}

	/*
	 * Success: commit the COW or unwritten blocks if needed.
	 */
	if (ioend->io_fork == XFS_COW_FORK)
		error = xfs_reflink_end_cow(ip, offset, size);
	else if (ioend->io_state == XFS_EXT_UNWRITTEN)
		error = xfs_iomap_write_unwritten(ip, offset, size, false);
	else
		ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_append_trans);

done:
	if (ioend->io_append_trans)
		error = xfs_setfilesize_ioend(ioend, error);
	xfs_destroy_ioend(ioend, error);
}

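/*
 * Bio completion handler: punt the heavyweight completion work (COW
 * remapping, unwritten extent conversion, on-disk size updates) to a
 * workqueue, and only tear down the ioend directly when none of that work is
 * needed.
 */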
STATIC void
xfs_end_bio(
	struct bio		*bio)
{
	struct xfs_ioend	*ioend = bio->bi_private;
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;

	if (ioend->io_fork == XFS_COW_FORK ||
	    ioend->io_state == XFS_EXT_UNWRITTEN)
		queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
	else if (ioend->io_append_trans)
		queue_work(mp->m_data_workqueue, &ioend->io_work);
	else
		xfs_destroy_ioend(ioend, blk_status_to_errno(bio->bi_status));
}

/*
 * Fast revalidation of the cached writeback mapping.  Return true if the
 * current mapping is valid, false otherwise.
 */
static bool
xfs_imap_valid(
	struct xfs_writepage_ctx	*wpc,
	struct xfs_inode		*ip,
	xfs_fileoff_t			offset_fsb)
{
	if (offset_fsb < wpc->imap.br_startoff ||
	    offset_fsb >= wpc->imap.br_startoff + wpc->imap.br_blockcount)
		return false;
	/*
	 * If this is a COW mapping, it is sufficient to check that the mapping
	 * covers the offset.  Be careful to check this first because the
	 * caller can revalidate a COW mapping without updating the data seqno.
	 */
	if (wpc->fork == XFS_COW_FORK)
		return true;

	/*
	 * This is not a COW mapping.  Check the sequence number of the data
	 * fork because concurrent changes could have invalidated the extent.
	 * Check the COW fork because concurrent changes since the last time
	 * we checked (and found nothing at this offset) could have added
	 * overlapping blocks.
	 */
	if (wpc->data_seq != READ_ONCE(ip->i_df.if_seq))
		return false;
	if (xfs_inode_has_cow_data(ip) &&
	    wpc->cow_seq != READ_ONCE(ip->i_cowfp->if_seq))
		return false;
	return true;
}

/*
 * Pass in a delalloc extent and convert it to real extents, return the real
 * extent that maps offset_fsb in wpc->imap.
 *
 * The current page is held locked so nothing could have removed the block
 * backing offset_fsb, although it could have moved from the COW to the data
 * fork by another thread.
 */
static int
xfs_convert_blocks(
	struct xfs_writepage_ctx *wpc,
	struct xfs_inode	*ip,
	xfs_fileoff_t		offset_fsb)
{
	int			error;

	/*
	 * Attempt to allocate whatever delalloc extent currently backs
	 * offset_fsb and put the result into wpc->imap.  Allocate in a loop
	 * because it may take several attempts to allocate real blocks for a
	 * contiguous delalloc extent if free space is sufficiently fragmented.
	 */
	do {
		error = xfs_bmapi_convert_delalloc(ip, wpc->fork, offset_fsb,
				&wpc->imap, wpc->fork == XFS_COW_FORK ?
					&wpc->cow_seq : &wpc->data_seq);
		if (error)
			return error;
	} while (wpc->imap.br_startoff + wpc->imap.br_blockcount <= offset_fsb);

	return 0;
}

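/*
 * Look up (or allocate) the extent that backs the block at @offset for
 * writeback, caching the result in wpc->imap so that subsequent blocks on the
 * same page can reuse it.
 */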
static int
xfs_map_blocks(
	struct xfs_writepage_ctx *wpc,
	struct inode		*inode,
	loff_t			offset)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			count = i_blocksize(inode);
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + count);
	xfs_fileoff_t		cow_fsb = NULLFILEOFF;
	struct xfs_bmbt_irec	imap;
	struct xfs_iext_cursor	icur;
	int			retries = 0;
	int			error = 0;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	/*
	 * COW fork blocks can overlap data fork blocks even if the blocks
	 * aren't shared.  COW I/O always takes precedence, so we must always
	 * check for overlap on reflink inodes unless the mapping is already a
	 * COW one, or the COW fork hasn't changed from the last time we looked
	 * at it.
	 *
	 * It's safe to check the COW fork if_seq here without the ILOCK
	 * because we've indirectly protected against concurrent updates:
	 * writeback has the page locked, which prevents concurrent
	 * invalidations by reflink and directio and prevents concurrent
	 * buffered writes to the same page.  Changes to if_seq always happen
	 * under i_lock, which protects against concurrent updates and
	 * provides a memory barrier on the way out that ensures that we
	 * always see the current value.
	 */
	if (xfs_imap_valid(wpc, ip, offset_fsb))
		return 0;

	/*
	 * If we don't have a valid map, now it's time to get a new one for
	 * this offset.  This will convert delayed allocations (including COW
	 * ones) into real extents.  If we return without a valid map, it
	 * means we landed in a hole and we skip the block.
	 */
retry:
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       (ip->i_df.if_flags & XFS_IFEXTENTS));

	/*
	 * Check if this offset is covered by a COW extent, and if so use it
	 * directly instead of looking up anything in the data fork.
	 */
	if (xfs_inode_has_cow_data(ip) &&
	    xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &imap))
		cow_fsb = imap.br_startoff;
	if (cow_fsb != NULLFILEOFF && cow_fsb <= offset_fsb) {
		wpc->cow_seq = READ_ONCE(ip->i_cowfp->if_seq);
		xfs_iunlock(ip, XFS_ILOCK_SHARED);

		wpc->fork = XFS_COW_FORK;
		goto allocate_blocks;
	}

	/*
	 * No COW extent overlap.  Revalidate now that we may have updated
	 * ->cow_seq.  If the data mapping is still valid, we're done.
	 */
	if (xfs_imap_valid(wpc, ip, offset_fsb)) {
		xfs_iunlock(ip, XFS_ILOCK_SHARED);
		return 0;
	}

	/*
	 * If we don't have a valid map, now it's time to get a new one for
	 * this offset.  This will convert delayed allocations (including COW
	 * ones) into real extents.
	 */
	if (!xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap))
		imap.br_startoff = end_fsb;	/* fake a hole past EOF */
	wpc->data_seq = READ_ONCE(ip->i_df.if_seq);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	wpc->fork = XFS_DATA_FORK;

	/* landed in a hole or beyond EOF? */
	if (imap.br_startoff > offset_fsb) {
		imap.br_blockcount = imap.br_startoff - offset_fsb;
		imap.br_startoff = offset_fsb;
		imap.br_startblock = HOLESTARTBLOCK;
		imap.br_state = XFS_EXT_NORM;
	}

	/*
	 * Truncate to the next COW extent if there is one.  This is the only
	 * opportunity to do this because we can skip COW fork lookups for the
	 * subsequent blocks in the mapping; however, the requirement to treat
	 * the COW range separately remains.
	 */
	if (cow_fsb != NULLFILEOFF &&
	    cow_fsb < imap.br_startoff + imap.br_blockcount)
		imap.br_blockcount = cow_fsb - imap.br_startoff;

	/* got a delalloc extent? */
	if (imap.br_startblock != HOLESTARTBLOCK &&
	    isnullstartblock(imap.br_startblock))
		goto allocate_blocks;

	wpc->imap = imap;
	trace_xfs_map_blocks_found(ip, offset, count, wpc->fork, &imap);
	return 0;
allocate_blocks:
	error = xfs_convert_blocks(wpc, ip, offset_fsb);
	if (error) {
		/*
		 * If we failed to find the extent in the COW fork we might
		 * have raced with a COW to data fork conversion or truncate.
		 * Restart the lookup to catch the extent in the data fork for
		 * the former case, but prevent additional retries to avoid
		 * looping forever for the latter case.
		 */
		if (error == -EAGAIN && wpc->fork == XFS_COW_FORK && !retries++)
			goto retry;
		ASSERT(error != -EAGAIN);
		return error;
	}

	/*
	 * Due to merging the return real extent might be larger than the
	 * original delalloc one.  Trim the return extent to the next COW
	 * boundary again to force a re-lookup.
	 */
	if (wpc->fork != XFS_COW_FORK && cow_fsb != NULLFILEOFF &&
	    cow_fsb < wpc->imap.br_startoff + wpc->imap.br_blockcount)
		wpc->imap.br_blockcount = cow_fsb - wpc->imap.br_startoff;

	ASSERT(wpc->imap.br_startoff <= offset_fsb);
	ASSERT(wpc->imap.br_startoff + wpc->imap.br_blockcount > offset_fsb);
	trace_xfs_map_blocks_alloc(ip, offset, count, wpc->fork, &imap);
	return 0;
}

/*
 * Submit the bio for an ioend.  We are passed an ioend with a bio attached to
 * it, and we submit that bio.  The ioend may be used for multiple bio
 * submissions, so we only want to allocate an append transaction for the
 * ioend once.  In the case of multiple bio submission, each bio will take an
 * IO reference to the ioend to ensure that the ioend completion is only done
 * once all bios have been submitted and the ioend is really done.
 *
 * If @status is non-zero, it means that we have a situation where some part
 * of the submission process has failed after we have marked pages for
 * writeback and unlocked them.  In this situation, we need to fail the bio
 * and ioend rather than submit it to IO.  This typically only happens on a
 * filesystem shutdown.
 */
STATIC int
xfs_submit_ioend(
	struct writeback_control *wbc,
	struct xfs_ioend	*ioend,
	int			status)
{
	/* Convert CoW extents to regular */
	if (!status && ioend->io_fork == XFS_COW_FORK) {
		/*
		 * Yuk.  This can do memory allocation, but is not a
		 * transactional operation so everything is done in GFP_KERNEL
		 * context.  That can deadlock, because we hold pages in
		 * writeback state and GFP_KERNEL allocations can block on
		 * them.  Hence we must operate in nofs conditions here.
		 */
		unsigned nofs_flag;

		nofs_flag = memalloc_nofs_save();
		status = xfs_reflink_convert_cow(XFS_I(ioend->io_inode),
				ioend->io_offset, ioend->io_size);
		memalloc_nofs_restore(nofs_flag);
	}

	/* Reserve log space if we might write beyond the on-disk inode size. */
	if (!status &&
	    (ioend->io_fork == XFS_COW_FORK ||
	     ioend->io_state != XFS_EXT_UNWRITTEN) &&
	    xfs_ioend_is_append(ioend) &&
	    !ioend->io_append_trans)
		status = xfs_setfilesize_trans_alloc(ioend);

	ioend->io_bio->bi_private = ioend;
	ioend->io_bio->bi_end_io = xfs_end_bio;
	ioend->io_bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);

	/*
	 * If we are failing the IO now, just mark the ioend with an
	 * error and finish it.  This will run IO completion immediately
	 * as there is only one reference to the ioend at this point in
	 * time.
	 */
	if (status) {
		ioend->io_bio->bi_status = errno_to_blk_status(status);
		bio_endio(ioend->io_bio);
		return status;
	}

	ioend->io_bio->bi_write_hint = ioend->io_inode->i_write_hint;
	submit_bio(ioend->io_bio);
	return 0;
}

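/*
 * Allocate a new ioend with an embedded initial bio aimed at @sector on
 * @bdev.
 */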
static struct xfs_ioend *
xfs_alloc_ioend(
	struct inode		*inode,
	int			fork,
	xfs_exntst_t		state,
	xfs_off_t		offset,
	struct block_device	*bdev,
	sector_t		sector)
{
	struct xfs_ioend	*ioend;
	struct bio		*bio;

	bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, &xfs_ioend_bioset);
	bio_set_dev(bio, bdev);
	bio->bi_iter.bi_sector = sector;

	ioend = container_of(bio, struct xfs_ioend, io_inline_bio);
	INIT_LIST_HEAD(&ioend->io_list);
	ioend->io_fork = fork;
	ioend->io_state = state;
	ioend->io_inode = inode;
	ioend->io_size = 0;
	ioend->io_offset = offset;
	INIT_WORK(&ioend->io_work, xfs_end_io);
	ioend->io_append_trans = NULL;
	ioend->io_bio = bio;
	return ioend;
}

/*
 * Allocate a new bio, and chain the old bio to the new one.
 *
 * Note that we have to perform the chaining in this unintuitive order
 * so that the bi_private linkage is set up in the right direction for the
 * traversal in xfs_destroy_ioend().
 */
static void
xfs_chain_bio(
	struct xfs_ioend	*ioend,
	struct writeback_control *wbc,
	struct block_device	*bdev,
	sector_t		sector)
{
	struct bio		*new;

	new = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);
	bio_set_dev(new, bdev);
	new->bi_iter.bi_sector = sector;
	bio_chain(ioend->io_bio, new);
	bio_get(ioend->io_bio);		/* for xfs_destroy_ioend */
	ioend->io_bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
	ioend->io_bio->bi_write_hint = ioend->io_inode->i_write_hint;
	submit_bio(ioend->io_bio);
	ioend->io_bio = new;
}

/*
 * Test to see if we have an existing ioend structure that we could append to
 * first; otherwise finish off the current ioend and start another.
 */
STATIC void
xfs_add_to_ioend(
	struct inode		*inode,
	xfs_off_t		offset,
	struct page		*page,
	struct iomap_page	*iop,
	struct xfs_writepage_ctx *wpc,
	struct writeback_control *wbc,
	struct list_head	*iolist)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct block_device	*bdev = xfs_find_bdev_for_inode(inode);
	unsigned		len = i_blocksize(inode);
	unsigned		poff = offset & (PAGE_SIZE - 1);
	sector_t		sector;

	sector = xfs_fsb_to_db(ip, wpc->imap.br_startblock) +
		((offset - XFS_FSB_TO_B(mp, wpc->imap.br_startoff)) >> 9);

	if (!wpc->ioend ||
	    wpc->fork != wpc->ioend->io_fork ||
	    wpc->imap.br_state != wpc->ioend->io_state ||
	    sector != bio_end_sector(wpc->ioend->io_bio) ||
	    offset != wpc->ioend->io_offset + wpc->ioend->io_size) {
		if (wpc->ioend)
			list_add(&wpc->ioend->io_list, iolist);
		wpc->ioend = xfs_alloc_ioend(inode, wpc->fork,
				wpc->imap.br_state, offset, bdev, sector);
	}

	if (!__bio_try_merge_page(wpc->ioend->io_bio, page, len, poff, true)) {
		if (iop)
			atomic_inc(&iop->write_count);
		if (bio_full(wpc->ioend->io_bio))
			xfs_chain_bio(wpc->ioend, wbc, bdev, sector);
		bio_add_page(wpc->ioend->io_bio, page, len, poff);
	}

	wpc->ioend->io_size += len;
}

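/* Trace and pass invalidation through to the generic iomap code. */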
STATIC void
xfs_vm_invalidatepage(
	struct page		*page,
	unsigned int		offset,
	unsigned int		length)
{
	trace_xfs_invalidatepage(page->mapping->host, page, offset, length);
	iomap_invalidatepage(page, offset, length);
}

/*
 * If the page has delalloc blocks on it, we need to punch them out before we
 * invalidate the page.  If we don't, we leave a stale delalloc mapping on the
 * inode that can trip up a later direct I/O read operation on the same
 * region.
 *
 * We prevent this by truncating away the delalloc regions on the page.
 * Because they are delalloc, we can do this without needing a transaction.
 * Indeed - if we get ENOSPC errors, we have to be able to do this truncation
 * without a transaction as there is no space left for block reservation
 * (typically why we see an ENOSPC in writeback).
 */
STATIC void
xfs_aops_discard_page(
	struct page		*page)
{
	struct inode		*inode = page->mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	loff_t			offset = page_offset(page);
	xfs_fileoff_t		start_fsb = XFS_B_TO_FSBT(mp, offset);
	int			error;

	if (XFS_FORCED_SHUTDOWN(mp))
		goto out_invalidate;

	xfs_alert(mp,
		"page discard on page "PTR_FMT", inode 0x%llx, offset %llu.",
			page, ip->i_ino, offset);

	error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
			PAGE_SIZE / i_blocksize(inode));
	if (error && !XFS_FORCED_SHUTDOWN(mp))
		xfs_alert(mp, "page discard unable to remove delalloc mapping.");
out_invalidate:
	xfs_vm_invalidatepage(page, 0, PAGE_SIZE);
}

/*
 * We implement an immediate ioend submission policy here to avoid needing to
 * chain multiple ioends and hence nest mempool allocations which can violate
 * forward progress guarantees we need to provide.  The current ioend we are
 * adding blocks to is cached on the writepage context, and if the new block
 * does not append to the cached ioend it will create a new ioend and cache
 * that instead.
 *
 * If a new ioend is created and cached, the old ioend is returned and queued
 * locally for submission once the entire page is processed or an error has
 * been detected.  While ioends are submitted immediately after they are
 * completed, batching optimisations are provided by higher level block
 * plugging.
 *
 * At the end of a writeback pass, there will be a cached ioend remaining on
 * the writepage context that the caller will need to submit.
 */
static int
xfs_writepage_map(
	struct xfs_writepage_ctx *wpc,
	struct writeback_control *wbc,
	struct inode		*inode,
	struct page		*page,
	uint64_t		end_offset)
{
	LIST_HEAD(submit_list);
	struct iomap_page	*iop = to_iomap_page(page);
	unsigned		len = i_blocksize(inode);
	struct xfs_ioend	*ioend, *next;
	uint64_t		file_offset;	/* file offset of page */
	int			error = 0, count = 0, i;

	ASSERT(iop || i_blocksize(inode) == PAGE_SIZE);
	ASSERT(!iop || atomic_read(&iop->write_count) == 0);

	/*
	 * Walk through the page to find areas to write back.  If we run off
	 * the end of the current map or find the current map invalid, grab a
	 * new one.
	 */
	for (i = 0, file_offset = page_offset(page);
	     i < (PAGE_SIZE >> inode->i_blkbits) && file_offset < end_offset;
	     i++, file_offset += len) {
		if (iop && !test_bit(i, iop->uptodate))
			continue;

		error = xfs_map_blocks(wpc, inode, file_offset);
		if (error)
			break;
		if (wpc->imap.br_startblock == HOLESTARTBLOCK)
			continue;
		xfs_add_to_ioend(inode, file_offset, page, iop, wpc, wbc,
				 &submit_list);
		count++;
	}

	ASSERT(wpc->ioend || list_empty(&submit_list));
	ASSERT(PageLocked(page));
	ASSERT(!PageWriteback(page));

	/*
	 * On error, we have to fail the ioend here because we may have set
	 * pages under writeback, we have to make sure we run IO completion to
	 * mark the error state of the IO appropriately, so we can't cancel
	 * the ioend directly here.  That means we have to mark this page as
	 * under writeback if we included any blocks from it in the ioend
	 * chain so that completion treats it correctly.
	 *
	 * If we didn't include the page in the ioend, then on error we can
	 * simply discard and unlock it as there are no other users of the
	 * page now.  The caller will still need to trigger submission of
	 * outstanding ioends on the writepage context so they are treated
	 * correctly on error.
	 */
	if (unlikely(error)) {
		if (!count) {
			xfs_aops_discard_page(page);
			ClearPageUptodate(page);
			unlock_page(page);
			goto done;
		}

		/*
		 * If the page was not fully cleaned, we need to ensure that
		 * the higher layers come back to it correctly.  That means we
		 * need to keep the page dirty, and for WB_SYNC_ALL writeback
		 * we need to ensure the PAGECACHE_TAG_TOWRITE index mark is
		 * not removed so another attempt to write this page in this
		 * writeback sweep will be made.
		 */
		set_page_writeback_keepwrite(page);
	} else {
		clear_page_dirty_for_io(page);
		set_page_writeback(page);
	}

	unlock_page(page);

	/*
	 * Preserve the original error if there was one, otherwise catch
	 * submission errors here and propagate into subsequent ioend
	 * submissions.
	 */
	list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
		int error2;

		list_del_init(&ioend->io_list);
		error2 = xfs_submit_ioend(wbc, ioend, error);
		if (error2 && !error)
			error = error2;
	}

	/*
	 * We can end up here with no error and nothing to write only if we
	 * race with a partial page truncate on a sub-page block sized
	 * filesystem.
	 */
	if (!count)
		end_page_writeback(page);
done:
	mapping_set_error(page->mapping, error);
	return error;
}

/*
 * Write out a dirty page.
 *
 * For delalloc space on the page we need to allocate space and flush it.
 * For unwritten space on the page we need to start the conversion to
 * regular allocated space.
 */
STATIC int
xfs_do_writepage(
	struct page		*page,
	struct writeback_control *wbc,
	void			*data)
{
	struct xfs_writepage_ctx *wpc = data;
	struct inode		*inode = page->mapping->host;
	loff_t			offset;
	uint64_t		end_offset;
	pgoff_t			end_index;

	trace_xfs_writepage(inode, page, 0, 0);

	/*
	 * Refuse to write the page out if we are called from reclaim context.
	 *
	 * This avoids stack overflows when called from deeply used stacks in
	 * random callers for direct reclaim or memcg reclaim.  We explicitly
	 * allow reclaim from kswapd as the stack usage there is relatively
	 * low.
	 *
	 * This should never happen except in the case of a VM regression so
	 * warn about it.
	 */
	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
			PF_MEMALLOC))
		goto redirty;

	/*
	 * Given that we do not allow direct reclaim to call us, we should
	 * never be called while in a filesystem transaction.
	 */
	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC_NOFS))
		goto redirty;

	/*
	 * Is this page beyond the end of the file?
	 *
	 * The page index is less than the end_index, adjust the end_offset
	 * to the highest offset that this page should represent.
	 * -----------------------------------------------------
	 * |			file mapping	       | <EOF> |
	 * -----------------------------------------------------
	 * | Page ... | Page N-2 | Page N-1 |  Page N  |       |
	 * ^--------------------------------^----------|--------
	 * |     desired writeback range    |      see else    |
	 * ---------------------------------^------------------|
	 */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_SHIFT;
	if (page->index < end_index)
		end_offset = (xfs_off_t)(page->index + 1) << PAGE_SHIFT;
	else {
		/*
		 * Check whether the page to write out is beyond or straddles
		 * i_size or not.
		 * -------------------------------------------------------
		 * |		file mapping		        | <EOF>  |
		 * -------------------------------------------------------
		 * | Page ... | Page N-2 | Page N-1 |  Page N   | Beyond |
		 * ^--------------------------------^-----------|---------
		 * |				    |      Straddles     |
		 * ---------------------------------^-----------|--------|
		 */
		unsigned offset_into_page = offset & (PAGE_SIZE - 1);

		/*
		 * Skip the page if it is fully outside i_size, e.g. due to a
		 * truncate operation that is in progress.  We must redirty
		 * the page so that reclaim stops reclaiming it.  Otherwise
		 * xfs_vm_releasepage() is called on it and gets confused.
		 *
		 * Note that end_index is an unsigned long.  It would overflow
		 * if the given offset is greater than 16TB on a 32-bit system
		 * and if we checked whether the page is fully outside i_size
		 * via "if (page->index >= end_index + 1)", as "end_index + 1"
		 * would evaluate to 0.  This page would then be redirtied and
		 * written out repeatedly, resulting in an infinite loop; the
		 * user program performing this operation would hang.
		 * Instead, we can verify this situation by checking if the
		 * page to write is totally beyond i_size or if its offset is
		 * just equal to the EOF.
		 */
		if (page->index > end_index ||
		    (page->index == end_index && offset_into_page == 0))
			goto redirty;

		/*
		 * The page straddles i_size.  It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped.
		 * "A file is mapped in multiples of the page size.  For a file
		 * that is not a multiple of the page size, the remaining
		 * memory is zeroed when mapped, and writes to that region are
		 * not written out to the file."
		 */
		zero_user_segment(page, offset_into_page, PAGE_SIZE);

		/* Adjust the end_offset to the end of file */
		end_offset = offset;
	}

	return xfs_writepage_map(wpc, wbc, inode, page, end_offset);

redirty:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}

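/*
 * ->writepage entry point: write out a single page and submit any ioend still
 * cached on the on-stack writepage context.
 */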
STATIC int
xfs_vm_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	struct xfs_writepage_ctx wpc = { };
	int			ret;

	ret = xfs_do_writepage(page, wbc, &wpc);
	if (wpc.ioend)
		ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
	return ret;
}

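/*
 * ->writepages entry point: write back a range of dirty pages via
 * write_cache_pages() and submit the final cached ioend.
 */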
STATIC int
xfs_vm_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	struct xfs_writepage_ctx wpc = { };
	int			ret;

	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	ret = write_cache_pages(mapping, wbc, xfs_do_writepage, &wpc);
	if (wpc.ioend)
		ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
	return ret;
}

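/*
 * Writeback for DAX mappings does not go through the ioend path; hand the
 * range straight to the DAX code.
 */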
STATIC int
xfs_dax_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	return dax_writeback_mapping_range(mapping,
			xfs_find_bdev_for_inode(mapping->host), wbc);
}

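/* Trace and defer to the generic iomap page release helper. */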
STATIC int
xfs_vm_releasepage(
	struct page		*page,
	gfp_t			gfp_mask)
{
	trace_xfs_releasepage(page->mapping->host, page, 0, 0);
	return iomap_releasepage(page, gfp_mask);
}

STATIC sector_t
xfs_vm_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct xfs_inode	*ip = XFS_I(mapping->host);

	trace_xfs_vm_bmap(ip);

	/*
	 * The swap code (ab-)uses ->bmap to get a block mapping and then
	 * bypasses the file system for actual I/O.  We really can't allow
	 * that on reflink inodes, so we have to skip out here.  And yes,
	 * 0 is the magic code for a bmap error.
	 *
	 * Since we don't pass back blockdev info, we can't return bmap
	 * information for rt files either.
	 */
	if (xfs_is_cow_inode(ip) || XFS_IS_REALTIME_INODE(ip))
		return 0;
	return iomap_bmap(mapping, block, &xfs_iomap_ops);
}

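/* The read paths delegate entirely to iomap using the XFS iomap operations. */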
STATIC int
xfs_vm_readpage(
	struct file		*unused,
	struct page		*page)
{
	trace_xfs_vm_readpage(page->mapping->host, 1);
	return iomap_readpage(page, &xfs_iomap_ops);
}

STATIC int
xfs_vm_readpages(
	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,
	unsigned		nr_pages)
{
	trace_xfs_vm_readpages(mapping->host, nr_pages);
	return iomap_readpages(mapping, pages, nr_pages, &xfs_iomap_ops);
}

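/* Activate a swap file on XFS via the generic iomap swapfile support. */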
static int
xfs_iomap_swapfile_activate(
	struct swap_info_struct		*sis,
	struct file			*swap_file,
	sector_t			*span)
{
	sis->bdev = xfs_find_bdev_for_inode(file_inode(swap_file));
	return iomap_swapfile_activate(sis, swap_file, span, &xfs_iomap_ops);
}

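/* Address space operations for regular, non-DAX XFS files. */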
const struct address_space_operations xfs_address_space_operations = {
	.readpage		= xfs_vm_readpage,
	.readpages		= xfs_vm_readpages,
	.writepage		= xfs_vm_writepage,
	.writepages		= xfs_vm_writepages,
	.set_page_dirty		= iomap_set_page_dirty,
	.releasepage		= xfs_vm_releasepage,
	.invalidatepage		= xfs_vm_invalidatepage,
	.bmap			= xfs_vm_bmap,
	.direct_IO		= noop_direct_IO,
	.migratepage		= iomap_migrate_page,
	.is_partially_uptodate	= iomap_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
	.swap_activate		= xfs_iomap_swapfile_activate,
};

const struct address_space_operations xfs_dax_aops = {
	.writepages		= xfs_dax_writepages,
	.direct_IO		= noop_direct_IO,
	.set_page_dirty		= noop_set_page_dirty,
	.invalidatepage		= noop_invalidatepage,
	.swap_activate		= xfs_iomap_swapfile_activate,
};