/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_reflink.h"
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
/*
 * structure owned by writepages passed to individual writepage calls
 */
struct xfs_writepage_ctx {
	struct xfs_bmbt_irec	imap;
	bool			imap_valid;
	unsigned int		io_type;
	struct xfs_ioend	*ioend;
	sector_t		last_block;
};

void
xfs_count_page_state(
	struct page		*page,
	int			*delalloc,
	int			*unwritten)
{
	struct buffer_head	*bh, *head;

	*delalloc = *unwritten = 0;

	bh = head = page_buffers(page);
	do {
		if (buffer_unwritten(bh))
			(*unwritten) = 1;
		else if (buffer_delay(bh))
			(*delalloc) = 1;
	} while ((bh = bh->b_this_page) != head);
}

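/*
 * Return the block device backing an inode's data: the realtime device for
 * realtime inodes, otherwise the main data device.  The DAX variant below
 * does the same for the backing dax_device.
 */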
struct block_device *
xfs_find_bdev_for_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_bdev;
	else
		return mp->m_ddev_targp->bt_bdev;
}

struct dax_device *
xfs_find_daxdev_for_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_IS_REALTIME_INODE(ip))
		return mp->m_rtdev_targp->bt_daxdev;
	else
		return mp->m_ddev_targp->bt_daxdev;
}

/*
 * We're now finished for good with this page.  Update the page state via the
 * associated buffer_heads, paying attention to the start and end offsets that
 * we need to process on the page.
 *
 * Note that we open code the action in end_buffer_async_write here so that we
 * only have to iterate over the buffers attached to the page once.  This is not
 * only more efficient, but also ensures that we only call end_page_writeback
 * at the end of the iteration, and thus avoids the pitfall of having the page
 * and buffers potentially freed after every call to end_buffer_async_write.
 */
static void
xfs_finish_page_writeback(
	struct inode		*inode,
	struct bio_vec		*bvec,
	int			error)
{
	struct buffer_head	*head = page_buffers(bvec->bv_page), *bh = head;
	bool			busy = false;
	unsigned int		off = 0;
	unsigned long		flags;

	ASSERT(bvec->bv_offset < PAGE_SIZE);
	ASSERT((bvec->bv_offset & (i_blocksize(inode) - 1)) == 0);
	ASSERT(bvec->bv_offset + bvec->bv_len <= PAGE_SIZE);
	ASSERT((bvec->bv_len & (i_blocksize(inode) - 1)) == 0);

	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &head->b_state);
	do {
		if (off >= bvec->bv_offset &&
		    off < bvec->bv_offset + bvec->bv_len) {
			ASSERT(buffer_async_write(bh));
			ASSERT(bh->b_end_io == NULL);

			if (error) {
				mark_buffer_write_io_error(bh);
				clear_buffer_uptodate(bh);
				SetPageError(bvec->bv_page);
			} else {
				set_buffer_uptodate(bh);
			}
			clear_buffer_async_write(bh);
			unlock_buffer(bh);
		} else if (buffer_async_write(bh)) {
			ASSERT(buffer_locked(bh));
			busy = true;
		}
		off += bh->b_size;
	} while ((bh = bh->b_this_page) != head);
	bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
	local_irq_restore(flags);

	if (!busy)
		end_page_writeback(bvec->bv_page);
}

/*
 * We're now finished for good with this ioend structure.  Update the page
 * state, release holds on bios, and finally free up memory.  Do not use the
 * ioend after this.
 */
STATIC void
xfs_destroy_ioend(
	struct xfs_ioend	*ioend,
	int			error)
{
	struct inode		*inode = ioend->io_inode;
	struct bio		*bio = &ioend->io_inline_bio;
	struct bio		*last = ioend->io_bio, *next;
	u64			start = bio->bi_iter.bi_sector;
	bool			quiet = bio_flagged(bio, BIO_QUIET);

	for (bio = &ioend->io_inline_bio; bio; bio = next) {
		struct bio_vec	*bvec;
		int		i;

		/*
		 * For the last bio, bi_private points to the ioend, so we
		 * need to explicitly end the iteration here.
		 */
		if (bio == last)
			next = NULL;
		else
			next = bio->bi_private;

		/* walk each page on bio, ending page IO on them */
		bio_for_each_segment_all(bvec, bio, i)
			xfs_finish_page_writeback(inode, bvec, error);

		bio_put(bio);
	}

	if (unlikely(error && !quiet)) {
		xfs_err_ratelimited(XFS_I(inode)->i_mount,
			"writeback error on sector %llu", start);
	}
}

/*
 * Fast and loose check if this write could update the on-disk inode size.
 */
static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
{
	return ioend->io_offset + ioend->io_size >
		XFS_I(ioend->io_inode)->i_d.di_size;
}

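/*
 * Allocate a size-update transaction up front for an ioend that may extend
 * the on-disk file size at I/O completion, and hand freeze protection and the
 * NOFS allocation state over to the completion side along with it.
 */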
STATIC int
xfs_setfilesize_trans_alloc(
	struct xfs_ioend	*ioend)
{
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
	if (error)
		return error;

	ioend->io_append_trans = tp;

	/*
	 * We may pass freeze protection with a transaction.  So tell lockdep
	 * we released it.
	 */
	__sb_writers_release(ioend->io_inode->i_sb, SB_FREEZE_FS);
	/*
	 * We hand off the transaction to the completion thread now, so
	 * clear the flag here.
	 */
	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
	return 0;
}

/*
 * Update on-disk file size now that data has been written to disk.
 */
STATIC int
__xfs_setfilesize(
	struct xfs_inode	*ip,
	struct xfs_trans	*tp,
	xfs_off_t		offset,
	size_t			size)
{
	xfs_fsize_t		isize;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	isize = xfs_new_eof(ip, offset + size);
	if (!isize) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		xfs_trans_cancel(tp);
		return 0;
	}

	trace_xfs_setfilesize(ip, offset, size);

	ip->i_d.di_size = isize;
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	return xfs_trans_commit(tp);
}

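/*
 * Allocate a transaction and apply an on-disk size update in one call.  The
 * ioend variant below instead reuses the transaction reserved at submission
 * time.
 */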
int
xfs_setfilesize(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	size_t			size)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
	if (error)
		return error;

	return __xfs_setfilesize(ip, tp, offset, size);
}

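/*
 * Complete the deferred size update for an ioend: reclaim the freeze
 * protection and NOFS state that submission handed over, cancel the
 * transaction if the I/O failed, otherwise commit the new size.
 */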
STATIC int
xfs_setfilesize_ioend(
	struct xfs_ioend	*ioend,
	int			error)
{
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	struct xfs_trans	*tp = ioend->io_append_trans;

	/*
	 * The transaction may have been allocated in the I/O submission thread,
	 * thus we need to mark ourselves as being in a transaction manually.
	 * Similarly for freeze protection.
	 */
	current_set_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
	__sb_writers_acquired(VFS_I(ip)->i_sb, SB_FREEZE_FS);

	/* we abort the update if there was an IO error */
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}

	return __xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size);
}

/*
 * IO write completion.
 */
STATIC void
xfs_end_io(
	struct work_struct	*work)
{
	struct xfs_ioend	*ioend =
		container_of(work, struct xfs_ioend, io_work);
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	xfs_off_t		offset = ioend->io_offset;
	size_t			size = ioend->io_size;
	int			error;

	/*
	 * Just clean up the in-memory structures if the fs has been shut down.
	 */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		error = -EIO;
		goto done;
	}

	/*
	 * Clean up any COW blocks on an I/O error.
	 */
	error = blk_status_to_errno(ioend->io_bio->bi_status);
	if (unlikely(error)) {
		switch (ioend->io_type) {
		case XFS_IO_COW:
			xfs_reflink_cancel_cow_range(ip, offset, size, true);
			break;
		}

		goto done;
	}

	/*
	 * Success: commit the COW or unwritten blocks if needed.
	 */
	switch (ioend->io_type) {
	case XFS_IO_COW:
		error = xfs_reflink_end_cow(ip, offset, size);
		break;
	case XFS_IO_UNWRITTEN:
		/* writeback should never update isize */
		error = xfs_iomap_write_unwritten(ip, offset, size, false);
		break;
	default:
		ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_append_trans);
		break;
	}

done:
	if (ioend->io_append_trans)
		error = xfs_setfilesize_ioend(ioend, error);
	xfs_destroy_ioend(ioend, error);
}

STATIC void
xfs_end_bio(
	struct bio		*bio)
{
	struct xfs_ioend	*ioend = bio->bi_private;
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;

	if (ioend->io_type == XFS_IO_UNWRITTEN || ioend->io_type == XFS_IO_COW)
		queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
	else if (ioend->io_append_trans)
		queue_work(mp->m_data_workqueue, &ioend->io_work);
	else
		xfs_destroy_ioend(ioend, blk_status_to_errno(bio->bi_status));
}

STATIC int
xfs_map_blocks(
	struct inode		*inode,
	loff_t			offset,
	struct xfs_bmbt_irec	*imap,
	int			type)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			count = i_blocksize(inode);
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			error = 0;
	int			bmapi_flags = XFS_BMAPI_ENTIRE;
	int			nimaps = 1;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	ASSERT(type != XFS_IO_COW);
	if (type == XFS_IO_UNWRITTEN)
		bmapi_flags |= XFS_BMAPI_IGSTATE;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       (ip->i_df.if_flags & XFS_IFEXTENTS));
	ASSERT(offset <= mp->m_super->s_maxbytes);

	if ((xfs_ufsize_t)offset + count > mp->m_super->s_maxbytes)
		count = mp->m_super->s_maxbytes - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
				imap, &nimaps, bmapi_flags);
	/*
	 * Truncate an overwrite extent if there's a pending CoW
	 * reservation before the end of this extent.  This forces us
	 * to come back to writepage to take care of the CoW.
	 */
	if (nimaps && type == XFS_IO_OVERWRITE)
		xfs_reflink_trim_irec_to_next_cow(ip, offset_fsb, imap);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (error)
		return error;

	if (type == XFS_IO_DELALLOC &&
	    (!nimaps || isnullstartblock(imap->br_startblock))) {
		error = xfs_iomap_write_allocate(ip, XFS_DATA_FORK, offset,
				imap);
		if (!error)
			trace_xfs_map_blocks_alloc(ip, offset, count, type, imap);
		return error;
	}

	if (type == XFS_IO_UNWRITTEN) {
		ASSERT(nimaps);
		ASSERT(imap->br_startblock != HOLESTARTBLOCK);
		ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
	}
	if (nimaps)
		trace_xfs_map_blocks_found(ip, offset, count, type, imap);
	return 0;
}

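/*
 * Check whether the cached mapping still covers @offset, trimming it back to
 * EOF first so that a stale post-EOF extent is never reused.
 */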
STATIC bool
xfs_imap_valid(
	struct inode		*inode,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	offset >>= inode->i_blkbits;

	/*
	 * We have to make sure the cached mapping is within EOF to protect
	 * against eofblocks trimming on file release leaving us with a stale
	 * mapping. Otherwise, a page for a subsequent file extending buffered
	 * write could get picked up by this writeback cycle and written to the
	 * wrong blocks.
	 *
	 * Note that what we really want here is a generic mapping invalidation
	 * mechanism to protect us from arbitrary extent modifying contexts, not
	 * just eofblocks.
	 */
	xfs_trim_extent_eof(imap, XFS_I(inode));

	return offset >= imap->br_startoff &&
		offset < imap->br_startoff + imap->br_blockcount;
}

STATIC void
xfs_start_buffer_writeback(
	struct buffer_head	*bh)
{
	ASSERT(buffer_mapped(bh));
	ASSERT(buffer_locked(bh));
	ASSERT(!buffer_delay(bh));
	ASSERT(!buffer_unwritten(bh));

	bh->b_end_io = NULL;
	set_buffer_async_write(bh);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
}

STATIC void
xfs_start_page_writeback(
	struct page		*page,
	int			clear_dirty)
{
	ASSERT(PageLocked(page));
	ASSERT(!PageWriteback(page));

	/*
	 * if the page was not fully cleaned, we need to ensure that the higher
	 * layers come back to it correctly.  That means we need to keep the page
	 * dirty, and for WB_SYNC_ALL writeback we need to ensure the
	 * PAGECACHE_TAG_TOWRITE index mark is not removed so another attempt to
	 * write this page in this writeback sweep will be made.
	 */
	if (clear_dirty) {
		clear_page_dirty_for_io(page);
		set_page_writeback(page);
	} else
		set_page_writeback_keepwrite(page);

	unlock_page(page);
}

static inline int xfs_bio_add_buffer(struct bio *bio, struct buffer_head *bh)
{
	return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}

/*
 * Submit the bio for an ioend. We are passed an ioend with a bio attached to
 * it, and we submit that bio. The ioend may be used for multiple bio
 * submissions, so we only want to allocate an append transaction for the ioend
 * once. In the case of multiple bio submission, each bio will take an IO
 * reference to the ioend to ensure that the ioend completion is only done once
 * all bios have been submitted and the ioend is really done.
 *
 * If @fail is non-zero, it means that we have a situation where some part of
 * the submission process has failed after we have marked pages for writeback
 * and unlocked them. In this situation, we need to fail the bio and ioend
 * rather than submit it to IO. This typically only happens on a filesystem
 * shutdown.
 */
STATIC int
xfs_submit_ioend(
	struct writeback_control *wbc,
	struct xfs_ioend	*ioend,
	int			status)
{
	/* Convert CoW extents to regular */
	if (!status && ioend->io_type == XFS_IO_COW) {
		status = xfs_reflink_convert_cow(XFS_I(ioend->io_inode),
				ioend->io_offset, ioend->io_size);
	}

	/* Reserve log space if we might write beyond the on-disk inode size. */
	if (!status &&
	    ioend->io_type != XFS_IO_UNWRITTEN &&
	    xfs_ioend_is_append(ioend) &&
	    !ioend->io_append_trans)
		status = xfs_setfilesize_trans_alloc(ioend);

	ioend->io_bio->bi_private = ioend;
	ioend->io_bio->bi_end_io = xfs_end_bio;
	ioend->io_bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);

	/*
	 * If we are failing the IO now, just mark the ioend with an
	 * error and finish it. This will run IO completion immediately
	 * as there is only one reference to the ioend at this point in
	 * time.
	 */
	if (status) {
		ioend->io_bio->bi_status = errno_to_blk_status(status);
		bio_endio(ioend->io_bio);
		return status;
	}

	ioend->io_bio->bi_write_hint = ioend->io_inode->i_write_hint;
	submit_bio(ioend->io_bio);
	return 0;
}

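/*
 * Point a freshly allocated bio at the block device and starting sector
 * described by a buffer head.
 */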
static void
xfs_init_bio_from_bh(
	struct bio		*bio,
	struct buffer_head	*bh)
{
	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio_set_dev(bio, bh->b_bdev);
}

static struct xfs_ioend *
xfs_alloc_ioend(
	struct inode		*inode,
	unsigned int		type,
	xfs_off_t		offset,
	struct buffer_head	*bh)
{
	struct xfs_ioend	*ioend;
	struct bio		*bio;

	bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, xfs_ioend_bioset);
	xfs_init_bio_from_bh(bio, bh);

	ioend = container_of(bio, struct xfs_ioend, io_inline_bio);
	INIT_LIST_HEAD(&ioend->io_list);
	ioend->io_type = type;
	ioend->io_inode = inode;
	ioend->io_size = 0;
	ioend->io_offset = offset;
	INIT_WORK(&ioend->io_work, xfs_end_io);
	ioend->io_append_trans = NULL;
	ioend->io_bio = bio;
	return ioend;
}

/*
 * Allocate a new bio, and chain the old bio to the new one.
 *
 * Note that we have to perform the chaining in this unintuitive order
 * so that the bi_private linkage is set up in the right direction for the
 * traversal in xfs_destroy_ioend().
 */
static void
xfs_chain_bio(
	struct xfs_ioend	*ioend,
	struct writeback_control *wbc,
	struct buffer_head	*bh)
{
	struct bio *new;

	new = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);
	xfs_init_bio_from_bh(new, bh);

	bio_chain(ioend->io_bio, new);
	bio_get(ioend->io_bio);		/* for xfs_destroy_ioend */
	ioend->io_bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
	ioend->io_bio->bi_write_hint = ioend->io_inode->i_write_hint;
	submit_bio(ioend->io_bio);
	ioend->io_bio = new;
}

/*
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to this ioend if we
 * can, otherwise we finish off any current ioend and start another.
 * Return the ioend we finished off so that the caller can submit it
 * once it has finished processing the dirty page.
 */
STATIC void
xfs_add_to_ioend(
	struct inode		*inode,
	struct buffer_head	*bh,
	xfs_off_t		offset,
	struct xfs_writepage_ctx *wpc,
	struct writeback_control *wbc,
	struct list_head	*iolist)
{
	if (!wpc->ioend || wpc->io_type != wpc->ioend->io_type ||
	    bh->b_blocknr != wpc->last_block + 1 ||
	    offset != wpc->ioend->io_offset + wpc->ioend->io_size) {
		if (wpc->ioend)
			list_add(&wpc->ioend->io_list, iolist);
		wpc->ioend = xfs_alloc_ioend(inode, wpc->io_type, offset, bh);
	}

	/*
	 * If the buffer doesn't fit into the bio we need to allocate a new
	 * one.  This shouldn't happen more than once for a given buffer.
	 */
	while (xfs_bio_add_buffer(wpc->ioend->io_bio, bh) != bh->b_size)
		xfs_chain_bio(wpc->ioend, wbc, bh);

	wpc->ioend->io_size += bh->b_size;
	wpc->last_block = bh->b_blocknr;
	xfs_start_buffer_writeback(bh);
}

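/*
 * Translate the file offset into an on-disk block number using the cached
 * extent mapping and attach it to the buffer head.
 */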
STATIC void
xfs_map_buffer(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	sector_t		bn;
	struct xfs_mount	*m = XFS_I(inode)->i_mount;
	xfs_off_t		iomap_offset = XFS_FSB_TO_B(m, imap->br_startoff);
	xfs_daddr_t		iomap_bn = xfs_fsb_to_db(XFS_I(inode), imap->br_startblock);

	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	bn = (iomap_bn >> (inode->i_blkbits - BBSHIFT)) +
	      ((offset - iomap_offset) >> inode->i_blkbits);

	ASSERT(bn || XFS_IS_REALTIME_INODE(XFS_I(inode)));

	bh->b_blocknr = bn;
	set_buffer_mapped(bh);
}

STATIC void
xfs_map_at_offset(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	xfs_map_buffer(inode, bh, imap, offset);
	set_buffer_mapped(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
}

/*
 * Test if a given page contains at least one buffer of a given @type.
 * If @check_all_buffers is true, then we walk all the buffers in the page to
 * try to find one of the type passed in. If it is not set, then the caller only
 * needs to check the first buffer on the page for a match.
 */
STATIC bool
xfs_check_page_type(
	struct page		*page,
	unsigned int		type,
	bool			check_all_buffers)
{
	struct buffer_head	*bh;
	struct buffer_head	*head;

	if (PageWriteback(page))
		return false;
	if (!page->mapping)
		return false;
	if (!page_has_buffers(page))
		return false;

	bh = head = page_buffers(page);
	do {
		if (buffer_unwritten(bh)) {
			if (type == XFS_IO_UNWRITTEN)
				return true;
		} else if (buffer_delay(bh)) {
			if (type == XFS_IO_DELALLOC)
				return true;
		} else if (buffer_dirty(bh) && buffer_mapped(bh)) {
			if (type == XFS_IO_OVERWRITE)
				return true;
		}

		/* If we are only checking the first buffer, we are done now. */
		if (!check_all_buffers)
			break;
	} while ((bh = bh->b_this_page) != head);

	return false;
}

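/*
 * Invalidate a (range of a) page: drop the buffer state and, for a full-page
 * invalidation, clear the dirty bit so releasepage sees a clean page.
 */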
STATIC void
xfs_vm_invalidatepage(
	struct page		*page,
	unsigned int		offset,
	unsigned int		length)
{
	trace_xfs_invalidatepage(page->mapping->host, page, offset,
				 length);

	/*
	 * If we are invalidating the entire page, clear the dirty state from it
	 * so that we can check for attempts to release dirty cached pages in
	 * xfs_vm_releasepage().
	 */
	if (offset == 0 && length >= PAGE_SIZE)
		cancel_dirty_page(page);
	block_invalidatepage(page, offset, length);
}

/*
 * If the page has delalloc buffers on it, we need to punch them out before we
 * invalidate the page.  If we don't, we leave a stale delalloc mapping on the
 * inode that can trip a BUG() in xfs_get_blocks() later on if a direct IO read
 * is done on that same region - the delalloc extent is returned when none is
 * supposed to be there.
 *
 * We prevent this by truncating away the delalloc regions on the page before
 * invalidating it. Because they are delalloc, we can do this without needing a
 * transaction. Indeed - if we get ENOSPC errors, we have to be able to do this
 * truncation without a transaction as there is no space left for block
 * reservation (typically why we see an ENOSPC in writeback).
 *
 * This is not a performance critical path, so for now just do the punching a
 * buffer head at a time.
 */
STATIC void
xfs_aops_discard_page(
	struct page		*page)
{
	struct inode		*inode = page->mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct buffer_head	*bh, *head;
	loff_t			offset = page_offset(page);

	if (!xfs_check_page_type(page, XFS_IO_DELALLOC, true))
		goto out_invalidate;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		goto out_invalidate;

	xfs_alert(ip->i_mount,
		"page discard on page %p, inode 0x%llx, offset %llu.",
			page, ip->i_ino, offset);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	bh = head = page_buffers(page);
	do {
		int		error;
		xfs_fileoff_t	start_fsb;

		if (!buffer_delay(bh))
			goto next_buffer;

		start_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
		error = xfs_bmap_punch_delalloc_range(ip, start_fsb, 1);
		if (error) {
			/* something screwed, just bail */
			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
				xfs_alert(ip->i_mount,
			"page discard unable to remove delalloc mapping.");
			}
			break;
		}
next_buffer:
		offset += i_blocksize(inode);

	} while ((bh = bh->b_this_page) != head);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out_invalidate:
	xfs_vm_invalidatepage(page, 0, PAGE_SIZE);
	return;
}

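/*
 * For a reflink inode, check whether the offset being written back is covered
 * by a copy-on-write mapping.  If so, allocate real blocks for a delayed COW
 * extent as needed and switch the writepage context over to XFS_IO_COW.
 */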
static int
xfs_map_cow(
	struct xfs_writepage_ctx *wpc,
	struct inode		*inode,
	loff_t			offset,
	unsigned int		*new_type)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_bmbt_irec	imap;
	bool			is_cow = false;
	int			error;

	/*
	 * If we already have a valid COW mapping keep using it.
	 */
	if (wpc->io_type == XFS_IO_COW) {
		wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap, offset);
		if (wpc->imap_valid) {
			*new_type = XFS_IO_COW;
			return 0;
		}
	}

	/*
	 * Else we need to check if there is a COW mapping at this offset.
	 */
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	is_cow = xfs_reflink_find_cow_mapping(ip, offset, &imap);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (!is_cow)
		return 0;

	/*
	 * And if the COW mapping has a delayed extent here we need to
	 * allocate real space for it now.
	 */
	if (isnullstartblock(imap.br_startblock)) {
		error = xfs_iomap_write_allocate(ip, XFS_COW_FORK, offset,
				&imap);
		if (error)
			return error;
	}

	wpc->io_type = *new_type = XFS_IO_COW;
	wpc->imap_valid = true;
	wpc->imap = imap;
	return 0;
}

/*
 * We implement an immediate ioend submission policy here to avoid needing to
 * chain multiple ioends and hence nest mempool allocations which can violate
 * forward progress guarantees we need to provide. The current ioend we are
 * adding buffers to is cached on the writepage context, and if the new buffer
 * does not append to the cached ioend it will create a new ioend and cache that
 * instead.
 *
 * If a new ioend is created and cached, the old ioend is returned and queued
 * locally for submission once the entire page is processed or an error has been
 * detected.  While ioends are submitted immediately after they are completed,
 * batching optimisations are provided by higher level block plugging.
 *
 * At the end of a writeback pass, there will be a cached ioend remaining on the
 * writepage context that the caller will need to submit.
 */
static int
xfs_writepage_map(
	struct xfs_writepage_ctx *wpc,
	struct writeback_control *wbc,
	struct inode		*inode,
	struct page		*page,
	loff_t			offset,
	uint64_t		end_offset)
{
	LIST_HEAD(submit_list);
	struct xfs_ioend	*ioend, *next;
	struct buffer_head	*bh, *head;
	ssize_t			len = i_blocksize(inode);
	int			error = 0;
	int			count = 0;
	int			uptodate = 1;
	unsigned int		new_type;

	bh = head = page_buffers(page);
	offset = page_offset(page);
	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;

		/*
		 * set_page_dirty dirties all buffers in a page, independent
		 * of their state.  The dirty state however is entirely
		 * meaningless for holes (!mapped && uptodate), so skip
		 * buffers covering holes here.
		 */
		if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
			wpc->imap_valid = false;
			continue;
		}

		if (buffer_unwritten(bh))
			new_type = XFS_IO_UNWRITTEN;
		else if (buffer_delay(bh))
			new_type = XFS_IO_DELALLOC;
		else if (buffer_uptodate(bh))
			new_type = XFS_IO_OVERWRITE;
		else {
			if (PageUptodate(page))
				ASSERT(buffer_mapped(bh));
			/*
			 * This buffer is not uptodate and will not be
			 * written to disk.  Ensure that we will put any
			 * subsequent writeable buffers into a new
			 * ioend.
			 */
			wpc->imap_valid = false;
			continue;
		}

		if (xfs_is_reflink_inode(XFS_I(inode))) {
			error = xfs_map_cow(wpc, inode, offset, &new_type);
			if (error)
				goto out;
		}

		if (wpc->io_type != new_type) {
			wpc->io_type = new_type;
			wpc->imap_valid = false;
		}

		if (wpc->imap_valid)
			wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap,
							 offset);
		if (!wpc->imap_valid) {
			error = xfs_map_blocks(inode, offset, &wpc->imap,
					     wpc->io_type);
			if (error)
				goto out;
			wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap,
							 offset);
		}
		if (wpc->imap_valid) {
			lock_buffer(bh);
			if (wpc->io_type != XFS_IO_OVERWRITE)
				xfs_map_at_offset(inode, bh, &wpc->imap, offset);
			xfs_add_to_ioend(inode, bh, offset, wpc, wbc, &submit_list);
			count++;
		}

	} while (offset += len, ((bh = bh->b_this_page) != head));

	if (uptodate && bh == head)
		SetPageUptodate(page);

	ASSERT(wpc->ioend || list_empty(&submit_list));

out:
	/*
	 * On error, we have to fail the ioend here because we have locked
	 * buffers in the ioend. If we don't do this, we'll deadlock
	 * invalidating the page as that tries to lock the buffers on the page.
	 * Also, because we may have set pages under writeback, we have to make
	 * sure we run IO completion to mark the error state of the IO
	 * appropriately, so we can't cancel the ioend directly here. That means
	 * we have to mark this page as under writeback if we included any
	 * buffers from it in the ioend chain so that completion treats it
	 * correctly.
	 *
	 * If we didn't include the page in the ioend, then on error we can
	 * simply discard and unlock it as there are no other users of the page
	 * or its buffers right now. The caller will still need to trigger
	 * submission of outstanding ioends on the writepage context so they are
	 * treated correctly on error.
	 */
	if (count) {
		xfs_start_page_writeback(page, !error);

		/*
		 * Preserve the original error if there was one, otherwise catch
		 * submission errors here and propagate into subsequent ioend
		 * submissions.
		 */
		list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
			int error2;

			list_del_init(&ioend->io_list);
			error2 = xfs_submit_ioend(wbc, ioend, error);
			if (error2 && !error)
				error = error2;
		}
	} else if (error) {
		xfs_aops_discard_page(page);
		ClearPageUptodate(page);
		unlock_page(page);
	} else {
		/*
		 * We can end up here with no error and nothing to write if we
		 * race with a partial page truncate on a sub-page block sized
		 * filesystem. In that case we need to mark the page clean.
		 */
		xfs_start_page_writeback(page, 1);
		end_page_writeback(page);
	}

	mapping_set_error(page->mapping, error);
	return error;
}

/*
 * Write out a dirty page.
 *
 * For delalloc space on the page we need to allocate space and flush it.
 * For unwritten space on the page we need to start the conversion to
 * regular allocated space.
 * For any other dirty buffer heads on the page we should flush them.
 */
STATIC int
xfs_do_writepage(
	struct page		*page,
	struct writeback_control *wbc,
	void			*data)
{
	struct xfs_writepage_ctx *wpc = data;
	struct inode		*inode = page->mapping->host;
	loff_t			offset;
	uint64_t		end_offset;
	pgoff_t			end_index;

	trace_xfs_writepage(inode, page, 0, 0);

	ASSERT(page_has_buffers(page));

	/*
	 * Refuse to write the page out if we are called from reclaim context.
	 *
	 * This avoids stack overflows when called from deeply used stacks in
	 * random callers for direct reclaim or memcg reclaim.  We explicitly
	 * allow reclaim from kswapd as the stack usage there is relatively low.
	 *
	 * This should never happen except in the case of a VM regression so
	 * warn about it.
	 */
	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
			PF_MEMALLOC))
		goto redirty;

	/*
	 * Given that we do not allow direct reclaim to call us, we should
	 * never be called while in a filesystem transaction.
	 */
	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC_NOFS))
		goto redirty;

	/*
	 * Is this page beyond the end of the file?
	 *
	 * The page index is less than the end_index, adjust the end_offset
	 * to the highest offset that this page should represent.
	 * -----------------------------------------------------
	 * |			file mapping	       | <EOF> |
	 * -----------------------------------------------------
	 * | Page ... | Page N-2 | Page N-1 |  Page N  |       |
	 * ^--------------------------------^----------|--------
	 * |     desired writeback range    |      see else    |
	 * ---------------------------------^------------------|
	 */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_SHIFT;
	if (page->index < end_index)
		end_offset = (xfs_off_t)(page->index + 1) << PAGE_SHIFT;
	else {
		/*
		 * Check whether the page to write out is beyond or straddles
		 * i_size or not.
		 * -------------------------------------------------------
		 * |		file mapping		        | <EOF>  |
		 * -------------------------------------------------------
		 * | Page ... | Page N-2 | Page N-1 |  Page N   | Beyond |
		 * ^--------------------------------^-----------|---------
		 * |				    |      Straddles     |
		 * ---------------------------------^-----------|--------|
		 */
		unsigned offset_into_page = offset & (PAGE_SIZE - 1);

		/*
		 * Skip the page if it is fully outside i_size, e.g. due to a
		 * truncate operation that is in progress. We must redirty the
		 * page so that reclaim stops reclaiming it. Otherwise
		 * xfs_vm_releasepage() is called on it and gets confused.
		 *
		 * Note that the end_index is unsigned long, it would overflow
		 * if the given offset is greater than 16TB on 32-bit system
		 * and if we do check the page is fully outside i_size or not
		 * via "if (page->index >= end_index + 1)" as "end_index + 1"
		 * will be evaluated to 0.  Hence this page will be redirtied
		 * and be written out repeatedly which would result in an
		 * infinite loop, the user program that performs this operation
		 * will hang.  Instead, we can verify this situation by checking
		 * if the page to write is totally beyond the i_size or if its
		 * offset is just equal to the EOF.
		 */
		if (page->index > end_index ||
		    (page->index == end_index && offset_into_page == 0))
			goto redirty;

		/*
		 * The page straddles i_size.  It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped.
		 * "A file is mapped in multiples of the page size.  For a file
		 * that is not a multiple of the page size, the remaining
		 * memory is zeroed when mapped, and writes to that region are
		 * not written out to the file."
		 */
		zero_user_segment(page, offset_into_page, PAGE_SIZE);

		/* Adjust the end_offset to the end of file */
		end_offset = offset;
	}

	return xfs_writepage_map(wpc, wbc, inode, page, offset, end_offset);

redirty:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}

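/*
 * ->writepage entry point: write a single page using a local writepage
 * context and submit any ioend left cached on it.
 */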
STATIC int
xfs_vm_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	struct xfs_writepage_ctx wpc = {
		.io_type = XFS_IO_INVALID,
	};
	int			ret;

	ret = xfs_do_writepage(page, wbc, &wpc);
	if (wpc.ioend)
		ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
	return ret;
}

STATIC int
xfs_vm_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	struct xfs_writepage_ctx wpc = {
		.io_type = XFS_IO_INVALID,
	};
	int			ret;

	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	if (dax_mapping(mapping))
		return dax_writeback_mapping_range(mapping,
				xfs_find_bdev_for_inode(mapping->host), wbc);

	ret = write_cache_pages(mapping, wbc, xfs_do_writepage, &wpc);
	if (wpc.ioend)
		ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
	return ret;
}

/*
 * Called to move a page into cleanable state - and from there
 * to be released. The page should already be clean. We always
 * have buffer heads in this call.
 *
 * Returns 1 if the page is ok to release, 0 otherwise.
 */
STATIC int
xfs_vm_releasepage(
	struct page		*page,
	gfp_t			gfp_mask)
{
	int			delalloc, unwritten;

	trace_xfs_releasepage(page->mapping->host, page, 0, 0);

	/*
	 * mm accommodates an old ext3 case where clean pages might not have had
	 * the dirty bit cleared. Thus, it can send actual dirty pages to
	 * ->releasepage() via shrink_active_list(). Conversely,
	 * block_invalidatepage() can send pages that are still marked dirty but
	 * otherwise have invalidated buffers.
	 *
	 * We want to release the latter to avoid unnecessary buildup of the
	 * LRU, so xfs_vm_invalidatepage() clears the page dirty flag on pages
	 * that are entirely invalidated and need to be released. Hence the
	 * only time we should get dirty pages here is through
	 * shrink_active_list() and so we can simply skip those now.
	 *
	 * warn if we've left any lingering delalloc/unwritten buffers on clean
	 * or invalidated pages we are about to release.
	 */
	if (PageDirty(page))
		return 0;

	xfs_count_page_state(page, &delalloc, &unwritten);

	if (WARN_ON_ONCE(delalloc))
		return 0;
	if (WARN_ON_ONCE(unwritten))
		return 0;

	return try_to_free_buffers(page);
}

/*
 * If this is O_DIRECT or the mpage code calling, tell them how large the
 * mapping is, so that we can avoid repeated get_blocks calls.
 *
 * If the mapping spans EOF, then we have to break the mapping up as the mapping
 * for blocks beyond EOF must be marked new so that sub block regions can be
 * correctly zeroed. We can't do this for mappings within EOF unless the mapping
 * was just allocated or is unwritten, otherwise the callers would overwrite
 * existing data with zeros. Hence we have to split the mapping into a range up
 * to and including EOF, and a second mapping for beyond EOF.
 */
static void
xfs_map_trim_size(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset,
	ssize_t			size)
{
	xfs_off_t		mapping_size;

	mapping_size = imap->br_startoff + imap->br_blockcount - iblock;
	mapping_size <<= inode->i_blkbits;

	ASSERT(mapping_size > 0);
	if (mapping_size > size)
		mapping_size = size;
	if (offset < i_size_read(inode) &&
	    (xfs_ufsize_t)offset + mapping_size >= i_size_read(inode)) {
		/* limit mapping to block that spans EOF */
		mapping_size = roundup_64(i_size_read(inode) - offset,
					  i_blocksize(inode));
	}
	if (mapping_size > LONG_MAX)
		mapping_size = LONG_MAX;

	bh_result->b_size = mapping_size;
}

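/*
 * Block mapping callback for the buffered read and bmap paths: look up the
 * extent covering @iblock and, for real (non-hole, non-unwritten) extents,
 * map it into the buffer head.
 */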
static int
xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			error = 0;
	int			lockmode = 0;
	struct xfs_bmbt_irec	imap;
	int			nimaps = 1;
	xfs_off_t		offset;
	ssize_t			size;

	BUG_ON(create);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	offset = (xfs_off_t)iblock << inode->i_blkbits;
	ASSERT(bh_result->b_size >= i_blocksize(inode));
	size = bh_result->b_size;

	if (offset >= i_size_read(inode))
		return 0;

	/*
	 * Direct I/O is usually done on preallocated files, so try getting
	 * a block mapping without an exclusive lock first.
	 */
	lockmode = xfs_ilock_data_map_shared(ip);

	ASSERT(offset <= mp->m_super->s_maxbytes);
	if ((xfs_ufsize_t)offset + size > mp->m_super->s_maxbytes)
		size = mp->m_super->s_maxbytes - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);

	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
			&imap, &nimaps, XFS_BMAPI_ENTIRE);
	if (error)
		goto out_unlock;

	if (nimaps) {
		trace_xfs_get_blocks_found(ip, offset, size,
			imap.br_state == XFS_EXT_UNWRITTEN ?
				XFS_IO_UNWRITTEN : XFS_IO_OVERWRITE, &imap);
		xfs_iunlock(ip, lockmode);
	} else {
		trace_xfs_get_blocks_notfound(ip, offset, size);
		goto out_unlock;
	}

	/* trim mapping down to size requested */
	xfs_map_trim_size(inode, iblock, bh_result, &imap, offset, size);

	/*
	 * For unwritten extents do not report a disk address in the buffered
	 * read case (treat as if we're reading into a hole).
	 */
	if (xfs_bmap_is_real_extent(&imap))
		xfs_map_buffer(inode, bh_result, &imap, offset);

	/*
	 * If this is a realtime file, data may be on a different device
	 * to the one pointed to by the buffer_head's b_bdev currently.
	 */
	bh_result->b_bdev = xfs_find_bdev_for_inode(inode);
	return 0;

out_unlock:
	xfs_iunlock(ip, lockmode);
	return error;
}

STATIC ssize_t
xfs_vm_direct_IO(
	struct kiocb		*iocb,
	struct iov_iter		*iter)
{
	/*
	 * We just need the method present so that open/fcntl allow direct I/O.
	 */
	return -EINVAL;
}

STATIC sector_t
xfs_vm_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct inode		*inode = (struct inode *)mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);

	trace_xfs_vm_bmap(XFS_I(inode));

	/*
	 * The swap code (ab-)uses ->bmap to get a block mapping and then
	 * bypasses the file system for actual I/O.  We really can't allow
	 * that on reflink inodes, so we have to skip out here.  And yes,
	 * 0 is the magic code for a bmap error.
	 *
	 * Since we don't pass back blockdev info, we can't return bmap
	 * information for rt files either.
	 */
	if (xfs_is_reflink_inode(ip) || XFS_IS_REALTIME_INODE(ip))
		return 0;

	filemap_write_and_wait(mapping);
	return generic_block_bmap(mapping, block, xfs_get_blocks);
}

STATIC int
xfs_vm_readpage(
	struct file		*unused,
	struct page		*page)
{
	trace_xfs_vm_readpage(page->mapping->host, 1);
	return mpage_readpage(page, xfs_get_blocks);
}

STATIC int
xfs_vm_readpages(
	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,
	unsigned		nr_pages)
{
	trace_xfs_vm_readpages(mapping->host, nr_pages);
	return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
}

/*
 * This is basically a copy of __set_page_dirty_buffers() with one
 * small tweak: buffers beyond EOF do not get marked dirty. If we mark them
 * dirty, we'll never be able to clean them because we don't write buffers
 * beyond EOF, and that means we can't invalidate pages that span EOF
 * that have been marked dirty. Further, the dirty state can leak into
 * the file interior if the file is extended, resulting in all sorts of
 * bad things happening as the state does not match the underlying data.
 *
 * XXX: this really indicates that bufferheads in XFS need to die. Warts like
 * this only exist because of bufferheads and how the generic code manages them.
 */
STATIC int
xfs_vm_set_page_dirty(
	struct page		*page)
{
	struct address_space	*mapping = page->mapping;
	struct inode		*inode = mapping->host;
	loff_t			end_offset;
	loff_t			offset;
	int			newly_dirty;

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	end_offset = i_size_read(inode);
	offset = page_offset(page);

	spin_lock(&mapping->private_lock);
	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		do {
			if (offset < end_offset)
				set_buffer_dirty(bh);
			bh = bh->b_this_page;
			offset += i_blocksize(inode);
		} while (bh != head);
	}
	/*
	 * Lock out page->mem_cgroup migration to keep PageDirty
	 * synchronized with per-memcg dirty page counters.
	 */
	lock_page_memcg(page);
	newly_dirty = !TestSetPageDirty(page);
	spin_unlock(&mapping->private_lock);

	if (newly_dirty) {
		/* sigh - __set_page_dirty() is static, so copy it here, too */
		unsigned long flags;

		spin_lock_irqsave(&mapping->tree_lock, flags);
		if (page->mapping) {	/* Race with truncate? */
			WARN_ON_ONCE(!PageUptodate(page));
			account_page_dirtied(page, mapping);
			radix_tree_tag_set(&mapping->page_tree,
					page_index(page), PAGECACHE_TAG_DIRTY);
		}
		spin_unlock_irqrestore(&mapping->tree_lock, flags);
	}
	unlock_page_memcg(page);
	if (newly_dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return newly_dirty;
}

const struct address_space_operations xfs_address_space_operations = {
	.readpage		= xfs_vm_readpage,
	.readpages		= xfs_vm_readpages,
	.writepage		= xfs_vm_writepage,
	.writepages		= xfs_vm_writepages,
	.set_page_dirty		= xfs_vm_set_page_dirty,
	.releasepage		= xfs_vm_releasepage,
	.invalidatepage		= xfs_vm_invalidatepage,
	.bmap			= xfs_vm_bmap,
	.direct_IO		= xfs_vm_direct_IO,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};