// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (C) 2016-2019 Christoph Hellwig.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/writeback.h>
#include <linux/list_sort.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/sched/signal.h>
#include <linux/migrate.h>
#include "trace.h"

#include "../internal.h"
/*
 * Structure allocated for each page when block size < PAGE_SIZE to track
 * sub-page uptodate status and I/O completions.
 */
struct iomap_page {
	atomic_t		read_count;
	atomic_t		write_count;
	spinlock_t		uptodate_lock;
	DECLARE_BITMAP(uptodate, PAGE_SIZE / 512);
};

static inline struct iomap_page *to_iomap_page(struct page *page)
{
	if (page_has_private(page))
		return (struct iomap_page *)page_private(page);
	return NULL;
}

static struct bio_set iomap_ioend_bioset;
static struct iomap_page *
iomap_page_create(struct inode *inode, struct page *page)
{
	struct iomap_page *iop = to_iomap_page(page);

	if (iop || i_blocksize(inode) == PAGE_SIZE)
		return iop;

	iop = kmalloc(sizeof(*iop), GFP_NOFS | __GFP_NOFAIL);
	atomic_set(&iop->read_count, 0);
	atomic_set(&iop->write_count, 0);
	spin_lock_init(&iop->uptodate_lock);
	bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE);

	/*
	 * migrate_page_move_mapping() assumes that pages with private data
	 * have their count elevated by 1.
	 */
	get_page(page);
	set_page_private(page, (unsigned long)iop);
	SetPagePrivate(page);
	return iop;
}
static void
iomap_page_release(struct page *page)
{
	struct iomap_page *iop = to_iomap_page(page);

	if (!iop)
		return;
	WARN_ON_ONCE(atomic_read(&iop->read_count));
	WARN_ON_ONCE(atomic_read(&iop->write_count));
	ClearPagePrivate(page);
	set_page_private(page, 0);
	put_page(page);
	kfree(iop);
}
/*
 * Calculate the range inside the page that we actually need to read.
 */
static void
iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
		loff_t *pos, loff_t length, unsigned *offp, unsigned *lenp)
{
	loff_t orig_pos = *pos;
	loff_t isize = i_size_read(inode);
	unsigned block_bits = inode->i_blkbits;
	unsigned block_size = (1 << block_bits);
	unsigned poff = offset_in_page(*pos);
	unsigned plen = min_t(loff_t, PAGE_SIZE - poff, length);
	unsigned first = poff >> block_bits;
	unsigned last = (poff + plen - 1) >> block_bits;

	/*
	 * If the block size is smaller than the page size we need to check the
	 * per-block uptodate status and adjust the offset and length if needed
	 * to avoid reading in already uptodate ranges.
	 */
	if (iop) {
		unsigned int i;

		/* move forward for each leading block marked uptodate */
		for (i = first; i <= last; i++) {
			if (!test_bit(i, iop->uptodate))
				break;
			*pos += block_size;
			poff += block_size;
			plen -= block_size;
			first++;
		}

		/* truncate len if we find any trailing uptodate block(s) */
		for ( ; i <= last; i++) {
			if (test_bit(i, iop->uptodate)) {
				plen -= (last - i + 1) * block_size;
				last = i - 1;
				break;
			}
		}
	}

	/*
	 * If the extent spans the block that contains the i_size we need to
	 * handle both halves separately so that we properly zero data in the
	 * page cache for blocks that are entirely outside of i_size.
	 */
	if (orig_pos <= isize && orig_pos + length > isize) {
		unsigned end = offset_in_page(isize - 1) >> block_bits;

		if (first <= end && last > end)
			plen -= (last - end) * block_size;
	}

	*offp = poff;
	*lenp = plen;
}
static void
iomap_iop_set_range_uptodate(struct page *page, unsigned off, unsigned len)
{
	struct iomap_page *iop = to_iomap_page(page);
	struct inode *inode = page->mapping->host;
	unsigned first = off >> inode->i_blkbits;
	unsigned last = (off + len - 1) >> inode->i_blkbits;
	bool uptodate = true;
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&iop->uptodate_lock, flags);
	for (i = 0; i < PAGE_SIZE / i_blocksize(inode); i++) {
		if (i >= first && i <= last)
			set_bit(i, iop->uptodate);
		else if (!test_bit(i, iop->uptodate))
			uptodate = false;
	}

	if (uptodate)
		SetPageUptodate(page);
	spin_unlock_irqrestore(&iop->uptodate_lock, flags);
}
static void
iomap_set_range_uptodate(struct page *page, unsigned off, unsigned len)
{
	if (PageError(page))
		return;

	if (page_has_private(page))
		iomap_iop_set_range_uptodate(page, off, len);
	else
		SetPageUptodate(page);
}
static void
iomap_read_finish(struct iomap_page *iop, struct page *page)
{
	if (!iop || atomic_dec_and_test(&iop->read_count))
		unlock_page(page);
}
static void
iomap_read_page_end_io(struct bio_vec *bvec, int error)
{
	struct page *page = bvec->bv_page;
	struct iomap_page *iop = to_iomap_page(page);

	if (unlikely(error)) {
		ClearPageUptodate(page);
		SetPageError(page);
	} else {
		iomap_set_range_uptodate(page, bvec->bv_offset, bvec->bv_len);
	}

	iomap_read_finish(iop, page);
}
static void
iomap_read_end_io(struct bio *bio)
{
	int error = blk_status_to_errno(bio->bi_status);
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all)
		iomap_read_page_end_io(bvec, error);
	bio_put(bio);
}
struct iomap_readpage_ctx {
	struct page		*cur_page;
	bool			cur_page_in_bio;
	bool			is_readahead;
	struct bio		*bio;
	struct list_head	*pages;
};
static void
iomap_read_inline_data(struct inode *inode, struct page *page,
		struct iomap *iomap)
{
	size_t size = i_size_read(inode);
	void *addr;

	if (PageUptodate(page))
		return;

	BUG_ON(page->index);
	BUG_ON(size > PAGE_SIZE - offset_in_page(iomap->inline_data));

	addr = kmap_atomic(page);
	memcpy(addr, iomap->inline_data, size);
	memset(addr + size, 0, PAGE_SIZE - size);
	kunmap_atomic(addr);
	SetPageUptodate(page);
}
static inline bool iomap_block_needs_zeroing(struct inode *inode,
		struct iomap *iomap, loff_t pos)
{
	return iomap->type != IOMAP_MAPPED ||
		(iomap->flags & IOMAP_F_NEW) ||
		pos >= i_size_read(inode);
}
static loff_t
iomap_readpage_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap, struct iomap *srcmap)
{
	struct iomap_readpage_ctx *ctx = data;
	struct page *page = ctx->cur_page;
	struct iomap_page *iop = iomap_page_create(inode, page);
	bool same_page = false, is_contig = false;
	loff_t orig_pos = pos;
	unsigned poff, plen;
	sector_t sector;

	if (iomap->type == IOMAP_INLINE) {
		WARN_ON_ONCE(pos);
		iomap_read_inline_data(inode, page, iomap);
		return PAGE_SIZE;
	}

	/* zero post-eof blocks as the page may be mapped */
	iomap_adjust_read_range(inode, iop, &pos, length, &poff, &plen);
	if (plen == 0)
		goto done;

	if (iomap_block_needs_zeroing(inode, iomap, pos)) {
		zero_user(page, poff, plen);
		iomap_set_range_uptodate(page, poff, plen);
		goto done;
	}

	ctx->cur_page_in_bio = true;

	/*
	 * Try to merge into a previous segment if we can.
	 */
	sector = iomap_sector(iomap, pos);
	if (ctx->bio && bio_end_sector(ctx->bio) == sector)
		is_contig = true;

	if (is_contig &&
	    __bio_try_merge_page(ctx->bio, page, plen, poff, &same_page)) {
		if (!same_page && iop)
			atomic_inc(&iop->read_count);
		goto done;
	}

	/*
	 * If we start a new segment we need to increase the read count, and we
	 * need to do so before submitting any previous full bio to make sure
	 * that we don't prematurely unlock the page.
	 */
	if (iop)
		atomic_inc(&iop->read_count);

	if (!ctx->bio || !is_contig || bio_full(ctx->bio, plen)) {
		gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
		gfp_t orig_gfp = gfp;
		int nr_vecs = (length + PAGE_SIZE - 1) >> PAGE_SHIFT;

		if (ctx->bio)
			submit_bio(ctx->bio);

		if (ctx->is_readahead) /* same as readahead_gfp_mask */
			gfp |= __GFP_NORETRY | __GFP_NOWARN;
		ctx->bio = bio_alloc(gfp, min(BIO_MAX_PAGES, nr_vecs));
		/*
		 * If the bio_alloc fails, try it again for a single page to
		 * avoid having to deal with partial page reads.  This emulates
		 * what do_mpage_readpage does.
		 */
		if (!ctx->bio)
			ctx->bio = bio_alloc(orig_gfp, 1);
		ctx->bio->bi_opf = REQ_OP_READ;
		if (ctx->is_readahead)
			ctx->bio->bi_opf |= REQ_RAHEAD;
		ctx->bio->bi_iter.bi_sector = sector;
		bio_set_dev(ctx->bio, iomap->bdev);
		ctx->bio->bi_end_io = iomap_read_end_io;
	}

	bio_add_page(ctx->bio, page, plen, poff);
done:
	/*
	 * Move the caller beyond our range so that it keeps making progress.
	 * For that we have to include any leading non-uptodate ranges, but
	 * we can skip trailing ones as they will be handled in the next
	 * iteration.
	 */
	return pos - orig_pos + plen;
}
int
iomap_readpage(struct page *page, const struct iomap_ops *ops)
{
	struct iomap_readpage_ctx ctx = { .cur_page = page };
	struct inode *inode = page->mapping->host;
	unsigned poff;
	loff_t ret;

	trace_iomap_readpage(page->mapping->host, 1);

	for (poff = 0; poff < PAGE_SIZE; poff += ret) {
		ret = iomap_apply(inode, page_offset(page) + poff,
				PAGE_SIZE - poff, 0, ops, &ctx,
				iomap_readpage_actor);
		if (ret <= 0) {
			WARN_ON_ONCE(ret == 0);
			SetPageError(page);
			break;
		}
	}

	if (ctx.bio) {
		submit_bio(ctx.bio);
		WARN_ON_ONCE(!ctx.cur_page_in_bio);
	} else {
		WARN_ON_ONCE(ctx.cur_page_in_bio);
		unlock_page(page);
	}

	/*
	 * Just like mpage_readpages and block_read_full_page we always
	 * return 0 and just mark the page as PageError on errors.  This
	 * should be cleaned up all through the stack eventually.
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(iomap_readpage);
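
/*
 * Example (illustrative sketch, not part of this file): a filesystem whose
 * block mapping is described by a hypothetical "myfs_read_iomap_ops" can wire
 * its ->readpage method straight through to iomap_readpage():
 *
 *	static int myfs_readpage(struct file *unused, struct page *page)
 *	{
 *		return iomap_readpage(page, &myfs_read_iomap_ops);
 *	}
 *
 * The iomap_ops instance only needs an ->iomap_begin() that can answer
 * read-only mapping queries.
 */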
static struct page *
iomap_next_page(struct inode *inode, struct list_head *pages, loff_t pos,
		loff_t length, loff_t *done)
{
	while (!list_empty(pages)) {
		struct page *page = lru_to_page(pages);

		if (page_offset(page) >= (u64)pos + length)
			break;

		list_del(&page->lru);
		if (!add_to_page_cache_lru(page, inode->i_mapping, page->index,
				GFP_NOFS))
			return page;

		/*
		 * If we already have a page in the page cache at index we are
		 * done.  Upper layers don't care if it is uptodate after the
		 * readpages call itself as every page gets checked again once
		 * actually needed.
		 */
		*done += PAGE_SIZE;
		put_page(page);
	}

	return NULL;
}
static loff_t
iomap_readpages_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap, struct iomap *srcmap)
{
	struct iomap_readpage_ctx *ctx = data;
	loff_t done, ret;

	for (done = 0; done < length; done += ret) {
		if (ctx->cur_page && offset_in_page(pos + done) == 0) {
			if (!ctx->cur_page_in_bio)
				unlock_page(ctx->cur_page);
			put_page(ctx->cur_page);
			ctx->cur_page = NULL;
		}
		if (!ctx->cur_page) {
			ctx->cur_page = iomap_next_page(inode, ctx->pages,
					pos, length, &done);
			if (!ctx->cur_page)
				break;
			ctx->cur_page_in_bio = false;
		}
		ret = iomap_readpage_actor(inode, pos + done, length - done,
				ctx, iomap, srcmap);
	}

	return done;
}
int
iomap_readpages(struct address_space *mapping, struct list_head *pages,
		unsigned nr_pages, const struct iomap_ops *ops)
{
	struct iomap_readpage_ctx ctx = {
		.pages		= pages,
		.is_readahead	= true,
	};
	loff_t pos = page_offset(list_entry(pages->prev, struct page, lru));
	loff_t last = page_offset(list_entry(pages->next, struct page, lru));
	loff_t length = last - pos + PAGE_SIZE, ret = 0;

	trace_iomap_readpages(mapping->host, nr_pages);

	while (length > 0) {
		ret = iomap_apply(mapping->host, pos, length, 0, ops,
				&ctx, iomap_readpages_actor);
		if (ret <= 0) {
			WARN_ON_ONCE(ret == 0);
			goto done;
		}
		pos += ret;
		length -= ret;
	}
	ret = 0;
done:
	if (ctx.bio)
		submit_bio(ctx.bio);
	if (ctx.cur_page) {
		if (!ctx.cur_page_in_bio)
			unlock_page(ctx.cur_page);
		put_page(ctx.cur_page);
	}

	/*
	 * Check that we didn't lose a page due to the arcane calling
	 * conventions.
	 */
	WARN_ON_ONCE(!ret && !list_empty(ctx.pages));
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_readpages);
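
/*
 * Example (illustrative sketch): the matching ->readpages hook for the same
 * hypothetical filesystem just forwards the page list and count:
 *
 *	static int myfs_readpages(struct file *unused,
 *			struct address_space *mapping,
 *			struct list_head *pages, unsigned nr_pages)
 *	{
 *		return iomap_readpages(mapping, pages, nr_pages,
 *				&myfs_read_iomap_ops);
 *	}
 */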
/*
 * iomap_is_partially_uptodate checks whether blocks within a page are
 * uptodate or not.
 *
 * Returns true if all blocks which correspond to a file portion
 * we want to read within the page are uptodate.
 */
int
iomap_is_partially_uptodate(struct page *page, unsigned long from,
		unsigned long count)
{
	struct iomap_page *iop = to_iomap_page(page);
	struct inode *inode = page->mapping->host;
	unsigned len, first, last;
	unsigned i;

	/* Limit range to one page */
	len = min_t(unsigned, PAGE_SIZE - from, count);

	/* First and last blocks in range within page */
	first = from >> inode->i_blkbits;
	last = (from + len - 1) >> inode->i_blkbits;

	if (iop) {
		for (i = first; i <= last; i++)
			if (!test_bit(i, iop->uptodate))
				return 0;
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);
int
iomap_releasepage(struct page *page, gfp_t gfp_mask)
{
	trace_iomap_releasepage(page->mapping->host, page_offset(page),
			PAGE_SIZE);

	/*
	 * mm accommodates an old ext3 case where clean pages might not have had
	 * the dirty bit cleared. Thus, it can send actual dirty pages to
	 * ->releasepage() via shrink_active_list(), skip those here.
	 */
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	iomap_page_release(page);
	return 1;
}
EXPORT_SYMBOL_GPL(iomap_releasepage);
void
iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len)
{
	trace_iomap_invalidatepage(page->mapping->host, offset, len);

	/*
	 * If we are invalidating the entire page, clear the dirty state from it
	 * and release it to avoid unnecessary buildup of the LRU.
	 */
	if (offset == 0 && len == PAGE_SIZE) {
		WARN_ON_ONCE(PageWriteback(page));
		cancel_dirty_page(page);
		iomap_page_release(page);
	}
}
EXPORT_SYMBOL_GPL(iomap_invalidatepage);
#ifdef CONFIG_MIGRATION
int
iomap_migrate_page(struct address_space *mapping, struct page *newpage,
		struct page *page, enum migrate_mode mode)
{
	int ret;

	ret = migrate_page_move_mapping(mapping, newpage, page, 0);
	if (ret != MIGRATEPAGE_SUCCESS)
		return ret;

	if (page_has_private(page)) {
		ClearPagePrivate(page);
		get_page(newpage);
		set_page_private(newpage, page_private(page));
		set_page_private(page, 0);
		put_page(page);
		SetPagePrivate(newpage);
	}

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);
	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL_GPL(iomap_migrate_page);
#endif /* CONFIG_MIGRATION */
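
/*
 * Example (illustrative sketch): the page-cache helpers above are normally
 * wired into the filesystem's address_space_operations table, e.g.:
 *
 *	static const struct address_space_operations myfs_aops = {
 *		.readpage		= myfs_readpage,
 *		.readpages		= myfs_readpages,
 *		.set_page_dirty		= iomap_set_page_dirty,
 *		.releasepage		= iomap_releasepage,
 *		.invalidatepage		= iomap_invalidatepage,
 *		.is_partially_uptodate	= iomap_is_partially_uptodate,
 *	#ifdef CONFIG_MIGRATION
 *		.migratepage		= iomap_migrate_page,
 *	#endif
 *	};
 *
 * "myfs_readpage"/"myfs_readpages" are the hypothetical wrappers sketched
 * earlier; the iomap_* entries can be used directly.
 */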
enum {
	IOMAP_WRITE_F_UNSHARE		= (1 << 0),
};
static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
	loff_t i_size = i_size_read(inode);

	/*
	 * Only truncate newly allocated pages beyond EOF, even if the
	 * write started inside the existing inode size.
	 */
	if (pos + len > i_size)
		truncate_pagecache_range(inode, max(pos, i_size), pos + len);
}
static int
iomap_read_page_sync(loff_t block_start, struct page *page, unsigned poff,
		unsigned plen, struct iomap *iomap)
{
	struct bio_vec bvec;
	struct bio bio;

	bio_init(&bio, &bvec, 1);
	bio.bi_opf = REQ_OP_READ;
	bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
	bio_set_dev(&bio, iomap->bdev);
	__bio_add_page(&bio, page, plen, poff);
	return submit_bio_wait(&bio);
}
static int
__iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, int flags,
		struct page *page, struct iomap *srcmap)
{
	struct iomap_page *iop = iomap_page_create(inode, page);
	loff_t block_size = i_blocksize(inode);
	loff_t block_start = pos & ~(block_size - 1);
	loff_t block_end = (pos + len + block_size - 1) & ~(block_size - 1);
	unsigned from = offset_in_page(pos), to = from + len, poff, plen;
	int status;

	if (PageUptodate(page))
		return 0;

	do {
		iomap_adjust_read_range(inode, iop, &block_start,
				block_end - block_start, &poff, &plen);
		if (plen == 0)
			break;

		if (!(flags & IOMAP_WRITE_F_UNSHARE) &&
		    (from <= poff || from >= poff + plen) &&
		    (to <= poff || to >= poff + plen))
			continue;

		if (iomap_block_needs_zeroing(inode, srcmap, block_start)) {
			if (WARN_ON_ONCE(flags & IOMAP_WRITE_F_UNSHARE))
				return -EIO;
			zero_user_segments(page, poff, from, to, poff + plen);
			iomap_set_range_uptodate(page, poff, plen);
			continue;
		}

		status = iomap_read_page_sync(block_start, page, poff, plen,
				srcmap);
		if (status)
			return status;
	} while ((block_start += plen) < block_end);

	return 0;
}
static int
iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, struct iomap *iomap, struct iomap *srcmap)
{
	const struct iomap_page_ops *page_ops = iomap->page_ops;
	struct page *page;
	int status = 0;

	BUG_ON(pos + len > iomap->offset + iomap->length);
	if (srcmap != iomap)
		BUG_ON(pos + len > srcmap->offset + srcmap->length);

	if (fatal_signal_pending(current))
		return -EINTR;

	if (page_ops && page_ops->page_prepare) {
		status = page_ops->page_prepare(inode, pos, len, iomap);
		if (status)
			return status;
	}

	page = grab_cache_page_write_begin(inode->i_mapping, pos >> PAGE_SHIFT,
			AOP_FLAG_NOFS);
	if (!page) {
		status = -ENOMEM;
		goto out_no_page;
	}

	if (srcmap->type == IOMAP_INLINE)
		iomap_read_inline_data(inode, page, srcmap);
	else if (iomap->flags & IOMAP_F_BUFFER_HEAD)
		status = __block_write_begin_int(page, pos, len, NULL, srcmap);
	else
		status = __iomap_write_begin(inode, pos, len, flags, page,
				srcmap);

	if (unlikely(status))
		goto out_unlock;

	*pagep = page;
	return 0;

out_unlock:
	unlock_page(page);
	put_page(page);
	iomap_write_failed(inode, pos, len);

out_no_page:
	if (page_ops && page_ops->page_done)
		page_ops->page_done(inode, pos, 0, NULL, iomap);
	return status;
}
int
iomap_set_page_dirty(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	int newly_dirty;

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	/*
	 * Lock out page->mem_cgroup migration to keep PageDirty
	 * synchronized with per-memcg dirty page counters.
	 */
	lock_page_memcg(page);
	newly_dirty = !TestSetPageDirty(page);
	if (newly_dirty)
		__set_page_dirty(page, mapping, 0);
	unlock_page_memcg(page);

	if (newly_dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return newly_dirty;
}
EXPORT_SYMBOL_GPL(iomap_set_page_dirty);
static int
__iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
		unsigned copied, struct page *page)
{
	flush_dcache_page(page);

	/*
	 * The blocks that were entirely written will now be uptodate, so we
	 * don't have to worry about a readpage reading them and overwriting a
	 * partial write.  However if we have encountered a short write and only
	 * partially written into a block, it will not be marked uptodate, so a
	 * readpage might come in and destroy our partial write.
	 *
	 * Do the simplest thing, and just treat any short write to a non
	 * uptodate page as a zero-length write, and force the caller to redo
	 * the whole thing.
	 */
	if (unlikely(copied < len && !PageUptodate(page)))
		return 0;
	iomap_set_range_uptodate(page, offset_in_page(pos), len);
	iomap_set_page_dirty(page);
	return copied;
}
static int
iomap_write_end_inline(struct inode *inode, struct page *page,
		struct iomap *iomap, loff_t pos, unsigned copied)
{
	void *addr;

	WARN_ON_ONCE(!PageUptodate(page));
	BUG_ON(pos + copied > PAGE_SIZE - offset_in_page(iomap->inline_data));

	addr = kmap_atomic(page);
	memcpy(iomap->inline_data + pos, addr + pos, copied);
	kunmap_atomic(addr);

	mark_inode_dirty(inode);
	return copied;
}
static int
iomap_write_end(struct inode *inode, loff_t pos, unsigned len, unsigned copied,
		struct page *page, struct iomap *iomap, struct iomap *srcmap)
{
	const struct iomap_page_ops *page_ops = iomap->page_ops;
	loff_t old_size = inode->i_size;
	int ret;

	if (srcmap->type == IOMAP_INLINE) {
		ret = iomap_write_end_inline(inode, page, iomap, pos, copied);
	} else if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
		ret = block_write_end(NULL, inode->i_mapping, pos, len, copied,
				page, NULL);
	} else {
		ret = __iomap_write_end(inode, pos, len, copied, page);
	}

	/*
	 * Update the in-memory inode size after copying the data into the page
	 * cache.  It's up to the file system to write the updated size to disk,
	 * preferably after I/O completion so that no stale data is exposed.
	 */
	if (pos + ret > old_size) {
		i_size_write(inode, pos + ret);
		iomap->flags |= IOMAP_F_SIZE_CHANGED;
	}
	unlock_page(page);

	if (old_size < pos)
		pagecache_isize_extended(inode, old_size, pos);
	if (page_ops && page_ops->page_done)
		page_ops->page_done(inode, pos, ret, page, iomap);
	put_page(page);

	if (ret < len)
		iomap_write_failed(inode, pos, len);
	return ret;
}
static loff_t
iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap, struct iomap *srcmap)
{
	struct iov_iter *i = data;
	long status = 0;
	ssize_t written = 0;

	do {
		struct page *page;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */
		size_t copied;		/* Bytes copied from user */

		offset = offset_in_page(pos);
		bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_count(i));
again:
		if (bytes > length)
			bytes = length;

		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * Not only is this an optimisation, but it is also required
		 * to check that the address is actually valid, when atomic
		 * usercopies are used, below.
		 */
		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
			status = -EFAULT;
			break;
		}

		status = iomap_write_begin(inode, pos, bytes, 0, &page, iomap,
				srcmap);
		if (unlikely(status))
			break;

		if (mapping_writably_mapped(inode->i_mapping))
			flush_dcache_page(page);

		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);

		flush_dcache_page(page);

		status = iomap_write_end(inode, pos, bytes, copied, page, iomap,
				srcmap);
		if (unlikely(status < 0))
			break;
		copied = status;

		cond_resched();

		iov_iter_advance(i, copied);
		if (unlikely(copied == 0)) {
			/*
			 * If we were unable to copy any data at all, we must
			 * fall back to a single segment length write.
			 *
			 * If we didn't fallback here, we could livelock
			 * because not all segments in the iov can be copied at
			 * once without a pagefault.
			 */
			bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_single_seg_count(i));
			goto again;
		}
		pos += copied;
		written += copied;
		length -= copied;

		balance_dirty_pages_ratelimited(inode->i_mapping);
	} while (iov_iter_count(i) && length);

	return written ? written : status;
}
ssize_t
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, written = 0;

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter),
				IOMAP_WRITE, ops, iter, iomap_write_actor);
		if (ret <= 0)
			break;
		pos += ret;
		written += ret;
	}

	return written ? written : ret;
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
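
/*
 * Example (illustrative sketch): a ->write_iter implementation typically takes
 * the inode lock, runs the generic checks, and then hands the iterator to
 * iomap_file_buffered_write(); "myfs_buffered_write_iomap_ops" is a
 * hypothetical iomap_ops instance:
 *
 *	static ssize_t myfs_buffered_write_iter(struct kiocb *iocb,
 *			struct iov_iter *from)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock(inode);
 *		ret = generic_write_checks(iocb, from);
 *		if (ret > 0)
 *			ret = iomap_file_buffered_write(iocb, from,
 *					&myfs_buffered_write_iomap_ops);
 *		if (ret > 0)
 *			iocb->ki_pos += ret;
 *		inode_unlock(inode);
 *		if (ret > 0)
 *			ret = generic_write_sync(iocb, ret);
 *		return ret;
 *	}
 *
 * Note that the caller advances iocb->ki_pos itself; this helper only returns
 * the number of bytes written.
 */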
static loff_t
iomap_unshare_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap, struct iomap *srcmap)
{
	long status = 0;
	loff_t written = 0;

	/* don't bother with blocks that are not shared to start with */
	if (!(iomap->flags & IOMAP_F_SHARED))
		return length;
	/* don't bother with holes or unwritten extents */
	if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
		return length;

	do {
		unsigned long offset = offset_in_page(pos);
		unsigned long bytes = min_t(loff_t, PAGE_SIZE - offset, length);
		struct page *page;

		status = iomap_write_begin(inode, pos, bytes,
				IOMAP_WRITE_F_UNSHARE, &page, iomap, srcmap);
		if (unlikely(status))
			return status;

		status = iomap_write_end(inode, pos, bytes, bytes, page, iomap,
				srcmap);
		if (unlikely(status <= 0)) {
			if (WARN_ON_ONCE(status == 0))
				return -EIO;
			return status;
		}

		cond_resched();

		pos += status;
		written += status;
		length -= status;

		balance_dirty_pages_ratelimited(inode->i_mapping);
	} while (length);

	return written;
}
int
iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
		const struct iomap_ops *ops)
{
	loff_t ret;

	while (len) {
		ret = iomap_apply(inode, pos, len, IOMAP_WRITE, ops, NULL,
				iomap_unshare_actor);
		if (ret <= 0)
			return ret;
		pos += ret;
		len -= ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_file_unshare);
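
/*
 * Example (illustrative sketch): a copy-on-write capable filesystem might call
 * this from an FALLOC_FL_UNSHARE_RANGE style path to break block sharing over
 * a byte range before allowing in-place writes; the ops name is assumed:
 *
 *	error = iomap_file_unshare(inode, offset, len,
 *			&myfs_buffered_write_iomap_ops);
 *	if (error)
 *		return error;
 */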
static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
		unsigned bytes, struct iomap *iomap, struct iomap *srcmap)
{
	struct page *page;
	int status;

	status = iomap_write_begin(inode, pos, bytes, 0, &page, iomap, srcmap);
	if (status)
		return status;

	zero_user(page, offset, bytes);
	mark_page_accessed(page);

	return iomap_write_end(inode, pos, bytes, bytes, page, iomap, srcmap);
}
static loff_t
iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
		void *data, struct iomap *iomap, struct iomap *srcmap)
{
	bool *did_zero = data;
	loff_t written = 0;
	int status;

	/* already zeroed?  we're done. */
	if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
		return count;

	do {
		unsigned offset, bytes;

		offset = offset_in_page(pos);
		bytes = min_t(loff_t, PAGE_SIZE - offset, count);

		if (IS_DAX(inode))
			status = dax_iomap_zero(pos, offset, bytes, iomap);
		else
			status = iomap_zero(inode, pos, offset, bytes, iomap,
					srcmap);
		if (status < 0)
			return status;

		pos += bytes;
		count -= bytes;
		written += bytes;
		if (did_zero)
			*did_zero = true;
	} while (count > 0);

	return written;
}
int
iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
		const struct iomap_ops *ops)
{
	loff_t ret;

	while (len > 0) {
		ret = iomap_apply(inode, pos, len, IOMAP_ZERO,
				ops, did_zero, iomap_zero_range_actor);
		if (ret <= 0)
			return ret;

		pos += ret;
		len -= ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_zero_range);
int
iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		const struct iomap_ops *ops)
{
	unsigned int blocksize = i_blocksize(inode);
	unsigned int off = pos & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!off)
		return 0;
	return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
}
EXPORT_SYMBOL_GPL(iomap_truncate_page);
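
/*
 * Example (illustrative sketch): when shrinking a file, a filesystem would
 * typically zero the tail of what becomes the last block before publishing
 * the new size, so that stale data in the partial block cannot leak out via
 * mmap; the iomap ops name is assumed:
 *
 *	bool did_zero = false;
 *	int error;
 *
 *	error = iomap_truncate_page(inode, newsize, &did_zero,
 *			&myfs_buffered_write_iomap_ops);
 *	if (error)
 *		return error;
 *	truncate_setsize(inode, newsize);
 */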
static loff_t
iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap, struct iomap *srcmap)
{
	struct page *page = data;
	int ret;

	if (iomap->flags & IOMAP_F_BUFFER_HEAD) {
		ret = __block_write_begin_int(page, pos, length, NULL, iomap);
		if (ret)
			return ret;
		block_commit_write(page, 0, length);
	} else {
		WARN_ON_ONCE(!PageUptodate(page));
		iomap_page_create(inode, page);
		set_page_dirty(page);
	}

	return length;
}
vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	unsigned long length;
	loff_t offset;
	ssize_t ret;

	lock_page(page);
	ret = page_mkwrite_check_truncate(page, inode);
	if (ret < 0)
		goto out_unlock;
	length = ret;

	offset = page_offset(page);
	while (length > 0) {
		ret = iomap_apply(inode, offset, length,
				IOMAP_WRITE | IOMAP_FAULT, ops, page,
				iomap_page_mkwrite_actor);
		if (unlikely(ret <= 0))
			goto out_unlock;
		offset += ret;
		length -= ret;
	}

	wait_for_stable_page(page);
	return VM_FAULT_LOCKED;
out_unlock:
	unlock_page(page);
	return block_page_mkwrite_return(ret);
}
EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
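
/*
 * Example (illustrative sketch): ->page_mkwrite for an mmap'ed file can be a
 * thin wrapper around this helper, with ->fault handled by the generic
 * filemap code; the ops name is assumed:
 *
 *	static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		return iomap_page_mkwrite(vmf, &myfs_buffered_write_iomap_ops);
 *	}
 *
 *	static const struct vm_operations_struct myfs_file_vm_ops = {
 *		.fault		= filemap_fault,
 *		.map_pages	= filemap_map_pages,
 *		.page_mkwrite	= myfs_page_mkwrite,
 *	};
 *
 * Real implementations usually also bracket the call with
 * sb_start_pagefault()/sb_end_pagefault() and take a filesystem mmap lock.
 */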
static void
iomap_finish_page_writeback(struct inode *inode, struct page *page,
		int error)
{
	struct iomap_page *iop = to_iomap_page(page);

	if (error) {
		SetPageError(page);
		mapping_set_error(inode->i_mapping, -EIO);
	}

	WARN_ON_ONCE(i_blocksize(inode) < PAGE_SIZE && !iop);
	WARN_ON_ONCE(iop && atomic_read(&iop->write_count) <= 0);

	if (!iop || atomic_dec_and_test(&iop->write_count))
		end_page_writeback(page);
}
/*
 * We're now finished for good with this ioend structure.  Update the page
 * state, release holds on bios, and finally free up memory.  Do not use the
 * ioend after this.
 */
static void
iomap_finish_ioend(struct iomap_ioend *ioend, int error)
{
	struct inode *inode = ioend->io_inode;
	struct bio *bio = &ioend->io_inline_bio;
	struct bio *last = ioend->io_bio, *next;
	u64 start = bio->bi_iter.bi_sector;
	loff_t offset = ioend->io_offset;
	bool quiet = bio_flagged(bio, BIO_QUIET);

	for (bio = &ioend->io_inline_bio; bio; bio = next) {
		struct bio_vec *bv;
		struct bvec_iter_all iter_all;

		/*
		 * For the last bio, bi_private points to the ioend, so we
		 * need to explicitly end the iteration here.
		 */
		if (bio == last)
			next = NULL;
		else
			next = bio->bi_private;

		/* walk each page on bio, ending page IO on them */
		bio_for_each_segment_all(bv, bio, iter_all)
			iomap_finish_page_writeback(inode, bv->bv_page, error);
		bio_put(bio);
	}
	/* The ioend has been freed by bio_put() */

	if (unlikely(error && !quiet)) {
		printk_ratelimited(KERN_ERR
"%s: writeback error on inode %lu, offset %lld, sector %llu",
			inode->i_sb->s_id, inode->i_ino, offset, start);
	}
}
void
iomap_finish_ioends(struct iomap_ioend *ioend, int error)
{
	struct list_head tmp;

	list_replace_init(&ioend->io_list, &tmp);
	iomap_finish_ioend(ioend, error);

	while (!list_empty(&tmp)) {
		ioend = list_first_entry(&tmp, struct iomap_ioend, io_list);
		list_del_init(&ioend->io_list);
		iomap_finish_ioend(ioend, error);
	}
}
EXPORT_SYMBOL_GPL(iomap_finish_ioends);
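
/*
 * Example (illustrative sketch): a filesystem that defers ioend completion to
 * a workqueue (e.g. to convert unwritten extents) finishes the whole merged
 * chain from its work item; "struct myfs_ioend_work" and
 * "myfs_convert_unwritten" are hypothetical:
 *
 *	static void myfs_end_io_work(struct work_struct *work)
 *	{
 *		struct myfs_ioend_work *w =
 *			container_of(work, struct myfs_ioend_work, work);
 *		int error = blk_status_to_errno(w->ioend->io_bio->bi_status);
 *
 *		if (!error && w->ioend->io_type == IOMAP_UNWRITTEN)
 *			error = myfs_convert_unwritten(w->ioend);
 *		iomap_finish_ioends(w->ioend, error);
 *	}
 */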
/*
 * We can merge two adjacent ioends if they have the same set of work to do.
 */
static bool
iomap_ioend_can_merge(struct iomap_ioend *ioend, struct iomap_ioend *next)
{
	if (ioend->io_bio->bi_status != next->io_bio->bi_status)
		return false;
	if ((ioend->io_flags & IOMAP_F_SHARED) ^
	    (next->io_flags & IOMAP_F_SHARED))
		return false;
	if ((ioend->io_type == IOMAP_UNWRITTEN) ^
	    (next->io_type == IOMAP_UNWRITTEN))
		return false;
	if (ioend->io_offset + ioend->io_size != next->io_offset)
		return false;
	return true;
}
void
iomap_ioend_try_merge(struct iomap_ioend *ioend, struct list_head *more_ioends,
		void (*merge_private)(struct iomap_ioend *ioend,
				struct iomap_ioend *next))
{
	struct iomap_ioend *next;

	INIT_LIST_HEAD(&ioend->io_list);

	while ((next = list_first_entry_or_null(more_ioends, struct iomap_ioend,
			io_list))) {
		if (!iomap_ioend_can_merge(ioend, next))
			break;
		list_move_tail(&next->io_list, &ioend->io_list);
		ioend->io_size += next->io_size;
		if (next->io_private && merge_private)
			merge_private(ioend, next);
	}
}
EXPORT_SYMBOL_GPL(iomap_ioend_try_merge);
static int
iomap_ioend_compare(void *priv, struct list_head *a, struct list_head *b)
{
	struct iomap_ioend *ia = container_of(a, struct iomap_ioend, io_list);
	struct iomap_ioend *ib = container_of(b, struct iomap_ioend, io_list);

	if (ia->io_offset < ib->io_offset)
		return -1;
	if (ia->io_offset > ib->io_offset)
		return 1;
	return 0;
}
void
iomap_sort_ioends(struct list_head *ioend_list)
{
	list_sort(NULL, ioend_list, iomap_ioend_compare);
}
EXPORT_SYMBOL_GPL(iomap_sort_ioends);
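
/*
 * Example (illustrative sketch): completion code that batches many ioends can
 * sort them by file offset and then merge physically adjacent ones before
 * finishing, so per-ioend work runs on larger ranges:
 *
 *	LIST_HEAD(completion_list);
 *	struct iomap_ioend *ioend;
 *
 *	// ... splice pending ioends onto completion_list ...
 *	iomap_sort_ioends(&completion_list);
 *	while ((ioend = list_first_entry_or_null(&completion_list,
 *			struct iomap_ioend, io_list))) {
 *		list_del_init(&ioend->io_list);
 *		iomap_ioend_try_merge(ioend, &completion_list, NULL);
 *		iomap_finish_ioends(ioend,
 *				blk_status_to_errno(ioend->io_bio->bi_status));
 *	}
 */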
static void iomap_writepage_end_bio(struct bio *bio)
{
	struct iomap_ioend *ioend = bio->bi_private;

	iomap_finish_ioend(ioend, blk_status_to_errno(bio->bi_status));
}
/*
 * Submit the final bio for an ioend.
 *
 * If @error is non-zero, it means that we have a situation where some part of
 * the submission process has failed after we have marked pages for writeback
 * and unlocked them.  In this situation, we need to fail the bio instead of
 * submitting it.  This typically only happens on a filesystem shutdown.
 */
static int
iomap_submit_ioend(struct iomap_writepage_ctx *wpc, struct iomap_ioend *ioend,
		int error)
{
	ioend->io_bio->bi_private = ioend;
	ioend->io_bio->bi_end_io = iomap_writepage_end_bio;

	if (wpc->ops->prepare_ioend)
		error = wpc->ops->prepare_ioend(ioend, error);
	if (error) {
		/*
		 * If we are failing the IO now, just mark the ioend with an
		 * error and finish it.  This will run IO completion immediately
		 * as there is only one reference to the ioend at this point in
		 * time.
		 */
		ioend->io_bio->bi_status = errno_to_blk_status(error);
		bio_endio(ioend->io_bio);
		return error;
	}

	submit_bio(ioend->io_bio);
	return 0;
}
static struct iomap_ioend *
iomap_alloc_ioend(struct inode *inode, struct iomap_writepage_ctx *wpc,
		loff_t offset, sector_t sector, struct writeback_control *wbc)
{
	struct iomap_ioend *ioend;
	struct bio *bio;

	bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, &iomap_ioend_bioset);
	bio_set_dev(bio, wpc->iomap.bdev);
	bio->bi_iter.bi_sector = sector;
	bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
	bio->bi_write_hint = inode->i_write_hint;
	wbc_init_bio(wbc, bio);

	ioend = container_of(bio, struct iomap_ioend, io_inline_bio);
	INIT_LIST_HEAD(&ioend->io_list);
	ioend->io_type = wpc->iomap.type;
	ioend->io_flags = wpc->iomap.flags;
	ioend->io_inode = inode;
	ioend->io_size = 0;
	ioend->io_offset = offset;
	ioend->io_private = NULL;
	ioend->io_bio = bio;
	return ioend;
}
/*
 * Allocate a new bio, and chain the old bio to the new one.
 *
 * Note that we have to perform the chaining in this unintuitive order
 * so that the bi_private linkage is set up in the right direction for the
 * traversal in iomap_finish_ioend().
 */
static struct bio *
iomap_chain_bio(struct bio *prev)
{
	struct bio *new;

	new = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);
	bio_copy_dev(new, prev);/* also copies over blkcg information */
	new->bi_iter.bi_sector = bio_end_sector(prev);
	new->bi_opf = prev->bi_opf;
	new->bi_write_hint = prev->bi_write_hint;

	bio_chain(prev, new);
	bio_get(prev);		/* for iomap_finish_ioend */
	submit_bio(prev);
	return new;
}
static bool
iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t offset,
		sector_t sector)
{
	if ((wpc->iomap.flags & IOMAP_F_SHARED) !=
	    (wpc->ioend->io_flags & IOMAP_F_SHARED))
		return false;
	if (wpc->iomap.type != wpc->ioend->io_type)
		return false;
	if (offset != wpc->ioend->io_offset + wpc->ioend->io_size)
		return false;
	if (sector != bio_end_sector(wpc->ioend->io_bio))
		return false;
	return true;
}
/*
 * Test to see if we have an existing ioend structure that we could append to
 * first, otherwise finish off the current ioend and start another.
 */
static void
iomap_add_to_ioend(struct inode *inode, loff_t offset, struct page *page,
		struct iomap_page *iop, struct iomap_writepage_ctx *wpc,
		struct writeback_control *wbc, struct list_head *iolist)
{
	sector_t sector = iomap_sector(&wpc->iomap, offset);
	unsigned len = i_blocksize(inode);
	unsigned poff = offset & (PAGE_SIZE - 1);
	bool merged, same_page = false;

	if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, offset, sector)) {
		if (wpc->ioend)
			list_add(&wpc->ioend->io_list, iolist);
		wpc->ioend = iomap_alloc_ioend(inode, wpc, offset, sector, wbc);
	}

	merged = __bio_try_merge_page(wpc->ioend->io_bio, page, len, poff,
			&same_page);
	if (iop && !same_page)
		atomic_inc(&iop->write_count);

	if (!merged) {
		if (bio_full(wpc->ioend->io_bio, len)) {
			wpc->ioend->io_bio =
				iomap_chain_bio(wpc->ioend->io_bio);
		}
		bio_add_page(wpc->ioend->io_bio, page, len, poff);
	}

	wpc->ioend->io_size += len;
	wbc_account_cgroup_owner(wbc, page, len);
}
/*
 * We implement an immediate ioend submission policy here to avoid needing to
 * chain multiple ioends and hence nest mempool allocations which can violate
 * forward progress guarantees we need to provide. The current ioend we are
 * adding blocks to is cached on the writepage context, and if the new block
 * does not append to the cached ioend it will create a new ioend and cache
 * that instead.
 *
 * If a new ioend is created and cached, the old ioend is returned and queued
 * locally for submission once the entire page is processed or an error has
 * been detected.  While ioends are submitted immediately after they are
 * completed, batching optimisations are provided by higher level block
 * plugging.
 *
 * At the end of a writeback pass, there will be a cached ioend remaining on
 * the writepage context that the caller will need to submit.
 */
static int
iomap_writepage_map(struct iomap_writepage_ctx *wpc,
		struct writeback_control *wbc, struct inode *inode,
		struct page *page, u64 end_offset)
{
	struct iomap_page *iop = to_iomap_page(page);
	struct iomap_ioend *ioend, *next;
	unsigned len = i_blocksize(inode);
	u64 file_offset; /* file offset of page */
	int error = 0, count = 0, i;
	LIST_HEAD(submit_list);

	WARN_ON_ONCE(i_blocksize(inode) < PAGE_SIZE && !iop);
	WARN_ON_ONCE(iop && atomic_read(&iop->write_count) != 0);

	/*
	 * Walk through the page to find areas to write back. If we run off the
	 * end of the current map or find the current map invalid, grab a new
	 * one.
	 */
	for (i = 0, file_offset = page_offset(page);
	     i < (PAGE_SIZE >> inode->i_blkbits) && file_offset < end_offset;
	     i++, file_offset += len) {
		if (iop && !test_bit(i, iop->uptodate))
			continue;

		error = wpc->ops->map_blocks(wpc, inode, file_offset);
		if (error)
			break;
		if (WARN_ON_ONCE(wpc->iomap.type == IOMAP_INLINE))
			continue;
		if (wpc->iomap.type == IOMAP_HOLE)
			continue;
		iomap_add_to_ioend(inode, file_offset, page, iop, wpc, wbc,
				 &submit_list);
		count++;
	}

	WARN_ON_ONCE(!wpc->ioend && !list_empty(&submit_list));
	WARN_ON_ONCE(!PageLocked(page));
	WARN_ON_ONCE(PageWriteback(page));

	/*
	 * We cannot cancel the ioend directly here on error.  We may have
	 * already set other pages under writeback and hence we have to run I/O
	 * completion to mark the error state of the pages under writeback
	 * appropriately.
	 */
	if (unlikely(error)) {
		if (!count) {
			/*
			 * If the current page hasn't been added to ioend, it
			 * won't be affected by I/O completions and we must
			 * discard and unlock it right here.
			 */
			if (wpc->ops->discard_page)
				wpc->ops->discard_page(page);
			ClearPageUptodate(page);
			unlock_page(page);
			goto done;
		}

		/*
		 * If the page was not fully cleaned, we need to ensure that the
		 * higher layers come back to it correctly.  That means we need
		 * to keep the page dirty, and for WB_SYNC_ALL writeback we need
		 * to ensure the PAGECACHE_TAG_TOWRITE index mark is not removed
		 * so another attempt to write this page in this writeback sweep
		 * will be made.
		 */
		set_page_writeback_keepwrite(page);
	} else {
		clear_page_dirty_for_io(page);
		set_page_writeback(page);
	}

	unlock_page(page);

	/*
	 * Preserve the original error if there was one, otherwise catch
	 * submission errors here and propagate into subsequent ioend
	 * submissions.
	 */
	list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
		int error2;

		list_del_init(&ioend->io_list);
		error2 = iomap_submit_ioend(wpc, ioend, error);
		if (error2 && !error)
			error = error2;
	}

	/*
	 * We can end up here with no error and nothing to write only if we race
	 * with a partial page truncate on a sub-page block sized filesystem.
	 */
	if (!count)
		end_page_writeback(page);
done:
	mapping_set_error(page->mapping, error);
	return error;
}
/*
 * Write out a dirty page.
 *
 * For delalloc space on the page we need to allocate space and flush it.
 * For unwritten space on the page we need to start the conversion to
 * regular allocated space.
 */
static int
iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
{
	struct iomap_writepage_ctx *wpc = data;
	struct inode *inode = page->mapping->host;
	pgoff_t end_index;
	u64 end_offset;
	loff_t offset;

	trace_iomap_writepage(inode, page_offset(page), PAGE_SIZE);

	/*
	 * Refuse to write the page out if we are called from reclaim context.
	 *
	 * This avoids stack overflows when called from deeply used stacks in
	 * random callers for direct reclaim or memcg reclaim.  We explicitly
	 * allow reclaim from kswapd as the stack usage there is relatively low.
	 *
	 * This should never happen except in the case of a VM regression so
	 * warn about it.
	 */
	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
			PF_MEMALLOC))
		goto redirty;

	/*
	 * Given that we do not allow direct reclaim to call us, we should
	 * never be called in a recursive filesystem reclaim context.
	 */
	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC_NOFS))
		goto redirty;

	/*
	 * Is this page beyond the end of the file?
	 *
	 * The page index is less than the end_index, adjust the end_offset
	 * to the highest offset that this page should represent.
	 * -----------------------------------------------------
	 * |			file mapping	       | <EOF> |
	 * -----------------------------------------------------
	 * | Page ... | Page N-2 | Page N-1 |  Page N  |       |
	 * ^--------------------------------^----------|--------
	 * |     desired writeback range    |      see else    |
	 * ---------------------------------^------------------|
	 */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_SHIFT;
	if (page->index < end_index)
		end_offset = (loff_t)(page->index + 1) << PAGE_SHIFT;
	else {
		/*
		 * Check whether the page to write out is beyond or straddles
		 * i_size or not.
		 * -------------------------------------------------------
		 * |		file mapping		        | <EOF>  |
		 * -------------------------------------------------------
		 * | Page ... | Page N-2 | Page N-1 |  Page N   | Beyond |
		 * ^--------------------------------^-----------|---------
		 * |				    |      Straddles     |
		 * ---------------------------------^-----------|--------|
		 */
		unsigned offset_into_page = offset & (PAGE_SIZE - 1);

		/*
		 * Skip the page if it is fully outside i_size, e.g. due to a
		 * truncate operation that is in progress. We must redirty the
		 * page so that reclaim stops reclaiming it. Otherwise
		 * iomap_vm_releasepage() is called on it and gets confused.
		 *
		 * Note that the end_index is unsigned long, it would overflow
		 * if the given offset is greater than 16TB on 32-bit system
		 * and if we do check the page is fully outside i_size or not
		 * via "if (page->index >= end_index + 1)" as "end_index + 1"
		 * will be evaluated to 0.  Hence this page will be redirtied
		 * and be written out repeatedly which would result in an
		 * infinite loop, the user program that performs this operation
		 * will hang.  Instead, we can verify this situation by checking
		 * if the page to write is totally beyond the i_size or if its
		 * offset is just equal to the EOF.
		 */
		if (page->index > end_index ||
		    (page->index == end_index && offset_into_page == 0))
			goto redirty;

		/*
		 * The page straddles i_size.  It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped.
		 * "A file is mapped in multiples of the page size.  For a file
		 * that is not a multiple of the page size, the remaining
		 * memory is zeroed when mapped, and writes to that region are
		 * not written out to the file."
		 */
		zero_user_segment(page, offset_into_page, PAGE_SIZE);

		/* Adjust the end_offset to the end of file */
		end_offset = offset;
	}

	return iomap_writepage_map(wpc, wbc, inode, page, end_offset);

redirty:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}
int
iomap_writepage(struct page *page, struct writeback_control *wbc,
		struct iomap_writepage_ctx *wpc,
		const struct iomap_writeback_ops *ops)
{
	int ret;

	wpc->ops = ops;
	ret = iomap_do_writepage(page, wbc, wpc);
	if (!wpc->ioend)
		return ret;
	return iomap_submit_ioend(wpc, wpc->ioend, ret);
}
EXPORT_SYMBOL_GPL(iomap_writepage);
int
iomap_writepages(struct address_space *mapping, struct writeback_control *wbc,
		struct iomap_writepage_ctx *wpc,
		const struct iomap_writeback_ops *ops)
{
	int ret;

	wpc->ops = ops;
	ret = write_cache_pages(mapping, wbc, iomap_do_writepage, wpc);
	if (!wpc->ioend)
		return ret;
	return iomap_submit_ioend(wpc, wpc->ioend, ret);
}
EXPORT_SYMBOL_GPL(iomap_writepages);
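
/*
 * Example (illustrative sketch): a filesystem embeds struct
 * iomap_writepage_ctx inside its own context so that ->map_blocks can cache
 * per-writeback mapping state, and forwards the aops methods to the helpers
 * above; "myfs_map_blocks" is the hypothetical hook that fills wpc->iomap for
 * the file offset it is passed:
 *
 *	struct myfs_writepage_ctx {
 *		struct iomap_writepage_ctx ctx;
 *		// filesystem-private mapping state would live here
 *	};
 *
 *	static const struct iomap_writeback_ops myfs_writeback_ops = {
 *		.map_blocks	= myfs_map_blocks,
 *	};
 *
 *	static int myfs_writepages(struct address_space *mapping,
 *			struct writeback_control *wbc)
 *	{
 *		struct myfs_writepage_ctx wpc = { };
 *
 *		return iomap_writepages(mapping, wbc, &wpc.ctx,
 *				&myfs_writeback_ops);
 *	}
 */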
static int __init iomap_init(void)
{
	return bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE),
			   offsetof(struct iomap_ioend, io_inline_bio),
			   BIOSET_NEED_BVECS);
}
fs_initcall(iomap_init);