// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (C) 2016-2023 Christoph Hellwig.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/writeback.h>
#include <linux/list_sort.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/sched/signal.h>
#include <linux/migrate.h>
#include "trace.h"

#include "../internal.h"
#define IOEND_BATCH_SIZE	4096
/*
 * Structure allocated for each folio to track per-block uptodate, dirty state
 * and I/O completions.
 */
struct iomap_folio_state {
	spinlock_t		state_lock;
	unsigned int		read_bytes_pending;
	atomic_t		write_bytes_pending;

	/*
	 * Each block has two bits in this bitmap:
	 * Bits [0..blocks_per_folio) has the uptodate status.
	 * Bits [b_p_f...(2*b_p_f)) has the dirty status.
	 */
	unsigned long		state[];
};

static struct bio_set iomap_ioend_bioset;
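
/*
 * Illustrative example (not part of the original source): with 4k
 * filesystem blocks in a 16k folio, i_blocks_per_folio() == 4, so
 * state[] spans BITS_TO_LONGS(8) words.  Bits 0-3 hold the per-block
 * uptodate state and bits 4-7 the per-block dirty state; a folio whose
 * first two blocks are uptodate and whose last block is dirty would
 * have state[0] == 0b10000011.
 */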
static inline bool ifs_is_fully_uptodate(struct folio *folio,
		struct iomap_folio_state *ifs)
{
	struct inode *inode = folio->mapping->host;

	return bitmap_full(ifs->state, i_blocks_per_folio(inode, folio));
}
static inline bool ifs_block_is_uptodate(struct iomap_folio_state *ifs,
		unsigned int block)
{
	return test_bit(block, ifs->state);
}
static bool ifs_set_range_uptodate(struct folio *folio,
		struct iomap_folio_state *ifs, size_t off, size_t len)
{
	struct inode *inode = folio->mapping->host;
	unsigned int first_blk = off >> inode->i_blkbits;
	unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
	unsigned int nr_blks = last_blk - first_blk + 1;

	bitmap_set(ifs->state, first_blk, nr_blks);
	return ifs_is_fully_uptodate(folio, ifs);
}
static void iomap_set_range_uptodate(struct folio *folio, size_t off,
		size_t len)
{
	struct iomap_folio_state *ifs = folio->private;
	unsigned long flags;
	bool uptodate = true;

	if (ifs) {
		spin_lock_irqsave(&ifs->state_lock, flags);
		uptodate = ifs_set_range_uptodate(folio, ifs, off, len);
		spin_unlock_irqrestore(&ifs->state_lock, flags);
	}

	if (uptodate)
		folio_mark_uptodate(folio);
}
static inline bool ifs_block_is_dirty(struct folio *folio,
		struct iomap_folio_state *ifs, int block)
{
	struct inode *inode = folio->mapping->host;
	unsigned int blks_per_folio = i_blocks_per_folio(inode, folio);

	return test_bit(block + blks_per_folio, ifs->state);
}
static unsigned ifs_find_dirty_range(struct folio *folio,
		struct iomap_folio_state *ifs, u64 *range_start, u64 range_end)
{
	struct inode *inode = folio->mapping->host;
	unsigned start_blk =
		offset_in_folio(folio, *range_start) >> inode->i_blkbits;
	unsigned end_blk = min_not_zero(
		offset_in_folio(folio, range_end) >> inode->i_blkbits,
		i_blocks_per_folio(inode, folio));
	unsigned nblks = 1;

	while (!ifs_block_is_dirty(folio, ifs, start_blk))
		if (++start_blk == end_blk)
			return 0;

	while (start_blk + nblks < end_blk) {
		if (!ifs_block_is_dirty(folio, ifs, start_blk + nblks))
			break;
		nblks++;
	}

	*range_start = folio_pos(folio) + (start_blk << inode->i_blkbits);
	return nblks << inode->i_blkbits;
}
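
/*
 * Worked example for ifs_find_dirty_range() (illustrative, assuming 4k
 * blocks in a 16k folio with only blocks 1 and 2 dirty): starting with
 * *range_start == folio_pos(folio), the first loop skips clean block 0,
 * the second loop extends the range over blocks 1-2, and the function
 * sets *range_start to folio_pos(folio) + 4096 and returns 8192.
 */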
static unsigned iomap_find_dirty_range(struct folio *folio, u64 *range_start,
		u64 range_end)
{
	struct iomap_folio_state *ifs = folio->private;

	if (*range_start >= range_end)
		return 0;

	if (ifs)
		return ifs_find_dirty_range(folio, ifs, range_start, range_end);
	return range_end - *range_start;
}
static void ifs_clear_range_dirty(struct folio *folio,
		struct iomap_folio_state *ifs, size_t off, size_t len)
{
	struct inode *inode = folio->mapping->host;
	unsigned int blks_per_folio = i_blocks_per_folio(inode, folio);
	unsigned int first_blk = (off >> inode->i_blkbits);
	unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
	unsigned int nr_blks = last_blk - first_blk + 1;
	unsigned long flags;

	spin_lock_irqsave(&ifs->state_lock, flags);
	bitmap_clear(ifs->state, first_blk + blks_per_folio, nr_blks);
	spin_unlock_irqrestore(&ifs->state_lock, flags);
}
static void iomap_clear_range_dirty(struct folio *folio, size_t off, size_t len)
{
	struct iomap_folio_state *ifs = folio->private;

	if (ifs)
		ifs_clear_range_dirty(folio, ifs, off, len);
}
static void ifs_set_range_dirty(struct folio *folio,
		struct iomap_folio_state *ifs, size_t off, size_t len)
{
	struct inode *inode = folio->mapping->host;
	unsigned int blks_per_folio = i_blocks_per_folio(inode, folio);
	unsigned int first_blk = (off >> inode->i_blkbits);
	unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
	unsigned int nr_blks = last_blk - first_blk + 1;
	unsigned long flags;

	spin_lock_irqsave(&ifs->state_lock, flags);
	bitmap_set(ifs->state, first_blk + blks_per_folio, nr_blks);
	spin_unlock_irqrestore(&ifs->state_lock, flags);
}
static void iomap_set_range_dirty(struct folio *folio, size_t off, size_t len)
{
	struct iomap_folio_state *ifs = folio->private;

	if (ifs)
		ifs_set_range_dirty(folio, ifs, off, len);
}
static struct iomap_folio_state *ifs_alloc(struct inode *inode,
		struct folio *folio, unsigned int flags)
{
	struct iomap_folio_state *ifs = folio->private;
	unsigned int nr_blocks = i_blocks_per_folio(inode, folio);
	gfp_t gfp;

	if (ifs || nr_blocks <= 1)
		return ifs;

	if (flags & IOMAP_NOWAIT)
		gfp = GFP_NOWAIT;
	else
		gfp = GFP_NOFS | __GFP_NOFAIL;

	/*
	 * ifs->state tracks two sets of state flags when the
	 * filesystem block size is smaller than the folio size.
	 * The first state tracks per-block uptodate and the
	 * second tracks per-block dirty state.
	 */
	ifs = kzalloc(struct_size(ifs, state,
		      BITS_TO_LONGS(2 * nr_blocks)), gfp);
	if (!ifs)
		return ifs;

	spin_lock_init(&ifs->state_lock);
	if (folio_test_uptodate(folio))
		bitmap_set(ifs->state, 0, nr_blocks);
	if (folio_test_dirty(folio))
		bitmap_set(ifs->state, nr_blocks, nr_blocks);
	folio_attach_private(folio, ifs);

	return ifs;
}
static void ifs_free(struct folio *folio)
{
	struct iomap_folio_state *ifs = folio_detach_private(folio);

	if (!ifs)
		return;
	WARN_ON_ONCE(ifs->read_bytes_pending != 0);
	WARN_ON_ONCE(atomic_read(&ifs->write_bytes_pending));
	WARN_ON_ONCE(ifs_is_fully_uptodate(folio, ifs) !=
			folio_test_uptodate(folio));
	kfree(ifs);
}
/*
 * Calculate the range inside the folio that we actually need to read.
 */
static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
		loff_t *pos, loff_t length, size_t *offp, size_t *lenp)
{
	struct iomap_folio_state *ifs = folio->private;
	loff_t orig_pos = *pos;
	loff_t isize = i_size_read(inode);
	unsigned block_bits = inode->i_blkbits;
	unsigned block_size = (1 << block_bits);
	size_t poff = offset_in_folio(folio, *pos);
	size_t plen = min_t(loff_t, folio_size(folio) - poff, length);
	size_t orig_plen = plen;
	unsigned first = poff >> block_bits;
	unsigned last = (poff + plen - 1) >> block_bits;

	/*
	 * If the block size is smaller than the page size, we need to check the
	 * per-block uptodate status and adjust the offset and length if needed
	 * to avoid reading in already uptodate ranges.
	 */
	if (ifs) {
		unsigned int i;

		/* move forward for each leading block marked uptodate */
		for (i = first; i <= last; i++) {
			if (!ifs_block_is_uptodate(ifs, i))
				break;
			*pos += block_size;
			poff += block_size;
			plen -= block_size;
			first++;
		}

		/* truncate len if we find any trailing uptodate block(s) */
		for ( ; i <= last; i++) {
			if (ifs_block_is_uptodate(ifs, i)) {
				plen -= (last - i + 1) * block_size;
				last = i - 1;
				break;
			}
		}
	}

	/*
	 * If the extent spans the block that contains the i_size, we need to
	 * handle both halves separately so that we properly zero data in the
	 * page cache for blocks that are entirely outside of i_size.
	 */
	if (orig_pos <= isize && orig_pos + orig_plen > isize) {
		unsigned end = offset_in_folio(folio, isize - 1) >> block_bits;

		if (first <= end && last > end)
			plen -= (last - end) * block_size;
	}

	*offp = poff;
	*lenp = plen;
}
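
/*
 * Worked example (illustrative): for a read covering a whole 8k folio
 * with 4k blocks where block 0 is already uptodate, the leading-block
 * loop above advances *pos and poff by 4096 and shrinks plen to 4096,
 * so only the second block is actually read from disk.
 */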
static void iomap_finish_folio_read(struct folio *folio, size_t off,
		size_t len, int error)
{
	struct iomap_folio_state *ifs = folio->private;
	bool uptodate = !error;
	bool finished = true;

	if (ifs) {
		unsigned long flags;

		spin_lock_irqsave(&ifs->state_lock, flags);
		if (!error)
			uptodate = ifs_set_range_uptodate(folio, ifs, off, len);
		ifs->read_bytes_pending -= len;
		finished = !ifs->read_bytes_pending;
		spin_unlock_irqrestore(&ifs->state_lock, flags);
	}

	if (finished)
		folio_end_read(folio, uptodate);
}
static void iomap_read_end_io(struct bio *bio)
{
	int error = blk_status_to_errno(bio->bi_status);
	struct folio_iter fi;

	bio_for_each_folio_all(fi, bio)
		iomap_finish_folio_read(fi.folio, fi.offset, fi.length, error);
	bio_put(bio);
}
struct iomap_readpage_ctx {
	struct folio		*cur_folio;
	bool			cur_folio_in_bio;
	struct bio		*bio;
	struct readahead_control *rac;
};
/**
 * iomap_read_inline_data - copy inline data into the page cache
 * @iter: iteration structure
 * @folio: folio to copy to
 *
 * Copy the inline data in @iter into @folio and zero out the rest of the folio.
 * Only a single IOMAP_INLINE extent is allowed at the end of each file.
 * Returns zero for success to complete the read, or the usual negative errno.
 */
static int iomap_read_inline_data(const struct iomap_iter *iter,
		struct folio *folio)
{
	const struct iomap *iomap = iomap_iter_srcmap(iter);
	size_t size = i_size_read(iter->inode) - iomap->offset;
	size_t offset = offset_in_folio(folio, iomap->offset);

	if (folio_test_uptodate(folio))
		return 0;

	if (WARN_ON_ONCE(size > iomap->length))
		return -EIO;
	if (offset > 0)
		ifs_alloc(iter->inode, folio, iter->flags);

	folio_fill_tail(folio, offset, iomap->inline_data, size);
	iomap_set_range_uptodate(folio, offset, folio_size(folio) - offset);
	return 0;
}
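
/*
 * Illustrative example (not from the original source): for a file with
 * i_size == 100 whose inline extent starts at offset 0, size == 100 and
 * offset == 0, so folio_fill_tail() copies the 100 inline bytes, zeroes
 * the remainder of the folio, and the whole folio range is then marked
 * uptodate.
 */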
static inline bool iomap_block_needs_zeroing(const struct iomap_iter *iter,
		loff_t pos)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);

	return srcmap->type != IOMAP_MAPPED ||
		(srcmap->flags & IOMAP_F_NEW) ||
		pos >= i_size_read(iter->inode);
}
static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
		struct iomap_readpage_ctx *ctx, loff_t offset)
{
	const struct iomap *iomap = &iter->iomap;
	loff_t pos = iter->pos + offset;
	loff_t length = iomap_length(iter) - offset;
	struct folio *folio = ctx->cur_folio;
	struct iomap_folio_state *ifs;
	loff_t orig_pos = pos;
	size_t poff, plen;
	sector_t sector;

	if (iomap->type == IOMAP_INLINE)
		return iomap_read_inline_data(iter, folio);

	/* zero post-eof blocks as the page may be mapped */
	ifs = ifs_alloc(iter->inode, folio, iter->flags);
	iomap_adjust_read_range(iter->inode, folio, &pos, length, &poff, &plen);
	if (plen == 0)
		goto done;

	if (iomap_block_needs_zeroing(iter, pos)) {
		folio_zero_range(folio, poff, plen);
		iomap_set_range_uptodate(folio, poff, plen);
		goto done;
	}

	ctx->cur_folio_in_bio = true;
	if (ifs) {
		spin_lock_irq(&ifs->state_lock);
		ifs->read_bytes_pending += plen;
		spin_unlock_irq(&ifs->state_lock);
	}

	sector = iomap_sector(iomap, pos);
	if (!ctx->bio ||
	    bio_end_sector(ctx->bio) != sector ||
	    !bio_add_folio(ctx->bio, folio, plen, poff)) {
		gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
		gfp_t orig_gfp = gfp;
		unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);

		if (ctx->bio)
			submit_bio(ctx->bio);

		if (ctx->rac) /* same as readahead_gfp_mask */
			gfp |= __GFP_NORETRY | __GFP_NOWARN;
		ctx->bio = bio_alloc(iomap->bdev, bio_max_segs(nr_vecs),
				     REQ_OP_READ, gfp);
		/*
		 * If the bio_alloc fails, try it again for a single page to
		 * avoid having to deal with partial page reads.  This emulates
		 * what do_mpage_read_folio does.
		 */
		if (!ctx->bio) {
			ctx->bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ,
					     orig_gfp);
		}
		if (ctx->rac)
			ctx->bio->bi_opf |= REQ_RAHEAD;
		ctx->bio->bi_iter.bi_sector = sector;
		ctx->bio->bi_end_io = iomap_read_end_io;
		bio_add_folio_nofail(ctx->bio, folio, plen, poff);
	}

done:
	/*
	 * Move the caller beyond our range so that it keeps making progress.
	 * For that, we have to include any leading non-uptodate ranges, but
	 * we can skip trailing ones as they will be handled in the next
	 * iteration.
	 */
	return pos - orig_pos + plen;
}
static loff_t iomap_read_folio_iter(const struct iomap_iter *iter,
		struct iomap_readpage_ctx *ctx)
{
	struct folio *folio = ctx->cur_folio;
	size_t offset = offset_in_folio(folio, iter->pos);
	loff_t length = min_t(loff_t, folio_size(folio) - offset,
			      iomap_length(iter));
	loff_t done, ret;

	for (done = 0; done < length; done += ret) {
		ret = iomap_readpage_iter(iter, ctx, done);
		if (ret <= 0)
			return ret;
	}

	return done;
}
int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= folio->mapping->host,
		.pos		= folio_pos(folio),
		.len		= folio_size(folio),
	};
	struct iomap_readpage_ctx ctx = {
		.cur_folio	= folio,
	};
	int ret;

	trace_iomap_readpage(iter.inode, 1);

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_read_folio_iter(&iter, &ctx);

	if (ctx.bio) {
		submit_bio(ctx.bio);
		WARN_ON_ONCE(!ctx.cur_folio_in_bio);
	} else {
		WARN_ON_ONCE(ctx.cur_folio_in_bio);
		folio_unlock(folio);
	}

	/*
	 * Just like mpage_readahead and block_read_full_folio, we always
	 * return 0 and just set the folio error flag on errors.  This
	 * should be cleaned up throughout the stack eventually.
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(iomap_read_folio);
static loff_t iomap_readahead_iter(const struct iomap_iter *iter,
		struct iomap_readpage_ctx *ctx)
{
	loff_t length = iomap_length(iter);
	loff_t done, ret;

	for (done = 0; done < length; done += ret) {
		if (ctx->cur_folio &&
		    offset_in_folio(ctx->cur_folio, iter->pos + done) == 0) {
			if (!ctx->cur_folio_in_bio)
				folio_unlock(ctx->cur_folio);
			ctx->cur_folio = NULL;
		}
		if (!ctx->cur_folio) {
			ctx->cur_folio = readahead_folio(ctx->rac);
			ctx->cur_folio_in_bio = false;
		}
		ret = iomap_readpage_iter(iter, ctx, done);
		if (ret <= 0)
			return ret;
	}

	return done;
}
/**
 * iomap_readahead - Attempt to read pages from a file.
 * @rac: Describes the pages to be read.
 * @ops: The operations vector for the filesystem.
 *
 * This function is for filesystems to call to implement their readahead
 * address_space operation.
 *
 * Context: The @ops callbacks may submit I/O (eg to read the addresses of
 * blocks from disc), and may wait for it.  The caller may be trying to
 * access a different page, and so sleeping excessively should be avoided.
 * It may allocate memory, but should avoid costly allocations.  This
 * function is called with memalloc_nofs set, so allocations will not cause
 * the filesystem to be reentered.
 */
void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode	= rac->mapping->host,
		.pos	= readahead_pos(rac),
		.len	= readahead_length(rac),
	};
	struct iomap_readpage_ctx ctx = {
		.rac	= rac,
	};

	trace_iomap_readahead(rac->mapping->host, readahead_count(rac));

	while (iomap_iter(&iter, ops) > 0)
		iter.processed = iomap_readahead_iter(&iter, &ctx);

	if (ctx.bio)
		submit_bio(ctx.bio);
	if (ctx.cur_folio) {
		if (!ctx.cur_folio_in_bio)
			folio_unlock(ctx.cur_folio);
	}
}
EXPORT_SYMBOL_GPL(iomap_readahead);
/*
 * iomap_is_partially_uptodate checks whether blocks within a folio are
 * uptodate or not.
 *
 * Returns true if all blocks which correspond to the specified part
 * of the folio are uptodate.
 */
bool iomap_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
{
	struct iomap_folio_state *ifs = folio->private;
	struct inode *inode = folio->mapping->host;
	unsigned first, last, i;

	if (!ifs)
		return false;

	/* Caller's range may extend past the end of this folio */
	count = min(folio_size(folio) - from, count);

	/* First and last blocks in range within folio */
	first = from >> inode->i_blkbits;
	last = (from + count - 1) >> inode->i_blkbits;

	for (i = first; i <= last; i++)
		if (!ifs_block_is_uptodate(ifs, i))
			return false;
	return true;
}
EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);
/**
 * iomap_get_folio - get a folio reference for writing
 * @iter: iteration structure
 * @pos: start offset of write
 * @len: Suggested size of folio to create.
 *
 * Returns a locked reference to the folio at @pos, or an error pointer if the
 * folio could not be obtained.
 */
struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos, size_t len)
{
	fgf_t fgp = FGP_WRITEBEGIN | FGP_NOFS;

	if (iter->flags & IOMAP_NOWAIT)
		fgp |= FGP_NOWAIT;
	fgp |= fgf_set_order(len);

	return __filemap_get_folio(iter->inode->i_mapping, pos >> PAGE_SHIFT,
			fgp, mapping_gfp_mask(iter->inode->i_mapping));
}
EXPORT_SYMBOL_GPL(iomap_get_folio);
bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags)
{
	trace_iomap_release_folio(folio->mapping->host, folio_pos(folio),
			folio_size(folio));

	/*
	 * If the folio is dirty, we refuse to release our metadata because
	 * it may be partially dirty.  Once we track per-block dirty state,
	 * we can release the metadata if every block is dirty.
	 */
	if (folio_test_dirty(folio))
		return false;
	ifs_free(folio);
	return true;
}
EXPORT_SYMBOL_GPL(iomap_release_folio);
void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len)
{
	trace_iomap_invalidate_folio(folio->mapping->host,
			folio_pos(folio) + offset, len);

	/*
	 * If we're invalidating the entire folio, clear the dirty state
	 * from it and release it to avoid unnecessary buildup of the LRU.
	 */
	if (offset == 0 && len == folio_size(folio)) {
		WARN_ON_ONCE(folio_test_writeback(folio));
		folio_cancel_dirty(folio);
		ifs_free(folio);
	}
}
EXPORT_SYMBOL_GPL(iomap_invalidate_folio);
bool iomap_dirty_folio(struct address_space *mapping, struct folio *folio)
{
	struct inode *inode = mapping->host;
	size_t len = folio_size(folio);

	ifs_alloc(inode, folio, 0);
	iomap_set_range_dirty(folio, 0, len);
	return filemap_dirty_folio(mapping, folio);
}
EXPORT_SYMBOL_GPL(iomap_dirty_folio);
static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
	loff_t i_size = i_size_read(inode);

	/*
	 * Only truncate newly allocated pages beyond EOF, even if the
	 * write started inside the existing inode size.
	 */
	if (pos + len > i_size)
		truncate_pagecache_range(inode, max(pos, i_size),
					 pos + len - 1);
}
static int iomap_read_folio_sync(loff_t block_start, struct folio *folio,
		size_t poff, size_t plen, const struct iomap *iomap)
{
	struct bio_vec bvec;
	struct bio bio;

	bio_init(&bio, iomap->bdev, &bvec, 1, REQ_OP_READ);
	bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
	bio_add_folio_nofail(&bio, folio, plen, poff);
	return submit_bio_wait(&bio);
}
static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
		size_t len, struct folio *folio)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	struct iomap_folio_state *ifs;
	loff_t block_size = i_blocksize(iter->inode);
	loff_t block_start = round_down(pos, block_size);
	loff_t block_end = round_up(pos + len, block_size);
	unsigned int nr_blocks = i_blocks_per_folio(iter->inode, folio);
	size_t from = offset_in_folio(folio, pos), to = from + len;
	size_t poff, plen;

	/*
	 * If the write or zeroing completely overlaps the current folio, then
	 * entire folio will be dirtied so there is no need for
	 * per-block state tracking structures to be attached to this folio.
	 * For the unshare case, we must read in the ondisk contents because we
	 * are not changing pagecache contents.
	 */
	if (!(iter->flags & IOMAP_UNSHARE) && pos <= folio_pos(folio) &&
	    pos + len >= folio_pos(folio) + folio_size(folio))
		return 0;

	ifs = ifs_alloc(iter->inode, folio, iter->flags);
	if ((iter->flags & IOMAP_NOWAIT) && !ifs && nr_blocks > 1)
		return -EAGAIN;

	if (folio_test_uptodate(folio))
		return 0;

	do {
		iomap_adjust_read_range(iter->inode, folio, &block_start,
				block_end - block_start, &poff, &plen);
		if (plen == 0)
			break;

		if (!(iter->flags & IOMAP_UNSHARE) &&
		    (from <= poff || from >= poff + plen) &&
		    (to <= poff || to >= poff + plen))
			continue;

		if (iomap_block_needs_zeroing(iter, block_start)) {
			if (WARN_ON_ONCE(iter->flags & IOMAP_UNSHARE))
				return -EIO;
			folio_zero_segments(folio, poff, from, to, poff + plen);
		} else {
			int status;

			if (iter->flags & IOMAP_NOWAIT)
				return -EAGAIN;

			status = iomap_read_folio_sync(block_start, folio,
					poff, plen, srcmap);
			if (status)
				return status;
		}
		iomap_set_range_uptodate(folio, poff, plen);
	} while ((block_start += plen) < block_end);

	return 0;
}
static struct folio *__iomap_get_folio(struct iomap_iter *iter, loff_t pos,
		size_t len)
{
	const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;

	if (folio_ops && folio_ops->get_folio)
		return folio_ops->get_folio(iter, pos, len);
	return iomap_get_folio(iter, pos, len);
}
static void __iomap_put_folio(struct iomap_iter *iter, loff_t pos, size_t ret,
		struct folio *folio)
{
	const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;

	if (folio_ops && folio_ops->put_folio) {
		folio_ops->put_folio(iter->inode, pos, ret, folio);
	} else {
		folio_unlock(folio);
		folio_put(folio);
	}
}
static int iomap_write_begin_inline(const struct iomap_iter *iter,
		struct folio *folio)
{
	/* needs more work for the tailpacking case; disable for now */
	if (WARN_ON_ONCE(iomap_iter_srcmap(iter)->offset != 0))
		return -EIO;
	return iomap_read_inline_data(iter, folio);
}
static int iomap_write_begin(struct iomap_iter *iter, loff_t pos,
		size_t len, struct folio **foliop)
{
	const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	struct folio *folio;
	int status = 0;

	BUG_ON(pos + len > iter->iomap.offset + iter->iomap.length);
	if (srcmap != &iter->iomap)
		BUG_ON(pos + len > srcmap->offset + srcmap->length);

	if (fatal_signal_pending(current))
		return -EINTR;

	if (!mapping_large_folio_support(iter->inode->i_mapping))
		len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos));

	folio = __iomap_get_folio(iter, pos, len);
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	/*
	 * Now we have a locked folio, before we do anything with it we need to
	 * check that the iomap we have cached is not stale.  The inode extent
	 * mapping can change due to concurrent IO in flight (e.g.
	 * IOMAP_UNWRITTEN state can change and memory reclaim could have
	 * reclaimed a previously partially written page at this index after IO
	 * completion before this write reaches this file offset) and hence we
	 * could do the wrong thing here (zero a page range incorrectly or fail
	 * to zero) and corrupt data.
	 */
	if (folio_ops && folio_ops->iomap_valid) {
		bool iomap_valid = folio_ops->iomap_valid(iter->inode,
							 &iter->iomap);
		if (!iomap_valid) {
			iter->iomap.flags |= IOMAP_F_STALE;
			status = 0;
			goto out_unlock;
		}
	}

	if (pos + len > folio_pos(folio) + folio_size(folio))
		len = folio_pos(folio) + folio_size(folio) - pos;

	if (srcmap->type == IOMAP_INLINE)
		status = iomap_write_begin_inline(iter, folio);
	else if (srcmap->flags & IOMAP_F_BUFFER_HEAD)
		status = __block_write_begin_int(folio, pos, len, NULL, srcmap);
	else
		status = __iomap_write_begin(iter, pos, len, folio);

	if (unlikely(status))
		goto out_unlock;

	*foliop = folio;
	return 0;

out_unlock:
	__iomap_put_folio(iter, pos, 0, folio);

	return status;
}
static bool __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
		size_t copied, struct folio *folio)
{
	flush_dcache_folio(folio);

	/*
	 * The blocks that were entirely written will now be uptodate, so we
	 * don't have to worry about a read_folio reading them and overwriting a
	 * partial write.  However, if we've encountered a short write and only
	 * partially written into a block, it will not be marked uptodate, so a
	 * read_folio might come in and destroy our partial write.
	 *
	 * Do the simplest thing and just treat any short write to a
	 * non-uptodate page as a zero-length write, and force the caller to
	 * redo the whole thing.
	 */
	if (unlikely(copied < len && !folio_test_uptodate(folio)))
		return false;
	iomap_set_range_uptodate(folio, offset_in_folio(folio, pos), len);
	iomap_set_range_dirty(folio, offset_in_folio(folio, pos), copied);
	filemap_dirty_folio(inode->i_mapping, folio);
	return true;
}
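
/*
 * Illustrative example (not from the original source): if a 2048-byte
 * copy into a not-yet-uptodate folio faults after 1000 bytes, copied <
 * len and __iomap_write_end() returns false, so iomap_write_iter()
 * below treats the write as zero-length and retries the copy instead of
 * marking partially-initialised blocks uptodate.
 */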
static void iomap_write_end_inline(const struct iomap_iter *iter,
		struct folio *folio, loff_t pos, size_t copied)
{
	const struct iomap *iomap = &iter->iomap;
	void *addr;

	WARN_ON_ONCE(!folio_test_uptodate(folio));
	BUG_ON(!iomap_inline_data_valid(iomap));

	flush_dcache_folio(folio);
	addr = kmap_local_folio(folio, pos);
	memcpy(iomap_inline_data(iomap, pos), addr, copied);
	kunmap_local(addr);

	mark_inode_dirty(iter->inode);
}
/*
 * Returns true if all copied bytes have been written to the pagecache,
 * otherwise return false.
 */
static bool iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
		size_t copied, struct folio *folio)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);

	if (srcmap->type == IOMAP_INLINE) {
		iomap_write_end_inline(iter, folio, pos, copied);
		return true;
	}

	if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
		size_t bh_written;

		bh_written = block_write_end(NULL, iter->inode->i_mapping, pos,
					len, copied, folio, NULL);
		WARN_ON_ONCE(bh_written != copied && bh_written != 0);
		return bh_written == copied;
	}

	return __iomap_write_end(iter->inode, pos, len, copied, folio);
}
static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
{
	loff_t length = iomap_length(iter);
	loff_t pos = iter->pos;
	ssize_t total_written = 0;
	long status = 0;
	struct address_space *mapping = iter->inode->i_mapping;
	size_t chunk = mapping_max_folio_size(mapping);
	unsigned int bdp_flags = (iter->flags & IOMAP_NOWAIT) ? BDP_ASYNC : 0;

	do {
		struct folio *folio;
		loff_t old_size;
		size_t offset;		/* Offset into folio */
		size_t bytes;		/* Bytes to write to folio */
		size_t copied;		/* Bytes copied from user */
		size_t written;		/* Bytes have been written */

		bytes = iov_iter_count(i);
retry:
		offset = pos & (chunk - 1);
		bytes = min(chunk - offset, bytes);
		status = balance_dirty_pages_ratelimited_flags(mapping,
							       bdp_flags);
		if (unlikely(status))
			break;

		if (bytes > length)
			bytes = length;

		/*
		 * Bring in the user page that we'll copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * For async buffered writes the assumption is that the user
		 * page has already been faulted in.  This can be optimized by
		 * faulting the user page.
		 */
		if (unlikely(fault_in_iov_iter_readable(i, bytes) == bytes)) {
			status = -EFAULT;
			break;
		}

		status = iomap_write_begin(iter, pos, bytes, &folio);
		if (unlikely(status)) {
			iomap_write_failed(iter->inode, pos, bytes);
			break;
		}
		if (iter->iomap.flags & IOMAP_F_STALE)
			break;

		offset = offset_in_folio(folio, pos);
		if (bytes > folio_size(folio) - offset)
			bytes = folio_size(folio) - offset;

		if (mapping_writably_mapped(mapping))
			flush_dcache_folio(folio);

		copied = copy_folio_from_iter_atomic(folio, offset, bytes, i);
		written = iomap_write_end(iter, pos, bytes, copied, folio) ?
			  copied : 0;

		/*
		 * Update the in-memory inode size after copying the data into
		 * the page cache.  It's up to the file system to write the
		 * updated size to disk, preferably after I/O completion so that
		 * no stale data is exposed.  Only once that's done can we
		 * unlock and release the folio.
		 */
		old_size = iter->inode->i_size;
		if (pos + written > old_size) {
			i_size_write(iter->inode, pos + written);
			iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
		}
		__iomap_put_folio(iter, pos, written, folio);

		if (old_size < pos)
			pagecache_isize_extended(iter->inode, old_size, pos);

		cond_resched();
		if (unlikely(written == 0)) {
			/*
			 * A short copy made iomap_write_end() reject the
			 * thing entirely.  Might be memory poisoning
			 * halfway through, might be a race with munmap,
			 * might be severe memory pressure.
			 */
			iomap_write_failed(iter->inode, pos, bytes);
			iov_iter_revert(i, copied);

			if (chunk > PAGE_SIZE)
				chunk /= 2;
			if (copied) {
				bytes = copied;
				goto retry;
			}
		} else {
			pos += written;
			total_written += written;
			length -= written;
		}
	} while (iov_iter_count(i) && length);

	if (status == -EAGAIN) {
		iov_iter_revert(i, total_written);
		return -EAGAIN;
	}
	return total_written ? total_written : status;
}
ssize_t
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i,
		const struct iomap_ops *ops, void *private)
{
	struct iomap_iter iter = {
		.inode		= iocb->ki_filp->f_mapping->host,
		.pos		= iocb->ki_pos,
		.len		= iov_iter_count(i),
		.flags		= IOMAP_WRITE,
		.private	= private,
	};
	ssize_t ret;

	if (iocb->ki_flags & IOCB_NOWAIT)
		iter.flags |= IOMAP_NOWAIT;

	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_write_iter(&iter, i);

	if (unlikely(iter.pos == iocb->ki_pos))
		return ret;
	ret = iter.pos - iocb->ki_pos;
	iocb->ki_pos = iter.pos;
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
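
/*
 * Usage sketch (illustrative, not part of this file): a filesystem
 * typically wraps iomap_file_buffered_write() in its ->write_iter
 * method, roughly:
 *
 *	static ssize_t myfs_file_write_iter(struct kiocb *iocb,
 *			struct iov_iter *from)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock(inode);
 *		ret = generic_write_checks(iocb, from);
 *		if (ret > 0)
 *			ret = iomap_file_buffered_write(iocb, from,
 *					&myfs_iomap_ops, NULL);
 *		inode_unlock(inode);
 *		if (ret > 0)
 *			ret = generic_write_sync(iocb, ret);
 *		return ret;
 *	}
 *
 * where myfs_iomap_ops is a hypothetical struct iomap_ops supplying the
 * filesystem's ->iomap_begin/->iomap_end methods.
 */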
static void iomap_write_delalloc_ifs_punch(struct inode *inode,
		struct folio *folio, loff_t start_byte, loff_t end_byte,
		struct iomap *iomap, iomap_punch_t punch)
{
	unsigned int first_blk, last_blk, i;
	loff_t last_byte;
	u8 blkbits = inode->i_blkbits;
	struct iomap_folio_state *ifs;

	/*
	 * When we have per-block dirty tracking, there can be
	 * blocks within a folio which are marked uptodate
	 * but not dirty.  In that case it is necessary to punch
	 * out such blocks to avoid leaking any delalloc blocks.
	 */
	ifs = folio->private;
	if (!ifs)
		return;

	last_byte = min_t(loff_t, end_byte - 1,
			folio_pos(folio) + folio_size(folio) - 1);
	first_blk = offset_in_folio(folio, start_byte) >> blkbits;
	last_blk = offset_in_folio(folio, last_byte) >> blkbits;
	for (i = first_blk; i <= last_blk; i++) {
		if (!ifs_block_is_dirty(folio, ifs, i))
			punch(inode, folio_pos(folio) + (i << blkbits),
				    1 << blkbits, iomap);
	}
}
static void iomap_write_delalloc_punch(struct inode *inode, struct folio *folio,
		loff_t *punch_start_byte, loff_t start_byte, loff_t end_byte,
		struct iomap *iomap, iomap_punch_t punch)
{
	if (!folio_test_dirty(folio))
		return;

	/* if dirty, punch up to offset */
	if (start_byte > *punch_start_byte) {
		punch(inode, *punch_start_byte, start_byte - *punch_start_byte,
				iomap);
	}

	/* Punch non-dirty blocks within folio */
	iomap_write_delalloc_ifs_punch(inode, folio, start_byte, end_byte,
			iomap, punch);

	/*
	 * Make sure the next punch start is correctly bound to
	 * the end of this data range, not the end of the folio.
	 */
	*punch_start_byte = min_t(loff_t, end_byte,
				folio_pos(folio) + folio_size(folio));
}
/*
 * Scan the data range passed to us for dirty page cache folios.  If we find a
 * dirty folio, punch out the preceding range and update the offset from which
 * the next punch will start from.
 *
 * We can punch out storage reservations under clean pages because they either
 * contain data that has been written back - in which case the delalloc punch
 * over that range is a no-op - or they have been read in via read faults, in
 * which case they contain zeroes and we can remove the delalloc backing range
 * and any new writes to those pages will do the normal hole filling operation.
 *
 * This makes the logic simple: we only need to keep delalloc extents over the
 * dirty ranges of the page cache.
 *
 * This function uses [start_byte, end_byte) intervals (i.e. open ended) to
 * simplify range iterations.
 */
static void iomap_write_delalloc_scan(struct inode *inode,
		loff_t *punch_start_byte, loff_t start_byte, loff_t end_byte,
		struct iomap *iomap, iomap_punch_t punch)
{
	while (start_byte < end_byte) {
		struct folio	*folio;

		/* grab locked page */
		folio = filemap_lock_folio(inode->i_mapping,
				start_byte >> PAGE_SHIFT);
		if (IS_ERR(folio)) {
			start_byte = ALIGN_DOWN(start_byte, PAGE_SIZE) +
					PAGE_SIZE;
			continue;
		}

		iomap_write_delalloc_punch(inode, folio, punch_start_byte,
				start_byte, end_byte, iomap, punch);

		/* move offset to start of next folio in range */
		start_byte = folio_next_index(folio) << PAGE_SHIFT;
		folio_unlock(folio);
		folio_put(folio);
	}
}
/*
 * When a short write occurs, the filesystem might need to use ->iomap_end
 * to remove space reservations created in ->iomap_begin.
 *
 * For filesystems that use delayed allocation, there can be dirty pages over
 * the delalloc extent outside the range of a short write but still within the
 * delalloc extent allocated for this iomap if the write raced with page
 * faults.
 *
 * Punch out all the delalloc blocks in the range given except for those that
 * have dirty data still pending in the page cache - those are going to be
 * written and so must still retain the delalloc backing for writeback.
 *
 * The punch() callback *must* only punch delalloc extents in the range passed
 * to it.  It must skip over all other types of extents in the range and leave
 * them completely unchanged.  It must do this punch atomically with respect to
 * other extent modifications.
 *
 * The punch() callback may be called with a folio locked to prevent writeback
 * extent allocation racing at the edge of the range we are currently punching.
 * The locked folio may or may not cover the range being punched, so it is not
 * safe for the punch() callback to lock folios itself.
 *
 * Lock order is:
 *
 * inode->i_rwsem (shared or exclusive)
 *   inode->i_mapping->invalidate_lock (exclusive)
 *     folio_lock()
 *       ->punch
 *         internal filesystem allocation lock
 *
 * As we are scanning the page cache for data, we don't need to reimplement the
 * wheel - mapping_seek_hole_data() does exactly what we need to identify the
 * start and end of data ranges correctly even for sub-folio block sizes.  This
 * byte range based iteration is especially convenient because it means we
 * don't have to care about variable size folios, nor where the start or end of
 * the data range lies within a folio, if they lie within the same folio or even
 * if there are multiple discontiguous data ranges within the folio.
 *
 * It should be noted that mapping_seek_hole_data() is not aware of EOF, and so
 * can return data ranges that exist in the cache beyond EOF.  e.g. a page fault
 * spanning EOF will initialise the post-EOF data to zeroes and mark it up to
 * date.  A write page fault can then mark it dirty.  If we then fail a write()
 * beyond EOF into that up to date cached range, we allocate a delalloc block
 * beyond EOF and then have to punch it out.  Because the range is up to date,
 * mapping_seek_hole_data() will return it, and we will skip the punch because
 * the folio is dirty.  This is incorrect - we always need to punch out delalloc
 * beyond EOF in this case as writeback will never write back and convert that
 * delalloc block beyond EOF.  Hence we limit the cached data scan range to EOF,
 * resulting in always punching out the range from the EOF to the end of the
 * range the iomap spans.
 *
 * Intervals are of the form [start_byte, end_byte) (i.e. open ended) because it
 * matches the intervals returned by mapping_seek_hole_data().  i.e. SEEK_DATA
 * returns the start of a data range (start_byte), and SEEK_HOLE(start_byte)
 * returns the end of the data range (data_end).  Using closed intervals would
 * require sprinkling this code with magic "+ 1" and "- 1" arithmetic and expose
 * the code to subtle off-by-one bugs....
 */
void iomap_write_delalloc_release(struct inode *inode, loff_t start_byte,
		loff_t end_byte, unsigned flags, struct iomap *iomap,
		iomap_punch_t punch)
{
	loff_t punch_start_byte = start_byte;
	loff_t scan_end_byte = min(i_size_read(inode), end_byte);

	/*
	 * The caller must hold invalidate_lock to avoid races with page faults
	 * re-instantiating folios and dirtying them via ->page_mkwrite whilst
	 * we walk the cache and perform delalloc extent removal.  Failing to do
	 * this can leave dirty pages with no space reservation in the cache.
	 */
	lockdep_assert_held_write(&inode->i_mapping->invalidate_lock);

	while (start_byte < scan_end_byte) {
		loff_t		data_end;

		start_byte = mapping_seek_hole_data(inode->i_mapping,
				start_byte, scan_end_byte, SEEK_DATA);
		/*
		 * If there is no more data to scan, all that is left is to
		 * punch out the remaining range.
		 *
		 * Note that mapping_seek_hole_data is only supposed to return
		 * either an offset or -ENXIO, so WARN on any other error as
		 * that would be an API change without updating the callers.
		 */
		if (start_byte == -ENXIO || start_byte == scan_end_byte)
			break;
		if (WARN_ON_ONCE(start_byte < 0))
			return;
		WARN_ON_ONCE(start_byte < punch_start_byte);
		WARN_ON_ONCE(start_byte > scan_end_byte);

		/*
		 * We find the end of this contiguous cached data range by
		 * seeking from start_byte to the beginning of the next hole.
		 */
		data_end = mapping_seek_hole_data(inode->i_mapping, start_byte,
				scan_end_byte, SEEK_HOLE);
		if (WARN_ON_ONCE(data_end < 0))
			return;

		/*
		 * If we race with post-direct I/O invalidation of the page cache,
		 * there might be no data left at start_byte.
		 */
		if (data_end == start_byte)
			continue;

		WARN_ON_ONCE(data_end < start_byte);
		WARN_ON_ONCE(data_end > scan_end_byte);

		iomap_write_delalloc_scan(inode, &punch_start_byte, start_byte,
				data_end, iomap, punch);

		/* The next data search starts at the end of this one. */
		start_byte = data_end;
	}

	if (punch_start_byte < end_byte)
		punch(inode, punch_start_byte, end_byte - punch_start_byte,
				iomap);
}
EXPORT_SYMBOL_GPL(iomap_write_delalloc_release);
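
/*
 * Worked example (illustrative): after a short write into [0, 64k) that
 * only dirtied pagecache over [0, 16k), SEEK_DATA finds data at byte 0
 * and SEEK_HOLE ends it at 16k; iomap_write_delalloc_scan() keeps the
 * delalloc blocks under the dirty folios and advances punch_start_byte
 * to 16k, and the final punch removes the now-unused delalloc
 * reservation over [16k, 64k).
 */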
static loff_t iomap_unshare_iter(struct iomap_iter *iter)
{
	struct iomap *iomap = &iter->iomap;
	loff_t pos = iter->pos;
	loff_t length = iomap_length(iter);
	loff_t written = 0;

	if (!iomap_want_unshare_iter(iter))
		return length;

	do {
		struct folio *folio;
		int status;
		size_t offset;
		size_t bytes = min_t(u64, SIZE_MAX, length);
		bool ret;

		status = iomap_write_begin(iter, pos, bytes, &folio);
		if (unlikely(status))
			return status;
		if (iomap->flags & IOMAP_F_STALE)
			break;

		offset = offset_in_folio(folio, pos);
		if (bytes > folio_size(folio) - offset)
			bytes = folio_size(folio) - offset;

		ret = iomap_write_end(iter, pos, bytes, bytes, folio);
		__iomap_put_folio(iter, pos, bytes, folio);
		if (WARN_ON_ONCE(!ret))
			return -EIO;

		cond_resched();

		pos += bytes;
		written += bytes;
		length -= bytes;

		balance_dirty_pages_ratelimited(iter->inode->i_mapping);
	} while (length > 0);

	return written;
}
int
iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
		const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= inode,
		.pos		= pos,
		.flags		= IOMAP_WRITE | IOMAP_UNSHARE,
	};
	loff_t size = i_size_read(inode);
	int ret;

	if (pos < 0 || pos >= size)
		return 0;

	iter.len = min(len, size - pos);
	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_unshare_iter(&iter);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_file_unshare);
/*
 * Flush the remaining range of the iter and mark the current mapping stale.
 * This is used when zero range sees an unwritten mapping that may have had
 * dirty pagecache over it.
 */
static inline int iomap_zero_iter_flush_and_stale(struct iomap_iter *i)
{
	struct address_space *mapping = i->inode->i_mapping;
	loff_t end = i->pos + i->len - 1;

	i->iomap.flags |= IOMAP_F_STALE;
	return filemap_write_and_wait_range(mapping, i->pos, end);
}
static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
{
	loff_t pos = iter->pos;
	loff_t length = iomap_length(iter);
	loff_t written = 0;

	do {
		struct folio *folio;
		int status;
		size_t offset;
		size_t bytes = min_t(u64, SIZE_MAX, length);
		bool ret;

		status = iomap_write_begin(iter, pos, bytes, &folio);
		if (status)
			return status;
		if (iter->iomap.flags & IOMAP_F_STALE)
			break;

		/* warn about zeroing folios beyond eof that won't write back */
		WARN_ON_ONCE(folio_pos(folio) > iter->inode->i_size);
		offset = offset_in_folio(folio, pos);
		if (bytes > folio_size(folio) - offset)
			bytes = folio_size(folio) - offset;

		folio_zero_range(folio, offset, bytes);
		folio_mark_accessed(folio);

		ret = iomap_write_end(iter, pos, bytes, bytes, folio);
		__iomap_put_folio(iter, pos, bytes, folio);
		if (WARN_ON_ONCE(!ret))
			return -EIO;

		pos += bytes;
		length -= bytes;
		written += bytes;
	} while (length > 0);

	if (did_zero)
		*did_zero = true;
	return written;
}
int
iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
		const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= inode,
		.pos		= pos,
		.len		= len,
		.flags		= IOMAP_ZERO,
	};
	struct address_space *mapping = inode->i_mapping;
	unsigned int blocksize = i_blocksize(inode);
	unsigned int off = pos & (blocksize - 1);
	loff_t plen = min_t(loff_t, len, blocksize - off);
	int ret;
	bool range_dirty;

	/*
	 * Zero range can skip mappings that are zero on disk so long as
	 * pagecache is clean.  If pagecache was dirty prior to zero range, the
	 * mapping converts on writeback completion and so must be zeroed.
	 *
	 * The simplest way to deal with this across a range is to flush
	 * pagecache and process the updated mappings.  To avoid excessive
	 * flushing on partial eof zeroing, special case it to zero the
	 * unaligned start portion if already dirty in pagecache.
	 */
	if (off &&
	    filemap_range_needs_writeback(mapping, pos, pos + plen - 1)) {
		iter.len = plen;
		while ((ret = iomap_iter(&iter, ops)) > 0)
			iter.processed = iomap_zero_iter(&iter, did_zero);

		iter.len = len - (iter.pos - pos);
		if (ret || !iter.len)
			return ret;
	}

	/*
	 * To avoid an unconditional flush, check pagecache state and only flush
	 * if dirty and the fs returns a mapping that might convert on
	 * writeback.
	 */
	range_dirty = filemap_range_needs_writeback(inode->i_mapping,
					iter.pos, iter.pos + iter.len - 1);
	while ((ret = iomap_iter(&iter, ops)) > 0) {
		const struct iomap *srcmap = iomap_iter_srcmap(&iter);

		if (srcmap->type == IOMAP_HOLE ||
		    srcmap->type == IOMAP_UNWRITTEN) {
			loff_t proc = iomap_length(&iter);

			if (range_dirty) {
				range_dirty = false;
				proc = iomap_zero_iter_flush_and_stale(&iter);
			}
			iter.processed = proc;
			continue;
		}

		iter.processed = iomap_zero_iter(&iter, did_zero);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_zero_range);
int
iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		const struct iomap_ops *ops)
{
	unsigned int blocksize = i_blocksize(inode);
	unsigned int off = pos & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!off)
		return 0;
	return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
}
EXPORT_SYMBOL_GPL(iomap_truncate_page);
static loff_t iomap_folio_mkwrite_iter(struct iomap_iter *iter,
		struct folio *folio)
{
	loff_t length = iomap_length(iter);
	int ret;

	if (iter->iomap.flags & IOMAP_F_BUFFER_HEAD) {
		ret = __block_write_begin_int(folio, iter->pos, length, NULL,
					      &iter->iomap);
		if (ret)
			return ret;
		block_commit_write(&folio->page, 0, length);
	} else {
		WARN_ON_ONCE(!folio_test_uptodate(folio));
		folio_mark_dirty(folio);
	}

	return length;
}
vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
{
	struct iomap_iter iter = {
		.inode		= file_inode(vmf->vma->vm_file),
		.flags		= IOMAP_WRITE | IOMAP_FAULT,
	};
	struct folio *folio = page_folio(vmf->page);
	ssize_t ret;

	folio_lock(folio);
	ret = folio_mkwrite_check_truncate(folio, iter.inode);
	if (ret < 0)
		goto out_unlock;
	iter.pos = folio_pos(folio);
	iter.len = ret;
	while ((ret = iomap_iter(&iter, ops)) > 0)
		iter.processed = iomap_folio_mkwrite_iter(&iter, folio);

	if (ret < 0)
		goto out_unlock;
	folio_wait_stable(folio);
	return VM_FAULT_LOCKED;
out_unlock:
	folio_unlock(folio);
	return vmf_fs_error(ret);
}
EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
static void iomap_finish_folio_write(struct inode *inode, struct folio *folio,
		size_t len)
{
	struct iomap_folio_state *ifs = folio->private;

	WARN_ON_ONCE(i_blocks_per_folio(inode, folio) > 1 && !ifs);
	WARN_ON_ONCE(ifs && atomic_read(&ifs->write_bytes_pending) <= 0);

	if (!ifs || atomic_sub_and_test(len, &ifs->write_bytes_pending))
		folio_end_writeback(folio);
}
/*
 * We're now finished for good with this ioend structure.  Update the page
 * state, release holds on bios, and finally free up memory.  Do not use the
 * ioend after this.
 */
static u32 iomap_finish_ioend(struct iomap_ioend *ioend, int error)
{
	struct inode *inode = ioend->io_inode;
	struct bio *bio = &ioend->io_bio;
	struct folio_iter fi;
	u32 folio_count = 0;

	if (error) {
		mapping_set_error(inode->i_mapping, error);
		if (!bio_flagged(bio, BIO_QUIET)) {
			pr_err_ratelimited(
"%s: writeback error on inode %lu, offset %lld, sector %llu",
				inode->i_sb->s_id, inode->i_ino,
				ioend->io_offset, ioend->io_sector);
		}
	}

	/* walk all folios in bio, ending page IO on them */
	bio_for_each_folio_all(fi, bio) {
		iomap_finish_folio_write(inode, fi.folio, fi.length);
		folio_count++;
	}

	bio_put(bio);	/* frees the ioend */
	return folio_count;
}
/*
 * Ioend completion routine for merged bios.  This can only be called from task
 * contexts as merged ioends can be of unbound length.  Hence we have to break up
 * the writeback completions into manageable chunks to avoid long scheduler
 * holdoffs.  We aim to keep scheduler holdoffs down below 10ms so that we get
 * good batch processing throughput without creating adverse scheduler latency
 * conditions.
 */
void
iomap_finish_ioends(struct iomap_ioend *ioend, int error)
{
	struct list_head tmp;
	u32 completions;

	might_sleep();

	list_replace_init(&ioend->io_list, &tmp);
	completions = iomap_finish_ioend(ioend, error);

	while (!list_empty(&tmp)) {
		if (completions > IOEND_BATCH_SIZE * 8) {
			cond_resched();
			completions = 0;
		}
		ioend = list_first_entry(&tmp, struct iomap_ioend, io_list);
		list_del_init(&ioend->io_list);
		completions += iomap_finish_ioend(ioend, error);
	}
}
EXPORT_SYMBOL_GPL(iomap_finish_ioends);
/*
 * We can merge two adjacent ioends if they have the same set of work to do.
 */
static bool
iomap_ioend_can_merge(struct iomap_ioend *ioend, struct iomap_ioend *next)
{
	if (ioend->io_bio.bi_status != next->io_bio.bi_status)
		return false;
	if (next->io_flags & IOMAP_F_BOUNDARY)
		return false;
	if ((ioend->io_flags & IOMAP_F_SHARED) ^
	    (next->io_flags & IOMAP_F_SHARED))
		return false;
	if ((ioend->io_type == IOMAP_UNWRITTEN) ^
	    (next->io_type == IOMAP_UNWRITTEN))
		return false;
	if (ioend->io_offset + ioend->io_size != next->io_offset)
		return false;
	/*
	 * Do not merge physically discontiguous ioends.  The filesystem
	 * completion functions will have to iterate the physical
	 * discontiguities even if we merge the ioends at a logical level, so
	 * we don't gain anything by merging physical discontiguities here.
	 *
	 * We cannot use bio->bi_iter.bi_sector here as it is modified during
	 * submission so does not point to the start sector of the bio at
	 * completion.
	 */
	if (ioend->io_sector + (ioend->io_size >> 9) != next->io_sector)
		return false;
	return true;
}
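
/*
 * Illustrative example (not from the original source): two 1MB ioends
 * at file offsets 0 and 1MB only merge if the second also starts at the
 * disk sector immediately following the first (io_sector + (io_size >>
 * 9)), has matching IOMAP_F_SHARED and IOMAP_UNWRITTEN state, and is
 * not flagged IOMAP_F_BOUNDARY.
 */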
void
iomap_ioend_try_merge(struct iomap_ioend *ioend, struct list_head *more_ioends)
{
	struct iomap_ioend *next;

	INIT_LIST_HEAD(&ioend->io_list);

	while ((next = list_first_entry_or_null(more_ioends, struct iomap_ioend,
			io_list))) {
		if (!iomap_ioend_can_merge(ioend, next))
			break;
		list_move_tail(&next->io_list, &ioend->io_list);
		ioend->io_size += next->io_size;
	}
}
EXPORT_SYMBOL_GPL(iomap_ioend_try_merge);
static int
iomap_ioend_compare(void *priv, const struct list_head *a,
		const struct list_head *b)
{
	struct iomap_ioend *ia = container_of(a, struct iomap_ioend, io_list);
	struct iomap_ioend *ib = container_of(b, struct iomap_ioend, io_list);

	if (ia->io_offset < ib->io_offset)
		return -1;
	if (ia->io_offset > ib->io_offset)
		return 1;
	return 0;
}

void
iomap_sort_ioends(struct list_head *ioend_list)
{
	list_sort(NULL, ioend_list, iomap_ioend_compare);
}
EXPORT_SYMBOL_GPL(iomap_sort_ioends);
static void iomap_writepage_end_bio(struct bio *bio)
{
	iomap_finish_ioend(iomap_ioend_from_bio(bio),
			blk_status_to_errno(bio->bi_status));
}
/*
 * Submit the final bio for an ioend.
 *
 * If @error is non-zero, it means that we have a situation where some part of
 * the submission process has failed after we've marked pages for writeback.
 * We cannot cancel ioend directly in that case, so call the bio end I/O handler
 * with the error status here to run the normal I/O completion handler to clear
 * the writeback bit and let the file system process the errors.
 */
static int iomap_submit_ioend(struct iomap_writepage_ctx *wpc, int error)
{
	if (!wpc->ioend)
		return error;

	/*
	 * Let the file systems prepare the I/O submission and hook in an I/O
	 * completion handler.  This also needs to happen after a failure so
	 * that the file system end I/O handler gets called to clean up.
	 */
	if (wpc->ops->prepare_ioend)
		error = wpc->ops->prepare_ioend(wpc->ioend, error);

	if (error) {
		wpc->ioend->io_bio.bi_status = errno_to_blk_status(error);
		bio_endio(&wpc->ioend->io_bio);
	} else {
		submit_bio(&wpc->ioend->io_bio);
	}

	wpc->ioend = NULL;
	return error;
}
static struct iomap_ioend *iomap_alloc_ioend(struct iomap_writepage_ctx *wpc,
		struct writeback_control *wbc, struct inode *inode, loff_t pos)
{
	struct iomap_ioend *ioend;
	struct bio *bio;

	bio = bio_alloc_bioset(wpc->iomap.bdev, BIO_MAX_VECS,
			       REQ_OP_WRITE | wbc_to_write_flags(wbc),
			       GFP_NOFS, &iomap_ioend_bioset);
	bio->bi_iter.bi_sector = iomap_sector(&wpc->iomap, pos);
	bio->bi_end_io = iomap_writepage_end_bio;
	wbc_init_bio(wbc, bio);
	bio->bi_write_hint = inode->i_write_hint;

	ioend = iomap_ioend_from_bio(bio);
	INIT_LIST_HEAD(&ioend->io_list);
	ioend->io_type = wpc->iomap.type;
	ioend->io_flags = wpc->iomap.flags;
	if (pos > wpc->iomap.offset)
		wpc->iomap.flags &= ~IOMAP_F_BOUNDARY;
	ioend->io_inode = inode;
	ioend->io_size = 0;
	ioend->io_offset = pos;
	ioend->io_sector = bio->bi_iter.bi_sector;

	wpc->nr_folios = 0;
	return ioend;
}
static bool iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t pos)
{
	if (wpc->iomap.offset == pos && (wpc->iomap.flags & IOMAP_F_BOUNDARY))
		return false;
	if ((wpc->iomap.flags & IOMAP_F_SHARED) !=
	    (wpc->ioend->io_flags & IOMAP_F_SHARED))
		return false;
	if (wpc->iomap.type != wpc->ioend->io_type)
		return false;
	if (pos != wpc->ioend->io_offset + wpc->ioend->io_size)
		return false;
	if (iomap_sector(&wpc->iomap, pos) !=
	    bio_end_sector(&wpc->ioend->io_bio))
		return false;
	/*
	 * Limit ioend bio chain lengths to minimise IO completion latency.  This
	 * also prevents long tight loops ending page writeback on all the
	 * folios in the ioend.
	 */
	if (wpc->nr_folios >= IOEND_BATCH_SIZE)
		return false;
	return true;
}
/*
 * Test to see if we have an existing ioend structure that we could append to
 * first; otherwise finish off the current ioend and start another.
 *
 * If a new ioend is created and cached, the old ioend is submitted to the block
 * layer instantly.  Batching optimisations are provided by higher level block
 * plugging.
 *
 * At the end of a writeback pass, there will be a cached ioend remaining on the
 * writepage context that the caller will need to submit.
 */
static int iomap_add_to_ioend(struct iomap_writepage_ctx *wpc,
		struct writeback_control *wbc, struct folio *folio,
		struct inode *inode, loff_t pos, unsigned len)
{
	struct iomap_folio_state *ifs = folio->private;
	size_t poff = offset_in_folio(folio, pos);
	int error;

	if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, pos)) {
new_ioend:
		error = iomap_submit_ioend(wpc, 0);
		if (error)
			return error;
		wpc->ioend = iomap_alloc_ioend(wpc, wbc, inode, pos);
	}

	if (!bio_add_folio(&wpc->ioend->io_bio, folio, len, poff))
		goto new_ioend;

	if (ifs)
		atomic_add(len, &ifs->write_bytes_pending);
	wpc->ioend->io_size += len;
	wbc_account_cgroup_owner(wbc, folio, len);
	return 0;
}
static int iomap_writepage_map_blocks(struct iomap_writepage_ctx *wpc,
		struct writeback_control *wbc, struct folio *folio,
		struct inode *inode, u64 pos, unsigned dirty_len,
		unsigned *count)
{
	int error;

	do {
		unsigned map_len;

		error = wpc->ops->map_blocks(wpc, inode, pos, dirty_len);
		if (error)
			break;
		trace_iomap_writepage_map(inode, pos, dirty_len, &wpc->iomap);

		map_len = min_t(u64, dirty_len,
			wpc->iomap.offset + wpc->iomap.length - pos);
		WARN_ON_ONCE(!folio->private && map_len < dirty_len);

		switch (wpc->iomap.type) {
		case IOMAP_INLINE:
			WARN_ON_ONCE(1);
			error = -EIO;
			break;
		case IOMAP_HOLE:
			break;
		default:
			error = iomap_add_to_ioend(wpc, wbc, folio, inode, pos,
					map_len);
			if (!error)
				(*count)++;
			break;
		}
		dirty_len -= map_len;
		pos += map_len;
	} while (dirty_len && !error);

	/*
	 * We cannot cancel the ioend directly here on error.  We may have
	 * already set other pages under writeback and hence we have to run I/O
	 * completion to mark the error state of the pages under writeback
	 * appropriately.
	 *
	 * Just let the file system know what portion of the folio failed to
	 * map.
	 */
	if (error && wpc->ops->discard_folio)
		wpc->ops->discard_folio(folio, pos);
	return error;
}
/*
 * Check interaction of the folio with the file end.
 *
 * If the folio is entirely beyond i_size, return false.  If it straddles
 * i_size, adjust end_pos and zero all data beyond i_size.
 */
static bool iomap_writepage_handle_eof(struct folio *folio, struct inode *inode,
		u64 *end_pos)
{
	u64 isize = i_size_read(inode);

	if (*end_pos > isize) {
		size_t poff = offset_in_folio(folio, isize);
		pgoff_t end_index = isize >> PAGE_SHIFT;

		/*
		 * If the folio is entirely outside of i_size, skip it.
		 *
		 * This can happen due to a truncate operation that is in
		 * progress and in that case truncate will finish it off once
		 * we've dropped the folio lock.
		 *
		 * Note that the pgoff_t used for end_index is an unsigned long.
		 * If the given offset is greater than 16TB on a 32-bit system,
		 * then if we checked if the folio is fully outside i_size with
		 * "if (folio->index >= end_index + 1)", "end_index + 1" would
		 * overflow and evaluate to 0.  Hence this folio would be
		 * redirtied and written out repeatedly, which would result in
		 * an infinite loop; the user program performing this operation
		 * would hang.  Instead, we can detect this situation by
		 * checking if the folio is totally beyond i_size or if its
		 * offset is just equal to the EOF.
		 */
		if (folio->index > end_index ||
		    (folio->index == end_index && poff == 0))
			return false;

		/*
		 * The folio straddles i_size.
		 *
		 * It must be zeroed out on each and every writepage invocation
		 * because it may be mmapped:
		 *
		 *    A file is mapped in multiples of the page size.  For a
		 *    file that is not a multiple of the page size, the
		 *    remaining memory is zeroed when mapped, and writes to that
		 *    region are not written out to the file.
		 *
		 * Also adjust the writeback range to skip all blocks entirely
		 * beyond i_size.
		 */
		folio_zero_segment(folio, poff, folio_size(folio));
		*end_pos = round_up(isize, i_blocksize(inode));
	}

	return true;
}
static int iomap_writepage_map(struct iomap_writepage_ctx *wpc,
		struct writeback_control *wbc, struct folio *folio)
{
	struct iomap_folio_state *ifs = folio->private;
	struct inode *inode = folio->mapping->host;
	u64 pos = folio_pos(folio);
	u64 end_pos = pos + folio_size(folio);
	unsigned count = 0;
	int error = 0;
	u32 rlen;

	WARN_ON_ONCE(!folio_test_locked(folio));
	WARN_ON_ONCE(folio_test_dirty(folio));
	WARN_ON_ONCE(folio_test_writeback(folio));

	trace_iomap_writepage(inode, pos, folio_size(folio));

	if (!iomap_writepage_handle_eof(folio, inode, &end_pos)) {
		folio_unlock(folio);
		return 0;
	}
	WARN_ON_ONCE(end_pos <= pos);

	if (i_blocks_per_folio(inode, folio) > 1) {
		if (!ifs) {
			ifs = ifs_alloc(inode, folio, 0);
			iomap_set_range_dirty(folio, 0, end_pos - pos);
		}

		/*
		 * Keep the I/O completion handler from clearing the writeback
		 * bit until we have submitted all blocks by adding a bias to
		 * ifs->write_bytes_pending, which is dropped after submitting
		 * all blocks.
		 */
		WARN_ON_ONCE(atomic_read(&ifs->write_bytes_pending) != 0);
		atomic_inc(&ifs->write_bytes_pending);
	}

	/*
	 * Set the writeback bit ASAP, as the I/O completion for the single
	 * block per folio case can hit as soon as we're submitting the bio.
	 */
	folio_start_writeback(folio);

	/*
	 * Walk through the folio to find dirty areas to write back.
	 */
	while ((rlen = iomap_find_dirty_range(folio, &pos, end_pos))) {
		error = iomap_writepage_map_blocks(wpc, wbc, folio, inode,
				pos, rlen, &count);
		if (error)
			break;
		pos += rlen;
	}

	if (count)
		wpc->nr_folios++;

	/*
	 * We can have dirty bits set past end of file in page_mkwrite path
	 * while mapping the last partial folio.  Hence it's better to clear
	 * all the dirty bits in the folio here.
	 */
	iomap_clear_range_dirty(folio, 0, folio_size(folio));

	/*
	 * Usually the writeback bit is cleared by the I/O completion handler.
	 * But we may end up either not actually writing any blocks, or (when
	 * there are multiple blocks in a folio) all I/O might have finished
	 * already at this point.  In that case we need to clear the writeback
	 * bit ourselves right after unlocking the page.
	 */
	folio_unlock(folio);
	if (ifs) {
		if (atomic_dec_and_test(&ifs->write_bytes_pending))
			folio_end_writeback(folio);
	} else {
		if (!count)
			folio_end_writeback(folio);
	}
	mapping_set_error(inode->i_mapping, error);
	return error;
}
int
iomap_writepages(struct address_space *mapping, struct writeback_control *wbc,
		struct iomap_writepage_ctx *wpc,
		const struct iomap_writeback_ops *ops)
{
	struct folio *folio = NULL;
	int error;

	/*
	 * Writeback from reclaim context should never happen except in the case
	 * of a VM regression so warn about it and refuse to write the data.
	 */
	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC | PF_KSWAPD)) ==
			PF_MEMALLOC))
		return -EIO;

	wpc->ops = ops;
	while ((folio = writeback_iter(mapping, wbc, folio, &error)))
		error = iomap_writepage_map(wpc, wbc, folio);
	return iomap_submit_ioend(wpc, error);
}
EXPORT_SYMBOL_GPL(iomap_writepages);
static int __init iomap_buffered_init(void)
{
	return bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE),
			   offsetof(struct iomap_ioend, io_bio),
			   BIOSET_NEED_BVECS);
}
fs_initcall(iomap_buffered_init);