/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (c) 2016 Christoph Hellwig.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/dax.h>
#include <linux/sched/signal.h>

#include "internal.h"
/*
 * Execute an iomap write on a segment of the mapping that spans a
 * contiguous range of pages that have identical block mapping state.
 *
 * This avoids the need to map pages individually, do individual allocations
 * for each page and, most importantly, avoids the need for filesystem-specific
 * locking per page. Instead, all the operations are amortised over the entire
 * range of pages. It is assumed that the filesystems will lock whatever
 * resources they require in the iomap_begin call, and release them in the
 * iomap_end call.
 */
loff_t
iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
                const struct iomap_ops *ops, void *data, iomap_actor_t actor)
{
        struct iomap iomap = { 0 };
        loff_t written = 0, ret;
        /*
         * Need to map a range from start position for length bytes. This can
         * span multiple pages - it is only guaranteed to return a range of a
         * single type of pages (e.g. all into a hole, all mapped or all
         * unwritten). Failure at this point has nothing to undo.
         *
         * If allocation is required for this range, reserve the space now so
         * that the allocation is guaranteed to succeed later on. Once we copy
         * the data into the page cache pages, then we cannot fail otherwise we
         * expose transient stale data. If the reserve fails, we can safely
         * back out at this point as there is nothing to undo.
         */
        ret = ops->iomap_begin(inode, pos, length, flags, &iomap);
        if (ret)
                return ret;
        if (WARN_ON(iomap.offset > pos))
                return -EIO;
        if (WARN_ON(iomap.length == 0))
                return -EIO;
        /*
         * Cut down the length to the one actually provided by the filesystem,
         * as it might not be able to give us the whole size that we requested.
         */
        if (iomap.offset + iomap.length < pos + length)
                length = iomap.offset + iomap.length - pos;
        /*
         * Now that we have guaranteed that the space allocation will succeed,
         * we can do the copy-in page by page without having to worry about
         * failures exposing transient data.
         */
        written = actor(inode, pos, length, data, &iomap);
        /*
         * Now the data has been copied, commit the range we've copied. This
         * should not fail unless the filesystem has had a fatal error.
         */
        if (ops->iomap_end) {
                ret = ops->iomap_end(inode, pos, length,
                                     written > 0 ? written : 0,
                                     flags, &iomap);
        }

        return written ? written : ret;
}
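
/*
 * Note on the actor contract used by iomap_apply() above: the actor receives
 * pos and a length already trimmed to the single extent returned by
 * ->iomap_begin, plus the caller's opaque data pointer, and returns either
 * the number of bytes it processed or a negative errno; that value is what
 * gets handed to ->iomap_end and ultimately returned to the caller.
 */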
static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
        loff_t i_size = i_size_read(inode);

        /*
         * Only truncate newly allocated pages beyond EOF, even if the
         * write started inside the existing inode size.
         */
        if (pos + len > i_size)
                truncate_pagecache_range(inode, max(pos, i_size), pos + len);
}
static int
iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
                struct page **pagep, struct iomap *iomap)
{
        pgoff_t index = pos >> PAGE_SHIFT;
        struct page *page;
        int status;

        BUG_ON(pos + len > iomap->offset + iomap->length);

        if (fatal_signal_pending(current))
                return -EINTR;

        page = grab_cache_page_write_begin(inode->i_mapping, index, flags);
        if (!page)
                return -ENOMEM;

        status = __block_write_begin_int(page, pos, len, NULL, iomap);
        if (unlikely(status)) {
                unlock_page(page);
                put_page(page);
                page = NULL;

                iomap_write_failed(inode, pos, len);
        }

        *pagep = page;
        return status;
}
static int
iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
                unsigned copied, struct page *page)
{
        int ret;

        ret = generic_write_end(NULL, inode->i_mapping, pos, len,
                        copied, page, NULL);
        if (ret < len)
                iomap_write_failed(inode, pos, len);
        return ret;
}
static loff_t
iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
                struct iomap *iomap)
{
        struct iov_iter *i = data;
        long status = 0;
        ssize_t written = 0;
        unsigned int flags = AOP_FLAG_NOFS;

        do {
                struct page *page;
                unsigned long offset;   /* Offset into pagecache page */
                unsigned long bytes;    /* Bytes to write to page */
                size_t copied;          /* Bytes copied from user */

                offset = (pos & (PAGE_SIZE - 1));
                bytes = min_t(unsigned long, PAGE_SIZE - offset,
                                                iov_iter_count(i));
again:
                if (bytes > length)
                        bytes = length;

                /*
                 * Bring in the user page that we will copy from _first_.
                 * Otherwise there's a nasty deadlock on copying from the
                 * same page as we're writing to, without it being marked
                 * up-to-date.
                 *
                 * Not only is this an optimisation, but it is also required
                 * to check that the address is actually valid, when atomic
                 * usercopies are used, below.
                 */
                if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
                        status = -EFAULT;
                        break;
                }

                status = iomap_write_begin(inode, pos, bytes, flags, &page,
                                iomap);
                if (unlikely(status))
                        break;

                if (mapping_writably_mapped(inode->i_mapping))
                        flush_dcache_page(page);

                copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);

                flush_dcache_page(page);

                status = iomap_write_end(inode, pos, bytes, copied, page);
                if (unlikely(status < 0))
                        break;
                copied = status;

                cond_resched();

                iov_iter_advance(i, copied);
                if (unlikely(copied == 0)) {
                        /*
                         * If we were unable to copy any data at all, we must
                         * fall back to a single segment length write.
                         *
                         * If we didn't fallback here, we could livelock
                         * because not all segments in the iov can be copied at
                         * once without a pagefault.
                         */
                        bytes = min_t(unsigned long, PAGE_SIZE - offset,
                                                iov_iter_single_seg_count(i));
                        goto again;
                }
                pos += copied;
                written += copied;
                length -= copied;

                balance_dirty_pages_ratelimited(inode->i_mapping);
        } while (iov_iter_count(i) && length);

        return written ? written : status;
}
ssize_t
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter,
                const struct iomap_ops *ops)
{
        struct inode *inode = iocb->ki_filp->f_mapping->host;
        loff_t pos = iocb->ki_pos, ret = 0, written = 0;

        while (iov_iter_count(iter)) {
                ret = iomap_apply(inode, pos, iov_iter_count(iter),
                                IOMAP_WRITE, ops, iter, iomap_write_actor);
                if (ret <= 0)
                        break;
                pos += ret;
                written += ret;
        }

        return written ? written : ret;
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
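
/*
 * Illustrative only (not part of this file): a filesystem's ->write_iter
 * typically takes the inode lock, runs generic_write_checks(), and then,
 * with a hypothetical my_iomap_ops, does something like
 *
 *	written = iomap_file_buffered_write(iocb, from, &my_iomap_ops);
 *	if (written > 0)
 *		iocb->ki_pos += written;
 *
 * before dropping the lock and syncing if required; note this helper does
 * not advance iocb->ki_pos itself.
 */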
static struct page *
__iomap_read_page(struct inode *inode, loff_t offset)
{
        struct address_space *mapping = inode->i_mapping;
        struct page *page;

        page = read_mapping_page(mapping, offset >> PAGE_SHIFT, NULL);
        if (IS_ERR(page))
                return page;
        if (!PageUptodate(page)) {
                put_page(page);
                return ERR_PTR(-EIO);
        }
        return page;
}
static loff_t
iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
                struct iomap *iomap)
{
        long status = 0;
        ssize_t written = 0;

        /* don't bother with blocks that are not shared to start with */
        if (!(iomap->flags & IOMAP_F_SHARED))
                return length;
        /* don't bother with holes or unwritten extents */
        if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
                return length;

        do {
                struct page *page, *rpage;
                unsigned long offset;   /* Offset into pagecache page */
                unsigned long bytes;    /* Bytes to write to page */

                offset = (pos & (PAGE_SIZE - 1));
                bytes = min_t(loff_t, PAGE_SIZE - offset, length);

                rpage = __iomap_read_page(inode, pos);
                if (IS_ERR(rpage))
                        return PTR_ERR(rpage);

                status = iomap_write_begin(inode, pos, bytes,
                                AOP_FLAG_NOFS, &page, iomap);
                put_page(rpage);
                if (unlikely(status))
                        return status;

                WARN_ON_ONCE(!PageUptodate(page));

                status = iomap_write_end(inode, pos, bytes, bytes, page);
                if (unlikely(status <= 0)) {
                        if (WARN_ON_ONCE(status == 0))
                                return -EIO;
                        return status;
                }

                cond_resched();

                pos += status;
                written += status;
                length -= status;

                balance_dirty_pages_ratelimited(inode->i_mapping);
        } while (length);

        return written;
}
int
iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
                const struct iomap_ops *ops)
{
        loff_t ret;

        while (len) {
                ret = iomap_apply(inode, pos, len, IOMAP_WRITE, ops, NULL,
                                iomap_dirty_actor);
                if (ret <= 0)
                        return ret;
                pos += ret;
                len -= ret;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(iomap_file_dirty);
static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
                unsigned bytes, struct iomap *iomap)
{
        struct page *page;
        int status;

        status = iomap_write_begin(inode, pos, bytes, AOP_FLAG_NOFS, &page,
                        iomap);
        if (status)
                return status;

        zero_user(page, offset, bytes);
        mark_page_accessed(page);

        return iomap_write_end(inode, pos, bytes, bytes, page);
}
static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes,
                struct iomap *iomap)
{
        sector_t sector = (iomap->addr +
                           (pos & PAGE_MASK) - iomap->offset) >> 9;

        return __dax_zero_page_range(iomap->bdev, iomap->dax_dev, sector,
                        offset, bytes);
}
static loff_t
iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
                void *data, struct iomap *iomap)
{
        bool *did_zero = data;
        loff_t written = 0;
        int status;

        /* already zeroed?  we're done. */
        if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
                return count;

        do {
                unsigned offset, bytes;

                offset = pos & (PAGE_SIZE - 1); /* Within page */
                bytes = min_t(loff_t, PAGE_SIZE - offset, count);

                if (IS_DAX(inode))
                        status = iomap_dax_zero(pos, offset, bytes, iomap);
                else
                        status = iomap_zero(inode, pos, offset, bytes, iomap);
                if (status < 0)
                        return status;

                pos += bytes;
                count -= bytes;
                written += bytes;
                if (did_zero)
                        *did_zero = true;
        } while (count > 0);

        return written;
}
int
iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
                const struct iomap_ops *ops)
{
        loff_t ret;

        while (len > 0) {
                ret = iomap_apply(inode, pos, len, IOMAP_ZERO,
                                ops, did_zero, iomap_zero_range_actor);
                if (ret <= 0)
                        return ret;

                pos += ret;
                len -= ret;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(iomap_zero_range);
int
iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
                const struct iomap_ops *ops)
{
        unsigned int blocksize = i_blocksize(inode);
        unsigned int off = pos & (blocksize - 1);

        /* Block boundary? Nothing to do */
        if (!off)
                return 0;
        return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
}
EXPORT_SYMBOL_GPL(iomap_truncate_page);
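
/*
 * Worked example of the arithmetic above: with a 4096-byte block size and
 * pos == 6144, off == 6144 & 4095 == 2048, so the remaining 2048 bytes of
 * that block (file offsets 6144..8191) are zeroed; a block-aligned pos gives
 * off == 0 and nothing is done.
 */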
static loff_t
iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
                void *data, struct iomap *iomap)
{
        struct page *page = data;
        int ret;

        ret = __block_write_begin_int(page, pos, length, NULL, iomap);
        if (ret)
                return ret;

        block_commit_write(page, 0, length);
        return length;
}
int iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
{
        struct page *page = vmf->page;
        struct inode *inode = file_inode(vmf->vma->vm_file);
        unsigned long length;
        loff_t offset, size;
        ssize_t ret;

        lock_page(page);
        size = i_size_read(inode);
        if ((page->mapping != inode->i_mapping) ||
            (page_offset(page) > size)) {
                /* We overload EFAULT to mean page got truncated */
                ret = -EFAULT;
                goto out_unlock;
        }

        /* page is wholly or partially inside EOF */
        if (((page->index + 1) << PAGE_SHIFT) > size)
                length = size & ~PAGE_MASK;
        else
                length = PAGE_SIZE;

        offset = page_offset(page);
        while (length > 0) {
                ret = iomap_apply(inode, offset, length,
                                IOMAP_WRITE | IOMAP_FAULT, ops, page,
                                iomap_page_mkwrite_actor);
                if (unlikely(ret <= 0))
                        goto out_unlock;
                offset += ret;
                length -= ret;
        }

        set_page_dirty(page);
        wait_for_stable_page(page);
        return VM_FAULT_LOCKED;
out_unlock:
        unlock_page(page);
        return block_page_mkwrite_return(ret);
}
EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
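
/*
 * Illustrative only: a filesystem typically reaches this from its
 * vm_operations_struct, e.g. a hypothetical wrapper
 *
 *	static int my_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		return iomap_page_mkwrite(vmf, &my_iomap_ops);
 *	}
 *
 * On success the page is returned dirtied and still locked, which is what
 * the VM_FAULT_LOCKED return value tells the fault path.
 */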
struct fiemap_ctx {
        struct fiemap_extent_info *fi;
        struct iomap prev;
};
static int iomap_to_fiemap(struct fiemap_extent_info *fi,
                struct iomap *iomap, u32 flags)
{
        switch (iomap->type) {
        case IOMAP_HOLE:
                /* skip holes */
                return 0;
        case IOMAP_DELALLOC:
                flags |= FIEMAP_EXTENT_DELALLOC | FIEMAP_EXTENT_UNKNOWN;
                break;
        case IOMAP_UNWRITTEN:
                flags |= FIEMAP_EXTENT_UNWRITTEN;
                break;
        case IOMAP_MAPPED:
                break;
        }

        if (iomap->flags & IOMAP_F_MERGED)
                flags |= FIEMAP_EXTENT_MERGED;
        if (iomap->flags & IOMAP_F_SHARED)
                flags |= FIEMAP_EXTENT_SHARED;
        if (iomap->flags & IOMAP_F_DATA_INLINE)
                flags |= FIEMAP_EXTENT_DATA_INLINE;

        return fiemap_fill_next_extent(fi, iomap->offset,
                        iomap->addr != IOMAP_NULL_ADDR ? iomap->addr : 0,
                        iomap->length, flags);
}
static loff_t
iomap_fiemap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
                struct iomap *iomap)
{
        struct fiemap_ctx *ctx = data;
        loff_t ret = length;

        if (iomap->type == IOMAP_HOLE)
                return length;

        ret = iomap_to_fiemap(ctx->fi, &ctx->prev, 0);
        ctx->prev = *iomap;
        switch (ret) {
        case 0:         /* success */
                return length;
        case 1:         /* extent array full */
                return 0;
        default:
                return ret;
        }
}
int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
                loff_t start, loff_t len, const struct iomap_ops *ops)
{
        struct fiemap_ctx ctx;
        loff_t ret;

        memset(&ctx, 0, sizeof(ctx));
        ctx.fi = fi;
        ctx.prev.type = IOMAP_HOLE;

        ret = fiemap_check_flags(fi, FIEMAP_FLAG_SYNC);
        if (ret)
                return ret;

        if (fi->fi_flags & FIEMAP_FLAG_SYNC) {
                ret = filemap_write_and_wait(inode->i_mapping);
                if (ret)
                        return ret;
        }

        while (len > 0) {
                ret = iomap_apply(inode, start, len, IOMAP_REPORT, ops, &ctx,
                                iomap_fiemap_actor);
                /* inode with no (attribute) mapping will give ENOENT */
                if (ret == -ENOENT)
                        break;
                if (ret < 0)
                        return ret;
                if (ret == 0)
                        break;

                start += ret;
                len -= ret;
        }

        if (ctx.prev.type != IOMAP_HOLE) {
                ret = iomap_to_fiemap(fi, &ctx.prev, FIEMAP_EXTENT_LAST);
                if (ret < 0)
                        return ret;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(iomap_fiemap);
static loff_t
iomap_seek_hole_actor(struct inode *inode, loff_t offset, loff_t length,
                void *data, struct iomap *iomap)
{
        switch (iomap->type) {
        case IOMAP_UNWRITTEN:
                offset = page_cache_seek_hole_data(inode, offset, length,
                                SEEK_HOLE);
                if (offset < 0)
                        return length;
                /* fall through */
        case IOMAP_HOLE:
                *(loff_t *)data = offset;
                return 0;
        default:
                return length;
        }
}
loff_t
iomap_seek_hole(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
{
        loff_t size = i_size_read(inode);
        loff_t length = size - offset;
        loff_t ret;

        /* Nothing to be found before or beyond the end of the file. */
        if (offset < 0 || offset >= size)
                return -ENXIO;

        while (length > 0) {
                ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
                                &offset, iomap_seek_hole_actor);
                if (ret < 0)
                        return ret;
                if (ret == 0)
                        break;

                offset += ret;
                length -= ret;
        }

        return offset;
}
EXPORT_SYMBOL_GPL(iomap_seek_hole);
static loff_t
iomap_seek_data_actor(struct inode *inode, loff_t offset, loff_t length,
                void *data, struct iomap *iomap)
{
        switch (iomap->type) {
        case IOMAP_HOLE:
                return length;
        case IOMAP_UNWRITTEN:
                offset = page_cache_seek_hole_data(inode, offset, length,
                                SEEK_DATA);
                if (offset < 0)
                        return length;
                /* fall through */
        default:
                *(loff_t *)data = offset;
                return 0;
        }
}
loff_t
iomap_seek_data(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
{
        loff_t size = i_size_read(inode);
        loff_t length = size - offset;
        loff_t ret;

        /* Nothing to be found before or beyond the end of the file. */
        if (offset < 0 || offset >= size)
                return -ENXIO;

        while (length > 0) {
                ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
                                &offset, iomap_seek_data_actor);
                if (ret < 0)
                        return ret;
                if (ret == 0)
                        break;

                offset += ret;
                length -= ret;
        }

        if (length <= 0)
                return -ENXIO;
        return offset;
}
EXPORT_SYMBOL_GPL(iomap_seek_data);
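
/*
 * The two helpers above are intended to back the SEEK_HOLE/SEEK_DATA cases
 * of a filesystem's ->llseek implementation: they walk the mapping with
 * IOMAP_REPORT, let page_cache_seek_hole_data() account for dirty page cache
 * over unwritten extents, and return either the resolved offset or -ENXIO
 * for offsets outside the file (and, for SEEK_DATA, when no data is found
 * before EOF).
 */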
/*
 * Private flags for iomap_dio, must not overlap with the public ones in
 * iomap.h:
 */
#define IOMAP_DIO_WRITE		(1 << 30)
#define IOMAP_DIO_DIRTY		(1 << 31)

struct iomap_dio {
        struct kiocb            *iocb;
        iomap_dio_end_io_t      *end_io;
        loff_t                  i_size;
        loff_t                  size;
        atomic_t                ref;
        unsigned                flags;
        int                     error;

        union {
                /* used during submission and for synchronous completion: */
                struct {
                        struct iov_iter         *iter;
                        struct task_struct      *waiter;
                        struct request_queue    *last_queue;
                        blk_qc_t                cookie;
                } submit;

                /* used for aio completion: */
                struct {
                        struct work_struct      work;
                } aio;
        };
};
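
/*
 * The submit fields are only used while bios are being built and, for a
 * synchronous kiocb, while the issuer sleeps waiting for the last bio; the
 * aio work item is only used after submission has finished for an async
 * kiocb. The two phases never overlap, which is why they can share storage
 * in a union.
 */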
static ssize_t iomap_dio_complete(struct iomap_dio *dio)
{
        struct kiocb *iocb = dio->iocb;
        struct inode *inode = file_inode(iocb->ki_filp);
        loff_t offset = iocb->ki_pos;
        ssize_t ret;

        if (dio->end_io) {
                ret = dio->end_io(iocb,
                                dio->error ? dio->error : dio->size,
                                dio->flags);
        } else {
                ret = dio->error;
        }

        if (likely(!ret)) {
                ret = dio->size;
                /* check for short read */
                if (offset + ret > dio->i_size &&
                    !(dio->flags & IOMAP_DIO_WRITE))
                        ret = dio->i_size - offset;
                iocb->ki_pos += ret;
        }

        /*
         * Try again to invalidate clean pages which might have been cached by
         * non-direct readahead, or faulted in by get_user_pages() if the source
         * of the write was an mmap'ed region of the file we're writing.  Either
         * one is a pretty crazy thing to do, so we don't support it 100%.  If
         * this invalidation fails, tough, the write still worked...
         *
         * And this page cache invalidation has to be after dio->end_io(), as
         * some filesystems convert unwritten extents to real allocations in
         * end_io() when necessary, otherwise a racing buffer read would cache
         * zeros from unwritten extents.
         */
        if (!dio->error &&
            (dio->flags & IOMAP_DIO_WRITE) && inode->i_mapping->nrpages) {
                int err;

                err = invalidate_inode_pages2_range(inode->i_mapping,
                                offset >> PAGE_SHIFT,
                                (offset + dio->size - 1) >> PAGE_SHIFT);
                if (err)
                        dio_warn_stale_pagecache(iocb->ki_filp);
        }

        inode_dio_end(file_inode(iocb->ki_filp));
        kfree(dio);

        return ret;
}
static void iomap_dio_complete_work(struct work_struct *work)
{
        struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
        struct kiocb *iocb = dio->iocb;
        bool is_write = (dio->flags & IOMAP_DIO_WRITE);
        ssize_t ret;

        ret = iomap_dio_complete(dio);
        if (is_write && ret > 0)
                ret = generic_write_sync(iocb, ret);
        iocb->ki_complete(iocb, ret, 0);
}
/*
 * Set an error in the dio if none is set yet.  We have to use cmpxchg
 * as the submission context and the completion context(s) can race to
 * update the error.
 */
static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
{
        cmpxchg(&dio->error, 0, ret);
}
static void iomap_dio_bio_end_io(struct bio *bio)
{
        struct iomap_dio *dio = bio->bi_private;
        bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);

        if (bio->bi_status)
                iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));

        if (atomic_dec_and_test(&dio->ref)) {
                if (is_sync_kiocb(dio->iocb)) {
                        struct task_struct *waiter = dio->submit.waiter;

                        WRITE_ONCE(dio->submit.waiter, NULL);
                        wake_up_process(waiter);
                } else if (dio->flags & IOMAP_DIO_WRITE) {
                        struct inode *inode = file_inode(dio->iocb->ki_filp);

                        INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
                        queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
                } else {
                        iomap_dio_complete_work(&dio->aio.work);
                }
        }

        if (should_dirty) {
                bio_check_pages_dirty(bio);
        } else {
                struct bio_vec *bvec;
                int i;

                bio_for_each_segment_all(bvec, bio, i)
                        put_page(bvec->bv_page);
                bio_put(bio);
        }
}
static blk_qc_t
iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
                unsigned len)
{
        struct page *page = ZERO_PAGE(0);
        struct bio *bio;

        bio = bio_alloc(GFP_KERNEL, 1);
        bio_set_dev(bio, iomap->bdev);
        bio->bi_iter.bi_sector =
                (iomap->addr + pos - iomap->offset) >> 9;
        bio->bi_private = dio;
        bio->bi_end_io = iomap_dio_bio_end_io;

        get_page(page);
        if (bio_add_page(bio, page, len, 0) != len)
                BUG();
        bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC | REQ_IDLE);

        atomic_inc(&dio->ref);
        return submit_bio(bio);
}
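
/*
 * iomap_dio_zero() writes out of ZERO_PAGE(0) and is used by the actor below
 * to pad a partial filesystem block: when a write into a newly allocated or
 * unwritten extent is not block aligned, the region from the start of the
 * block to the write offset, and from the end of the write to the end of the
 * block, gets zeroed so no stale data is exposed.
 */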
static loff_t
iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
                void *data, struct iomap *iomap)
{
        struct iomap_dio *dio = data;
        unsigned int blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev));
        unsigned int fs_block_size = i_blocksize(inode), pad;
        unsigned int align = iov_iter_alignment(dio->submit.iter);
        struct iov_iter iter;
        struct bio *bio;
        bool need_zeroout = false;
        int nr_pages, ret;
        size_t copied = 0;

        if ((pos | length | align) & ((1 << blkbits) - 1))
                return -EINVAL;

        switch (iomap->type) {
        case IOMAP_HOLE:
                if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
                        return -EIO;
                /* fall through */
        case IOMAP_UNWRITTEN:
                if (!(dio->flags & IOMAP_DIO_WRITE)) {
                        length = iov_iter_zero(length, dio->submit.iter);
                        dio->size += length;
                        return length;
                }
                dio->flags |= IOMAP_DIO_UNWRITTEN;
                need_zeroout = true;
                break;
        case IOMAP_MAPPED:
                if (iomap->flags & IOMAP_F_SHARED)
                        dio->flags |= IOMAP_DIO_COW;
                if (iomap->flags & IOMAP_F_NEW)
                        need_zeroout = true;
                break;
        default:
                WARN_ON_ONCE(1);
                return -EIO;
        }

        /*
         * Operate on a partial iter trimmed to the extent we were called for.
         * We'll update the iter in the dio once we're done with this extent.
         */
        iter = *dio->submit.iter;
        iov_iter_truncate(&iter, length);

        nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
        if (nr_pages <= 0)
                return nr_pages;

        if (need_zeroout) {
                /* zero out from the start of the block to the write offset */
                pad = pos & (fs_block_size - 1);
                if (pad)
                        iomap_dio_zero(dio, iomap, pos - pad, pad);
        }

        do {
                size_t n;

                if (dio->error) {
                        iov_iter_revert(dio->submit.iter, copied);
                        return 0;
                }

                bio = bio_alloc(GFP_KERNEL, nr_pages);
                bio_set_dev(bio, iomap->bdev);
                bio->bi_iter.bi_sector =
                        (iomap->addr + pos - iomap->offset) >> 9;
                bio->bi_write_hint = dio->iocb->ki_hint;
                bio->bi_private = dio;
                bio->bi_end_io = iomap_dio_bio_end_io;

                ret = bio_iov_iter_get_pages(bio, &iter);
                if (unlikely(ret)) {
                        bio_put(bio);
                        return copied ? copied : ret;
                }

                n = bio->bi_iter.bi_size;
                if (dio->flags & IOMAP_DIO_WRITE) {
                        bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC | REQ_IDLE);
                        task_io_account_write(n);
                } else {
                        bio_set_op_attrs(bio, REQ_OP_READ, 0);
                        if (dio->flags & IOMAP_DIO_DIRTY)
                                bio_set_pages_dirty(bio);
                }

                iov_iter_advance(dio->submit.iter, n);

                dio->size += n;
                pos += n;
                copied += n;

                nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);

                atomic_inc(&dio->ref);

                dio->submit.last_queue = bdev_get_queue(iomap->bdev);
                dio->submit.cookie = submit_bio(bio);
        } while (nr_pages);

        if (need_zeroout) {
                /* zero out from the end of the write to the end of the block */
                pad = pos & (fs_block_size - 1);
                if (pad)
                        iomap_dio_zero(dio, iomap, pos, fs_block_size - pad);
        }
        return copied;
}
ssize_t
iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
                const struct iomap_ops *ops, iomap_dio_end_io_t end_io)
{
        struct address_space *mapping = iocb->ki_filp->f_mapping;
        struct inode *inode = file_inode(iocb->ki_filp);
        size_t count = iov_iter_count(iter);
        loff_t pos = iocb->ki_pos, start = pos;
        loff_t end = iocb->ki_pos + count - 1, ret = 0;
        unsigned int flags = IOMAP_DIRECT;
        struct blk_plug plug;
        struct iomap_dio *dio;

        lockdep_assert_held(&inode->i_rwsem);

        if (!count)
                return 0;

        dio = kmalloc(sizeof(*dio), GFP_KERNEL);
        if (!dio)
                return -ENOMEM;

        dio->iocb = iocb;
        atomic_set(&dio->ref, 1);
        dio->size = 0;
        dio->i_size = i_size_read(inode);
        dio->end_io = end_io;
        dio->error = 0;
        dio->flags = 0;

        dio->submit.iter = iter;
        if (is_sync_kiocb(iocb)) {
                dio->submit.waiter = current;
                dio->submit.cookie = BLK_QC_T_NONE;
                dio->submit.last_queue = NULL;
        }

        if (iov_iter_rw(iter) == READ) {
                if (pos >= dio->i_size)
                        goto out_free_dio;

                if (iter->type == ITER_IOVEC)
                        dio->flags |= IOMAP_DIO_DIRTY;
        } else {
                dio->flags |= IOMAP_DIO_WRITE;
                flags |= IOMAP_WRITE;
        }

        if (iocb->ki_flags & IOCB_NOWAIT) {
                if (filemap_range_has_page(mapping, start, end)) {
                        ret = -EAGAIN;
                        goto out_free_dio;
                }
                flags |= IOMAP_NOWAIT;
        }

        ret = filemap_write_and_wait_range(mapping, start, end);
        if (ret)
                goto out_free_dio;

        /*
         * Try to invalidate cache pages for the range we're direct
         * writing.  If this invalidation fails, tough, the write will
         * still work, but racing two incompatible write paths is a
         * pretty crazy thing to do, so we don't support it 100%.
         */
        ret = invalidate_inode_pages2_range(mapping,
                        start >> PAGE_SHIFT, end >> PAGE_SHIFT);
        if (ret)
                dio_warn_stale_pagecache(iocb->ki_filp);
        ret = 0;

        if (iov_iter_rw(iter) == WRITE && !is_sync_kiocb(iocb) &&
            !inode->i_sb->s_dio_done_wq) {
                ret = sb_init_dio_done_wq(inode->i_sb);
                if (ret < 0)
                        goto out_free_dio;
        }

        inode_dio_begin(inode);

        blk_start_plug(&plug);
        do {
                ret = iomap_apply(inode, pos, count, flags, ops, dio,
                                iomap_dio_actor);
                if (ret <= 0) {
                        /* magic error code to fall back to buffered I/O */
                        if (ret == -ENOTBLK)
                                ret = 0;
                        break;
                }
                pos += ret;

                if (iov_iter_rw(iter) == READ && pos >= dio->i_size)
                        break;
        } while ((count = iov_iter_count(iter)) > 0);
        blk_finish_plug(&plug);

        if (ret < 0)
                iomap_dio_set_error(dio, ret);

        if (!atomic_dec_and_test(&dio->ref)) {
                if (!is_sync_kiocb(iocb))
                        return -EIOCBQUEUED;

                for (;;) {
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        if (!READ_ONCE(dio->submit.waiter))
                                break;

                        if (!(iocb->ki_flags & IOCB_HIPRI) ||
                            !dio->submit.last_queue ||
                            !blk_poll(dio->submit.last_queue,
                                        dio->submit.cookie))
                                io_schedule();
                }
                __set_current_state(TASK_RUNNING);
        }

        ret = iomap_dio_complete(dio);

        return ret;

out_free_dio:
        kfree(dio);
        return ret;
}
EXPORT_SYMBOL_GPL(iomap_dio_rw);
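
/*
 * Illustrative only: a filesystem's direct read/write path typically holds
 * the inode lock (as asserted above via lockdep_assert_held) and calls, with
 * some hypothetical my_iomap_ops and my_dio_end_io,
 *
 *	ret = iomap_dio_rw(iocb, iter, &my_iomap_ops, my_dio_end_io);
 *
 * For a synchronous kiocb the call waits for all bios and returns the final
 * byte count or error; for an async kiocb it may return -EIOCBQUEUED and
 * complete later through iocb->ki_complete().
 */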