/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (c) 2016 Christoph Hellwig.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/dax.h>
#include <linux/sched/signal.h>

#include "internal.h"
/*
 * Execute an iomap write on a segment of the mapping that spans a
 * contiguous range of pages that have identical block mapping state.
 *
 * This avoids the need to map pages individually, do individual allocations
 * for each page and most importantly avoid the need for filesystem specific
 * locking per page. Instead, all the operations are amortised over the entire
 * range of pages. It is assumed that the filesystems will lock whatever
 * resources they require in the iomap_begin call, and release them in the
 * iomap_end call.
 */
static loff_t
iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
		const struct iomap_ops *ops, void *data, iomap_actor_t actor)
{
	struct iomap iomap = { 0 };
	loff_t written = 0, ret;

	/*
	 * Need to map a range from start position for length bytes. This can
	 * span multiple pages - it is only guaranteed to return a range of a
	 * single type of pages (e.g. all into a hole, all mapped or all
	 * unwritten). Failure at this point has nothing to undo.
	 *
	 * If allocation is required for this range, reserve the space now so
	 * that the allocation is guaranteed to succeed later on. Once we copy
	 * the data into the page cache pages, then we cannot fail otherwise we
	 * expose transient stale data. If the reserve fails, we can safely
	 * back out at this point as there is nothing to undo.
	 */
	ret = ops->iomap_begin(inode, pos, length, flags, &iomap);
	if (ret)
		return ret;
	if (WARN_ON(iomap.offset > pos))
		return -EIO;

	/*
	 * Cut down the length to the one actually provided by the filesystem,
	 * as it might not be able to give us the whole size that we requested.
	 */
	if (iomap.offset + iomap.length < pos + length)
		length = iomap.offset + iomap.length - pos;

	/*
	 * Now that we have guaranteed that the space allocation will succeed,
	 * we can do the copy-in page by page without having to worry about
	 * failures exposing transient data.
	 */
	written = actor(inode, pos, length, data, &iomap);

	/*
	 * Now the data has been copied, commit the range we've copied. This
	 * should not fail unless the filesystem has had a fatal error.
	 */
	if (ops->iomap_end) {
		ret = ops->iomap_end(inode, pos, length,
				     written > 0 ? written : 0,
				     flags, &iomap);
	}

	return written ? written : ret;
}
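
/*
 * Example usage (illustrative sketch only; the examplefs_* names and the
 * examplefs_block_for() helper are hypothetical, only the struct iomap
 * fields, IOMAP_* constants and ops signatures are real): the
 * iomap_begin/iomap_end contract that iomap_apply() relies on.  A minimal
 * ->iomap_begin maps or reserves the whole requested range up front and
 * describes it in a single struct iomap; ->iomap_end undoes whatever is
 * left over once the actor reports how much was actually written.
 *
 *	static int examplefs_iomap_begin(struct inode *inode, loff_t offset,
 *			loff_t length, unsigned flags, struct iomap *iomap)
 *	{
 *		// take locks / reserve blocks for the whole range here
 *		iomap->type = IOMAP_MAPPED;
 *		iomap->offset = offset;
 *		iomap->length = length;		// may be shorter than requested
 *		iomap->blkno = examplefs_block_for(inode, offset);	// 512-byte units
 *		iomap->bdev = inode->i_sb->s_bdev;
 *		return 0;
 *	}
 *
 *	static int examplefs_iomap_end(struct inode *inode, loff_t offset,
 *			loff_t length, ssize_t written, unsigned flags,
 *			struct iomap *iomap)
 *	{
 *		// release locks, trim any reservation beyond "written"
 *		return 0;
 *	}
 *
 *	static const struct iomap_ops examplefs_iomap_ops = {
 *		.iomap_begin	= examplefs_iomap_begin,
 *		.iomap_end	= examplefs_iomap_end,
 *	};
 */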
static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
	loff_t i_size = i_size_read(inode);

	/*
	 * Only truncate newly allocated pages beyond EOF, even if the
	 * write started inside the existing inode size.
	 */
	if (pos + len > i_size)
		truncate_pagecache_range(inode, max(pos, i_size), pos + len);
}
static int
iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, struct iomap *iomap)
{
	pgoff_t index = pos >> PAGE_SHIFT;
	struct page *page;
	int status = 0;

	BUG_ON(pos + len > iomap->offset + iomap->length);

	if (fatal_signal_pending(current))
		return -EINTR;

	page = grab_cache_page_write_begin(inode->i_mapping, index, flags);
	if (!page)
		return -ENOMEM;

	status = __block_write_begin_int(page, pos, len, NULL, iomap);
	if (unlikely(status)) {
		unlock_page(page);
		put_page(page);
		page = NULL;

		iomap_write_failed(inode, pos, len);
	}

	*pagep = page;
	return status;
}
static int
iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
		unsigned copied, struct page *page)
{
	int ret;

	ret = generic_write_end(NULL, inode->i_mapping, pos, len,
			copied, page, NULL);
	if (ret < len)
		iomap_write_failed(inode, pos, len);
	return ret;
}
static loff_t
iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct iov_iter *i = data;
	long status = 0;
	ssize_t written = 0;
	unsigned int flags = AOP_FLAG_NOFS;

	do {
		struct page *page;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */
		size_t copied;		/* Bytes copied from user */

		offset = (pos & (PAGE_SIZE - 1));
		bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_count(i));
again:
		if (bytes > length)
			bytes = length;

		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * Not only is this an optimisation, but it is also required
		 * to check that the address is actually valid, when atomic
		 * usercopies are used, below.
		 */
		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
			status = -EFAULT;
			break;
		}

		status = iomap_write_begin(inode, pos, bytes, flags, &page,
				iomap);
		if (unlikely(status))
			break;

		if (mapping_writably_mapped(inode->i_mapping))
			flush_dcache_page(page);

		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);

		flush_dcache_page(page);

		status = iomap_write_end(inode, pos, bytes, copied, page);
		if (unlikely(status < 0))
			break;
		copied = status;

		cond_resched();

		iov_iter_advance(i, copied);
		if (unlikely(copied == 0)) {
			/*
			 * If we were unable to copy any data at all, we must
			 * fall back to a single segment length write.
			 *
			 * If we didn't fallback here, we could livelock
			 * because not all segments in the iov can be copied at
			 * once without a pagefault.
			 */
			bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_single_seg_count(i));
			goto again;
		}
		pos += copied;
		written += copied;
		length -= copied;

		balance_dirty_pages_ratelimited(inode->i_mapping);
	} while (iov_iter_count(i) && length);

	return written ? written : status;
}
ssize_t
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, written = 0;

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter),
				IOMAP_WRITE, ops, iter, iomap_write_actor);
		if (ret <= 0)
			break;
		pos += ret;
		written += ret;
	}

	return written ? written : ret;
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
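
/*
 * Example usage (illustrative sketch only): how a filesystem's ->write_iter
 * typically drives iomap_file_buffered_write().  "examplefs_iomap_ops" is the
 * hypothetical ops table sketched after iomap_apply() above; the locking and
 * generic_write_checks()/generic_write_sync() calls are real VFS helpers.
 *
 *	static ssize_t examplefs_file_write_iter(struct kiocb *iocb,
 *			struct iov_iter *from)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock(inode);
 *		ret = generic_write_checks(iocb, from);
 *		if (ret > 0)
 *			ret = iomap_file_buffered_write(iocb, from,
 *					&examplefs_iomap_ops);
 *		inode_unlock(inode);
 *		if (ret > 0)
 *			ret = generic_write_sync(iocb, ret);
 *		return ret;
 *	}
 */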
static struct page *
__iomap_read_page(struct inode *inode, loff_t offset)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = read_mapping_page(mapping, offset >> PAGE_SHIFT, NULL);
	if (IS_ERR(page))
		return page;
	if (!PageUptodate(page)) {
		put_page(page);
		return ERR_PTR(-EIO);
	}
	return page;
}
static loff_t
iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	long status = 0;
	ssize_t written = 0;

	do {
		struct page *page, *rpage;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */

		offset = (pos & (PAGE_SIZE - 1));
		bytes = min_t(unsigned long, PAGE_SIZE - offset, length);

		rpage = __iomap_read_page(inode, pos);
		if (IS_ERR(rpage))
			return PTR_ERR(rpage);

		status = iomap_write_begin(inode, pos, bytes,
					   AOP_FLAG_NOFS, &page, iomap);
		put_page(rpage);
		if (unlikely(status))
			return status;

		WARN_ON_ONCE(!PageUptodate(page));

		status = iomap_write_end(inode, pos, bytes, bytes, page);
		if (unlikely(status <= 0)) {
			if (WARN_ON_ONCE(status == 0))
				return -EIO;
			return status;
		}

		cond_resched();

		pos += status;
		written += status;
		length -= status;

		balance_dirty_pages_ratelimited(inode->i_mapping);
	} while (length);

	return written;
}
int
iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
		const struct iomap_ops *ops)
{
	loff_t ret;

	while (len) {
		ret = iomap_apply(inode, pos, len, IOMAP_WRITE, ops, NULL,
				iomap_dirty_actor);
		if (ret <= 0)
			return ret;
		pos += ret;
		len -= ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_file_dirty);
static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
		unsigned bytes, struct iomap *iomap)
{
	struct page *page;
	int status;

	status = iomap_write_begin(inode, pos, bytes, AOP_FLAG_NOFS, &page,
				   iomap);
	if (status)
		return status;

	zero_user(page, offset, bytes);
	mark_page_accessed(page);

	return iomap_write_end(inode, pos, bytes, bytes, page);
}
static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes,
		struct iomap *iomap)
{
	sector_t sector = iomap->blkno +
		(((pos & ~(PAGE_SIZE - 1)) - iomap->offset) >> 9);

	return __dax_zero_page_range(iomap->bdev, iomap->dax_dev, sector,
			offset, bytes);
}
static loff_t
iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
		void *data, struct iomap *iomap)
{
	bool *did_zero = data;
	loff_t written = 0;
	int status;

	/* already zeroed? we're done. */
	if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
		return count;

	do {
		unsigned offset, bytes;

		offset = pos & (PAGE_SIZE - 1); /* Within page */
		bytes = min_t(unsigned, PAGE_SIZE - offset, count);

		if (IS_DAX(inode))
			status = iomap_dax_zero(pos, offset, bytes, iomap);
		else
			status = iomap_zero(inode, pos, offset, bytes, iomap);
		if (status < 0)
			return status;

		pos += bytes;
		count -= bytes;
		written += bytes;
		if (did_zero)
			*did_zero = true;
	} while (count > 0);

	return written;
}
int
iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
		const struct iomap_ops *ops)
{
	loff_t ret;

	while (len > 0) {
		ret = iomap_apply(inode, pos, len, IOMAP_ZERO,
				ops, did_zero, iomap_zero_range_actor);
		if (ret <= 0)
			return ret;

		pos += ret;
		len -= ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_zero_range);
int
iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		const struct iomap_ops *ops)
{
	unsigned int blocksize = i_blocksize(inode);
	unsigned int off = pos & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!off)
		return 0;
	return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
}
EXPORT_SYMBOL_GPL(iomap_truncate_page);
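
/*
 * Example usage (illustrative sketch only): a setattr-style size change using
 * the two helpers above, modelled loosely on how existing iomap users drive
 * them.  "examplefs_iomap_ops", "oldsize" and "newsize" are assumed context,
 * not defined here.
 *
 *	bool did_zero = false;
 *	int error;
 *
 *	if (newsize > oldsize)
 *		// extending: zero from old EOF so stale data is not exposed
 *		error = iomap_zero_range(inode, oldsize, newsize - oldsize,
 *				&did_zero, &examplefs_iomap_ops);
 *	else
 *		// shrinking: zero the tail of the new last block
 *		error = iomap_truncate_page(inode, newsize, &did_zero,
 *				&examplefs_iomap_ops);
 */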
static loff_t
iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct page *page = data;
	int ret;

	ret = __block_write_begin_int(page, pos, length, NULL, iomap);
	if (ret)
		return ret;

	block_commit_write(page, 0, length);
	return length;
}
int iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	unsigned long length;
	loff_t offset, size;
	ssize_t ret;

	lock_page(page);
	size = i_size_read(inode);
	if ((page->mapping != inode->i_mapping) ||
	    (page_offset(page) > size)) {
		/* We overload EFAULT to mean page got truncated */
		ret = -EFAULT;
		goto out_unlock;
	}

	/* page is wholly or partially inside EOF */
	if (((page->index + 1) << PAGE_SHIFT) > size)
		length = size & ~PAGE_MASK;
	else
		length = PAGE_SIZE;

	offset = page_offset(page);
	while (length > 0) {
		ret = iomap_apply(inode, offset, length,
				IOMAP_WRITE | IOMAP_FAULT, ops, page,
				iomap_page_mkwrite_actor);
		if (unlikely(ret <= 0))
			goto out_unlock;
		offset += ret;
		length -= ret;
	}

	set_page_dirty(page);
	wait_for_stable_page(page);
	return VM_FAULT_LOCKED;
out_unlock:
	unlock_page(page);
	return block_page_mkwrite_return(ret);
}
EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
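
/*
 * Example usage (illustrative sketch only): wiring iomap_page_mkwrite() into
 * a hypothetical filesystem's mmap path.  The examplefs_* names are made up;
 * sb_start_pagefault(), file_update_time(), filemap_fault() and
 * filemap_map_pages() are real helpers.
 *
 *	static int examplefs_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		struct inode *inode = file_inode(vmf->vma->vm_file);
 *		int ret;
 *
 *		sb_start_pagefault(inode->i_sb);
 *		file_update_time(vmf->vma->vm_file);
 *		ret = iomap_page_mkwrite(vmf, &examplefs_iomap_ops);
 *		sb_end_pagefault(inode->i_sb);
 *		return ret;
 *	}
 *
 *	static const struct vm_operations_struct examplefs_file_vm_ops = {
 *		.fault		= filemap_fault,
 *		.map_pages	= filemap_map_pages,
 *		.page_mkwrite	= examplefs_page_mkwrite,
 *	};
 */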
struct fiemap_ctx {
	struct fiemap_extent_info *fi;
	struct iomap prev;
};

static int iomap_to_fiemap(struct fiemap_extent_info *fi,
		struct iomap *iomap, u32 flags)
{
	switch (iomap->type) {
	case IOMAP_HOLE:
		/* skip holes */
		return 0;
	case IOMAP_DELALLOC:
		flags |= FIEMAP_EXTENT_DELALLOC | FIEMAP_EXTENT_UNKNOWN;
		break;
	case IOMAP_UNWRITTEN:
		flags |= FIEMAP_EXTENT_UNWRITTEN;
		break;
	case IOMAP_MAPPED:
		break;
	}

	if (iomap->flags & IOMAP_F_MERGED)
		flags |= FIEMAP_EXTENT_MERGED;
	if (iomap->flags & IOMAP_F_SHARED)
		flags |= FIEMAP_EXTENT_SHARED;

	return fiemap_fill_next_extent(fi, iomap->offset,
			iomap->blkno != IOMAP_NULL_BLOCK ? iomap->blkno << 9 : 0,
			iomap->length, flags);
}
static loff_t
iomap_fiemap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct fiemap_ctx *ctx = data;
	loff_t ret = length;

	if (iomap->type == IOMAP_HOLE)
		return length;

	ret = iomap_to_fiemap(ctx->fi, &ctx->prev, 0);
	ctx->prev = *iomap;
	switch (ret) {
	case 0:		/* success */
		return length;
	case 1:		/* extent array full */
		return 0;
	default:
		return ret;
	}
}
int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
		loff_t start, loff_t len, const struct iomap_ops *ops)
{
	struct fiemap_ctx ctx;
	loff_t ret;

	memset(&ctx, 0, sizeof(ctx));
	ctx.fi = fi;
	ctx.prev.type = IOMAP_HOLE;

	ret = fiemap_check_flags(fi, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	if (fi->fi_flags & FIEMAP_FLAG_SYNC) {
		ret = filemap_write_and_wait(inode->i_mapping);
		if (ret)
			return ret;
	}

	while (len > 0) {
		ret = iomap_apply(inode, start, len, IOMAP_REPORT, ops, &ctx,
				iomap_fiemap_actor);
		/* inode with no (attribute) mapping will give ENOENT */
		if (ret == -ENOENT)
			break;
		if (ret < 0)
			return ret;
		if (ret == 0)
			break;

		start += ret;
		len -= ret;
	}

	if (ctx.prev.type != IOMAP_HOLE) {
		ret = iomap_to_fiemap(fi, &ctx.prev, FIEMAP_EXTENT_LAST);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_fiemap);
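
/*
 * Example usage (illustrative sketch only): an ->fiemap inode operation is
 * usually a thin wrapper around iomap_fiemap().  "examplefs_iomap_ops" is the
 * hypothetical ops table from the earlier sketches.
 *
 *	static int examplefs_fiemap(struct inode *inode,
 *			struct fiemap_extent_info *fieinfo, u64 start, u64 len)
 *	{
 *		return iomap_fiemap(inode, fieinfo, start, len,
 *				&examplefs_iomap_ops);
 *	}
 */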
/*
 * Private flags for iomap_dio, must not overlap with the public ones in
 * iomap.h:
 */
#define IOMAP_DIO_WRITE		(1 << 30)
#define IOMAP_DIO_DIRTY		(1 << 31)

struct iomap_dio {
	struct kiocb		*iocb;
	iomap_dio_end_io_t	*end_io;
	loff_t			i_size;
	loff_t			size;
	atomic_t		ref;
	unsigned		flags;
	int			error;

	union {
		/* used during submission and for synchronous completion: */
		struct {
			struct iov_iter		*iter;
			struct task_struct	*waiter;
			struct request_queue	*last_queue;
			blk_qc_t		cookie;
		} submit;

		/* used for aio completion: */
		struct {
			struct work_struct	work;
		} aio;
	};
};
static ssize_t iomap_dio_complete(struct iomap_dio *dio)
{
	struct kiocb *iocb = dio->iocb;
	ssize_t ret;

	if (dio->end_io) {
		ret = dio->end_io(iocb,
				dio->error ? dio->error : dio->size,
				dio->flags);
	} else {
		ret = dio->error;
	}

	if (likely(!ret)) {
		ret = dio->size;
		/* check for short read */
		if (iocb->ki_pos + ret > dio->i_size &&
		    !(dio->flags & IOMAP_DIO_WRITE))
			ret = dio->i_size - iocb->ki_pos;
		iocb->ki_pos += ret;
	}

	inode_dio_end(file_inode(iocb->ki_filp));
	kfree(dio);

	return ret;
}
static void iomap_dio_complete_work(struct work_struct *work)
{
	struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
	struct kiocb *iocb = dio->iocb;
	bool is_write = (dio->flags & IOMAP_DIO_WRITE);
	ssize_t ret;

	ret = iomap_dio_complete(dio);
	if (is_write && ret > 0)
		ret = generic_write_sync(iocb, ret);
	iocb->ki_complete(iocb, ret, 0);
}
/*
 * Set an error in the dio if none is set yet.  We have to use cmpxchg
 * as the submission context and the completion context(s) can race to
 * update the error.
 */
static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
{
	cmpxchg(&dio->error, 0, ret);
}
static void iomap_dio_bio_end_io(struct bio *bio)
{
	struct iomap_dio *dio = bio->bi_private;
	bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);

	if (bio->bi_error)
		iomap_dio_set_error(dio, bio->bi_error);

	if (atomic_dec_and_test(&dio->ref)) {
		if (is_sync_kiocb(dio->iocb)) {
			struct task_struct *waiter = dio->submit.waiter;

			WRITE_ONCE(dio->submit.waiter, NULL);
			wake_up_process(waiter);
		} else if (dio->flags & IOMAP_DIO_WRITE) {
			struct inode *inode = file_inode(dio->iocb->ki_filp);

			INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
			queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
		} else {
			iomap_dio_complete_work(&dio->aio.work);
		}
	}

	if (should_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		struct bio_vec *bvec;
		int i;

		bio_for_each_segment_all(bvec, bio, i)
			put_page(bvec->bv_page);
		bio_put(bio);
	}
}
static blk_qc_t
iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
		unsigned len)
{
	struct page *page = ZERO_PAGE(0);
	struct bio *bio;

	bio = bio_alloc(GFP_KERNEL, 1);
	bio->bi_bdev = iomap->bdev;
	bio->bi_iter.bi_sector =
		iomap->blkno + ((pos - iomap->offset) >> 9);
	bio->bi_private = dio;
	bio->bi_end_io = iomap_dio_bio_end_io;

	get_page(page);
	if (bio_add_page(bio, page, len, 0) != len)
		BUG();
	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC | REQ_IDLE);

	atomic_inc(&dio->ref);
	return submit_bio(bio);
}
static loff_t
iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct iomap_dio *dio = data;
	unsigned int blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev));
	unsigned int fs_block_size = i_blocksize(inode), pad;
	unsigned int align = iov_iter_alignment(dio->submit.iter);
	struct iov_iter iter;
	struct bio *bio;
	bool need_zeroout = false;
	int nr_pages, ret;

	if ((pos | length | align) & ((1 << blkbits) - 1))
		return -EINVAL;

	switch (iomap->type) {
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
			return -EIO;
		/* FALLTHRU */
	case IOMAP_UNWRITTEN:
		if (!(dio->flags & IOMAP_DIO_WRITE)) {
			iov_iter_zero(length, dio->submit.iter);
			dio->size += length;
			return length;
		}
		dio->flags |= IOMAP_DIO_UNWRITTEN;
		need_zeroout = true;
		break;
	case IOMAP_MAPPED:
		if (iomap->flags & IOMAP_F_SHARED)
			dio->flags |= IOMAP_DIO_COW;
		if (iomap->flags & IOMAP_F_NEW)
			need_zeroout = true;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EIO;
	}

	/*
	 * Operate on a partial iter trimmed to the extent we were called for.
	 * We'll update the iter in the dio once we're done with this extent.
	 */
	iter = *dio->submit.iter;
	iov_iter_truncate(&iter, length);

	nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
	if (nr_pages <= 0)
		return nr_pages;

	if (need_zeroout) {
		/* zero out from the start of the block to the write offset */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(dio, iomap, pos - pad, pad);
	}

	do {
		bio = bio_alloc(GFP_KERNEL, nr_pages);
		bio->bi_bdev = iomap->bdev;
		bio->bi_iter.bi_sector =
			iomap->blkno + ((pos - iomap->offset) >> 9);
		bio->bi_private = dio;
		bio->bi_end_io = iomap_dio_bio_end_io;

		ret = bio_iov_iter_get_pages(bio, &iter);
		if (unlikely(ret)) {
			bio_put(bio);
			return ret;
		}

		if (dio->flags & IOMAP_DIO_WRITE) {
			bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC | REQ_IDLE);
			task_io_account_write(bio->bi_iter.bi_size);
		} else {
			bio_set_op_attrs(bio, REQ_OP_READ, 0);
			if (dio->flags & IOMAP_DIO_DIRTY)
				bio_set_pages_dirty(bio);
		}

		dio->size += bio->bi_iter.bi_size;
		pos += bio->bi_iter.bi_size;

		nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);

		atomic_inc(&dio->ref);

		dio->submit.last_queue = bdev_get_queue(iomap->bdev);
		dio->submit.cookie = submit_bio(bio);
	} while (nr_pages);

	if (need_zeroout) {
		/* zero out from the end of the write to the end of the block */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(dio, iomap, pos, fs_block_size - pad);
	}

	iov_iter_advance(dio->submit.iter, length);
	return length;
}
ssize_t
iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops, iomap_dio_end_io_t end_io)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = file_inode(iocb->ki_filp);
	size_t count = iov_iter_count(iter);
	loff_t pos = iocb->ki_pos, start = pos;
	loff_t end = iocb->ki_pos + count - 1, ret = 0;
	unsigned int flags = IOMAP_DIRECT;
	struct blk_plug plug;
	struct iomap_dio *dio;

	lockdep_assert_held(&inode->i_rwsem);

	if (!count)
		return 0;

	dio = kmalloc(sizeof(*dio), GFP_KERNEL);
	if (!dio)
		return -ENOMEM;

	dio->iocb = iocb;
	atomic_set(&dio->ref, 1);
	dio->size = 0;
	dio->i_size = i_size_read(inode);
	dio->end_io = end_io;
	dio->error = 0;
	dio->flags = 0;

	dio->submit.iter = iter;
	if (is_sync_kiocb(iocb)) {
		dio->submit.waiter = current;
		dio->submit.cookie = BLK_QC_T_NONE;
		dio->submit.last_queue = NULL;
	}

	if (iov_iter_rw(iter) == READ) {
		if (pos >= dio->i_size)
			goto out_free_dio;

		if (iter->type == ITER_IOVEC)
			dio->flags |= IOMAP_DIO_DIRTY;
	} else {
		dio->flags |= IOMAP_DIO_WRITE;
		flags |= IOMAP_WRITE;
	}

	ret = filemap_write_and_wait_range(mapping, start, end);
	if (ret)
		goto out_free_dio;

	ret = invalidate_inode_pages2_range(mapping,
			start >> PAGE_SHIFT, end >> PAGE_SHIFT);
	WARN_ON_ONCE(ret);
	ret = 0;

	inode_dio_begin(inode);

	blk_start_plug(&plug);
	do {
		ret = iomap_apply(inode, pos, count, flags, ops, dio,
				iomap_dio_actor);
		if (ret <= 0) {
			/* magic error code to fall back to buffered I/O */
			if (ret == -ENOTBLK)
				ret = 0;
			break;
		}
		pos += ret;

		if (iov_iter_rw(iter) == READ && pos >= dio->i_size)
			break;
	} while ((count = iov_iter_count(iter)) > 0);
	blk_finish_plug(&plug);

	if (ret < 0)
		iomap_dio_set_error(dio, ret);

	if (ret >= 0 && iov_iter_rw(iter) == WRITE && !is_sync_kiocb(iocb) &&
			!inode->i_sb->s_dio_done_wq) {
		ret = sb_init_dio_done_wq(inode->i_sb);
		if (ret < 0)
			iomap_dio_set_error(dio, ret);
	}

	if (!atomic_dec_and_test(&dio->ref)) {
		if (!is_sync_kiocb(iocb))
			return -EIOCBQUEUED;

		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (!READ_ONCE(dio->submit.waiter))
				break;

			if (!(iocb->ki_flags & IOCB_HIPRI) ||
			    !dio->submit.last_queue ||
			    !blk_mq_poll(dio->submit.last_queue,
					 dio->submit.cookie))
				io_schedule();
		}
		__set_current_state(TASK_RUNNING);
	}

	ret = iomap_dio_complete(dio);

	/*
	 * Try again to invalidate clean pages which might have been cached by
	 * non-direct readahead, or faulted in by get_user_pages() if the
	 * source of the write was an mmap'ed region of the file we're writing.
	 * Either one is a pretty crazy thing to do, so we don't support it
	 * 100%.  If this invalidation fails, tough, the write still worked...
	 */
	if (iov_iter_rw(iter) == WRITE) {
		int err = invalidate_inode_pages2_range(mapping,
				start >> PAGE_SHIFT, end >> PAGE_SHIFT);
		WARN_ON_ONCE(err);
	}

	return ret;

out_free_dio:
	kfree(dio);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_dio_rw);
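
/*
 * Example usage (illustrative sketch only): a direct I/O read path calling
 * iomap_dio_rw() with i_rwsem held, as the lockdep assertion above requires.
 * "examplefs_iomap_ops" is the hypothetical ops table from the earlier
 * sketches; passing a NULL end_io is allowed and simply skips the ->end_io
 * callback in iomap_dio_complete().
 *
 *	static ssize_t examplefs_file_read_iter(struct kiocb *iocb,
 *			struct iov_iter *to)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		if (!(iocb->ki_flags & IOCB_DIRECT))
 *			return generic_file_read_iter(iocb, to);
 *
 *		inode_lock_shared(inode);
 *		ret = iomap_dio_rw(iocb, to, &examplefs_iomap_ops, NULL);
 *		inode_unlock_shared(inode);
 *		return ret;
 *	}
 */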