/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (c) 2016 Christoph Hellwig.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/dax.h>
#include "internal.h"
/*
 * Execute an iomap write on a segment of the mapping that spans a
 * contiguous range of pages that have identical block mapping state.
 *
 * This avoids the need to map pages individually, do individual allocations
 * for each page and, most importantly, avoids the need for filesystem
 * specific locking per page. Instead, all the operations are amortised over
 * the entire range of pages. It is assumed that the filesystems will lock
 * whatever resources they require in the iomap_begin call, and release them
 * in the iomap_end call.
 */
static loff_t
iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
		const struct iomap_ops *ops, void *data, iomap_actor_t actor)
{
	struct iomap iomap = { 0 };
	loff_t written = 0, ret;

	/*
	 * Need to map a range from start position for length bytes. This can
	 * span multiple pages - it is only guaranteed to return a range of a
	 * single type of pages (e.g. all into a hole, all mapped or all
	 * unwritten). Failure at this point has nothing to undo.
	 *
	 * If allocation is required for this range, reserve the space now so
	 * that the allocation is guaranteed to succeed later on. Once we copy
	 * the data into the page cache pages, then we cannot fail otherwise we
	 * expose transient stale data. If the reserve fails, we can safely
	 * back out at this point as there is nothing to undo.
	 */
	ret = ops->iomap_begin(inode, pos, length, flags, &iomap);
	if (ret)
		return ret;
	if (WARN_ON(iomap.offset > pos))
		return -EIO;

	/*
	 * Cut down the length to the one actually provided by the filesystem,
	 * as it might not be able to give us the whole size that we requested.
	 */
	if (iomap.offset + iomap.length < pos + length)
		length = iomap.offset + iomap.length - pos;

	/*
	 * Now that we have guaranteed that the space allocation will succeed,
	 * we can do the copy-in page by page without having to worry about
	 * failures exposing transient data.
	 */
	written = actor(inode, pos, length, data, &iomap);

	/*
	 * Now the data has been copied, commit the range we've copied. This
	 * should not fail unless the filesystem has had a fatal error.
	 */
	if (ops->iomap_end) {
		ret = ops->iomap_end(inode, pos, length,
				     written > 0 ? written : 0,
				     flags, &iomap);
	}

	return written ? written : ret;
}
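/*
 * Illustrative sketch (not part of this file): a filesystem drives
 * iomap_apply() indirectly, through the exported helpers below, by supplying
 * a struct iomap_ops.  The example_iomap_begin/example_iomap_end/example_ops
 * names are hypothetical placeholders, not a real filesystem implementation;
 * a minimal, compile-time-disabled outline might look like this:
 */
#if 0
static int example_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
		unsigned flags, struct iomap *iomap)
{
	/* Lock/reserve filesystem resources and describe one extent at pos. */
	iomap->offset = pos;
	iomap->length = length;
	iomap->type = IOMAP_HOLE;
	iomap->blkno = IOMAP_NULL_BLOCK;
	return 0;
}

static int example_iomap_end(struct inode *inode, loff_t pos, loff_t length,
		ssize_t written, unsigned flags, struct iomap *iomap)
{
	/* Release whatever example_iomap_begin() took. */
	return 0;
}

static const struct iomap_ops example_ops = {
	.iomap_begin	= example_iomap_begin,
	.iomap_end	= example_iomap_end,
};
#endif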
static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
	loff_t i_size = i_size_read(inode);

	/*
	 * Only truncate newly allocated pages beyond EOF, even if the
	 * write started inside the existing inode size.
	 */
	if (pos + len > i_size)
		truncate_pagecache_range(inode, max(pos, i_size), pos + len);
}
static int
iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, struct iomap *iomap)
{
	pgoff_t index = pos >> PAGE_SHIFT;
	struct page *page;
	int status = 0;

	BUG_ON(pos + len > iomap->offset + iomap->length);

	if (fatal_signal_pending(current))
		return -EINTR;

	page = grab_cache_page_write_begin(inode->i_mapping, index, flags);
	if (!page)
		return -ENOMEM;

	status = __block_write_begin_int(page, pos, len, NULL, iomap);
	if (unlikely(status)) {
		unlock_page(page);
		put_page(page);
		page = NULL;

		iomap_write_failed(inode, pos, len);
	}

	*pagep = page;
	return status;
}
static int
iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
		unsigned copied, struct page *page)
{
	int ret;

	ret = generic_write_end(NULL, inode->i_mapping, pos, len,
			copied, page, NULL);
	if (ret < len)
		iomap_write_failed(inode, pos, len);
	return ret;
}
static loff_t
iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct iov_iter *i = data;
	long status = 0;
	ssize_t written = 0;
	unsigned int flags = AOP_FLAG_NOFS;

	/*
	 * Copies from kernel address space cannot fail (NFSD is a big user).
	 */
	if (!iter_is_iovec(i))
		flags |= AOP_FLAG_UNINTERRUPTIBLE;

	do {
		struct page *page;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */
		size_t copied;		/* Bytes copied from user */

		offset = (pos & (PAGE_SIZE - 1));
		bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_count(i));
again:
		if (bytes > length)
			bytes = length;

		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * Not only is this an optimisation, but it is also required
		 * to check that the address is actually valid, when atomic
		 * usercopies are used, below.
		 */
		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
			status = -EFAULT;
			break;
		}

		status = iomap_write_begin(inode, pos, bytes, flags, &page,
				iomap);
		if (unlikely(status))
			break;

		if (mapping_writably_mapped(inode->i_mapping))
			flush_dcache_page(page);

		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);

		flush_dcache_page(page);

		status = iomap_write_end(inode, pos, bytes, copied, page);
		if (unlikely(status < 0))
			break;
		copied = status;

		iov_iter_advance(i, copied);
		if (unlikely(copied == 0)) {
			/*
			 * If we were unable to copy any data at all, we must
			 * fall back to a single segment length write.
			 *
			 * If we didn't fallback here, we could livelock
			 * because not all segments in the iov can be copied at
			 * once without a pagefault.
			 */
			bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_single_seg_count(i));
			goto again;
		}
		pos += copied;
		written += copied;
		length -= copied;

		balance_dirty_pages_ratelimited(inode->i_mapping);
	} while (iov_iter_count(i) && length);

	return written ? written : status;
}
ssize_t
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, written = 0;

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter),
				IOMAP_WRITE, ops, iter, iomap_write_actor);
		if (ret <= 0)
			break;
		pos += ret;
		written += ret;
	}

	return written ? written : ret;
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
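/*
 * Illustrative sketch (not part of this file): a typical ->write_iter
 * method takes the inode lock, runs the generic write checks and then hands
 * the iterator to iomap_file_buffered_write() together with its iomap_ops.
 * The example_* names are hypothetical (example_ops is the sketch shown
 * after iomap_apply() above).
 */
#if 0
static ssize_t example_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret > 0)
		ret = iomap_file_buffered_write(iocb, from, &example_ops);
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
#endif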
static struct page *
__iomap_read_page(struct inode *inode, loff_t offset)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = read_mapping_page(mapping, offset >> PAGE_SHIFT, NULL);
	if (IS_ERR(page))
		return page;
	if (!PageUptodate(page)) {
		put_page(page);
		return ERR_PTR(-EIO);
	}
	return page;
}
static loff_t
iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	long status = 0;
	ssize_t written = 0;

	do {
		struct page *page, *rpage;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */

		offset = (pos & (PAGE_SIZE - 1));
		bytes = min_t(unsigned long, PAGE_SIZE - offset, length);

		rpage = __iomap_read_page(inode, pos);
		if (IS_ERR(rpage))
			return PTR_ERR(rpage);

		status = iomap_write_begin(inode, pos, bytes,
					   AOP_FLAG_NOFS | AOP_FLAG_UNINTERRUPTIBLE,
					   &page, iomap);
		put_page(rpage);
		if (unlikely(status))
			return status;

		WARN_ON_ONCE(!PageUptodate(page));

		status = iomap_write_end(inode, pos, bytes, bytes, page);
		if (unlikely(status <= 0)) {
			if (WARN_ON_ONCE(status == 0))
				return -EIO;
			return status;
		}

		cond_resched();

		pos += status;
		written += status;
		length -= status;

		balance_dirty_pages_ratelimited(inode->i_mapping);
	} while (length);

	return written;
}
int
iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
		const struct iomap_ops *ops)
{
	loff_t ret;

	while (len) {
		ret = iomap_apply(inode, pos, len, IOMAP_WRITE, ops, NULL,
				iomap_dirty_actor);
		if (ret <= 0)
			return ret;
		pos += ret;
		len -= ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_file_dirty);
static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
		unsigned bytes, struct iomap *iomap)
{
	struct page *page;
	int status;

	status = iomap_write_begin(inode, pos, bytes,
			AOP_FLAG_UNINTERRUPTIBLE | AOP_FLAG_NOFS, &page, iomap);
	if (status)
		return status;

	zero_user(page, offset, bytes);
	mark_page_accessed(page);

	return iomap_write_end(inode, pos, bytes, bytes, page);
}
static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes,
		struct iomap *iomap)
{
	sector_t sector = iomap->blkno +
		(((pos & ~(PAGE_SIZE - 1)) - iomap->offset) >> 9);

	return __dax_zero_page_range(iomap->bdev, sector, offset, bytes);
}
static loff_t
iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
		void *data, struct iomap *iomap)
{
	bool *did_zero = data;
	loff_t written = 0;
	int status;

	/* already zeroed?  we're done. */
	if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
		return count;

	do {
		unsigned offset, bytes;

		offset = pos & (PAGE_SIZE - 1); /* Within page */
		bytes = min_t(unsigned, PAGE_SIZE - offset, count);

		if (IS_DAX(inode))
			status = iomap_dax_zero(pos, offset, bytes, iomap);
		else
			status = iomap_zero(inode, pos, offset, bytes, iomap);
		if (status < 0)
			return status;

		pos += bytes;
		count -= bytes;
		written += bytes;
		if (did_zero)
			*did_zero = true;
	} while (count > 0);

	return written;
}
int
iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
		const struct iomap_ops *ops)
{
	loff_t ret;

	while (len > 0) {
		ret = iomap_apply(inode, pos, len, IOMAP_ZERO,
				ops, did_zero, iomap_zero_range_actor);
		if (ret <= 0)
			return ret;

		pos += ret;
		len -= ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_zero_range);
int
iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		const struct iomap_ops *ops)
{
	unsigned blocksize = (1 << inode->i_blkbits);
	unsigned off = pos & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!off)
		return 0;
	return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
}
EXPORT_SYMBOL_GPL(iomap_truncate_page);
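/*
 * Worked example (illustrative, assuming a 4096-byte block size): a truncate
 * to pos = 10000 gives off = 10000 & 4095 = 1808, so iomap_truncate_page()
 * zeroes blocksize - off = 2288 bytes, i.e. the partial-block tail from
 * offset 10000 up to the next block boundary at 12288.  A truncate that
 * lands exactly on a block boundary (off == 0) returns without doing
 * anything.
 */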
static loff_t
iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct page *page = data;
	int ret;

	ret = __block_write_begin_int(page, pos, length, NULL, iomap);
	if (ret)
		return ret;

	block_commit_write(page, 0, length);
	return length;
}
int iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	unsigned long length;
	loff_t offset, size;
	ssize_t ret;

	lock_page(page);
	size = i_size_read(inode);
	if ((page->mapping != inode->i_mapping) ||
	    (page_offset(page) > size)) {
		/* We overload EFAULT to mean page got truncated */
		ret = -EFAULT;
		goto out_unlock;
	}

	/* page is wholly or partially inside EOF */
	if (((page->index + 1) << PAGE_SHIFT) > size)
		length = size & ~PAGE_MASK;
	else
		length = PAGE_SIZE;

	offset = page_offset(page);
	while (length > 0) {
		ret = iomap_apply(inode, offset, length,
				IOMAP_WRITE | IOMAP_FAULT, ops, page,
				iomap_page_mkwrite_actor);
		if (unlikely(ret <= 0))
			goto out_unlock;
		offset += ret;
		length -= ret;
	}

	set_page_dirty(page);
	wait_for_stable_page(page);
	return 0;
out_unlock:
	unlock_page(page);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
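/*
 * Illustrative sketch (not part of this file): a filesystem wires
 * iomap_page_mkwrite() into its vm_operations_struct and converts the
 * 0/-errno result into a VM_FAULT_* code.  The example_* names are
 * hypothetical; example_ops is the sketch shown after iomap_apply() above.
 */
#if 0
static int example_page_mkwrite(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	int ret;

	sb_start_pagefault(inode->i_sb);
	ret = iomap_page_mkwrite(vmf, &example_ops);
	sb_end_pagefault(inode->i_sb);

	return block_page_mkwrite_return(ret);
}
#endif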
struct fiemap_ctx {
	struct fiemap_extent_info *fi;
	struct iomap prev;
};

static int iomap_to_fiemap(struct fiemap_extent_info *fi,
		struct iomap *iomap, u32 flags)
{
	switch (iomap->type) {
	case IOMAP_HOLE:
		/* skip holes */
		return 0;
	case IOMAP_DELALLOC:
		flags |= FIEMAP_EXTENT_DELALLOC | FIEMAP_EXTENT_UNKNOWN;
		break;
	case IOMAP_UNWRITTEN:
		flags |= FIEMAP_EXTENT_UNWRITTEN;
		break;
	case IOMAP_MAPPED:
		break;
	}

	if (iomap->flags & IOMAP_F_MERGED)
		flags |= FIEMAP_EXTENT_MERGED;
	if (iomap->flags & IOMAP_F_SHARED)
		flags |= FIEMAP_EXTENT_SHARED;

	return fiemap_fill_next_extent(fi, iomap->offset,
			iomap->blkno != IOMAP_NULL_BLOCK ? iomap->blkno << 9 : 0,
			iomap->length, flags);
}
static loff_t
iomap_fiemap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct fiemap_ctx *ctx = data;
	loff_t ret = length;

	if (iomap->type == IOMAP_HOLE)
		return length;

	ret = iomap_to_fiemap(ctx->fi, &ctx->prev, 0);
	ctx->prev = *iomap;
	switch (ret) {
	case 0:		/* success */
		return length;
	case 1:		/* extent array full */
		return 0;
	default:
		return ret;
	}
}
int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
		loff_t start, loff_t len, const struct iomap_ops *ops)
{
	struct fiemap_ctx ctx;
	loff_t ret;

	memset(&ctx, 0, sizeof(ctx));
	ctx.fi = fi;
	ctx.prev.type = IOMAP_HOLE;

	ret = fiemap_check_flags(fi, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	if (fi->fi_flags & FIEMAP_FLAG_SYNC) {
		ret = filemap_write_and_wait(inode->i_mapping);
		if (ret)
			return ret;
	}

	while (len > 0) {
		ret = iomap_apply(inode, start, len, IOMAP_REPORT, ops, &ctx,
				iomap_fiemap_actor);
		/* inode with no (attribute) mapping will give ENOENT */
		if (ret == -ENOENT)
			break;
		if (ret < 0)
			return ret;
		if (ret == 0)
			break;

		start += ret;
		len -= ret;
	}

	if (ctx.prev.type != IOMAP_HOLE) {
		ret = iomap_to_fiemap(fi, &ctx.prev, FIEMAP_EXTENT_LAST);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_fiemap);
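/*
 * Illustrative sketch (not part of this file): an inode_operations ->fiemap
 * method usually just forwards to iomap_fiemap() with the filesystem's
 * iomap_ops.  The example_* names are hypothetical.
 */
#if 0
static int example_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
		u64 start, u64 len)
{
	return iomap_fiemap(inode, fi, start, len, &example_ops);
}
#endif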
/*
 * Private flags for iomap_dio, must not overlap with the public ones in
 * iomap.h:
 */
#define IOMAP_DIO_WRITE		(1 << 30)
#define IOMAP_DIO_DIRTY		(1 << 31)

struct iomap_dio {
	struct kiocb		*iocb;
	iomap_dio_end_io_t	*end_io;
	loff_t			i_size;
	loff_t			size;
	atomic_t		ref;
	unsigned		flags;
	int			error;

	union {
		/* used during submission and for synchronous completion: */
		struct {
			struct iov_iter		*iter;
			struct task_struct	*waiter;
			struct request_queue	*last_queue;
			blk_qc_t		cookie;
		} submit;

		/* used for aio completion: */
		struct {
			struct work_struct	work;
		} aio;
	};
};
static ssize_t iomap_dio_complete(struct iomap_dio *dio)
{
	struct kiocb *iocb = dio->iocb;
	ssize_t ret;

	if (dio->end_io) {
		ret = dio->end_io(iocb,
				dio->error ? dio->error : dio->size,
				dio->flags);
	} else {
		ret = dio->error;
	}

	if (likely(!ret)) {
		ret = dio->size;
		/* check for short read */
		if (iocb->ki_pos + ret > dio->i_size &&
		    !(dio->flags & IOMAP_DIO_WRITE))
			ret = dio->i_size - iocb->ki_pos;
		iocb->ki_pos += ret;
	}

	inode_dio_end(file_inode(iocb->ki_filp));
	kfree(dio);

	return ret;
}
static void iomap_dio_complete_work(struct work_struct *work)
{
	struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
	struct kiocb *iocb = dio->iocb;
	bool is_write = (dio->flags & IOMAP_DIO_WRITE);
	ssize_t ret;

	ret = iomap_dio_complete(dio);
	if (is_write && ret > 0)
		ret = generic_write_sync(iocb, ret);
	iocb->ki_complete(iocb, ret, 0);
}
/*
 * Set an error in the dio if none is set yet.  We have to use cmpxchg
 * as the submission context and the completion context(s) can race to
 * update the error.
 */
static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
{
	cmpxchg(&dio->error, 0, ret);
}
static void iomap_dio_bio_end_io(struct bio *bio)
{
	struct iomap_dio *dio = bio->bi_private;
	bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);

	if (bio->bi_error)
		iomap_dio_set_error(dio, bio->bi_error);

	if (atomic_dec_and_test(&dio->ref)) {
		if (is_sync_kiocb(dio->iocb)) {
			struct task_struct *waiter = dio->submit.waiter;

			WRITE_ONCE(dio->submit.waiter, NULL);
			wake_up_process(waiter);
		} else if (dio->flags & IOMAP_DIO_WRITE) {
			struct inode *inode = file_inode(dio->iocb->ki_filp);

			INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
			queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
		} else {
			iomap_dio_complete_work(&dio->aio.work);
		}
	}

	if (should_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		struct bio_vec *bvec;
		int i;

		bio_for_each_segment_all(bvec, bio, i)
			put_page(bvec->bv_page);
		bio_put(bio);
	}
}
static blk_qc_t
iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
		unsigned len)
{
	struct page *page = ZERO_PAGE(0);
	struct bio *bio;

	bio = bio_alloc(GFP_KERNEL, 1);
	bio->bi_bdev = iomap->bdev;
	bio->bi_iter.bi_sector =
		iomap->blkno + ((pos - iomap->offset) >> 9);
	bio->bi_private = dio;
	bio->bi_end_io = iomap_dio_bio_end_io;

	get_page(page);
	if (bio_add_page(bio, page, len, 0) != len)
		BUG();
	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC | REQ_IDLE);

	atomic_inc(&dio->ref);
	return submit_bio(bio);
}
static loff_t
iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct iomap_dio *dio = data;
	unsigned blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev));
	unsigned fs_block_size = (1 << inode->i_blkbits), pad;
	unsigned align = iov_iter_alignment(dio->submit.iter);
	struct iov_iter iter;
	struct bio *bio;
	bool need_zeroout = false;
	int nr_pages, ret;

	if ((pos | length | align) & ((1 << blkbits) - 1))
		return -EINVAL;

	switch (iomap->type) {
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
			return -EIO;
		/*FALLTHRU*/
	case IOMAP_UNWRITTEN:
		if (!(dio->flags & IOMAP_DIO_WRITE)) {
			iov_iter_zero(length, dio->submit.iter);
			dio->size += length;
			return length;
		}
		dio->flags |= IOMAP_DIO_UNWRITTEN;
		need_zeroout = true;
		break;
	case IOMAP_MAPPED:
		if (iomap->flags & IOMAP_F_SHARED)
			dio->flags |= IOMAP_DIO_COW;
		if (iomap->flags & IOMAP_F_NEW)
			need_zeroout = true;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EIO;
	}

	/*
	 * Operate on a partial iter trimmed to the extent we were called for.
	 * We'll update the iter in the dio once we're done with this extent.
	 */
	iter = *dio->submit.iter;
	iov_iter_truncate(&iter, length);

	nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
	if (nr_pages <= 0)
		return nr_pages;

	if (need_zeroout) {
		/* zero out from the start of the block to the write offset */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(dio, iomap, pos - pad, pad);
	}

	do {
		if (dio->error)
			return 0;

		bio = bio_alloc(GFP_KERNEL, nr_pages);
		bio->bi_bdev = iomap->bdev;
		bio->bi_iter.bi_sector =
			iomap->blkno + ((pos - iomap->offset) >> 9);
		bio->bi_private = dio;
		bio->bi_end_io = iomap_dio_bio_end_io;

		ret = bio_iov_iter_get_pages(bio, &iter);
		if (unlikely(ret)) {
			bio_put(bio);
			return ret;
		}

		if (dio->flags & IOMAP_DIO_WRITE) {
			bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC | REQ_IDLE);
			task_io_account_write(bio->bi_iter.bi_size);
		} else {
			bio_set_op_attrs(bio, REQ_OP_READ, 0);
			if (dio->flags & IOMAP_DIO_DIRTY)
				bio_set_pages_dirty(bio);
		}

		dio->size += bio->bi_iter.bi_size;
		pos += bio->bi_iter.bi_size;

		nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);

		atomic_inc(&dio->ref);

		dio->submit.last_queue = bdev_get_queue(iomap->bdev);
		dio->submit.cookie = submit_bio(bio);
	} while (nr_pages);

	if (need_zeroout) {
		/* zero out from the end of the write to the end of the block */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(dio, iomap, pos, fs_block_size - pad);
	}

	iov_iter_advance(dio->submit.iter, length);
	return length;
}
ssize_t
iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops, iomap_dio_end_io_t end_io)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = file_inode(iocb->ki_filp);
	size_t count = iov_iter_count(iter);
	loff_t pos = iocb->ki_pos, end = iocb->ki_pos + count - 1, ret = 0;
	unsigned int flags = IOMAP_DIRECT;
	struct blk_plug plug;
	struct iomap_dio *dio;

	lockdep_assert_held(&inode->i_rwsem);

	if (!count)
		return 0;

	dio = kmalloc(sizeof(*dio), GFP_KERNEL);
	if (!dio)
		return -ENOMEM;

	dio->iocb = iocb;
	atomic_set(&dio->ref, 1);
	dio->size = 0;
	dio->i_size = i_size_read(inode);
	dio->end_io = end_io;
	dio->error = 0;
	dio->flags = 0;

	dio->submit.iter = iter;
	if (is_sync_kiocb(iocb)) {
		dio->submit.waiter = current;
		dio->submit.cookie = BLK_QC_T_NONE;
		dio->submit.last_queue = NULL;
	}

	if (iov_iter_rw(iter) == READ) {
		if (pos >= dio->i_size)
			goto out_free_dio;

		if (iter->type == ITER_IOVEC)
			dio->flags |= IOMAP_DIO_DIRTY;
	} else {
		dio->flags |= IOMAP_DIO_WRITE;
		flags |= IOMAP_WRITE;
	}

	if (mapping->nrpages) {
		ret = filemap_write_and_wait_range(mapping, iocb->ki_pos, end);
		if (ret)
			goto out_free_dio;

		ret = invalidate_inode_pages2_range(mapping,
				iocb->ki_pos >> PAGE_SHIFT, end >> PAGE_SHIFT);
		WARN_ON_ONCE(ret);
		ret = 0;
	}

	inode_dio_begin(inode);

	blk_start_plug(&plug);
	do {
		ret = iomap_apply(inode, pos, count, flags, ops, dio,
				iomap_dio_actor);
		if (ret <= 0) {
			/* magic error code to fall back to buffered I/O */
			if (ret == -ENOTBLK)
				ret = 0;
			break;
		}
		pos += ret;
	} while ((count = iov_iter_count(iter)) > 0);
	blk_finish_plug(&plug);

	if (ret < 0)
		iomap_dio_set_error(dio, ret);

	if (ret >= 0 && iov_iter_rw(iter) == WRITE && !is_sync_kiocb(iocb) &&
			!inode->i_sb->s_dio_done_wq) {
		ret = sb_init_dio_done_wq(inode->i_sb);
		if (ret < 0)
			iomap_dio_set_error(dio, ret);
	}

	if (!atomic_dec_and_test(&dio->ref)) {
		if (!is_sync_kiocb(iocb))
			return -EIOCBQUEUED;

		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (!READ_ONCE(dio->submit.waiter))
				break;

			if (!(iocb->ki_flags & IOCB_HIPRI) ||
			    !dio->submit.last_queue ||
			    !blk_mq_poll(dio->submit.last_queue,
					 dio->submit.cookie))
				io_schedule();
		}
		__set_current_state(TASK_RUNNING);
	}

	/*
	 * Try again to invalidate clean pages which might have been cached by
	 * non-direct readahead, or faulted in by get_user_pages() if the source
	 * of the write was an mmap'ed region of the file we're writing.  Either
	 * one is a pretty crazy thing to do, so we don't support it 100%.  If
	 * this invalidation fails, tough, the write still worked...
	 */
	if (iov_iter_rw(iter) == WRITE && mapping->nrpages) {
		ret = invalidate_inode_pages2_range(mapping,
				iocb->ki_pos >> PAGE_SHIFT, end >> PAGE_SHIFT);
		WARN_ON_ONCE(ret);
	}

	return iomap_dio_complete(dio);

out_free_dio:
	kfree(dio);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_dio_rw);
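/*
 * Illustrative sketch (not part of this file): an O_DIRECT ->read_iter path
 * typically takes the inode lock shared and calls iomap_dio_rw(), optionally
 * passing an end_io callback for post-I/O filesystem work (NULL here).  The
 * example_* names are hypothetical; example_ops is the sketch shown after
 * iomap_apply() above.
 */
#if 0
static ssize_t example_dio_read(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	inode_lock_shared(inode);
	ret = iomap_dio_rw(iocb, to, &example_ops, NULL);
	inode_unlock_shared(inode);

	return ret;
}
#endif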