fs/iomap.c
/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (c) 2016 Christoph Hellwig.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/dax.h>
#include <linux/sched/signal.h>

#include "internal.h"

/*
 * Execute an iomap write on a segment of the mapping that spans a
 * contiguous range of pages that have identical block mapping state.
 *
 * This avoids the need to map pages individually, do individual allocations
 * for each page and, most importantly, avoids the need for filesystem
 * specific locking per page. Instead, all the operations are amortised over
 * the entire range of pages. It is assumed that the filesystems will lock
 * whatever resources they require in the iomap_begin call, and release them
 * in the iomap_end call.
 */
loff_t
iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
	const struct iomap_ops *ops, void *data, iomap_actor_t actor)
{
	struct iomap iomap = { 0 };
	loff_t written = 0, ret;

	/*
	 * Need to map a range from start position for length bytes. This can
	 * span multiple pages - it is only guaranteed to return a range of a
	 * single type of pages (e.g. all into a hole, all mapped or all
	 * unwritten). Failure at this point has nothing to undo.
	 *
	 * If allocation is required for this range, reserve the space now so
	 * that the allocation is guaranteed to succeed later on. Once we copy
	 * the data into the page cache pages, then we cannot fail otherwise we
	 * expose transient stale data. If the reserve fails, we can safely
	 * back out at this point as there is nothing to undo.
	 */
	ret = ops->iomap_begin(inode, pos, length, flags, &iomap);
	if (ret)
		return ret;
	if (WARN_ON(iomap.offset > pos))
		return -EIO;

	/*
	 * Cut down the length to the one actually provided by the filesystem,
	 * as it might not be able to give us the whole size that we requested.
	 */
	if (iomap.offset + iomap.length < pos + length)
		length = iomap.offset + iomap.length - pos;

	/*
	 * Now that we have guaranteed that the space allocation will succeed,
	 * we can do the copy-in page by page without having to worry about
	 * failures exposing transient data.
	 */
	written = actor(inode, pos, length, data, &iomap);

	/*
	 * Now the data has been copied, commit the range we've copied. This
	 * should not fail unless the filesystem has had a fatal error.
	 */
	if (ops->iomap_end) {
		ret = ops->iomap_end(inode, pos, length,
				     written > 0 ? written : 0,
				     flags, &iomap);
	}

	return written ? written : ret;
}

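/*
 * Example (editor's sketch, not part of the original file): a minimal iomap
 * actor that merely counts how many bytes of the request landed in each
 * extent.  It illustrates the contract iomap_apply() expects from an actor:
 * process at most @length bytes of the single extent described by @iomap and
 * return the number of bytes consumed (or a negative errno).
 */
static loff_t
example_count_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	loff_t *total = data;	/* accumulator passed through iomap_apply() */

	*total += length;	/* claim the whole extent as processed */
	return length;
}
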
static void
iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
{
	loff_t i_size = i_size_read(inode);

	/*
	 * Only truncate newly allocated pages beyond EOF, even if the
	 * write started inside the existing inode size.
	 */
	if (pos + len > i_size)
		truncate_pagecache_range(inode, max(pos, i_size), pos + len);
}

static int
iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, struct iomap *iomap)
{
	pgoff_t index = pos >> PAGE_SHIFT;
	struct page *page;
	int status = 0;

	BUG_ON(pos + len > iomap->offset + iomap->length);

	if (fatal_signal_pending(current))
		return -EINTR;

	page = grab_cache_page_write_begin(inode->i_mapping, index, flags);
	if (!page)
		return -ENOMEM;

	status = __block_write_begin_int(page, pos, len, NULL, iomap);
	if (unlikely(status)) {
		unlock_page(page);
		put_page(page);
		page = NULL;

		iomap_write_failed(inode, pos, len);
	}

	*pagep = page;
	return status;
}

static int
iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
		unsigned copied, struct page *page)
{
	int ret;

	ret = generic_write_end(NULL, inode->i_mapping, pos, len,
			copied, page, NULL);
	if (ret < len)
		iomap_write_failed(inode, pos, len);
	return ret;
}

static loff_t
iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct iov_iter *i = data;
	long status = 0;
	ssize_t written = 0;
	unsigned int flags = AOP_FLAG_NOFS;

	/*
	 * Copies from kernel address space cannot fail (NFSD is a big user).
	 */
	if (!iter_is_iovec(i))
		flags |= AOP_FLAG_UNINTERRUPTIBLE;

	do {
		struct page *page;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */
		size_t copied;		/* Bytes copied from user */

		offset = (pos & (PAGE_SIZE - 1));
		bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_count(i));
again:
		if (bytes > length)
			bytes = length;

		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * Not only is this an optimisation, but it is also required
		 * to check that the address is actually valid, when atomic
		 * usercopies are used, below.
		 */
		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
			status = -EFAULT;
			break;
		}

		status = iomap_write_begin(inode, pos, bytes, flags, &page,
				iomap);
		if (unlikely(status))
			break;

		if (mapping_writably_mapped(inode->i_mapping))
			flush_dcache_page(page);

		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);

		flush_dcache_page(page);

		status = iomap_write_end(inode, pos, bytes, copied, page);
		if (unlikely(status < 0))
			break;
		copied = status;

		cond_resched();

		iov_iter_advance(i, copied);
		if (unlikely(copied == 0)) {
			/*
			 * If we were unable to copy any data at all, we must
			 * fall back to a single segment length write.
			 *
			 * If we didn't fall back here, we could livelock
			 * because not all segments in the iov can be copied at
			 * once without a pagefault.
			 */
			bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_single_seg_count(i));
			goto again;
		}
		pos += copied;
		written += copied;
		length -= copied;

		balance_dirty_pages_ratelimited(inode->i_mapping);
	} while (iov_iter_count(i) && length);

	return written ? written : status;
}

ssize_t
iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, written = 0;

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter),
				IOMAP_WRITE, ops, iter, iomap_write_actor);
		if (ret <= 0)
			break;
		pos += ret;
		written += ret;
	}

	return written ? written : ret;
}
EXPORT_SYMBOL_GPL(iomap_file_buffered_write);

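/*
 * Hypothetical iomap_ops table used by the editor's example sketches in this
 * file; a real filesystem would define ->iomap_begin and ->iomap_end for it.
 */
extern const struct iomap_ops example_iomap_ops;

/*
 * Example (editor's sketch, not from the original file): a filesystem's
 * ->write_iter() method might drive the buffered write path above roughly
 * like this.  Real callers (XFS, for instance) take their own locks and may
 * perform additional checks around this call.
 */
static ssize_t
example_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret > 0)
		ret = iomap_file_buffered_write(iocb, from, &example_iomap_ops);
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
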
static struct page *
__iomap_read_page(struct inode *inode, loff_t offset)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = read_mapping_page(mapping, offset >> PAGE_SHIFT, NULL);
	if (IS_ERR(page))
		return page;
	if (!PageUptodate(page)) {
		put_page(page);
		return ERR_PTR(-EIO);
	}
	return page;
}

static loff_t
iomap_dirty_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	long status = 0;
	ssize_t written = 0;

	do {
		struct page *page, *rpage;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */

		offset = (pos & (PAGE_SIZE - 1));
		bytes = min_t(unsigned long, PAGE_SIZE - offset, length);

		rpage = __iomap_read_page(inode, pos);
		if (IS_ERR(rpage))
			return PTR_ERR(rpage);

		status = iomap_write_begin(inode, pos, bytes,
				AOP_FLAG_NOFS | AOP_FLAG_UNINTERRUPTIBLE,
				&page, iomap);
		put_page(rpage);
		if (unlikely(status))
			return status;

		WARN_ON_ONCE(!PageUptodate(page));

		status = iomap_write_end(inode, pos, bytes, bytes, page);
		if (unlikely(status <= 0)) {
			if (WARN_ON_ONCE(status == 0))
				return -EIO;
			return status;
		}

		cond_resched();

		pos += status;
		written += status;
		length -= status;

		balance_dirty_pages_ratelimited(inode->i_mapping);
	} while (length);

	return written;
}

int
iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
		const struct iomap_ops *ops)
{
	loff_t ret;

	while (len) {
		ret = iomap_apply(inode, pos, len, IOMAP_WRITE, ops, NULL,
				iomap_dirty_actor);
		if (ret <= 0)
			return ret;
		pos += ret;
		len -= ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_file_dirty);

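/*
 * Example (editor's sketch): iomap_file_dirty() reads each page in the range
 * back in and immediately "writes" it again in place, which marks it dirty
 * without changing its contents.  A copy-on-write filesystem can use it, for
 * example, to force pages over shared extents to be written out again so that
 * writeback allocates new blocks for them:
 *
 *	ret = iomap_file_dirty(inode, pos, len, &example_iomap_ops);
 *
 * using the hypothetical example_iomap_ops table declared above.
 */
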
static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
		unsigned bytes, struct iomap *iomap)
{
	struct page *page;
	int status;

	status = iomap_write_begin(inode, pos, bytes,
			AOP_FLAG_UNINTERRUPTIBLE | AOP_FLAG_NOFS, &page, iomap);
	if (status)
		return status;

	zero_user(page, offset, bytes);
	mark_page_accessed(page);

	return iomap_write_end(inode, pos, bytes, bytes, page);
}

static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes,
		struct iomap *iomap)
{
	sector_t sector = iomap->blkno +
		(((pos & ~(PAGE_SIZE - 1)) - iomap->offset) >> 9);

	return __dax_zero_page_range(iomap->bdev, sector, offset, bytes);
}

static loff_t
iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
		void *data, struct iomap *iomap)
{
	bool *did_zero = data;
	loff_t written = 0;
	int status;

	/* already zeroed?  we're done. */
	if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
		return count;

	do {
		unsigned offset, bytes;

		offset = pos & (PAGE_SIZE - 1); /* Within page */
		bytes = min_t(unsigned, PAGE_SIZE - offset, count);

		if (IS_DAX(inode))
			status = iomap_dax_zero(pos, offset, bytes, iomap);
		else
			status = iomap_zero(inode, pos, offset, bytes, iomap);
		if (status < 0)
			return status;

		pos += bytes;
		count -= bytes;
		written += bytes;
		if (did_zero)
			*did_zero = true;
	} while (count > 0);

	return written;
}

int
iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
		const struct iomap_ops *ops)
{
	loff_t ret;

	while (len > 0) {
		ret = iomap_apply(inode, pos, len, IOMAP_ZERO,
				ops, did_zero, iomap_zero_range_actor);
		if (ret <= 0)
			return ret;

		pos += ret;
		len -= ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_zero_range);

int
iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
		const struct iomap_ops *ops)
{
	unsigned int blocksize = i_blocksize(inode);
	unsigned int off = pos & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!off)
		return 0;
	return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
}
EXPORT_SYMBOL_GPL(iomap_truncate_page);

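/*
 * Example (editor's sketch): a truncate path could use the helper above to
 * zero the part of the last block that now sits beyond the new EOF, using
 * the hypothetical example_iomap_ops table.  Locking and the inode size
 * update are omitted here.
 */
static int
example_truncate_block(struct inode *inode, loff_t newsize)
{
	/* NULL: this caller does not care whether any zeroing happened */
	return iomap_truncate_page(inode, newsize, NULL, &example_iomap_ops);
}
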
static loff_t
iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct page *page = data;
	int ret;

	ret = __block_write_begin_int(page, pos, length, NULL, iomap);
	if (ret)
		return ret;

	block_commit_write(page, 0, length);
	return length;
}

int iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	unsigned long length;
	loff_t offset, size;
	ssize_t ret;

	lock_page(page);
	size = i_size_read(inode);
	if ((page->mapping != inode->i_mapping) ||
	    (page_offset(page) > size)) {
		/* We overload EFAULT to mean page got truncated */
		ret = -EFAULT;
		goto out_unlock;
	}

	/* page is wholly or partially inside EOF */
	if (((page->index + 1) << PAGE_SHIFT) > size)
		length = size & ~PAGE_MASK;
	else
		length = PAGE_SIZE;

	offset = page_offset(page);
	while (length > 0) {
		ret = iomap_apply(inode, offset, length,
				IOMAP_WRITE | IOMAP_FAULT, ops, page,
				iomap_page_mkwrite_actor);
		if (unlikely(ret <= 0))
			goto out_unlock;
		offset += ret;
		length -= ret;
	}

	set_page_dirty(page);
	wait_for_stable_page(page);
	return 0;
out_unlock:
	unlock_page(page);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_page_mkwrite);

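/*
 * Example (editor's sketch): hooking iomap_page_mkwrite() into a
 * vm_operations_struct.  block_page_mkwrite_return() converts the errno
 * style return value into the VM_FAULT_* code the fault path expects; the
 * freeze protection and timestamp update mirror what other ->page_mkwrite
 * handlers do.  example_iomap_ops is the hypothetical table from above.
 */
static int
example_filemap_page_mkwrite(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	int ret;

	sb_start_pagefault(inode->i_sb);
	file_update_time(vmf->vma->vm_file);

	ret = iomap_page_mkwrite(vmf, &example_iomap_ops);
	ret = block_page_mkwrite_return(ret);

	sb_end_pagefault(inode->i_sb);
	return ret;
}
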
struct fiemap_ctx {
	struct fiemap_extent_info *fi;
	struct iomap prev;
};

static int iomap_to_fiemap(struct fiemap_extent_info *fi,
		struct iomap *iomap, u32 flags)
{
	switch (iomap->type) {
	case IOMAP_HOLE:
		/* skip holes */
		return 0;
	case IOMAP_DELALLOC:
		flags |= FIEMAP_EXTENT_DELALLOC | FIEMAP_EXTENT_UNKNOWN;
		break;
	case IOMAP_UNWRITTEN:
		flags |= FIEMAP_EXTENT_UNWRITTEN;
		break;
	case IOMAP_MAPPED:
		break;
	}

	if (iomap->flags & IOMAP_F_MERGED)
		flags |= FIEMAP_EXTENT_MERGED;
	if (iomap->flags & IOMAP_F_SHARED)
		flags |= FIEMAP_EXTENT_SHARED;

	return fiemap_fill_next_extent(fi, iomap->offset,
			iomap->blkno != IOMAP_NULL_BLOCK ? iomap->blkno << 9 : 0,
			iomap->length, flags);
}

static loff_t
iomap_fiemap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct fiemap_ctx *ctx = data;
	loff_t ret = length;

	if (iomap->type == IOMAP_HOLE)
		return length;

	ret = iomap_to_fiemap(ctx->fi, &ctx->prev, 0);
	ctx->prev = *iomap;
	switch (ret) {
	case 0:		/* success */
		return length;
	case 1:		/* extent array full */
		return 0;
	default:
		return ret;
	}
}

int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
		loff_t start, loff_t len, const struct iomap_ops *ops)
{
	struct fiemap_ctx ctx;
	loff_t ret;

	memset(&ctx, 0, sizeof(ctx));
	ctx.fi = fi;
	ctx.prev.type = IOMAP_HOLE;

	ret = fiemap_check_flags(fi, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	if (fi->fi_flags & FIEMAP_FLAG_SYNC) {
		ret = filemap_write_and_wait(inode->i_mapping);
		if (ret)
			return ret;
	}

	while (len > 0) {
		ret = iomap_apply(inode, start, len, IOMAP_REPORT, ops, &ctx,
				iomap_fiemap_actor);
		/* inode with no (attribute) mapping will give ENOENT */
		if (ret == -ENOENT)
			break;
		if (ret < 0)
			return ret;
		if (ret == 0)
			break;

		start += ret;
		len -= ret;
	}

	if (ctx.prev.type != IOMAP_HOLE) {
		ret = iomap_to_fiemap(fi, &ctx.prev, FIEMAP_EXTENT_LAST);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iomap_fiemap);

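/*
 * Example (editor's sketch): an inode_operations ->fiemap method can simply
 * forward to iomap_fiemap() with the filesystem's iomap_ops table (here the
 * hypothetical example_iomap_ops).
 */
static int
example_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	return iomap_fiemap(inode, fieinfo, start, len, &example_iomap_ops);
}
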
/*
 * Private flags for iomap_dio, must not overlap with the public ones in
 * iomap.h:
 */
#define IOMAP_DIO_WRITE		(1 << 30)
#define IOMAP_DIO_DIRTY		(1 << 31)

struct iomap_dio {
	struct kiocb		*iocb;
	iomap_dio_end_io_t	*end_io;
	loff_t			i_size;
	loff_t			size;
	atomic_t		ref;
	unsigned		flags;
	int			error;

	union {
		/* used during submission and for synchronous completion: */
		struct {
			struct iov_iter		*iter;
			struct task_struct	*waiter;
			struct request_queue	*last_queue;
			blk_qc_t		cookie;
		} submit;

		/* used for aio completion: */
		struct {
			struct work_struct	work;
		} aio;
	};
};

static ssize_t iomap_dio_complete(struct iomap_dio *dio)
{
	struct kiocb *iocb = dio->iocb;
	ssize_t ret;

	if (dio->end_io) {
		ret = dio->end_io(iocb,
				dio->error ? dio->error : dio->size,
				dio->flags);
	} else {
		ret = dio->error;
	}

	if (likely(!ret)) {
		ret = dio->size;
		/* check for short read */
		if (iocb->ki_pos + ret > dio->i_size &&
		    !(dio->flags & IOMAP_DIO_WRITE))
			ret = dio->i_size - iocb->ki_pos;
		iocb->ki_pos += ret;
	}

	inode_dio_end(file_inode(iocb->ki_filp));
	kfree(dio);

	return ret;
}

static void iomap_dio_complete_work(struct work_struct *work)
{
	struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
	struct kiocb *iocb = dio->iocb;
	bool is_write = (dio->flags & IOMAP_DIO_WRITE);
	ssize_t ret;

	ret = iomap_dio_complete(dio);
	if (is_write && ret > 0)
		ret = generic_write_sync(iocb, ret);
	iocb->ki_complete(iocb, ret, 0);
}

/*
 * Set an error in the dio if none is set yet.  We have to use cmpxchg
 * as the submission context and the completion context(s) can race to
 * update the error.
 */
static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
{
	cmpxchg(&dio->error, 0, ret);
}

static void iomap_dio_bio_end_io(struct bio *bio)
{
	struct iomap_dio *dio = bio->bi_private;
	bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);

	if (bio->bi_error)
		iomap_dio_set_error(dio, bio->bi_error);

	if (atomic_dec_and_test(&dio->ref)) {
		if (is_sync_kiocb(dio->iocb)) {
			struct task_struct *waiter = dio->submit.waiter;

			WRITE_ONCE(dio->submit.waiter, NULL);
			wake_up_process(waiter);
		} else if (dio->flags & IOMAP_DIO_WRITE) {
			struct inode *inode = file_inode(dio->iocb->ki_filp);

			INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
			queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
		} else {
			iomap_dio_complete_work(&dio->aio.work);
		}
	}

	if (should_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		struct bio_vec *bvec;
		int i;

		bio_for_each_segment_all(bvec, bio, i)
			put_page(bvec->bv_page);
		bio_put(bio);
	}
}

static blk_qc_t
iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
		unsigned len)
{
	struct page *page = ZERO_PAGE(0);
	struct bio *bio;

	bio = bio_alloc(GFP_KERNEL, 1);
	bio->bi_bdev = iomap->bdev;
	bio->bi_iter.bi_sector =
		iomap->blkno + ((pos - iomap->offset) >> 9);
	bio->bi_private = dio;
	bio->bi_end_io = iomap_dio_bio_end_io;

	get_page(page);
	if (bio_add_page(bio, page, len, 0) != len)
		BUG();
	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC | REQ_IDLE);

	atomic_inc(&dio->ref);
	return submit_bio(bio);
}

static loff_t
iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap)
{
	struct iomap_dio *dio = data;
	unsigned int blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev));
	unsigned int fs_block_size = i_blocksize(inode), pad;
	unsigned int align = iov_iter_alignment(dio->submit.iter);
	struct iov_iter iter;
	struct bio *bio;
	bool need_zeroout = false;
	int nr_pages, ret;

	if ((pos | length | align) & ((1 << blkbits) - 1))
		return -EINVAL;

	switch (iomap->type) {
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
			return -EIO;
		/*FALLTHRU*/
	case IOMAP_UNWRITTEN:
		if (!(dio->flags & IOMAP_DIO_WRITE)) {
			iov_iter_zero(length, dio->submit.iter);
			dio->size += length;
			return length;
		}
		dio->flags |= IOMAP_DIO_UNWRITTEN;
		need_zeroout = true;
		break;
	case IOMAP_MAPPED:
		if (iomap->flags & IOMAP_F_SHARED)
			dio->flags |= IOMAP_DIO_COW;
		if (iomap->flags & IOMAP_F_NEW)
			need_zeroout = true;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EIO;
	}

	/*
	 * Operate on a partial iter trimmed to the extent we were called for.
	 * We'll update the iter in the dio once we're done with this extent.
	 */
	iter = *dio->submit.iter;
	iov_iter_truncate(&iter, length);

	nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);
	if (nr_pages <= 0)
		return nr_pages;

	if (need_zeroout) {
		/* zero out from the start of the block to the write offset */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(dio, iomap, pos - pad, pad);
	}

	do {
		if (dio->error)
			return 0;

		bio = bio_alloc(GFP_KERNEL, nr_pages);
		bio->bi_bdev = iomap->bdev;
		bio->bi_iter.bi_sector =
			iomap->blkno + ((pos - iomap->offset) >> 9);
		bio->bi_private = dio;
		bio->bi_end_io = iomap_dio_bio_end_io;

		ret = bio_iov_iter_get_pages(bio, &iter);
		if (unlikely(ret)) {
			bio_put(bio);
			return ret;
		}

		if (dio->flags & IOMAP_DIO_WRITE) {
			bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC | REQ_IDLE);
			task_io_account_write(bio->bi_iter.bi_size);
		} else {
			bio_set_op_attrs(bio, REQ_OP_READ, 0);
			if (dio->flags & IOMAP_DIO_DIRTY)
				bio_set_pages_dirty(bio);
		}

		dio->size += bio->bi_iter.bi_size;
		pos += bio->bi_iter.bi_size;

		nr_pages = iov_iter_npages(&iter, BIO_MAX_PAGES);

		atomic_inc(&dio->ref);

		dio->submit.last_queue = bdev_get_queue(iomap->bdev);
		dio->submit.cookie = submit_bio(bio);
	} while (nr_pages);

	if (need_zeroout) {
		/* zero out from the end of the write to the end of the block */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(dio, iomap, pos, fs_block_size - pad);
	}

	iov_iter_advance(dio->submit.iter, length);
	return length;
}

ssize_t
iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops, iomap_dio_end_io_t end_io)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = file_inode(iocb->ki_filp);
	size_t count = iov_iter_count(iter);
	loff_t pos = iocb->ki_pos, end = iocb->ki_pos + count - 1, ret = 0;
	unsigned int flags = IOMAP_DIRECT;
	struct blk_plug plug;
	struct iomap_dio *dio;

	lockdep_assert_held(&inode->i_rwsem);

	if (!count)
		return 0;

	dio = kmalloc(sizeof(*dio), GFP_KERNEL);
	if (!dio)
		return -ENOMEM;

	dio->iocb = iocb;
	atomic_set(&dio->ref, 1);
	dio->size = 0;
	dio->i_size = i_size_read(inode);
	dio->end_io = end_io;
	dio->error = 0;
	dio->flags = 0;

	dio->submit.iter = iter;
	if (is_sync_kiocb(iocb)) {
		dio->submit.waiter = current;
		dio->submit.cookie = BLK_QC_T_NONE;
		dio->submit.last_queue = NULL;
	}

	if (iov_iter_rw(iter) == READ) {
		if (pos >= dio->i_size)
			goto out_free_dio;

		if (iter->type == ITER_IOVEC)
			dio->flags |= IOMAP_DIO_DIRTY;
	} else {
		dio->flags |= IOMAP_DIO_WRITE;
		flags |= IOMAP_WRITE;
	}

	if (mapping->nrpages) {
		ret = filemap_write_and_wait_range(mapping, iocb->ki_pos, end);
		if (ret)
			goto out_free_dio;

		ret = invalidate_inode_pages2_range(mapping,
				iocb->ki_pos >> PAGE_SHIFT, end >> PAGE_SHIFT);
		WARN_ON_ONCE(ret);
		ret = 0;
	}

	inode_dio_begin(inode);

	blk_start_plug(&plug);
	do {
		ret = iomap_apply(inode, pos, count, flags, ops, dio,
				iomap_dio_actor);
		if (ret <= 0) {
			/* magic error code to fall back to buffered I/O */
			if (ret == -ENOTBLK)
				ret = 0;
			break;
		}
		pos += ret;
	} while ((count = iov_iter_count(iter)) > 0);
	blk_finish_plug(&plug);

	if (ret < 0)
		iomap_dio_set_error(dio, ret);

	if (ret >= 0 && iov_iter_rw(iter) == WRITE && !is_sync_kiocb(iocb) &&
			!inode->i_sb->s_dio_done_wq) {
		ret = sb_init_dio_done_wq(inode->i_sb);
		if (ret < 0)
			iomap_dio_set_error(dio, ret);
	}

	if (!atomic_dec_and_test(&dio->ref)) {
		if (!is_sync_kiocb(iocb))
			return -EIOCBQUEUED;

		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (!READ_ONCE(dio->submit.waiter))
				break;

			if (!(iocb->ki_flags & IOCB_HIPRI) ||
			    !dio->submit.last_queue ||
			    !blk_mq_poll(dio->submit.last_queue,
					 dio->submit.cookie))
				io_schedule();
		}
		__set_current_state(TASK_RUNNING);
	}

	/*
	 * Try again to invalidate clean pages which might have been cached by
	 * non-direct readahead, or faulted in by get_user_pages() if the source
	 * of the write was an mmap'ed region of the file we're writing.  Either
	 * one is a pretty crazy thing to do, so we don't support it 100%.  If
	 * this invalidation fails, tough, the write still worked...
	 */
	if (iov_iter_rw(iter) == WRITE && mapping->nrpages) {
		ret = invalidate_inode_pages2_range(mapping,
				iocb->ki_pos >> PAGE_SHIFT, end >> PAGE_SHIFT);
		WARN_ON_ONCE(ret);
	}

	return iomap_dio_complete(dio);

out_free_dio:
	kfree(dio);
	return ret;
}
EXPORT_SYMBOL_GPL(iomap_dio_rw);
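
/*
 * Example (editor's sketch): a direct I/O read path built on iomap_dio_rw().
 * The caller must hold i_rwsem (note the lockdep_assert_held() above); a
 * shared lock is sufficient for reads.  example_iomap_ops is the hypothetical
 * ops table used by the earlier sketches, and the NULL argument means no
 * filesystem end_io callback is needed.
 */
static ssize_t
example_file_dio_read(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	if (!iov_iter_count(to))
		return 0;	/* nothing to do for a zero byte read */

	inode_lock_shared(inode);
	ret = iomap_dio_rw(iocb, to, &example_iomap_ops, NULL);
	inode_unlock_shared(inode);

	return ret;
}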