// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/blk-mq.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/fsnotify.h>
#include <linux/poll.h>
#include <linux/nospec.h>
#include <linux/compat.h>
#include <linux/io_uring/cmd.h>
#include <linux/indirect_call_wrapper.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "opdef.h"
#include "kbuf.h"
#include "alloc_cache.h"
#include "rsrc.h"
#include "poll.h"
#include "rw.h"

struct io_rw {
	/* NOTE: kiocb has the file as the first member, so don't do it here */
	struct kiocb			kiocb;
	u64				addr;
	u32				len;
	rwf_t				flags;
};

static bool io_file_supports_nowait(struct io_kiocb *req, __poll_t mask)
{
	/* If FMODE_NOWAIT is set for a file, we're golden */
	if (req->flags & REQ_F_SUPPORT_NOWAIT)
		return true;
	/* No FMODE_NOWAIT, if we can poll, check the status */
	if (io_file_can_poll(req)) {
		struct poll_table_struct pt = { ._key = mask };

		return vfs_poll(req->file, &pt) & mask;
	}
	/* No FMODE_NOWAIT support, and file isn't pollable. Tough luck. */
	return false;
}

#ifdef CONFIG_COMPAT
static int io_iov_compat_buffer_select_prep(struct io_rw *rw)
{
	struct compat_iovec __user *uiov;
	compat_ssize_t clen;

	uiov = u64_to_user_ptr(rw->addr);
	if (!access_ok(uiov, sizeof(*uiov)))
		return -EFAULT;
	if (__get_user(clen, &uiov->iov_len))
		return -EFAULT;
	if (clen < 0)
		return -EINVAL;

	rw->len = clen;
	return 0;
}
#endif

static int io_iov_buffer_select_prep(struct io_kiocb *req)
{
	struct iovec __user *uiov;
	struct iovec iov;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

	if (rw->len != 1)
		return -EINVAL;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		return io_iov_compat_buffer_select_prep(rw);
#endif

	uiov = u64_to_user_ptr(rw->addr);
	if (copy_from_user(&iov, uiov, sizeof(*uiov)))
		return -EFAULT;
	rw->len = iov.iov_len;
	return 0;
}
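
/*
 * Import the user memory described by the request into io->iter. For
 * non-vectored requests (and for provided buffers) this resolves a single
 * user address, possibly selecting a buffer from the buffer group first;
 * for vectored requests it imports the iovec array, reusing a previously
 * allocated iovec cached in the async data when one is available.
 */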
static int __io_import_iovec(int ddir, struct io_kiocb *req,
			     struct io_async_rw *io,
			     unsigned int issue_flags)
{
	const struct io_issue_def *def = &io_issue_defs[req->opcode];
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct iovec *iov;
	void __user *buf;
	int nr_segs, ret;
	size_t sqe_len;

	buf = u64_to_user_ptr(rw->addr);
	sqe_len = rw->len;

	if (!def->vectored || req->flags & REQ_F_BUFFER_SELECT) {
		if (io_do_buffer_select(req)) {
			buf = io_buffer_select(req, &sqe_len, issue_flags);
			if (!buf)
				return -ENOBUFS;
			rw->addr = (unsigned long) buf;
			rw->len = sqe_len;
		}

		return import_ubuf(ddir, buf, sqe_len, &io->iter);
	}

	if (io->free_iovec) {
		nr_segs = io->free_iov_nr;
		iov = io->free_iovec;
	} else {
		iov = &io->fast_iov;
		nr_segs = 1;
	}
	ret = __import_iovec(ddir, buf, sqe_len, nr_segs, &iov, &io->iter,
				req->ctx->compat);
	if (unlikely(ret < 0))
		return ret;
	if (iov) {
		req->flags |= REQ_F_NEED_CLEANUP;
		io->free_iov_nr = io->iter.nr_segs;
		kfree(io->free_iovec);
		io->free_iovec = iov;
	}
	return 0;
}

static inline int io_import_iovec(int rw, struct io_kiocb *req,
				  struct io_async_rw *io,
				  unsigned int issue_flags)
{
	int ret;

	ret = __io_import_iovec(rw, req, io, issue_flags);
	if (unlikely(ret < 0))
		return ret;

	iov_iter_save_state(&io->iter, &io->iter_state);
	return 0;
}

static void io_rw_iovec_free(struct io_async_rw *rw)
{
	if (rw->free_iovec) {
		kfree(rw->free_iovec);
		rw->free_iov_nr = 0;
		rw->free_iovec = NULL;
	}
}
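
/*
 * Recycle the async data into the per-ring rw cache when possible. If the
 * ring isn't locked (io-wq offload), only the iovec is freed; a cached
 * iovec is poisoned for KASAN until the entry is reused.
 */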
static void io_rw_recycle(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_async_rw *rw = req->async_data;
	struct iovec *iov;

	if (unlikely(issue_flags & IO_URING_F_UNLOCKED)) {
		io_rw_iovec_free(rw);
		return;
	}
	iov = rw->free_iovec;
	if (io_alloc_cache_put(&req->ctx->rw_cache, rw)) {
		if (iov)
			kasan_mempool_poison_object(iov);
		req->async_data = NULL;
		req->flags &= ~REQ_F_ASYNC_DATA;
	}
}

static void io_req_rw_cleanup(struct io_kiocb *req, unsigned int issue_flags)
{
	/*
	 * Disable quick recycling for anything that's gone through io-wq.
	 * In theory, this should be fine to cleanup. However, some read or
	 * write iter handling touches the iovec AFTER having called into the
	 * handler, eg to reexpand or revert. This means we can have:
	 *
	 * task			io-wq
	 *   issue
	 *     punt to io-wq
	 *			issue
	 *			  blkdev_write_iter()
	 *			    ->ki_complete()
	 *			      io_complete_rw()
	 *			        queue tw complete
	 *  run tw
	 *    req_rw_cleanup
	 *			iov_iter_count() <- look at iov_iter again
	 *
	 * which can lead to a UAF. This is only possible for io-wq offload
	 * as the cleanup can run in parallel. As io-wq is not the fast path,
	 * just leave cleanup to the end.
	 *
	 * This is really a bug in the core code that does this, any issue
	 * path should assume that a successful (or -EIOCBQUEUED) return can
	 * mean that the underlying data can be gone at any time. But that
	 * should be fixed separately, and then this check could be killed.
	 */
	if (!(req->flags & REQ_F_REFCOUNT)) {
		req->flags &= ~REQ_F_NEED_CLEANUP;
		io_rw_recycle(req, issue_flags);
	}
}
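
/*
 * Allocate (or pull from the ring's rw cache) the io_async_rw data for this
 * request. A cached entry may still hold an iovec allocation; it is
 * unpoisoned and kept so a later vectored import can reuse it.
 */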
static int io_rw_alloc_async(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_async_rw *rw;

	rw = io_alloc_cache_get(&ctx->rw_cache);
	if (rw) {
		if (rw->free_iovec) {
			kasan_mempool_unpoison_object(rw->free_iovec,
				rw->free_iov_nr * sizeof(struct iovec));
			req->flags |= REQ_F_NEED_CLEANUP;
		}
		req->flags |= REQ_F_ASYNC_DATA;
		req->async_data = rw;
		goto done;
	}

	if (!io_alloc_async_data(req)) {
		rw = req->async_data;
		rw->free_iovec = NULL;
		rw->free_iov_nr = 0;
done:
		rw->bytes_done = 0;
		return 0;
	}

	return -ENOMEM;
}

static int io_prep_rw_setup(struct io_kiocb *req, int ddir, bool do_import)
{
	struct io_async_rw *rw;
	int ret;

	if (io_rw_alloc_async(req))
		return -ENOMEM;

	if (!do_import || io_do_buffer_select(req))
		return 0;

	rw = req->async_data;
	ret = io_import_iovec(ddir, req, rw, 0);
	if (unlikely(ret < 0))
		return ret;

	iov_iter_save_state(&rw->iter, &rw->iter_state);
	return 0;
}
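
/*
 * Common SQE prep for all read/write variants: file position, ioprio,
 * buffer address/length and rw_flags are read from the SQE here, before
 * the per-variant setup done by io_prep_rw_setup().
 */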
static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
		      int ddir, bool do_import)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	unsigned ioprio;
	int ret;

	rw->kiocb.ki_pos = READ_ONCE(sqe->off);
	/* used for fixed read/write too - just read unconditionally */
	req->buf_index = READ_ONCE(sqe->buf_index);

	ioprio = READ_ONCE(sqe->ioprio);
	if (ioprio) {
		ret = ioprio_check_cap(ioprio);
		if (ret)
			return ret;

		rw->kiocb.ki_ioprio = ioprio;
	} else {
		rw->kiocb.ki_ioprio = get_current_ioprio();
	}
	rw->kiocb.dio_complete = NULL;

	rw->addr = READ_ONCE(sqe->addr);
	rw->len = READ_ONCE(sqe->len);
	rw->flags = READ_ONCE(sqe->rw_flags);
	return io_prep_rw_setup(req, ddir, do_import);
}

int io_prep_read(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return io_prep_rw(req, sqe, ITER_DEST, true);
}

int io_prep_write(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return io_prep_rw(req, sqe, ITER_SOURCE, true);
}

static int io_prep_rwv(struct io_kiocb *req, const struct io_uring_sqe *sqe,
		       int ddir)
{
	const bool do_import = !(req->flags & REQ_F_BUFFER_SELECT);
	int ret;

	ret = io_prep_rw(req, sqe, ddir, do_import);
	if (unlikely(ret))
		return ret;
	if (do_import)
		return 0;

	/*
	 * Have to do this validation here, as by the time this runs in
	 * io_read() rw->len might have changed due to buffer selection.
	 */
	return io_iov_buffer_select_prep(req);
}

int io_prep_readv(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return io_prep_rwv(req, sqe, ITER_DEST);
}

int io_prep_writev(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return io_prep_rwv(req, sqe, ITER_SOURCE);
}

static int io_prep_rw_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe,
			    int ddir)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_rsrc_node *node;
	struct io_async_rw *io;
	int ret;

	ret = io_prep_rw(req, sqe, ddir, false);
	if (unlikely(ret))
		return ret;

	node = io_rsrc_node_lookup(&ctx->buf_table, req->buf_index);
	if (!node)
		return -EFAULT;
	io_req_assign_buf_node(req, node);

	io = req->async_data;
	ret = io_import_fixed(ddir, &io->iter, node->buf, rw->addr, rw->len);
	iov_iter_save_state(&io->iter, &io->iter_state);
	return ret;
}

int io_prep_read_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return io_prep_rw_fixed(req, sqe, ITER_DEST);
}

int io_prep_write_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return io_prep_rw_fixed(req, sqe, ITER_SOURCE);
}

/*
 * Multishot read is prepared just like a normal read/write request; the
 * only difference is that we set the MULTISHOT flag.
 */
int io_read_mshot_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	int ret;

	/* must be used with provided buffers */
	if (!(req->flags & REQ_F_BUFFER_SELECT))
		return -EINVAL;

	ret = io_prep_rw(req, sqe, ITER_DEST, false);
	if (unlikely(ret))
		return ret;

	if (rw->addr || rw->len)
		return -EINVAL;

	req->flags |= REQ_F_APOLL_MULTISHOT;
	return 0;
}

void io_readv_writev_cleanup(struct io_kiocb *req)
{
	io_rw_iovec_free(req->async_data);
}
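
/*
 * Resolve the file position to use for this request: an explicit offset
 * from the SQE, the current file position (REQ_F_CUR_POS) for regular
 * files when -1 was passed, or no position at all for stream-like files.
 */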
static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

	if (rw->kiocb.ki_pos != -1)
		return &rw->kiocb.ki_pos;

	if (!(req->file->f_mode & FMODE_STREAM)) {
		req->flags |= REQ_F_CUR_POS;
		rw->kiocb.ki_pos = req->file->f_pos;
		return &rw->kiocb.ki_pos;
	}

	rw->kiocb.ki_pos = 0;
	return NULL;
}
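
/*
 * Reissue support: when a request that got -EAGAIN after submission can
 * safely be retried (block or regular files, still in the submitting task
 * context), the iter state is restored and the request is resubmitted
 * instead of being failed.
 */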
#ifdef CONFIG_BLOCK
static void io_resubmit_prep(struct io_kiocb *req)
{
	struct io_async_rw *io = req->async_data;

	iov_iter_restore(&io->iter, &io->iter_state);
}

static bool io_rw_should_reissue(struct io_kiocb *req)
{
	umode_t mode = file_inode(req->file)->i_mode;
	struct io_ring_ctx *ctx = req->ctx;

	if (!S_ISBLK(mode) && !S_ISREG(mode))
		return false;
	if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
	    !(ctx->flags & IORING_SETUP_IOPOLL)))
		return false;
	/*
	 * If ref is dying, we might be running poll reap from the exit work.
	 * Don't attempt to reissue from that path, just let it fail with
	 * -EAGAIN.
	 */
	if (percpu_ref_is_dying(&ctx->refs))
		return false;
	/*
	 * Play it safe and assume not safe to re-import and reissue if we're
	 * not in the original thread group (or in task context).
	 */
	if (!same_thread_group(req->tctx->task, current) || !in_task())
		return false;
	return true;
}
#else
static void io_resubmit_prep(struct io_kiocb *req)
{
}
static bool io_rw_should_reissue(struct io_kiocb *req)
{
	return false;
}
#endif

static void io_req_end_write(struct io_kiocb *req)
{
	if (req->flags & REQ_F_ISREG) {
		struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

		kiocb_end_write(&rw->kiocb);
	}
}

/*
 * Trigger the notifications after having done some IO, and finish the write
 * accounting, if any.
 */
static void io_req_io_end(struct io_kiocb *req)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

	if (rw->kiocb.ki_flags & IOCB_WRITE) {
		io_req_end_write(req);
		fsnotify_modify(req->file);
	} else {
		fsnotify_access(req->file);
	}
}
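
/*
 * Common completion handling: catch short or failed IO. Returns true if
 * the request is being reissued, in which case the caller must not post a
 * completion for it.
 */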
static bool __io_complete_rw_common(struct io_kiocb *req, long res)
{
	if (unlikely(res != req->cqe.res)) {
		if (res == -EAGAIN && io_rw_should_reissue(req)) {
			/*
			 * Reissue will start accounting again, finish the
			 * current cycle.
			 */
			io_req_io_end(req);
			req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE;
			return true;
		}
		req_set_fail(req);
		req->cqe.res = res;
	}
	return false;
}

static inline int io_fixup_rw_res(struct io_kiocb *req, long res)
{
	struct io_async_rw *io = req->async_data;

	/* add previously done IO, if any */
	if (req_has_async_data(req) && io->bytes_done > 0) {
		if (res < 0)
			res = io->bytes_done;
		else
			res += io->bytes_done;
	}
	return res;
}

void io_req_rw_complete(struct io_kiocb *req, struct io_tw_state *ts)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct kiocb *kiocb = &rw->kiocb;

	if ((kiocb->ki_flags & IOCB_DIO_CALLER_COMP) && kiocb->dio_complete) {
		long res = kiocb->dio_complete(rw->kiocb.private);

		io_req_set_res(req, io_fixup_rw_res(req, res), 0);
	}

	io_req_io_end(req);

	if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING))
		req->cqe.flags |= io_put_kbuf(req, req->cqe.res, 0);

	io_req_rw_cleanup(req, 0);
	io_req_task_complete(req, ts);
}
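
/*
 * ->ki_complete callback for regular (non-IOPOLL) requests: fix up the
 * result if needed and punt the final completion to task_work.
 */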
static void io_complete_rw(struct kiocb *kiocb, long res)
{
	struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
	struct io_kiocb *req = cmd_to_io_kiocb(rw);

	if (!kiocb->dio_complete || !(kiocb->ki_flags & IOCB_DIO_CALLER_COMP)) {
		if (__io_complete_rw_common(req, res))
			return;
		io_req_set_res(req, io_fixup_rw_res(req, res), 0);
	}
	req->io_task_work.func = io_req_rw_complete;
	__io_req_task_work_add(req, IOU_F_TWQ_LAZY_WAKE);
}

static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
{
	struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
	struct io_kiocb *req = cmd_to_io_kiocb(rw);

	if (kiocb->ki_flags & IOCB_WRITE)
		io_req_end_write(req);
	if (unlikely(res != req->cqe.res)) {
		if (res == -EAGAIN && io_rw_should_reissue(req)) {
			req->flags |= REQ_F_REISSUE | REQ_F_BL_NO_RECYCLE;
			return;
		}
		req->cqe.res = res;
	}

	/* order with io_iopoll_complete() checking ->iopoll_completed */
	smp_store_release(&req->iopoll_completed, 1);
}

static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
{
	/* IO was queued async, completion will happen later */
	if (ret == -EIOCBQUEUED)
		return;

	/* transform internal restart error codes */
	if (unlikely(ret < 0)) {
		switch (ret) {
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
		case -ERESTARTNOHAND:
		case -ERESTART_RESTARTBLOCK:
			/*
			 * We can't just restart the syscall, since previously
			 * submitted sqes may already be in progress. Just fail
			 * this IO with EINTR.
			 */
			ret = -EINTR;
			break;
		}
	}

	INDIRECT_CALL_2(kiocb->ki_complete, io_complete_rw_iopoll,
			io_complete_rw, kiocb, ret);
}
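
/*
 * Finish an inline (submission path) completion, or hand the result to
 * io_rw_done() for async completion. Also handles resubmission of requests
 * flagged with REQ_F_REISSUE.
 */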
static int kiocb_done(struct io_kiocb *req, ssize_t ret,
		      unsigned int issue_flags)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	unsigned final_ret = io_fixup_rw_res(req, ret);

	if (ret >= 0 && req->flags & REQ_F_CUR_POS)
		req->file->f_pos = rw->kiocb.ki_pos;
	if (ret >= 0 && (rw->kiocb.ki_complete == io_complete_rw)) {
		if (!__io_complete_rw_common(req, ret)) {
			/*
			 * Safe to call io_end from here as we're inline
			 * from the submission path.
			 */
			io_req_io_end(req);
			io_req_set_res(req, final_ret,
				       io_put_kbuf(req, ret, issue_flags));
			io_req_rw_cleanup(req, issue_flags);
			return IOU_OK;
		}
	} else {
		io_rw_done(&rw->kiocb, ret);
	}

	if (req->flags & REQ_F_REISSUE) {
		req->flags &= ~REQ_F_REISSUE;
		io_resubmit_prep(req);
		return -EAGAIN;
	}
	return IOU_ISSUE_SKIP_COMPLETE;
}

static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
{
	return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
}

/*
 * For files that don't have ->read_iter() and ->write_iter(), handle them
 * by looping over ->read() or ->write() manually.
 */
static ssize_t loop_rw_iter(int ddir, struct io_rw *rw, struct iov_iter *iter)
{
	struct kiocb *kiocb = &rw->kiocb;
	struct file *file = kiocb->ki_filp;
	ssize_t ret = 0;
	loff_t *ppos;

	/*
	 * Don't support polled IO through this interface, and we can't
	 * support non-blocking either. For the latter, this just causes
	 * the kiocb to be handled from an async context.
	 */
	if (kiocb->ki_flags & IOCB_HIPRI)
		return -EOPNOTSUPP;
	if ((kiocb->ki_flags & IOCB_NOWAIT) &&
	    !(kiocb->ki_filp->f_flags & O_NONBLOCK))
		return -EAGAIN;

	ppos = io_kiocb_ppos(kiocb);

	while (iov_iter_count(iter)) {
		void __user *addr;
		size_t len;
		ssize_t nr;

		if (iter_is_ubuf(iter)) {
			addr = iter->ubuf + iter->iov_offset;
			len = iov_iter_count(iter);
		} else if (!iov_iter_is_bvec(iter)) {
			addr = iter_iov_addr(iter);
			len = iter_iov_len(iter);
		} else {
			addr = u64_to_user_ptr(rw->addr);
			len = rw->len;
		}

		if (ddir == READ)
			nr = file->f_op->read(file, addr, len, ppos);
		else
			nr = file->f_op->write(file, addr, len, ppos);

		if (nr < 0) {
			if (!ret)
				ret = nr;
			break;
		}
		ret += nr;
		if (!iov_iter_is_bvec(iter)) {
			iov_iter_advance(iter, nr);
		} else {
			rw->addr += nr;
			rw->len -= nr;
			if (!rw->len)
				break;
		}
		if (nr != len)
			break;
	}

	return ret;
}

/*
 * This is our waitqueue callback handler, registered through __folio_lock_async()
 * when we initially tried to do the IO with the iocb and armed our waitqueue.
 * This gets called when the page is unlocked, and we generally expect that to
 * happen when the page IO is completed and the page is now uptodate. This will
 * queue a task_work based retry of the operation, attempting to copy the data
 * again. If the latter fails because the page was NOT uptodate, then we will
 * do a thread based blocking retry of the operation. That's the unexpected
 * slow path.
 */
static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
			     int sync, void *arg)
{
	struct wait_page_queue *wpq;
	struct io_kiocb *req = wait->private;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct wait_page_key *key = arg;

	wpq = container_of(wait, struct wait_page_queue, wait);

	if (!wake_page_match(wpq, key))
		return 0;

	rw->kiocb.ki_flags &= ~IOCB_WAITQ;
	list_del_init(&wait->entry);
	io_req_task_queue(req);
	return 1;
}

/*
 * This controls whether a given IO request should be armed for async page
 * based retry. If we return false here, the request is handed to the async
 * worker threads for retry. If we're doing buffered reads on a regular file,
 * we prepare a private wait_page_queue entry and retry the operation. This
 * will either succeed because the page is now uptodate and unlocked, or it
 * will register a callback when the page is unlocked at IO completion. Through
 * that callback, io_uring uses task_work to setup a retry of the operation.
 * That retry will attempt the buffered read again. The retry will generally
 * succeed, or in rare cases where it fails, we then fall back to using the
 * async worker threads for a blocking retry.
 */
static bool io_rw_should_retry(struct io_kiocb *req)
{
	struct io_async_rw *io = req->async_data;
	struct wait_page_queue *wait = &io->wpq;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct kiocb *kiocb = &rw->kiocb;

	/* never retry for NOWAIT, we just complete with -EAGAIN */
	if (req->flags & REQ_F_NOWAIT)
		return false;

	/* Only for buffered IO */
	if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
		return false;

	/*
	 * just use poll if we can, and don't attempt if the fs doesn't
	 * support callback based unlocks
	 */
	if (io_file_can_poll(req) ||
	    !(req->file->f_op->fop_flags & FOP_BUFFER_RASYNC))
		return false;

	wait->wait.func = io_async_buf_func;
	wait->wait.private = req;
	wait->wait.flags = 0;
	INIT_LIST_HEAD(&wait->wait.entry);
	kiocb->ki_flags |= IOCB_WAITQ;
	kiocb->ki_flags &= ~IOCB_NOWAIT;
	kiocb->ki_waitq = wait;
	return true;
}

static inline int io_iter_do_read(struct io_rw *rw, struct iov_iter *iter)
{
	struct file *file = rw->kiocb.ki_filp;

	if (likely(file->f_op->read_iter))
		return file->f_op->read_iter(&rw->kiocb, iter);
	else if (file->f_op->read)
		return loop_rw_iter(READ, rw, iter);
	else
		return -EINVAL;
}

static bool need_complete_io(struct io_kiocb *req)
{
	return req->flags & REQ_F_ISREG ||
		S_ISBLK(file_inode(req->file)->i_mode);
}
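
/*
 * Per-issue kiocb setup: validate the file mode, apply the rw_flags from
 * the SQE, decide whether the request may block (REQ_F_NOWAIT), and pick
 * the completion callback depending on whether the ring uses IOPOLL.
 */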
static int io_rw_init_file(struct io_kiocb *req, fmode_t mode, int rw_type)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct kiocb *kiocb = &rw->kiocb;
	struct io_ring_ctx *ctx = req->ctx;
	struct file *file = req->file;
	int ret;

	if (unlikely(!(file->f_mode & mode)))
		return -EBADF;

	if (!(req->flags & REQ_F_FIXED_FILE))
		req->flags |= io_file_get_flags(file);

	kiocb->ki_flags = file->f_iocb_flags;
	ret = kiocb_set_rw_flags(kiocb, rw->flags, rw_type);
	if (unlikely(ret))
		return ret;
	kiocb->ki_flags |= IOCB_ALLOC_CACHE;

	/*
	 * If the file is marked O_NONBLOCK, still allow retry for it if it
	 * supports async. Otherwise it's impossible to use O_NONBLOCK files
	 * reliably. If not, or if IOCB_NOWAIT is set, don't retry.
	 */
	if (kiocb->ki_flags & IOCB_NOWAIT ||
	    ((file->f_flags & O_NONBLOCK && !(req->flags & REQ_F_SUPPORT_NOWAIT))))
		req->flags |= REQ_F_NOWAIT;

	if (ctx->flags & IORING_SETUP_IOPOLL) {
		if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll)
			return -EOPNOTSUPP;

		kiocb->private = NULL;
		kiocb->ki_flags |= IOCB_HIPRI;
		kiocb->ki_complete = io_complete_rw_iopoll;
		req->iopoll_completed = 0;
		if (ctx->flags & IORING_SETUP_HYBRID_IOPOLL) {
			/* make sure every req only blocks once */
			req->flags &= ~REQ_F_IOPOLL_STATE;
			req->iopoll_start = ktime_get_ns();
		}
	} else {
		if (kiocb->ki_flags & IOCB_HIPRI)
			return -EINVAL;
		kiocb->ki_complete = io_complete_rw;
	}

	return 0;
}
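
/*
 * Core read path shared by io_read() and io_read_mshot(). Handles the
 * nonblocking first pass and, for partial buffered reads, retries with the
 * page waitqueue armed (IOCB_WAITQ) so the read resumes once the page is
 * unlocked.
 */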
static int __io_read(struct io_kiocb *req, unsigned int issue_flags)
{
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct io_async_rw *io = req->async_data;
	struct kiocb *kiocb = &rw->kiocb;
	ssize_t ret;
	loff_t *ppos;

	if (io_do_buffer_select(req)) {
		ret = io_import_iovec(ITER_DEST, req, io, issue_flags);
		if (unlikely(ret < 0))
			return ret;
	}
	ret = io_rw_init_file(req, FMODE_READ, READ);
	if (unlikely(ret))
		return ret;
	req->cqe.res = iov_iter_count(&io->iter);

	if (force_nonblock) {
		/* If the file doesn't support async, just async punt */
		if (unlikely(!io_file_supports_nowait(req, EPOLLIN)))
			return -EAGAIN;
		kiocb->ki_flags |= IOCB_NOWAIT;
	} else {
		/* Ensure we clear previously set non-block flag */
		kiocb->ki_flags &= ~IOCB_NOWAIT;
	}

	ppos = io_kiocb_update_pos(req);

	ret = rw_verify_area(READ, req->file, ppos, req->cqe.res);
	if (unlikely(ret))
		return ret;

	ret = io_iter_do_read(rw, &io->iter);

	/*
	 * Some file systems like to return -EOPNOTSUPP for an IOCB_NOWAIT
	 * issue, even though they should be returning -EAGAIN. To be safe,
	 * retry from blocking context for either.
	 */
	if (ret == -EOPNOTSUPP && force_nonblock)
		ret = -EAGAIN;

	if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
		req->flags &= ~REQ_F_REISSUE;
		/* If we can poll, just do that. */
		if (io_file_can_poll(req))
			return -EAGAIN;
		/* IOPOLL retry should happen for io-wq threads */
		if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
			goto done;
		/* no retry on NONBLOCK nor RWF_NOWAIT */
		if (req->flags & REQ_F_NOWAIT)
			goto done;
		ret = 0;
	} else if (ret == -EIOCBQUEUED) {
		return IOU_ISSUE_SKIP_COMPLETE;
	} else if (ret == req->cqe.res || ret <= 0 || !force_nonblock ||
		   (req->flags & REQ_F_NOWAIT) || !need_complete_io(req)) {
		/* read all, failed, already did sync or don't want to retry */
		goto done;
	}

	/*
	 * Don't depend on the iter state matching what was consumed, or being
	 * untouched in case of error. Restore it and we'll advance it
	 * manually if we need to.
	 */
	iov_iter_restore(&io->iter, &io->iter_state);

	do {
		/*
		 * We end up here because of a partial read, either from
		 * above or inside this loop. Advance the iter by the bytes
		 * that were consumed.
		 */
		iov_iter_advance(&io->iter, ret);
		if (!iov_iter_count(&io->iter))
			break;
		io->bytes_done += ret;
		iov_iter_save_state(&io->iter, &io->iter_state);

		/* if we can retry, do so with the callbacks armed */
		if (!io_rw_should_retry(req)) {
			kiocb->ki_flags &= ~IOCB_WAITQ;
			return -EAGAIN;
		}

		req->cqe.res = iov_iter_count(&io->iter);
		/*
		 * Now retry read with the IOCB_WAITQ parts set in the iocb. If
		 * we get -EIOCBQUEUED, then we'll get a notification when the
		 * desired page gets unlocked. We can also get a partial read
		 * here, and if we do, then just retry at the new offset.
		 */
		ret = io_iter_do_read(rw, &io->iter);
		if (ret == -EIOCBQUEUED)
			return IOU_ISSUE_SKIP_COMPLETE;
		/* we got some bytes, but not all. retry. */
		kiocb->ki_flags &= ~IOCB_WAITQ;
		iov_iter_restore(&io->iter, &io->iter_state);
	} while (ret > 0);
done:
	/* it's faster to check here than delegate to kfree */
	return ret;
}

int io_read(struct io_kiocb *req, unsigned int issue_flags)
{
	int ret;

	ret = __io_read(req, issue_flags);
	if (ret >= 0)
		return kiocb_done(req, ret, issue_flags);

	return ret;
}

int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	unsigned int cflags = 0;
	int ret;

	/*
	 * Multishot MUST be used on a pollable file
	 */
	if (!io_file_can_poll(req))
		return -EBADFD;

	ret = __io_read(req, issue_flags);

	/*
	 * If we get -EAGAIN, recycle our buffer and just let normal poll
	 * handling arm it again and retry the read.
	 */
	if (ret == -EAGAIN) {
		/*
		 * Reset rw->len to 0 again to avoid clamping future mshot
		 * reads, in case the buffer size varies.
		 */
		if (io_kbuf_recycle(req, issue_flags))
			rw->len = 0;
		if (issue_flags & IO_URING_F_MULTISHOT)
			return IOU_ISSUE_SKIP_COMPLETE;
		return -EAGAIN;
	} else if (ret <= 0) {
		io_kbuf_recycle(req, issue_flags);
		if (ret < 0)
			req_set_fail(req);
	} else {
		/*
		 * Any successful return value will keep the multishot read
		 * armed, if it's still set. Put our buffer and post a CQE. If
		 * we fail to post a CQE, or multishot is no longer set, then
		 * jump to the termination path. This request is then done.
		 */
		cflags = io_put_kbuf(req, ret, issue_flags);
		rw->len = 0; /* similarly to above, reset len to 0 */

		if (io_req_post_cqe(req, ret, cflags | IORING_CQE_F_MORE)) {
			if (issue_flags & IO_URING_F_MULTISHOT) {
				/*
				 * Force retry, as we might have more data to
				 * be read and otherwise it won't get retried
				 * until (if ever) another poll is triggered.
				 */
				io_poll_multishot_retry(req);
				return IOU_ISSUE_SKIP_COMPLETE;
			}
			return -EAGAIN;
		}
	}

	/*
	 * Either an error, or we've hit overflow posting the CQE. For any
	 * multishot request, hitting overflow will terminate it.
	 */
	io_req_set_res(req, ret, cflags);
	io_req_rw_cleanup(req, issue_flags);
	if (issue_flags & IO_URING_F_MULTISHOT)
		return IOU_STOP_MULTISHOT;
	return IOU_OK;
}

static bool io_kiocb_start_write(struct io_kiocb *req, struct kiocb *kiocb)
{
	struct inode *inode;
	bool ret;

	if (!(req->flags & REQ_F_ISREG))
		return true;
	if (!(kiocb->ki_flags & IOCB_NOWAIT)) {
		kiocb_start_write(kiocb);
		return true;
	}

	inode = file_inode(kiocb->ki_filp);
	ret = sb_start_write_trylock(inode->i_sb);
	if (ret)
		__sb_writers_release(inode->i_sb, SB_FREEZE_WRITE);
	return ret;
}
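
/*
 * Write issue path. Nonblocking attempts that can't make progress are
 * punted to io-wq; a short write on a regular file or bdev is finished
 * from the worker, with bytes_done accounting for what was already
 * written.
 */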
int io_write(struct io_kiocb *req, unsigned int issue_flags)
{
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
	struct io_async_rw *io = req->async_data;
	struct kiocb *kiocb = &rw->kiocb;
	ssize_t ret, ret2;
	loff_t *ppos;

	ret = io_rw_init_file(req, FMODE_WRITE, WRITE);
	if (unlikely(ret))
		return ret;
	req->cqe.res = iov_iter_count(&io->iter);

	if (force_nonblock) {
		/* If the file doesn't support async, just async punt */
		if (unlikely(!io_file_supports_nowait(req, EPOLLOUT)))
			goto ret_eagain;

		/* Check if we can support NOWAIT. */
		if (!(kiocb->ki_flags & IOCB_DIRECT) &&
		    !(req->file->f_op->fop_flags & FOP_BUFFER_WASYNC) &&
		    (req->flags & REQ_F_ISREG))
			goto ret_eagain;

		kiocb->ki_flags |= IOCB_NOWAIT;
	} else {
		/* Ensure we clear previously set non-block flag */
		kiocb->ki_flags &= ~IOCB_NOWAIT;
	}

	ppos = io_kiocb_update_pos(req);

	ret = rw_verify_area(WRITE, req->file, ppos, req->cqe.res);
	if (unlikely(ret))
		return ret;

	if (unlikely(!io_kiocb_start_write(req, kiocb)))
		return -EAGAIN;
	kiocb->ki_flags |= IOCB_WRITE;

	if (likely(req->file->f_op->write_iter))
		ret2 = req->file->f_op->write_iter(kiocb, &io->iter);
	else if (req->file->f_op->write)
		ret2 = loop_rw_iter(WRITE, rw, &io->iter);
	else
		ret2 = -EINVAL;

	if (req->flags & REQ_F_REISSUE) {
		req->flags &= ~REQ_F_REISSUE;
		ret2 = -EAGAIN;
	}

	/*
	 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
	 * retry them without IOCB_NOWAIT.
	 */
	if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
		ret2 = -EAGAIN;
	/* no retry on NONBLOCK nor RWF_NOWAIT */
	if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
		goto done;
	if (!force_nonblock || ret2 != -EAGAIN) {
		/* IOPOLL retry should happen for io-wq threads */
		if (ret2 == -EAGAIN && (req->ctx->flags & IORING_SETUP_IOPOLL))
			goto ret_eagain;

		if (ret2 != req->cqe.res && ret2 >= 0 && need_complete_io(req)) {
			trace_io_uring_short_write(req->ctx, kiocb->ki_pos - ret2,
						req->cqe.res, ret2);

			/* This is a partial write. The file pos has already been
			 * updated, setup the async struct to complete the request
			 * in the worker. Also update bytes_done to account for
			 * the bytes already written.
			 */
			iov_iter_save_state(&io->iter, &io->iter_state);
			io->bytes_done += ret2;

			if (kiocb->ki_flags & IOCB_WRITE)
				io_req_end_write(req);
			return -EAGAIN;
		}
done:
		return kiocb_done(req, ret2, issue_flags);
	} else {
ret_eagain:
		iov_iter_restore(&io->iter, &io->iter_state);
		if (kiocb->ki_flags & IOCB_WRITE)
			io_req_end_write(req);
		return -EAGAIN;
	}
}

void io_rw_fail(struct io_kiocb *req)
{
	int res;

	res = io_fixup_rw_res(req, req->cqe.res);
	io_req_set_res(req, res, req->cqe.flags);
}

static int io_uring_classic_poll(struct io_kiocb *req, struct io_comp_batch *iob,
				 unsigned int poll_flags)
{
	struct file *file = req->file;

	if (req->opcode == IORING_OP_URING_CMD) {
		struct io_uring_cmd *ioucmd;

		ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
		return file->f_op->uring_cmd_iopoll(ioucmd, iob, poll_flags);
	} else {
		struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);

		return file->f_op->iopoll(&rw->kiocb, iob, poll_flags);
	}
}

static u64 io_hybrid_iopoll_delay(struct io_ring_ctx *ctx, struct io_kiocb *req)
{
	struct hrtimer_sleeper timer;
	enum hrtimer_mode mode;
	ktime_t kt;
	u64 sleep_time;

	if (req->flags & REQ_F_IOPOLL_STATE)
		return 0;

	if (ctx->hybrid_poll_time == LLONG_MAX)
		return 0;

	/* Use half the running time to do the schedule */
	sleep_time = ctx->hybrid_poll_time / 2;

	kt = ktime_set(0, sleep_time);
	req->flags |= REQ_F_IOPOLL_STATE;

	mode = HRTIMER_MODE_REL;
	hrtimer_setup_sleeper_on_stack(&timer, CLOCK_MONOTONIC, mode);
	hrtimer_set_expires(&timer.timer, kt);
	set_current_state(TASK_INTERRUPTIBLE);
	hrtimer_sleeper_start_expires(&timer, mode);

	if (timer.task)
		io_schedule();

	hrtimer_cancel(&timer.timer);
	__set_current_state(TASK_RUNNING);
	destroy_hrtimer_on_stack(&timer.timer);
	return sleep_time;
}

static int io_uring_hybrid_poll(struct io_kiocb *req,
				struct io_comp_batch *iob, unsigned int poll_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	u64 runtime, sleep_time;
	int ret;

	sleep_time = io_hybrid_iopoll_delay(ctx, req);
	ret = io_uring_classic_poll(req, iob, poll_flags);
	runtime = ktime_get_ns() - req->iopoll_start - sleep_time;

	/*
	 * Use minimum sleep time if we're polling devices with different
	 * latencies. We could get more completions from the faster ones.
	 */
	if (ctx->hybrid_poll_time > runtime)
		ctx->hybrid_poll_time = runtime;

	return ret;
}
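
/*
 * Reap completions for IOPOLL requests on this ring: poll each pending
 * request (classic or hybrid), then flush CQEs for everything that has
 * been marked ->iopoll_completed.
 */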
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
{
	struct io_wq_work_node *pos, *start, *prev;
	unsigned int poll_flags = 0;
	DEFINE_IO_COMP_BATCH(iob);
	int nr_events = 0;

	/*
	 * Only spin for completions if we don't have multiple devices hanging
	 * off our complete list.
	 */
	if (ctx->poll_multi_queue || force_nonspin)
		poll_flags |= BLK_POLL_ONESHOT;

	wq_list_for_each(pos, start, &ctx->iopoll_list) {
		struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
		int ret;

		/*
		 * Move completed and retryable entries to our local lists.
		 * If we find a request that requires polling, break out
		 * and complete those lists first, if we have entries there.
		 */
		if (READ_ONCE(req->iopoll_completed))
			break;

		if (ctx->flags & IORING_SETUP_HYBRID_IOPOLL)
			ret = io_uring_hybrid_poll(req, &iob, poll_flags);
		else
			ret = io_uring_classic_poll(req, &iob, poll_flags);

		if (unlikely(ret < 0))
			return ret;
		else if (ret)
			poll_flags |= BLK_POLL_ONESHOT;

		/* iopoll may have completed current req */
		if (!rq_list_empty(&iob.req_list) ||
		    READ_ONCE(req->iopoll_completed))
			break;
	}

	if (!rq_list_empty(&iob.req_list))
		iob.complete(&iob);
	else if (!pos)
		return 0;

	prev = start;
	wq_list_for_each_resume(pos, prev) {
		struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);

		/* order with io_complete_rw_iopoll(), e.g. ->result updates */
		if (!smp_load_acquire(&req->iopoll_completed))
			break;
		nr_events++;
		req->cqe.flags = io_put_kbuf(req, req->cqe.res, 0);
		if (req->opcode != IORING_OP_URING_CMD)
			io_req_rw_cleanup(req, 0);
	}
	if (unlikely(!nr_events))
		return 0;

	pos = start ? start->next : ctx->iopoll_list.first;
	wq_list_cut(&ctx->iopoll_list, prev, start);

	if (WARN_ON_ONCE(!wq_list_empty(&ctx->submit_state.compl_reqs)))
		return 0;
	ctx->submit_state.compl_reqs.first = pos;
	__io_submit_flush_completions(ctx);
	return nr_events;
}

void io_rw_cache_free(const void *entry)
{
	struct io_async_rw *rw = (struct io_async_rw *) entry;

	if (rw->free_iovec) {
		kasan_mempool_unpoison_object(rw->free_iovec,
					      rw->free_iov_nr * sizeof(struct iovec));
		io_rw_iovec_free(rw);
	}
}